aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c527.c4
-rw-r--r--drivers/net/3c527.h6
-rw-r--r--drivers/net/3c59x.c392
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/8139too.c8
-rw-r--r--drivers/net/82596.c64
-rw-r--r--drivers/net/Kconfig47
-rw-r--r--drivers/net/Makefile6
-rw-r--r--drivers/net/Space.c6
-rw-r--r--drivers/net/ac3200.c2
-rw-r--r--drivers/net/appletalk/ipddp.c2
-rw-r--r--drivers/net/arcnet/capmode.c177
-rw-r--r--drivers/net/arcnet/com20020-isa.c4
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c14
-rw-r--r--drivers/net/arm/w90p910_ether.c3
-rw-r--r--drivers/net/at1700.c4
-rw-r--r--drivers/net/atl1c/atl1c.h9
-rw-r--r--drivers/net/atl1c/atl1c_hw.c107
-rw-r--r--drivers/net/atl1c/atl1c_hw.h49
-rw-r--r--drivers/net/atl1c/atl1c_main.c348
-rw-r--r--drivers/net/atlx/atl1.h4
-rw-r--r--drivers/net/au1000_eth.c33
-rw-r--r--drivers/net/ax88796.c4
-rw-r--r--drivers/net/b44.c146
-rw-r--r--drivers/net/bcm63xx_enet.c2
-rw-r--r--drivers/net/benet/be.h31
-rw-r--r--drivers/net/benet/be_cmds.c91
-rw-r--r--drivers/net/benet/be_cmds.h48
-rw-r--r--drivers/net/benet/be_ethtool.c58
-rw-r--r--drivers/net/benet/be_hw.h14
-rw-r--r--drivers/net/benet/be_main.c376
-rw-r--r--drivers/net/bfin_mac.c123
-rw-r--r--drivers/net/bfin_mac.h5
-rw-r--r--drivers/net/bnx2.c264
-rw-r--r--drivers/net/bnx2.h12
-rw-r--r--drivers/net/bnx2x/Makefile7
-rw-r--r--drivers/net/bnx2x/bnx2x.h (renamed from drivers/net/bnx2x.h)235
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c2252
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h652
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h (renamed from drivers/net/bnx2x_dump.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c1971
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_defs.h (renamed from drivers/net/bnx2x_fw_defs.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_file_hdr.h (renamed from drivers/net/bnx2x_fw_file_hdr.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h (renamed from drivers/net/bnx2x_hsi.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h (renamed from drivers/net/bnx2x_init.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h (renamed from drivers/net/bnx2x_init_ops.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c (renamed from drivers/net/bnx2x_link.c)8
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h (renamed from drivers/net/bnx2x_link.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c (renamed from drivers/net/bnx2x_main.c)5976
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h (renamed from drivers/net/bnx2x_reg.h)0
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c1411
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h239
-rw-r--r--drivers/net/bonding/bond_alb.c37
-rw-r--r--drivers/net/bonding/bond_ipv6.c2
-rw-r--r--drivers/net/bonding/bond_main.c222
-rw-r--r--drivers/net/bonding/bond_sysfs.c316
-rw-r--r--drivers/net/bonding/bonding.h14
-rw-r--r--drivers/net/caif/Kconfig22
-rw-r--r--drivers/net/caif/Makefile14
-rw-r--r--drivers/net/caif/caif_serial.c12
-rw-r--r--drivers/net/caif/caif_spi.c850
-rw-r--r--drivers/net/caif/caif_spi_slave.c252
-rw-r--r--drivers/net/can/Kconfig9
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/flexcan.c1030
-rw-r--r--drivers/net/can/mscan/mscan.h2
-rw-r--r--drivers/net/can/usb/Kconfig6
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c1132
-rw-r--r--drivers/net/cassini.c25
-rw-r--r--drivers/net/cassini.h4
-rw-r--r--drivers/net/chelsio/common.h1
-rw-r--r--drivers/net/chelsio/subr.c49
-rw-r--r--drivers/net/cnic.c373
-rw-r--r--drivers/net/cnic.h23
-rw-r--r--drivers/net/cnic_if.h4
-rw-r--r--drivers/net/cpmac.c13
-rw-r--r--drivers/net/cris/eth_v10.c4
-rw-r--r--drivers/net/cs89x0.c162
-rw-r--r--drivers/net/cs89x0.h4
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c12
-rw-r--r--drivers/net/cxgb3/sge.c14
-rw-r--r--drivers/net/cxgb3/t3_hw.c16
-rw-r--r--drivers/net/cxgb3/version.h4
-rw-r--r--drivers/net/cxgb4/cxgb4.h11
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c755
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h6
-rw-r--r--drivers/net/cxgb4/l2t.c7
-rw-r--r--drivers/net/cxgb4/sge.c67
-rw-r--r--drivers/net/cxgb4/t4_hw.c109
-rw-r--r--drivers/net/cxgb4/t4_hw.h45
-rw-r--r--drivers/net/cxgb4/t4_msg.h16
-rw-r--r--drivers/net/cxgb4/t4_regs.h7
-rw-r--r--drivers/net/cxgb4/t4fw_api.h63
-rw-r--r--drivers/net/cxgb4vf/Makefile7
-rw-r--r--drivers/net/cxgb4vf/adapter.h540
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c2888
-rw-r--r--drivers/net/cxgb4vf/sge.c2454
-rw-r--r--drivers/net/cxgb4vf/t4vf_common.h273
-rw-r--r--drivers/net/cxgb4vf/t4vf_defs.h121
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c1333
-rw-r--r--drivers/net/davinci_emac.c200
-rw-r--r--drivers/net/depca.c29
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/dnet.c9
-rw-r--r--drivers/net/e1000/e1000.h22
-rw-r--r--drivers/net/e1000/e1000_ethtool.c27
-rw-r--r--drivers/net/e1000/e1000_main.c86
-rw-r--r--drivers/net/e1000e/82571.c2
-rw-r--r--drivers/net/e1000e/defines.h4
-rw-r--r--drivers/net/e1000e/e1000.h10
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c144
-rw-r--r--drivers/net/e1000e/hw.h17
-rw-r--r--drivers/net/e1000e/ich8lan.c456
-rw-r--r--drivers/net/e1000e/lib.c2
-rw-r--r--drivers/net/e1000e/netdev.c359
-rw-r--r--drivers/net/e1000e/param.c2
-rw-r--r--drivers/net/e1000e/phy.c5
-rw-r--r--drivers/net/ehea/ehea_main.c2
-rw-r--r--drivers/net/ehea/ehea_qmr.h2
-rw-r--r--drivers/net/enic/cq_desc.h2
-rw-r--r--drivers/net/enic/cq_enet_desc.h20
-rw-r--r--drivers/net/enic/enic.h21
-rw-r--r--drivers/net/enic/enic_main.c517
-rw-r--r--drivers/net/enic/enic_res.c53
-rw-r--r--drivers/net/enic/enic_res.h33
-rw-r--r--drivers/net/enic/rq_enet_desc.h2
-rw-r--r--drivers/net/enic/vnic_cq.c4
-rw-r--r--drivers/net/enic/vnic_cq.h2
-rw-r--r--drivers/net/enic/vnic_dev.c272
-rw-r--r--drivers/net/enic/vnic_dev.h21
-rw-r--r--drivers/net/enic/vnic_devcmd.h35
-rw-r--r--drivers/net/enic/vnic_enet.h4
-rw-r--r--drivers/net/enic/vnic_intr.c5
-rw-r--r--drivers/net/enic/vnic_intr.h8
-rw-r--r--drivers/net/enic/vnic_nic.h2
-rw-r--r--drivers/net/enic/vnic_resource.h2
-rw-r--r--drivers/net/enic/vnic_rq.c40
-rw-r--r--drivers/net/enic/vnic_rq.h16
-rw-r--r--drivers/net/enic/vnic_rss.h2
-rw-r--r--drivers/net/enic/vnic_stats.h2
-rw-r--r--drivers/net/enic/vnic_vic.c8
-rw-r--r--drivers/net/enic/vnic_vic.h2
-rw-r--r--drivers/net/enic/vnic_wq.c25
-rw-r--r--drivers/net/enic/vnic_wq.h16
-rw-r--r--drivers/net/enic/wq_enet_desc.h2
-rw-r--r--drivers/net/epic100.c47
-rw-r--r--drivers/net/eth16i.c4
-rw-r--r--drivers/net/ethoc.c160
-rw-r--r--drivers/net/fealnx.c68
-rw-r--r--drivers/net/fec.c110
-rw-r--r--drivers/net/fec_mpc52xx.c40
-rw-r--r--drivers/net/fec_mpc52xx_phy.c24
-rw-r--r--drivers/net/forcedeth.c62
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/fsl_pq_mdio.c1
-rw-r--r--drivers/net/fsl_pq_mdio.h2
-rw-r--r--drivers/net/gianfar.c154
-rw-r--r--drivers/net/gianfar.h13
-rw-r--r--drivers/net/greth.c1
-rw-r--r--drivers/net/hamachi.c63
-rw-r--r--drivers/net/hp100.c55
-rw-r--r--drivers/net/ibm_newemac/core.c6
-rw-r--r--drivers/net/igb/e1000_82575.c143
-rw-r--r--drivers/net/igb/e1000_defines.h16
-rw-r--r--drivers/net/igb/igb_ethtool.c8
-rw-r--r--drivers/net/igb/igb_main.c43
-rw-r--r--drivers/net/igbvf/netdev.c11
-rw-r--r--drivers/net/ioc3-eth.c49
-rw-r--r--drivers/net/irda/donauboe.h2
-rw-r--r--drivers/net/irda/irda-usb.h2
-rw-r--r--drivers/net/irda/ks959-sir.c2
-rw-r--r--drivers/net/irda/ksdazzle-sir.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c5
-rw-r--r--drivers/net/irda/vlsi_ir.h6
-rw-r--r--drivers/net/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c18
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h25
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c107
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c56
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c425
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c33
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h1
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c30
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c18
-rw-r--r--drivers/net/jazzsonic.c17
-rw-r--r--drivers/net/ks8842.c706
-rw-r--r--drivers/net/ksz884x.c61
-rw-r--r--drivers/net/lance.c56
-rw-r--r--drivers/net/ll_temac_main.c65
-rw-r--r--drivers/net/loopback.c33
-rw-r--r--drivers/net/mac8390.c57
-rw-r--r--drivers/net/mac89x0.c52
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/macsonic.c37
-rw-r--r--drivers/net/macvlan.c95
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/mlx4/catas.c4
-rw-r--r--drivers/net/mlx4/en_ethtool.c38
-rw-r--r--drivers/net/mlx4/en_main.c29
-rw-r--r--drivers/net/mlx4/en_netdev.c1
-rw-r--r--drivers/net/mlx4/eq.c20
-rw-r--r--drivers/net/mlx4/main.c16
-rw-r--r--drivers/net/mlx4/mlx4.h15
-rw-r--r--drivers/net/mlx4/mlx4_en.h62
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/mv643xx_eth.c12
-rw-r--r--drivers/net/myri10ge/myri10ge.c10
-rw-r--r--drivers/net/myri_sbus.c6
-rw-r--r--drivers/net/natsemi.c56
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c16
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c13
-rw-r--r--drivers/net/ni52.c37
-rw-r--r--drivers/net/niu.c28
-rw-r--r--drivers/net/niu.h4
-rw-r--r--drivers/net/ns83820.c44
-rw-r--r--drivers/net/octeon/octeon_mgmt.c8
-rw-r--r--drivers/net/phy/broadcom.c46
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/marvell.c115
-rw-r--r--drivers/net/phy/mdio-octeon.c6
-rw-r--r--drivers/net/phy/micrel.c167
-rw-r--r--drivers/net/phy/phy.c8
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/ppp_generic.c37
-rw-r--r--drivers/net/pppoe.c3
-rw-r--r--drivers/net/ps3_gelic_wireless.h10
-rw-r--r--drivers/net/qla3xxx.c1435
-rw-r--r--drivers/net/qlcnic/qlcnic.h225
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c526
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c42
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h78
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c62
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c136
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1139
-rw-r--r--drivers/net/qlge/qlge.h30
-rw-r--r--drivers/net/qlge/qlge_dbg.c814
-rw-r--r--drivers/net/qlge/qlge_main.c47
-rw-r--r--drivers/net/qlge/qlge_mpi.c17
-rw-r--r--drivers/net/r6040.c302
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/s2io-regs.h2
-rw-r--r--drivers/net/s2io.c83
-rw-r--r--drivers/net/s2io.h2
-rw-r--r--drivers/net/sb1250-mac.c2
-rw-r--r--drivers/net/sfc/efx.c304
-rw-r--r--drivers/net/sfc/efx.h9
-rw-r--r--drivers/net/sfc/ethtool.c167
-rw-r--r--drivers/net/sfc/falcon.c200
-rw-r--r--drivers/net/sfc/falcon_boards.c35
-rw-r--r--drivers/net/sfc/falcon_xmac.c5
-rw-r--r--drivers/net/sfc/io.h37
-rw-r--r--drivers/net/sfc/mcdi.c98
-rw-r--r--drivers/net/sfc/mcdi_mac.c8
-rw-r--r--drivers/net/sfc/mcdi_phy.c41
-rw-r--r--drivers/net/sfc/mdio_10g.c39
-rw-r--r--drivers/net/sfc/mdio_10g.h3
-rw-r--r--drivers/net/sfc/mtd.c23
-rw-r--r--drivers/net/sfc/net_driver.h88
-rw-r--r--drivers/net/sfc/nic.c553
-rw-r--r--drivers/net/sfc/nic.h9
-rw-r--r--drivers/net/sfc/qt202x_phy.c42
-rw-r--r--drivers/net/sfc/rx.c469
-rw-r--r--drivers/net/sfc/selftest.c154
-rw-r--r--drivers/net/sfc/siena.c68
-rw-r--r--drivers/net/sfc/tenxpress.c12
-rw-r--r--drivers/net/sfc/tx.c41
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sh_eth.c57
-rw-r--r--drivers/net/sky2.c40
-rw-r--r--drivers/net/sky2.h6
-rw-r--r--drivers/net/smc91x.h37
-rw-r--r--drivers/net/smsc911x.c94
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/starfire.c47
-rw-r--r--drivers/net/stmmac/common.h1
-rw-r--r--drivers/net/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/stmmac/enh_desc.c2
-rw-r--r--drivers/net/stmmac/stmmac_main.c37
-rw-r--r--drivers/net/sun3_82586.c35
-rw-r--r--drivers/net/sunbmac.c6
-rw-r--r--drivers/net/sunhme.c10
-rw-r--r--drivers/net/sunlance.c6
-rw-r--r--drivers/net/sunqe.c10
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tehuti.h2
-rw-r--r--drivers/net/tg3.c799
-rw-r--r--drivers/net/tg3.h98
-rw-r--r--drivers/net/tulip/de2104x.c4
-rw-r--r--drivers/net/tulip/dmfe.c20
-rw-r--r--drivers/net/tulip/eeprom.c10
-rw-r--r--drivers/net/tulip/tulip.h64
-rw-r--r--drivers/net/tulip/tulip_core.c132
-rw-r--r--drivers/net/tulip/winbond-840.c4
-rw-r--r--drivers/net/tun.c10
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/typhoon.h26
-rw-r--r--drivers/net/ucc_geth.c16
-rw-r--r--drivers/net/ucc_geth.h46
-rw-r--r--drivers/net/usb/asix.c2
-rw-r--r--drivers/net/usb/cdc-phonet.c8
-rw-r--r--drivers/net/usb/hso.c8
-rw-r--r--drivers/net/usb/ipheth.c13
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/net1080.c4
-rw-r--r--drivers/net/usb/pegasus.c125
-rw-r--r--drivers/net/usb/pegasus.h296
-rw-r--r--drivers/net/usb/sierra_net.c2
-rw-r--r--drivers/net/usb/usbnet.c17
-rw-r--r--drivers/net/via-velocity.h12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c18
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxge/Makefile2
-rw-r--r--drivers/net/vxge/vxge-config.c4
-rw-r--r--drivers/net/vxge/vxge-config.h4
-rw-r--r--drivers/net/vxge/vxge-ethtool.c4
-rw-r--r--drivers/net/vxge/vxge-ethtool.h4
-rw-r--r--drivers/net/vxge/vxge-main.c599
-rw-r--r--drivers/net/vxge/vxge-main.h27
-rw-r--r--drivers/net/vxge/vxge-reg.h4
-rw-r--r--drivers/net/vxge/vxge-traffic.c8
-rw-r--r--drivers/net/vxge/vxge-traffic.h4
-rw-r--r--drivers/net/vxge/vxge-version.h9
-rw-r--r--drivers/net/wan/cosa.c10
-rw-r--r--drivers/net/wan/farsync.c125
-rw-r--r--drivers/net/wan/hd64570.h2
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c2
-rw-r--r--drivers/net/wan/sdla.c13
-rw-r--r--drivers/net/wd.c4
-rw-r--r--drivers/net/wimax/i2400m/control.c4
-rw-r--r--drivers/net/wimax/i2400m/fw.c8
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c2
-rw-r--r--drivers/net/wireless/adm8211.c58
-rw-r--r--drivers/net/wireless/adm8211.h6
-rw-r--r--drivers/net/wireless/airo.c56
-rw-r--r--drivers/net/wireless/at76c50x-usb.c270
-rw-r--r--drivers/net/wireless/at76c50x-usb.h41
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c7
-rw-r--r--drivers/net/wireless/ath/ar9170/led.c4
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c191
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h19
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c438
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c99
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c152
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h310
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c13
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c82
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c64
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c116
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c743
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h78
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h1319
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c492
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h2479
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c196
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h8251
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c71
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h33
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h (renamed from drivers/net/wireless/ath/ath9k/ar9003_initvals.h)254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1785
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c185
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c714
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c699
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h298
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h92
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c138
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c318
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h77
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c68
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c616
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c71
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h38
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c163
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c512
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c314
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h123
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c475
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c633
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h89
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c292
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h90
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c251
-rw-r--r--drivers/net/wireless/b43/b43.h6
-rw-r--r--drivers/net/wireless/b43/dma.c69
-rw-r--r--drivers/net/wireless/b43/dma.h8
-rw-r--r--drivers/net/wireless/b43/main.c4
-rw-r--r--drivers/net/wireless/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/b43/phy_lp.c8
-rw-r--r--drivers/net/wireless/b43/phy_n.c16
-rw-r--r--drivers/net/wireless/b43/sdio.c1
-rw-r--r--drivers/net/wireless/b43/wa.c8
-rw-r--r--drivers/net/wireless/b43/xmit.h20
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h6
-rw-r--r--drivers/net/wireless/b43legacy/dma.c49
-rw-r--r--drivers/net/wireless/b43legacy/dma.h8
-rw-r--r--drivers/net/wireless/b43legacy/xmit.h10
-rw-r--r--drivers/net/wireless/hostap/hostap_80211.h18
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c5
-rw-r--r--drivers/net/wireless/hostap/hostap_common.h10
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h32
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c34
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h16
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c7
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h122
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h63
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c4
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_tx.c16
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c208
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c98
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c80
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c538
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c (renamed from drivers/net/wireless/iwlwifi/iwl-calib.c)223
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c239
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c320
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c351
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c89
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c123
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c594
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h48
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h422
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c395
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c144
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h123
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c246
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c164
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c160
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c12
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h50
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h32
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h60
-rw-r--r--drivers/net/wireless/libertas/Makefile3
-rw-r--r--drivers/net/wireless/libertas/README12
-rw-r--r--drivers/net/wireless/libertas/assoc.c2264
-rw-r--r--drivers/net/wireless/libertas/assoc.h155
-rw-r--r--drivers/net/wireless/libertas/cfg.c1861
-rw-r--r--drivers/net/wireless/libertas/cfg.h15
-rw-r--r--drivers/net/wireless/libertas/cmd.c767
-rw-r--r--drivers/net/wireless/libertas/cmd.h27
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c190
-rw-r--r--drivers/net/wireless/libertas/debugfs.c191
-rw-r--r--drivers/net/wireless/libertas/decl.h7
-rw-r--r--drivers/net/wireless/libertas/defs.h18
-rw-r--r--drivers/net/wireless/libertas/dev.h68
-rw-r--r--drivers/net/wireless/libertas/ethtool.c29
-rw-r--r--drivers/net/wireless/libertas/host.h250
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c58
-rw-r--r--drivers/net/wireless/libertas/if_usb.c16
-rw-r--r--drivers/net/wireless/libertas/main.c326
-rw-r--r--drivers/net/wireless/libertas/mesh.c222
-rw-r--r--drivers/net/wireless/libertas/mesh.h19
-rw-r--r--drivers/net/wireless/libertas/radiotap.h4
-rw-r--r--drivers/net/wireless/libertas/rx.c129
-rw-r--r--drivers/net/wireless/libertas/scan.c1354
-rw-r--r--drivers/net/wireless/libertas/scan.h63
-rw-r--r--drivers/net/wireless/libertas/tx.c12
-rw-r--r--drivers/net/wireless/libertas/types.h66
-rw-r--r--drivers/net/wireless/libertas/wext.c2353
-rw-r--r--drivers/net/wireless/libertas/wext.h17
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c5
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h7
-rw-r--r--drivers/net/wireless/libertas_tf/main.c18
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c106
-rw-r--r--drivers/net/wireless/mwl8k.c254
-rw-r--r--drivers/net/wireless/orinoco/cfg.c5
-rw-r--r--drivers/net/wireless/orinoco/fw.c2
-rw-r--r--drivers/net/wireless/orinoco/hermes.h18
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c10
-rw-r--r--drivers/net/wireless/orinoco/hw.c6
-rw-r--r--drivers/net/wireless/orinoco/main.c10
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c18
-rw-r--r--drivers/net/wireless/orinoco/wext.c6
-rw-r--r--drivers/net/wireless/p54/eeprom.c80
-rw-r--r--drivers/net/wireless/p54/fwio.c53
-rw-r--r--drivers/net/wireless/p54/led.c8
-rw-r--r--drivers/net/wireless/p54/main.c17
-rw-r--r--drivers/net/wireless/p54/net2280.h16
-rw-r--r--drivers/net/wireless/p54/p54pci.c3
-rw-r--r--drivers/net/wireless/p54/p54pci.h6
-rw-r--r--drivers/net/wireless/p54/p54spi.c5
-rw-r--r--drivers/net/wireless/p54/p54spi.h2
-rw-r--r--drivers/net/wireless/p54/p54usb.c6
-rw-r--r--drivers/net/wireless/p54/p54usb.h6
-rw-r--r--drivers/net/wireless/p54/txrx.c36
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c15
-rw-r--r--drivers/net/wireless/prism54/isl_oid.h18
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h4
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h2
-rw-r--r--drivers/net/wireless/ray_cs.c23
-rw-r--r--drivers/net/wireless/rndis_wlan.c90
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c85
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c87
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c63
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h93
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c734
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h54
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c366
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h19
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c293
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h37
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h69
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c139
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c73
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c75
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h20
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c118
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h39
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c130
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c59
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c34
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_grf5101.c12
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_max2820.c19
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_rtl8225.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_sa2400.c28
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_rtl8225.c8
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h3
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig4
-rw-r--r--drivers/net/wireless/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h102
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h34
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_event.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c27
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c40
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c10
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h14
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h58
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c41
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h117
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c275
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h124
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h16
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ini.h123
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c289
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c11
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_scan.c257
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_scan.h109
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c11
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c36
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h26
-rw-r--r--drivers/net/wireless/wl3501.h16
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h15
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h14
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/net/xilinx_emaclite.c2
-rw-r--r--drivers/net/xtsonic.c12
654 files changed, 54742 insertions, 36853 deletions
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 38395dfa4963..70705d1306b9 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -729,14 +729,14 @@ static void mc32_halt_transceiver(struct net_device *dev)
729 * mc32_load_rx_ring - load the ring of receive buffers 729 * mc32_load_rx_ring - load the ring of receive buffers
730 * @dev: 3c527 to build the ring for 730 * @dev: 3c527 to build the ring for
731 * 731 *
732 * This initalises the on-card and driver datastructures to 732 * This initialises the on-card and driver datastructures to
733 * the point where mc32_start_transceiver() can be called. 733 * the point where mc32_start_transceiver() can be called.
734 * 734 *
735 * The card sets up the receive ring for us. We are required to use the 735 * The card sets up the receive ring for us. We are required to use the
736 * ring it provides, although the size of the ring is configurable. 736 * ring it provides, although the size of the ring is configurable.
737 * 737 *
738 * We allocate an sk_buff for each ring entry in turn and 738 * We allocate an sk_buff for each ring entry in turn and
739 * initalise its house-keeping info. At the same time, we read 739 * initialise its house-keeping info. At the same time, we read
740 * each 'next' pointer in our rx_ring array. This reduces slow 740 * each 'next' pointer in our rx_ring array. This reduces slow
741 * shared-memory reads and makes it easy to access predecessor 741 * shared-memory reads and makes it easy to access predecessor
742 * descriptors. 742 * descriptors.
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
index 75e28fef797b..d693b8d15cde 100644
--- a/drivers/net/3c527.h
+++ b/drivers/net/3c527.h
@@ -34,7 +34,7 @@ struct mc32_mailbox
34{ 34{
35 u16 mbox; 35 u16 mbox;
36 u16 data[1]; 36 u16 data[1];
37} __attribute((packed)); 37} __packed;
38 38
39struct skb_header 39struct skb_header
40{ 40{
@@ -43,7 +43,7 @@ struct skb_header
43 u16 next; /* Do not change! */ 43 u16 next; /* Do not change! */
44 u16 length; 44 u16 length;
45 u32 data; 45 u32 data;
46} __attribute((packed)); 46} __packed;
47 47
48struct mc32_stats 48struct mc32_stats
49{ 49{
@@ -68,7 +68,7 @@ struct mc32_stats
68 u32 dataA[6]; 68 u32 dataA[6];
69 u16 dataB[5]; 69 u16 dataB[5];
70 u32 dataC[14]; 70 u32 dataC[14];
71} __attribute((packed)); 71} __packed;
72 72
73#define STATUS_MASK 0x0F 73#define STATUS_MASK 0x0F
74#define COMPLETED (1<<7) 74#define COMPLETED (1<<7)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index d75803e6e527..c754d88e5ec9 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -435,7 +435,6 @@ MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
435 First the windows. There are eight register windows, with the command 435 First the windows. There are eight register windows, with the command
436 and status registers available in each. 436 and status registers available in each.
437 */ 437 */
438#define EL3WINDOW(win_num) iowrite16(SelectWindow + (win_num), ioaddr + EL3_CMD)
439#define EL3_CMD 0x0e 438#define EL3_CMD 0x0e
440#define EL3_STATUS 0x0e 439#define EL3_STATUS 0x0e
441 440
@@ -645,10 +644,51 @@ struct vortex_private {
645 u16 deferred; /* Resend these interrupts when we 644 u16 deferred; /* Resend these interrupts when we
646 * bale from the ISR */ 645 * bale from the ISR */
647 u16 io_size; /* Size of PCI region (for release_region) */ 646 u16 io_size; /* Size of PCI region (for release_region) */
648 spinlock_t lock; /* Serialise access to device & its vortex_private */ 647
649 struct mii_if_info mii; /* MII lib hooks/info */ 648 /* Serialises access to hardware other than MII and variables below.
649 * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
650 spinlock_t lock;
651
652 spinlock_t mii_lock; /* Serialises access to MII */
653 struct mii_if_info mii; /* MII lib hooks/info */
654 spinlock_t window_lock; /* Serialises access to windowed regs */
655 int window; /* Register window */
650}; 656};
651 657
658static void window_set(struct vortex_private *vp, int window)
659{
660 if (window != vp->window) {
661 iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
662 vp->window = window;
663 }
664}
665
666#define DEFINE_WINDOW_IO(size) \
667static u ## size \
668window_read ## size(struct vortex_private *vp, int window, int addr) \
669{ \
670 unsigned long flags; \
671 u ## size ret; \
672 spin_lock_irqsave(&vp->window_lock, flags); \
673 window_set(vp, window); \
674 ret = ioread ## size(vp->ioaddr + addr); \
675 spin_unlock_irqrestore(&vp->window_lock, flags); \
676 return ret; \
677} \
678static void \
679window_write ## size(struct vortex_private *vp, u ## size value, \
680 int window, int addr) \
681{ \
682 unsigned long flags; \
683 spin_lock_irqsave(&vp->window_lock, flags); \
684 window_set(vp, window); \
685 iowrite ## size(value, vp->ioaddr + addr); \
686 spin_unlock_irqrestore(&vp->window_lock, flags); \
687}
688DEFINE_WINDOW_IO(8)
689DEFINE_WINDOW_IO(16)
690DEFINE_WINDOW_IO(32)
691
652#ifdef CONFIG_PCI 692#ifdef CONFIG_PCI
653#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL) 693#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
654#else 694#else
@@ -711,7 +751,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
711static int vortex_up(struct net_device *dev); 751static int vortex_up(struct net_device *dev);
712static void vortex_down(struct net_device *dev, int final); 752static void vortex_down(struct net_device *dev, int final);
713static int vortex_open(struct net_device *dev); 753static int vortex_open(struct net_device *dev);
714static void mdio_sync(void __iomem *ioaddr, int bits); 754static void mdio_sync(struct vortex_private *vp, int bits);
715static int mdio_read(struct net_device *dev, int phy_id, int location); 755static int mdio_read(struct net_device *dev, int phy_id, int location);
716static void mdio_write(struct net_device *vp, int phy_id, int location, int value); 756static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
717static void vortex_timer(unsigned long arg); 757static void vortex_timer(unsigned long arg);
@@ -980,10 +1020,16 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
980 ioaddr = pci_iomap(pdev, pci_bar, 0); 1020 ioaddr = pci_iomap(pdev, pci_bar, 0);
981 if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ 1021 if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
982 ioaddr = pci_iomap(pdev, 0, 0); 1022 ioaddr = pci_iomap(pdev, 0, 0);
1023 if (!ioaddr) {
1024 pci_disable_device(pdev);
1025 rc = -ENOMEM;
1026 goto out;
1027 }
983 1028
984 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, 1029 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
985 ent->driver_data, unit); 1030 ent->driver_data, unit);
986 if (rc < 0) { 1031 if (rc < 0) {
1032 pci_iounmap(pdev, ioaddr);
987 pci_disable_device(pdev); 1033 pci_disable_device(pdev);
988 goto out; 1034 goto out;
989 } 1035 }
@@ -1119,6 +1165,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1119 vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0; 1165 vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
1120 vp->io_size = vci->io_size; 1166 vp->io_size = vci->io_size;
1121 vp->card_idx = card_idx; 1167 vp->card_idx = card_idx;
1168 vp->window = -1;
1122 1169
1123 /* module list only for Compaq device */ 1170 /* module list only for Compaq device */
1124 if (gendev == NULL) { 1171 if (gendev == NULL) {
@@ -1154,6 +1201,8 @@ static int __devinit vortex_probe1(struct device *gendev,
1154 } 1201 }
1155 1202
1156 spin_lock_init(&vp->lock); 1203 spin_lock_init(&vp->lock);
1204 spin_lock_init(&vp->mii_lock);
1205 spin_lock_init(&vp->window_lock);
1157 vp->gendev = gendev; 1206 vp->gendev = gendev;
1158 vp->mii.dev = dev; 1207 vp->mii.dev = dev;
1159 vp->mii.mdio_read = mdio_read; 1208 vp->mii.mdio_read = mdio_read;
@@ -1205,7 +1254,6 @@ static int __devinit vortex_probe1(struct device *gendev,
1205 vp->mii.force_media = vp->full_duplex; 1254 vp->mii.force_media = vp->full_duplex;
1206 vp->options = option; 1255 vp->options = option;
1207 /* Read the station address from the EEPROM. */ 1256 /* Read the station address from the EEPROM. */
1208 EL3WINDOW(0);
1209 { 1257 {
1210 int base; 1258 int base;
1211 1259
@@ -1218,14 +1266,15 @@ static int __devinit vortex_probe1(struct device *gendev,
1218 1266
1219 for (i = 0; i < 0x40; i++) { 1267 for (i = 0; i < 0x40; i++) {
1220 int timer; 1268 int timer;
1221 iowrite16(base + i, ioaddr + Wn0EepromCmd); 1269 window_write16(vp, base + i, 0, Wn0EepromCmd);
1222 /* Pause for at least 162 us. for the read to take place. */ 1270 /* Pause for at least 162 us. for the read to take place. */
1223 for (timer = 10; timer >= 0; timer--) { 1271 for (timer = 10; timer >= 0; timer--) {
1224 udelay(162); 1272 udelay(162);
1225 if ((ioread16(ioaddr + Wn0EepromCmd) & 0x8000) == 0) 1273 if ((window_read16(vp, 0, Wn0EepromCmd) &
1274 0x8000) == 0)
1226 break; 1275 break;
1227 } 1276 }
1228 eeprom[i] = ioread16(ioaddr + Wn0EepromData); 1277 eeprom[i] = window_read16(vp, 0, Wn0EepromData);
1229 } 1278 }
1230 } 1279 }
1231 for (i = 0; i < 0x18; i++) 1280 for (i = 0; i < 0x18; i++)
@@ -1250,9 +1299,8 @@ static int __devinit vortex_probe1(struct device *gendev,
1250 pr_err("*** EEPROM MAC address is invalid.\n"); 1299 pr_err("*** EEPROM MAC address is invalid.\n");
1251 goto free_ring; /* With every pack */ 1300 goto free_ring; /* With every pack */
1252 } 1301 }
1253 EL3WINDOW(2);
1254 for (i = 0; i < 6; i++) 1302 for (i = 0; i < 6; i++)
1255 iowrite8(dev->dev_addr[i], ioaddr + i); 1303 window_write8(vp, dev->dev_addr[i], 2, i);
1256 1304
1257 if (print_info) 1305 if (print_info)
1258 pr_cont(", IRQ %d\n", dev->irq); 1306 pr_cont(", IRQ %d\n", dev->irq);
@@ -1261,8 +1309,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1261 pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n", 1309 pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
1262 dev->irq); 1310 dev->irq);
1263 1311
1264 EL3WINDOW(4); 1312 step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
1265 step = (ioread8(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
1266 if (print_info) { 1313 if (print_info) {
1267 pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n", 1314 pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
1268 eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], 1315 eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
@@ -1285,17 +1332,15 @@ static int __devinit vortex_probe1(struct device *gendev,
1285 (unsigned long long)pci_resource_start(pdev, 2), 1332 (unsigned long long)pci_resource_start(pdev, 2),
1286 vp->cb_fn_base); 1333 vp->cb_fn_base);
1287 } 1334 }
1288 EL3WINDOW(2);
1289 1335
1290 n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010; 1336 n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1291 if (vp->drv_flags & INVERT_LED_PWR) 1337 if (vp->drv_flags & INVERT_LED_PWR)
1292 n |= 0x10; 1338 n |= 0x10;
1293 if (vp->drv_flags & INVERT_MII_PWR) 1339 if (vp->drv_flags & INVERT_MII_PWR)
1294 n |= 0x4000; 1340 n |= 0x4000;
1295 iowrite16(n, ioaddr + Wn2_ResetOptions); 1341 window_write16(vp, n, 2, Wn2_ResetOptions);
1296 if (vp->drv_flags & WNO_XCVR_PWR) { 1342 if (vp->drv_flags & WNO_XCVR_PWR) {
1297 EL3WINDOW(0); 1343 window_write16(vp, 0x0800, 0, 0);
1298 iowrite16(0x0800, ioaddr);
1299 } 1344 }
1300 } 1345 }
1301 1346
@@ -1313,14 +1358,13 @@ static int __devinit vortex_probe1(struct device *gendev,
1313 { 1358 {
1314 static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 1359 static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
1315 unsigned int config; 1360 unsigned int config;
1316 EL3WINDOW(3); 1361 vp->available_media = window_read16(vp, 3, Wn3_Options);
1317 vp->available_media = ioread16(ioaddr + Wn3_Options);
1318 if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ 1362 if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
1319 vp->available_media = 0x40; 1363 vp->available_media = 0x40;
1320 config = ioread32(ioaddr + Wn3_Config); 1364 config = window_read32(vp, 3, Wn3_Config);
1321 if (print_info) { 1365 if (print_info) {
1322 pr_debug(" Internal config register is %4.4x, transceivers %#x.\n", 1366 pr_debug(" Internal config register is %4.4x, transceivers %#x.\n",
1323 config, ioread16(ioaddr + Wn3_Options)); 1367 config, window_read16(vp, 3, Wn3_Options));
1324 pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", 1368 pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
1325 8 << RAM_SIZE(config), 1369 8 << RAM_SIZE(config),
1326 RAM_WIDTH(config) ? "word" : "byte", 1370 RAM_WIDTH(config) ? "word" : "byte",
@@ -1346,11 +1390,10 @@ static int __devinit vortex_probe1(struct device *gendev,
1346 if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || 1390 if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
1347 dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { 1391 dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1348 int phy, phy_idx = 0; 1392 int phy, phy_idx = 0;
1349 EL3WINDOW(4);
1350 mii_preamble_required++; 1393 mii_preamble_required++;
1351 if (vp->drv_flags & EXTRA_PREAMBLE) 1394 if (vp->drv_flags & EXTRA_PREAMBLE)
1352 mii_preamble_required++; 1395 mii_preamble_required++;
1353 mdio_sync(ioaddr, 32); 1396 mdio_sync(vp, 32);
1354 mdio_read(dev, 24, MII_BMSR); 1397 mdio_read(dev, 24, MII_BMSR);
1355 for (phy = 0; phy < 32 && phy_idx < 1; phy++) { 1398 for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
1356 int mii_status, phyx; 1399 int mii_status, phyx;
@@ -1478,18 +1521,17 @@ static void
1478vortex_set_duplex(struct net_device *dev) 1521vortex_set_duplex(struct net_device *dev)
1479{ 1522{
1480 struct vortex_private *vp = netdev_priv(dev); 1523 struct vortex_private *vp = netdev_priv(dev);
1481 void __iomem *ioaddr = vp->ioaddr;
1482 1524
1483 pr_info("%s: setting %s-duplex.\n", 1525 pr_info("%s: setting %s-duplex.\n",
1484 dev->name, (vp->full_duplex) ? "full" : "half"); 1526 dev->name, (vp->full_duplex) ? "full" : "half");
1485 1527
1486 EL3WINDOW(3);
1487 /* Set the full-duplex bit. */ 1528 /* Set the full-duplex bit. */
1488 iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | 1529 window_write16(vp,
1489 (vp->large_frames ? 0x40 : 0) | 1530 ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1490 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 1531 (vp->large_frames ? 0x40 : 0) |
1491 0x100 : 0), 1532 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1492 ioaddr + Wn3_MAC_Ctrl); 1533 0x100 : 0),
1534 3, Wn3_MAC_Ctrl);
1493} 1535}
1494 1536
1495static void vortex_check_media(struct net_device *dev, unsigned int init) 1537static void vortex_check_media(struct net_device *dev, unsigned int init)
@@ -1529,8 +1571,7 @@ vortex_up(struct net_device *dev)
1529 } 1571 }
1530 1572
1531 /* Before initializing select the active media port. */ 1573 /* Before initializing select the active media port. */
1532 EL3WINDOW(3); 1574 config = window_read32(vp, 3, Wn3_Config);
1533 config = ioread32(ioaddr + Wn3_Config);
1534 1575
1535 if (vp->media_override != 7) { 1576 if (vp->media_override != 7) {
1536 pr_info("%s: Media override to transceiver %d (%s).\n", 1577 pr_info("%s: Media override to transceiver %d (%s).\n",
@@ -1577,10 +1618,9 @@ vortex_up(struct net_device *dev)
1577 config = BFINS(config, dev->if_port, 20, 4); 1618 config = BFINS(config, dev->if_port, 20, 4);
1578 if (vortex_debug > 6) 1619 if (vortex_debug > 6)
1579 pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config); 1620 pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
1580 iowrite32(config, ioaddr + Wn3_Config); 1621 window_write32(vp, config, 3, Wn3_Config);
1581 1622
1582 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { 1623 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1583 EL3WINDOW(4);
1584 mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); 1624 mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
1585 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); 1625 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1586 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); 1626 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
@@ -1601,51 +1641,46 @@ vortex_up(struct net_device *dev)
1601 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); 1641 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
1602 1642
1603 if (vortex_debug > 1) { 1643 if (vortex_debug > 1) {
1604 EL3WINDOW(4);
1605 pr_debug("%s: vortex_up() irq %d media status %4.4x.\n", 1644 pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
1606 dev->name, dev->irq, ioread16(ioaddr + Wn4_Media)); 1645 dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
1607 } 1646 }
1608 1647
1609 /* Set the station address and mask in window 2 each time opened. */ 1648 /* Set the station address and mask in window 2 each time opened. */
1610 EL3WINDOW(2);
1611 for (i = 0; i < 6; i++) 1649 for (i = 0; i < 6; i++)
1612 iowrite8(dev->dev_addr[i], ioaddr + i); 1650 window_write8(vp, dev->dev_addr[i], 2, i);
1613 for (; i < 12; i+=2) 1651 for (; i < 12; i+=2)
1614 iowrite16(0, ioaddr + i); 1652 window_write16(vp, 0, 2, i);
1615 1653
1616 if (vp->cb_fn_base) { 1654 if (vp->cb_fn_base) {
1617 unsigned short n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010; 1655 unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1618 if (vp->drv_flags & INVERT_LED_PWR) 1656 if (vp->drv_flags & INVERT_LED_PWR)
1619 n |= 0x10; 1657 n |= 0x10;
1620 if (vp->drv_flags & INVERT_MII_PWR) 1658 if (vp->drv_flags & INVERT_MII_PWR)
1621 n |= 0x4000; 1659 n |= 0x4000;
1622 iowrite16(n, ioaddr + Wn2_ResetOptions); 1660 window_write16(vp, n, 2, Wn2_ResetOptions);
1623 } 1661 }
1624 1662
1625 if (dev->if_port == XCVR_10base2) 1663 if (dev->if_port == XCVR_10base2)
1626 /* Start the thinnet transceiver. We should really wait 50ms...*/ 1664 /* Start the thinnet transceiver. We should really wait 50ms...*/
1627 iowrite16(StartCoax, ioaddr + EL3_CMD); 1665 iowrite16(StartCoax, ioaddr + EL3_CMD);
1628 if (dev->if_port != XCVR_NWAY) { 1666 if (dev->if_port != XCVR_NWAY) {
1629 EL3WINDOW(4); 1667 window_write16(vp,
1630 iowrite16((ioread16(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) | 1668 (window_read16(vp, 4, Wn4_Media) &
1631 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); 1669 ~(Media_10TP|Media_SQE)) |
1670 media_tbl[dev->if_port].media_bits,
1671 4, Wn4_Media);
1632 } 1672 }
1633 1673
1634 /* Switch to the stats window, and clear all stats by reading. */ 1674 /* Switch to the stats window, and clear all stats by reading. */
1635 iowrite16(StatsDisable, ioaddr + EL3_CMD); 1675 iowrite16(StatsDisable, ioaddr + EL3_CMD);
1636 EL3WINDOW(6);
1637 for (i = 0; i < 10; i++) 1676 for (i = 0; i < 10; i++)
1638 ioread8(ioaddr + i); 1677 window_read8(vp, 6, i);
1639 ioread16(ioaddr + 10); 1678 window_read16(vp, 6, 10);
1640 ioread16(ioaddr + 12); 1679 window_read16(vp, 6, 12);
1641 /* New: On the Vortex we must also clear the BadSSD counter. */ 1680 /* New: On the Vortex we must also clear the BadSSD counter. */
1642 EL3WINDOW(4); 1681 window_read8(vp, 4, 12);
1643 ioread8(ioaddr + 12);
1644 /* ..and on the Boomerang we enable the extra statistics bits. */ 1682 /* ..and on the Boomerang we enable the extra statistics bits. */
1645 iowrite16(0x0040, ioaddr + Wn4_NetDiag); 1683 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1646
1647 /* Switch to register set 7 for normal use. */
1648 EL3WINDOW(7);
1649 1684
1650 if (vp->full_bus_master_rx) { /* Boomerang bus master. */ 1685 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1651 vp->cur_rx = vp->dirty_rx = 0; 1686 vp->cur_rx = vp->dirty_rx = 0;
@@ -1763,7 +1798,7 @@ vortex_timer(unsigned long data)
1763 void __iomem *ioaddr = vp->ioaddr; 1798 void __iomem *ioaddr = vp->ioaddr;
1764 int next_tick = 60*HZ; 1799 int next_tick = 60*HZ;
1765 int ok = 0; 1800 int ok = 0;
1766 int media_status, old_window; 1801 int media_status;
1767 1802
1768 if (vortex_debug > 2) { 1803 if (vortex_debug > 2) {
1769 pr_debug("%s: Media selection timer tick happened, %s.\n", 1804 pr_debug("%s: Media selection timer tick happened, %s.\n",
@@ -1771,10 +1806,7 @@ vortex_timer(unsigned long data)
1771 pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo); 1806 pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1772 } 1807 }
1773 1808
1774 disable_irq_lockdep(dev->irq); 1809 media_status = window_read16(vp, 4, Wn4_Media);
1775 old_window = ioread16(ioaddr + EL3_CMD) >> 13;
1776 EL3WINDOW(4);
1777 media_status = ioread16(ioaddr + Wn4_Media);
1778 switch (dev->if_port) { 1810 switch (dev->if_port) {
1779 case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx: 1811 case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
1780 if (media_status & Media_LnkBeat) { 1812 if (media_status & Media_LnkBeat) {
@@ -1794,10 +1826,7 @@ vortex_timer(unsigned long data)
1794 case XCVR_MII: case XCVR_NWAY: 1826 case XCVR_MII: case XCVR_NWAY:
1795 { 1827 {
1796 ok = 1; 1828 ok = 1;
1797 /* Interrupts are already disabled */
1798 spin_lock(&vp->lock);
1799 vortex_check_media(dev, 0); 1829 vortex_check_media(dev, 0);
1800 spin_unlock(&vp->lock);
1801 } 1830 }
1802 break; 1831 break;
1803 default: /* Other media types handled by Tx timeouts. */ 1832 default: /* Other media types handled by Tx timeouts. */
@@ -1816,6 +1845,8 @@ vortex_timer(unsigned long data)
1816 if (!ok) { 1845 if (!ok) {
1817 unsigned int config; 1846 unsigned int config;
1818 1847
1848 spin_lock_irq(&vp->lock);
1849
1819 do { 1850 do {
1820 dev->if_port = media_tbl[dev->if_port].next; 1851 dev->if_port = media_tbl[dev->if_port].next;
1821 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask)); 1852 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
@@ -1830,19 +1861,22 @@ vortex_timer(unsigned long data)
1830 dev->name, media_tbl[dev->if_port].name); 1861 dev->name, media_tbl[dev->if_port].name);
1831 next_tick = media_tbl[dev->if_port].wait; 1862 next_tick = media_tbl[dev->if_port].wait;
1832 } 1863 }
1833 iowrite16((media_status & ~(Media_10TP|Media_SQE)) | 1864 window_write16(vp,
1834 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); 1865 (media_status & ~(Media_10TP|Media_SQE)) |
1866 media_tbl[dev->if_port].media_bits,
1867 4, Wn4_Media);
1835 1868
1836 EL3WINDOW(3); 1869 config = window_read32(vp, 3, Wn3_Config);
1837 config = ioread32(ioaddr + Wn3_Config);
1838 config = BFINS(config, dev->if_port, 20, 4); 1870 config = BFINS(config, dev->if_port, 20, 4);
1839 iowrite32(config, ioaddr + Wn3_Config); 1871 window_write32(vp, config, 3, Wn3_Config);
1840 1872
1841 iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax, 1873 iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
1842 ioaddr + EL3_CMD); 1874 ioaddr + EL3_CMD);
1843 if (vortex_debug > 1) 1875 if (vortex_debug > 1)
1844 pr_debug("wrote 0x%08x to Wn3_Config\n", config); 1876 pr_debug("wrote 0x%08x to Wn3_Config\n", config);
1845 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ 1877 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
1878
1879 spin_unlock_irq(&vp->lock);
1846 } 1880 }
1847 1881
1848leave_media_alone: 1882leave_media_alone:
@@ -1850,8 +1884,6 @@ leave_media_alone:
1850 pr_debug("%s: Media selection timer finished, %s.\n", 1884 pr_debug("%s: Media selection timer finished, %s.\n",
1851 dev->name, media_tbl[dev->if_port].name); 1885 dev->name, media_tbl[dev->if_port].name);
1852 1886
1853 EL3WINDOW(old_window);
1854 enable_irq_lockdep(dev->irq);
1855 mod_timer(&vp->timer, RUN_AT(next_tick)); 1887 mod_timer(&vp->timer, RUN_AT(next_tick));
1856 if (vp->deferred) 1888 if (vp->deferred)
1857 iowrite16(FakeIntr, ioaddr + EL3_CMD); 1889 iowrite16(FakeIntr, ioaddr + EL3_CMD);
@@ -1865,12 +1897,11 @@ static void vortex_tx_timeout(struct net_device *dev)
1865 pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", 1897 pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
1866 dev->name, ioread8(ioaddr + TxStatus), 1898 dev->name, ioread8(ioaddr + TxStatus),
1867 ioread16(ioaddr + EL3_STATUS)); 1899 ioread16(ioaddr + EL3_STATUS));
1868 EL3WINDOW(4);
1869 pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n", 1900 pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n",
1870 ioread16(ioaddr + Wn4_NetDiag), 1901 window_read16(vp, 4, Wn4_NetDiag),
1871 ioread16(ioaddr + Wn4_Media), 1902 window_read16(vp, 4, Wn4_Media),
1872 ioread32(ioaddr + PktStatus), 1903 ioread32(ioaddr + PktStatus),
1873 ioread16(ioaddr + Wn4_FIFODiag)); 1904 window_read16(vp, 4, Wn4_FIFODiag));
1874 /* Slight code bloat to be user friendly. */ 1905 /* Slight code bloat to be user friendly. */
1875 if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88) 1906 if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
1876 pr_err("%s: Transmitter encountered 16 collisions --" 1907 pr_err("%s: Transmitter encountered 16 collisions --"
@@ -1917,9 +1948,6 @@ static void vortex_tx_timeout(struct net_device *dev)
1917 /* Issue Tx Enable */ 1948 /* Issue Tx Enable */
1918 iowrite16(TxEnable, ioaddr + EL3_CMD); 1949 iowrite16(TxEnable, ioaddr + EL3_CMD);
1919 dev->trans_start = jiffies; /* prevent tx timeout */ 1950 dev->trans_start = jiffies; /* prevent tx timeout */
1920
1921 /* Switch to register set 7 for normal use. */
1922 EL3WINDOW(7);
1923} 1951}
1924 1952
1925/* 1953/*
@@ -1980,10 +2008,10 @@ vortex_error(struct net_device *dev, int status)
1980 ioread16(ioaddr + EL3_STATUS) & StatsFull) { 2008 ioread16(ioaddr + EL3_STATUS) & StatsFull) {
1981 pr_warning("%s: Updating statistics failed, disabling " 2009 pr_warning("%s: Updating statistics failed, disabling "
1982 "stats as an interrupt source.\n", dev->name); 2010 "stats as an interrupt source.\n", dev->name);
1983 EL3WINDOW(5); 2011 iowrite16(SetIntrEnb |
1984 iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD); 2012 (window_read16(vp, 5, 10) & ~StatsFull),
2013 ioaddr + EL3_CMD);
1985 vp->intr_enable &= ~StatsFull; 2014 vp->intr_enable &= ~StatsFull;
1986 EL3WINDOW(7);
1987 DoneDidThat++; 2015 DoneDidThat++;
1988 } 2016 }
1989 } 2017 }
@@ -1993,8 +2021,7 @@ vortex_error(struct net_device *dev, int status)
1993 } 2021 }
1994 if (status & HostError) { 2022 if (status & HostError) {
1995 u16 fifo_diag; 2023 u16 fifo_diag;
1996 EL3WINDOW(4); 2024 fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
1997 fifo_diag = ioread16(ioaddr + Wn4_FIFODiag);
1998 pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n", 2025 pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
1999 dev->name, fifo_diag); 2026 dev->name, fifo_diag);
2000 /* Adapter failure requires Tx/Rx reset and reinit. */ 2027 /* Adapter failure requires Tx/Rx reset and reinit. */
@@ -2043,9 +2070,13 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2043 if (vp->bus_master) { 2070 if (vp->bus_master) {
2044 /* Set the bus-master controller to transfer the packet. */ 2071 /* Set the bus-master controller to transfer the packet. */
2045 int len = (skb->len + 3) & ~3; 2072 int len = (skb->len + 3) & ~3;
2046 iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), 2073 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
2047 ioaddr + Wn7_MasterAddr); 2074 PCI_DMA_TODEVICE);
2075 spin_lock_irq(&vp->window_lock);
2076 window_set(vp, 7);
2077 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
2048 iowrite16(len, ioaddr + Wn7_MasterLen); 2078 iowrite16(len, ioaddr + Wn7_MasterLen);
2079 spin_unlock_irq(&vp->window_lock);
2049 vp->tx_skb = skb; 2080 vp->tx_skb = skb;
2050 iowrite16(StartDMADown, ioaddr + EL3_CMD); 2081 iowrite16(StartDMADown, ioaddr + EL3_CMD);
2051 /* netif_wake_queue() will be called at the DMADone interrupt. */ 2082 /* netif_wake_queue() will be called at the DMADone interrupt. */
@@ -2217,6 +2248,9 @@ vortex_interrupt(int irq, void *dev_id)
2217 pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", 2248 pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
2218 dev->name, status, ioread8(ioaddr + Timer)); 2249 dev->name, status, ioread8(ioaddr + Timer));
2219 2250
2251 spin_lock(&vp->window_lock);
2252 window_set(vp, 7);
2253
2220 do { 2254 do {
2221 if (vortex_debug > 5) 2255 if (vortex_debug > 5)
2222 pr_debug("%s: In interrupt loop, status %4.4x.\n", 2256 pr_debug("%s: In interrupt loop, status %4.4x.\n",
@@ -2275,6 +2309,8 @@ vortex_interrupt(int irq, void *dev_id)
2275 iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); 2309 iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2276 } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); 2310 } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
2277 2311
2312 spin_unlock(&vp->window_lock);
2313
2278 if (vortex_debug > 4) 2314 if (vortex_debug > 4)
2279 pr_debug("%s: exiting interrupt, status %4.4x.\n", 2315 pr_debug("%s: exiting interrupt, status %4.4x.\n",
2280 dev->name, status); 2316 dev->name, status);
@@ -2760,85 +2796,58 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2760static void update_stats(void __iomem *ioaddr, struct net_device *dev) 2796static void update_stats(void __iomem *ioaddr, struct net_device *dev)
2761{ 2797{
2762 struct vortex_private *vp = netdev_priv(dev); 2798 struct vortex_private *vp = netdev_priv(dev);
2763 int old_window = ioread16(ioaddr + EL3_CMD);
2764 2799
2765 if (old_window == 0xffff) /* Chip suspended or ejected. */
2766 return;
2767 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ 2800 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
2768 /* Switch to the stats window, and read everything. */ 2801 /* Switch to the stats window, and read everything. */
2769 EL3WINDOW(6); 2802 dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
2770 dev->stats.tx_carrier_errors += ioread8(ioaddr + 0); 2803 dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
2771 dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); 2804 dev->stats.tx_window_errors += window_read8(vp, 6, 4);
2772 dev->stats.tx_window_errors += ioread8(ioaddr + 4); 2805 dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
2773 dev->stats.rx_fifo_errors += ioread8(ioaddr + 5); 2806 dev->stats.tx_packets += window_read8(vp, 6, 6);
2774 dev->stats.tx_packets += ioread8(ioaddr + 6); 2807 dev->stats.tx_packets += (window_read8(vp, 6, 9) &
2775 dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; 2808 0x30) << 4;
2776 /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */ 2809 /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */
2777 /* Don't bother with register 9, an extension of registers 6&7. 2810 /* Don't bother with register 9, an extension of registers 6&7.
2778 If we do use the 6&7 values the atomic update assumption above 2811 If we do use the 6&7 values the atomic update assumption above
2779 is invalid. */ 2812 is invalid. */
2780 dev->stats.rx_bytes += ioread16(ioaddr + 10); 2813 dev->stats.rx_bytes += window_read16(vp, 6, 10);
2781 dev->stats.tx_bytes += ioread16(ioaddr + 12); 2814 dev->stats.tx_bytes += window_read16(vp, 6, 12);
2782 /* Extra stats for get_ethtool_stats() */ 2815 /* Extra stats for get_ethtool_stats() */
2783 vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); 2816 vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
2784 vp->xstats.tx_single_collisions += ioread8(ioaddr + 3); 2817 vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
2785 vp->xstats.tx_deferred += ioread8(ioaddr + 8); 2818 vp->xstats.tx_deferred += window_read8(vp, 6, 8);
2786 EL3WINDOW(4); 2819 vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
2787 vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
2788 2820
2789 dev->stats.collisions = vp->xstats.tx_multiple_collisions 2821 dev->stats.collisions = vp->xstats.tx_multiple_collisions
2790 + vp->xstats.tx_single_collisions 2822 + vp->xstats.tx_single_collisions
2791 + vp->xstats.tx_max_collisions; 2823 + vp->xstats.tx_max_collisions;
2792 2824
2793 { 2825 {
2794 u8 up = ioread8(ioaddr + 13); 2826 u8 up = window_read8(vp, 4, 13);
2795 dev->stats.rx_bytes += (up & 0x0f) << 16; 2827 dev->stats.rx_bytes += (up & 0x0f) << 16;
2796 dev->stats.tx_bytes += (up & 0xf0) << 12; 2828 dev->stats.tx_bytes += (up & 0xf0) << 12;
2797 } 2829 }
2798
2799 EL3WINDOW(old_window >> 13);
2800} 2830}
2801 2831
2802static int vortex_nway_reset(struct net_device *dev) 2832static int vortex_nway_reset(struct net_device *dev)
2803{ 2833{
2804 struct vortex_private *vp = netdev_priv(dev); 2834 struct vortex_private *vp = netdev_priv(dev);
2805 void __iomem *ioaddr = vp->ioaddr;
2806 unsigned long flags;
2807 int rc;
2808 2835
2809 spin_lock_irqsave(&vp->lock, flags); 2836 return mii_nway_restart(&vp->mii);
2810 EL3WINDOW(4);
2811 rc = mii_nway_restart(&vp->mii);
2812 spin_unlock_irqrestore(&vp->lock, flags);
2813 return rc;
2814} 2837}
2815 2838
2816static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2839static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2817{ 2840{
2818 struct vortex_private *vp = netdev_priv(dev); 2841 struct vortex_private *vp = netdev_priv(dev);
2819 void __iomem *ioaddr = vp->ioaddr;
2820 unsigned long flags;
2821 int rc;
2822 2842
2823 spin_lock_irqsave(&vp->lock, flags); 2843 return mii_ethtool_gset(&vp->mii, cmd);
2824 EL3WINDOW(4);
2825 rc = mii_ethtool_gset(&vp->mii, cmd);
2826 spin_unlock_irqrestore(&vp->lock, flags);
2827 return rc;
2828} 2844}
2829 2845
2830static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2846static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2831{ 2847{
2832 struct vortex_private *vp = netdev_priv(dev); 2848 struct vortex_private *vp = netdev_priv(dev);
2833 void __iomem *ioaddr = vp->ioaddr;
2834 unsigned long flags;
2835 int rc;
2836 2849
2837 spin_lock_irqsave(&vp->lock, flags); 2850 return mii_ethtool_sset(&vp->mii, cmd);
2838 EL3WINDOW(4);
2839 rc = mii_ethtool_sset(&vp->mii, cmd);
2840 spin_unlock_irqrestore(&vp->lock, flags);
2841 return rc;
2842} 2851}
2843 2852
2844static u32 vortex_get_msglevel(struct net_device *dev) 2853static u32 vortex_get_msglevel(struct net_device *dev)
@@ -2909,6 +2918,36 @@ static void vortex_get_drvinfo(struct net_device *dev,
2909 } 2918 }
2910} 2919}
2911 2920
2921static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2922{
2923 struct vortex_private *vp = netdev_priv(dev);
2924
2925 spin_lock_irq(&vp->lock);
2926 wol->supported = WAKE_MAGIC;
2927
2928 wol->wolopts = 0;
2929 if (vp->enable_wol)
2930 wol->wolopts |= WAKE_MAGIC;
2931 spin_unlock_irq(&vp->lock);
2932}
2933
2934static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2935{
2936 struct vortex_private *vp = netdev_priv(dev);
2937 if (wol->wolopts & ~WAKE_MAGIC)
2938 return -EINVAL;
2939
2940 spin_lock_irq(&vp->lock);
2941 if (wol->wolopts & WAKE_MAGIC)
2942 vp->enable_wol = 1;
2943 else
2944 vp->enable_wol = 0;
2945 acpi_set_WOL(dev);
2946 spin_unlock_irq(&vp->lock);
2947
2948 return 0;
2949}
2950
2912static const struct ethtool_ops vortex_ethtool_ops = { 2951static const struct ethtool_ops vortex_ethtool_ops = {
2913 .get_drvinfo = vortex_get_drvinfo, 2952 .get_drvinfo = vortex_get_drvinfo,
2914 .get_strings = vortex_get_strings, 2953 .get_strings = vortex_get_strings,
@@ -2920,6 +2959,8 @@ static const struct ethtool_ops vortex_ethtool_ops = {
2920 .set_settings = vortex_set_settings, 2959 .set_settings = vortex_set_settings,
2921 .get_link = ethtool_op_get_link, 2960 .get_link = ethtool_op_get_link,
2922 .nway_reset = vortex_nway_reset, 2961 .nway_reset = vortex_nway_reset,
2962 .get_wol = vortex_get_wol,
2963 .set_wol = vortex_set_wol,
2923}; 2964};
2924 2965
2925#ifdef CONFIG_PCI 2966#ifdef CONFIG_PCI
@@ -2930,7 +2971,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2930{ 2971{
2931 int err; 2972 int err;
2932 struct vortex_private *vp = netdev_priv(dev); 2973 struct vortex_private *vp = netdev_priv(dev);
2933 void __iomem *ioaddr = vp->ioaddr;
2934 unsigned long flags; 2974 unsigned long flags;
2935 pci_power_t state = 0; 2975 pci_power_t state = 0;
2936 2976
@@ -2942,7 +2982,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2942 if(state != 0) 2982 if(state != 0)
2943 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); 2983 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
2944 spin_lock_irqsave(&vp->lock, flags); 2984 spin_lock_irqsave(&vp->lock, flags);
2945 EL3WINDOW(4);
2946 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); 2985 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
2947 spin_unlock_irqrestore(&vp->lock, flags); 2986 spin_unlock_irqrestore(&vp->lock, flags);
2948 if(state != 0) 2987 if(state != 0)
@@ -2985,8 +3024,6 @@ static void set_rx_mode(struct net_device *dev)
2985static void set_8021q_mode(struct net_device *dev, int enable) 3024static void set_8021q_mode(struct net_device *dev, int enable)
2986{ 3025{
2987 struct vortex_private *vp = netdev_priv(dev); 3026 struct vortex_private *vp = netdev_priv(dev);
2988 void __iomem *ioaddr = vp->ioaddr;
2989 int old_window = ioread16(ioaddr + EL3_CMD);
2990 int mac_ctrl; 3027 int mac_ctrl;
2991 3028
2992 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { 3029 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
@@ -2997,28 +3034,23 @@ static void set_8021q_mode(struct net_device *dev, int enable)
2997 if (enable) 3034 if (enable)
2998 max_pkt_size += 4; /* 802.1Q VLAN tag */ 3035 max_pkt_size += 4; /* 802.1Q VLAN tag */
2999 3036
3000 EL3WINDOW(3); 3037 window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
3001 iowrite16(max_pkt_size, ioaddr+Wn3_MaxPktSize);
3002 3038
3003 /* set VlanEtherType to let the hardware checksumming 3039 /* set VlanEtherType to let the hardware checksumming
3004 treat tagged frames correctly */ 3040 treat tagged frames correctly */
3005 EL3WINDOW(7); 3041 window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
3006 iowrite16(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
3007 } else { 3042 } else {
3008 /* on older cards we have to enable large frames */ 3043 /* on older cards we have to enable large frames */
3009 3044
3010 vp->large_frames = dev->mtu > 1500 || enable; 3045 vp->large_frames = dev->mtu > 1500 || enable;
3011 3046
3012 EL3WINDOW(3); 3047 mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
3013 mac_ctrl = ioread16(ioaddr+Wn3_MAC_Ctrl);
3014 if (vp->large_frames) 3048 if (vp->large_frames)
3015 mac_ctrl |= 0x40; 3049 mac_ctrl |= 0x40;
3016 else 3050 else
3017 mac_ctrl &= ~0x40; 3051 mac_ctrl &= ~0x40;
3018 iowrite16(mac_ctrl, ioaddr+Wn3_MAC_Ctrl); 3052 window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
3019 } 3053 }
3020
3021 EL3WINDOW(old_window);
3022} 3054}
3023#else 3055#else
3024 3056
@@ -3037,7 +3069,10 @@ static void set_8021q_mode(struct net_device *dev, int enable)
3037/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually 3069/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
3038 met by back-to-back PCI I/O cycles, but we insert a delay to avoid 3070 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
3039 "overclocking" issues. */ 3071 "overclocking" issues. */
3040#define mdio_delay() ioread32(mdio_addr) 3072static void mdio_delay(struct vortex_private *vp)
3073{
3074 window_read32(vp, 4, Wn4_PhysicalMgmt);
3075}
3041 3076
3042#define MDIO_SHIFT_CLK 0x01 3077#define MDIO_SHIFT_CLK 0x01
3043#define MDIO_DIR_WRITE 0x04 3078#define MDIO_DIR_WRITE 0x04
@@ -3048,16 +3083,15 @@ static void set_8021q_mode(struct net_device *dev, int enable)
3048 3083
3049/* Generate the preamble required for initial synchronization and 3084/* Generate the preamble required for initial synchronization and
3050 a few older transceivers. */ 3085 a few older transceivers. */
3051static void mdio_sync(void __iomem *ioaddr, int bits) 3086static void mdio_sync(struct vortex_private *vp, int bits)
3052{ 3087{
3053 void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
3054
3055 /* Establish sync by sending at least 32 logic ones. */ 3088 /* Establish sync by sending at least 32 logic ones. */
3056 while (-- bits >= 0) { 3089 while (-- bits >= 0) {
3057 iowrite16(MDIO_DATA_WRITE1, mdio_addr); 3090 window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
3058 mdio_delay(); 3091 mdio_delay(vp);
3059 iowrite16(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); 3092 window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
3060 mdio_delay(); 3093 4, Wn4_PhysicalMgmt);
3094 mdio_delay(vp);
3061 } 3095 }
3062} 3096}
3063 3097
@@ -3065,59 +3099,70 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
3065{ 3099{
3066 int i; 3100 int i;
3067 struct vortex_private *vp = netdev_priv(dev); 3101 struct vortex_private *vp = netdev_priv(dev);
3068 void __iomem *ioaddr = vp->ioaddr;
3069 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; 3102 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
3070 unsigned int retval = 0; 3103 unsigned int retval = 0;
3071 void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt; 3104
3105 spin_lock_bh(&vp->mii_lock);
3072 3106
3073 if (mii_preamble_required) 3107 if (mii_preamble_required)
3074 mdio_sync(ioaddr, 32); 3108 mdio_sync(vp, 32);
3075 3109
3076 /* Shift the read command bits out. */ 3110 /* Shift the read command bits out. */
3077 for (i = 14; i >= 0; i--) { 3111 for (i = 14; i >= 0; i--) {
3078 int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; 3112 int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
3079 iowrite16(dataval, mdio_addr); 3113 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3080 mdio_delay(); 3114 mdio_delay(vp);
3081 iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr); 3115 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3082 mdio_delay(); 3116 4, Wn4_PhysicalMgmt);
3117 mdio_delay(vp);
3083 } 3118 }
3084 /* Read the two transition, 16 data, and wire-idle bits. */ 3119 /* Read the two transition, 16 data, and wire-idle bits. */
3085 for (i = 19; i > 0; i--) { 3120 for (i = 19; i > 0; i--) {
3086 iowrite16(MDIO_ENB_IN, mdio_addr); 3121 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3087 mdio_delay(); 3122 mdio_delay(vp);
3088 retval = (retval << 1) | ((ioread16(mdio_addr) & MDIO_DATA_READ) ? 1 : 0); 3123 retval = (retval << 1) |
3089 iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); 3124 ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
3090 mdio_delay(); 3125 MDIO_DATA_READ) ? 1 : 0);
3126 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3127 4, Wn4_PhysicalMgmt);
3128 mdio_delay(vp);
3091 } 3129 }
3130
3131 spin_unlock_bh(&vp->mii_lock);
3132
3092 return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; 3133 return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
3093} 3134}
3094 3135
3095static void mdio_write(struct net_device *dev, int phy_id, int location, int value) 3136static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
3096{ 3137{
3097 struct vortex_private *vp = netdev_priv(dev); 3138 struct vortex_private *vp = netdev_priv(dev);
3098 void __iomem *ioaddr = vp->ioaddr;
3099 int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; 3139 int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
3100 void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
3101 int i; 3140 int i;
3102 3141
3142 spin_lock_bh(&vp->mii_lock);
3143
3103 if (mii_preamble_required) 3144 if (mii_preamble_required)
3104 mdio_sync(ioaddr, 32); 3145 mdio_sync(vp, 32);
3105 3146
3106 /* Shift the command bits out. */ 3147 /* Shift the command bits out. */
3107 for (i = 31; i >= 0; i--) { 3148 for (i = 31; i >= 0; i--) {
3108 int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; 3149 int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
3109 iowrite16(dataval, mdio_addr); 3150 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3110 mdio_delay(); 3151 mdio_delay(vp);
3111 iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr); 3152 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3112 mdio_delay(); 3153 4, Wn4_PhysicalMgmt);
3154 mdio_delay(vp);
3113 } 3155 }
3114 /* Leave the interface idle. */ 3156 /* Leave the interface idle. */
3115 for (i = 1; i >= 0; i--) { 3157 for (i = 1; i >= 0; i--) {
3116 iowrite16(MDIO_ENB_IN, mdio_addr); 3158 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3117 mdio_delay(); 3159 mdio_delay(vp);
3118 iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); 3160 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3119 mdio_delay(); 3161 4, Wn4_PhysicalMgmt);
3162 mdio_delay(vp);
3120 } 3163 }
3164
3165 spin_unlock_bh(&vp->mii_lock);
3121} 3166}
3122 3167
3123/* ACPI: Advanced Configuration and Power Interface. */ 3168/* ACPI: Advanced Configuration and Power Interface. */
@@ -3131,8 +3176,7 @@ static void acpi_set_WOL(struct net_device *dev)
3131 3176
3132 if (vp->enable_wol) { 3177 if (vp->enable_wol) {
3133 /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ 3178 /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
3134 EL3WINDOW(7); 3179 window_write16(vp, 2, 7, 0x0c);
3135 iowrite16(2, ioaddr + 0x0c);
3136 /* The RxFilter must accept the WOL frames. */ 3180 /* The RxFilter must accept the WOL frames. */
3137 iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); 3181 iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
3138 iowrite16(RxEnable, ioaddr + EL3_CMD); 3182 iowrite16(RxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 284a5f4a63ac..4a4f6b81e32d 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -322,7 +322,7 @@ struct cp_dma_stats {
322 __le32 rx_ok_mcast; 322 __le32 rx_ok_mcast;
323 __le16 tx_abort; 323 __le16 tx_abort;
324 __le16 tx_underrun; 324 __le16 tx_underrun;
325} __attribute__((packed)); 325} __packed;
326 326
327struct cp_extra_stats { 327struct cp_extra_stats {
328 unsigned long rx_frags; 328 unsigned long rx_frags;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 97d8068b372b..f5166dccd8df 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -662,7 +662,7 @@ static const struct ethtool_ops rtl8139_ethtool_ops;
662/* read MMIO register */ 662/* read MMIO register */
663#define RTL_R8(reg) ioread8 (ioaddr + (reg)) 663#define RTL_R8(reg) ioread8 (ioaddr + (reg))
664#define RTL_R16(reg) ioread16 (ioaddr + (reg)) 664#define RTL_R16(reg) ioread16 (ioaddr + (reg))
665#define RTL_R32(reg) ((unsigned long) ioread32 (ioaddr + (reg))) 665#define RTL_R32(reg) ioread32 (ioaddr + (reg))
666 666
667 667
668static const u16 rtl8139_intr_mask = 668static const u16 rtl8139_intr_mask =
@@ -862,7 +862,7 @@ retry:
862 /* if unknown chip, assume array element #0, original RTL-8139 in this case */ 862 /* if unknown chip, assume array element #0, original RTL-8139 in this case */
863 i = 0; 863 i = 0;
864 dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n"); 864 dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
865 dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig)); 865 dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
866 tp->chipset = 0; 866 tp->chipset = 0;
867 867
868match: 868match:
@@ -1643,7 +1643,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
1643 netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n", 1643 netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
1644 tp->cur_tx, tp->dirty_tx); 1644 tp->cur_tx, tp->dirty_tx);
1645 for (i = 0; i < NUM_TX_DESC; i++) 1645 for (i = 0; i < NUM_TX_DESC; i++)
1646 netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n", 1646 netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
1647 i, RTL_R32(TxStatus0 + (i * 4)), 1647 i, RTL_R32(TxStatus0 + (i * 4)),
1648 i == tp->dirty_tx % NUM_TX_DESC ? 1648 i == tp->dirty_tx % NUM_TX_DESC ?
1649 " (queue head)" : ""); 1649 " (queue head)" : "");
@@ -2487,7 +2487,7 @@ static void __set_rx_mode (struct net_device *dev)
2487 int rx_mode; 2487 int rx_mode;
2488 u32 tmp; 2488 u32 tmp;
2489 2489
2490 netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n", 2490 netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
2491 dev->flags, RTL_R32(RxConfig)); 2491 dev->flags, RTL_R32(RxConfig));
2492 2492
2493 /* Note: do not reorder, GCC is clever about common statements. */ 2493 /* Note: do not reorder, GCC is clever about common statements. */
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index dd8dc15556cb..e2c9c5b949f9 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -525,7 +525,21 @@ static irqreturn_t i596_error(int irq, void *dev_id)
525} 525}
526#endif 526#endif
527 527
528static inline void init_rx_bufs(struct net_device *dev) 528static inline void remove_rx_bufs(struct net_device *dev)
529{
530 struct i596_private *lp = dev->ml_priv;
531 struct i596_rbd *rbd;
532 int i;
533
534 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
535 if (rbd->skb == NULL)
536 break;
537 dev_kfree_skb(rbd->skb);
538 rbd->skb = NULL;
539 }
540}
541
542static inline int init_rx_bufs(struct net_device *dev)
529{ 543{
530 struct i596_private *lp = dev->ml_priv; 544 struct i596_private *lp = dev->ml_priv;
531 int i; 545 int i;
@@ -537,8 +551,11 @@ static inline void init_rx_bufs(struct net_device *dev)
537 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { 551 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
538 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ); 552 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
539 553
540 if (skb == NULL) 554 if (skb == NULL) {
541 panic("82596: alloc_skb() failed"); 555 remove_rx_bufs(dev);
556 return -ENOMEM;
557 }
558
542 skb->dev = dev; 559 skb->dev = dev;
543 rbd->v_next = rbd+1; 560 rbd->v_next = rbd+1;
544 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1)); 561 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
@@ -574,19 +591,8 @@ static inline void init_rx_bufs(struct net_device *dev)
574 rfd->v_next = lp->rfds; 591 rfd->v_next = lp->rfds;
575 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds)); 592 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
576 rfd->cmd = CMD_EOL|CMD_FLEX; 593 rfd->cmd = CMD_EOL|CMD_FLEX;
577}
578 594
579static inline void remove_rx_bufs(struct net_device *dev) 595 return 0;
580{
581 struct i596_private *lp = dev->ml_priv;
582 struct i596_rbd *rbd;
583 int i;
584
585 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
586 if (rbd->skb == NULL)
587 break;
588 dev_kfree_skb(rbd->skb);
589 }
590} 596}
591 597
592 598
@@ -1009,20 +1015,35 @@ static int i596_open(struct net_device *dev)
1009 } 1015 }
1010#ifdef ENABLE_MVME16x_NET 1016#ifdef ENABLE_MVME16x_NET
1011 if (MACH_IS_MVME16x) { 1017 if (MACH_IS_MVME16x) {
1012 if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) 1018 if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
1013 return -EAGAIN; 1019 res = -EAGAIN;
1020 goto err_irq_dev;
1021 }
1014 } 1022 }
1015#endif 1023#endif
1016 init_rx_bufs(dev); 1024 res = init_rx_bufs(dev);
1025 if (res)
1026 goto err_irq_56;
1017 1027
1018 netif_start_queue(dev); 1028 netif_start_queue(dev);
1019 1029
1020 /* Initialize the 82596 memory */
1021 if (init_i596_mem(dev)) { 1030 if (init_i596_mem(dev)) {
1022 res = -EAGAIN; 1031 res = -EAGAIN;
1023 free_irq(dev->irq, dev); 1032 goto err_queue;
1024 } 1033 }
1025 1034
1035 return 0;
1036
1037err_queue:
1038 netif_stop_queue(dev);
1039 remove_rx_bufs(dev);
1040err_irq_56:
1041#ifdef ENABLE_MVME16x_NET
1042 free_irq(0x56, dev);
1043err_irq_dev:
1044#endif
1045 free_irq(dev->irq, dev);
1046
1026 return res; 1047 return res;
1027} 1048}
1028 1049
@@ -1488,6 +1509,9 @@ static int i596_close(struct net_device *dev)
1488 } 1509 }
1489#endif 1510#endif
1490 1511
1512#ifdef ENABLE_MVME16x_NET
1513 free_irq(0x56, dev);
1514#endif
1491 free_irq(dev->irq, dev); 1515 free_irq(dev->irq, dev);
1492 remove_rx_bufs(dev); 1516 remove_rx_bufs(dev);
1493 1517
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ce2fcdd4ab90..5a6895320b48 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -484,7 +484,7 @@ config XTENSA_XT2000_SONIC
484 484
485config MIPS_AU1X00_ENET 485config MIPS_AU1X00_ENET
486 tristate "MIPS AU1000 Ethernet support" 486 tristate "MIPS AU1000 Ethernet support"
487 depends on SOC_AU1X00 487 depends on MIPS_ALCHEMY
488 select PHYLIB 488 select PHYLIB
489 select CRC32 489 select CRC32
490 help 490 help
@@ -530,14 +530,15 @@ config SH_ETH
530 depends on SUPERH && \ 530 depends on SUPERH && \
531 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ 531 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
532 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ 532 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
533 CPU_SUBTYPE_SH7724) 533 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
534 select CRC32 534 select CRC32
535 select MII 535 select MII
536 select MDIO_BITBANG 536 select MDIO_BITBANG
537 select PHYLIB 537 select PHYLIB
538 help 538 help
539 Renesas SuperH Ethernet device driver. 539 Renesas SuperH Ethernet device driver.
540 This driver support SH7710, SH7712, SH7763, SH7619, and SH7724. 540 This driver supporting CPUs are:
541 - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
541 542
542config SUNLANCE 543config SUNLANCE
543 tristate "Sun LANCE support" 544 tristate "Sun LANCE support"
@@ -913,7 +914,7 @@ config SMC91X
913 tristate "SMC 91C9x/91C1xxx support" 914 tristate "SMC 91C9x/91C1xxx support"
914 select CRC32 915 select CRC32
915 select MII 916 select MII
916 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ 917 depends on ARM || M32R || SUPERH || \
917 MIPS || BLACKFIN || MN10300 || COLDFIRE 918 MIPS || BLACKFIN || MN10300 || COLDFIRE
918 help 919 help
919 This is a driver for SMC's 91x series of Ethernet chipsets, 920 This is a driver for SMC's 91x series of Ethernet chipsets,
@@ -1463,7 +1464,7 @@ config FORCEDETH
1463config CS89x0 1464config CS89x0
1464 tristate "CS89x0 support" 1465 tristate "CS89x0 support"
1465 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \ 1466 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
1466 || ARCH_IXDP2X01 || ARCH_PNX010X || MACH_MX31ADS) 1467 || ARCH_IXDP2X01 || MACH_MX31ADS)
1467 ---help--- 1468 ---help---
1468 Support for CS89x0 chipset based Ethernet cards. If you have a 1469 Support for CS89x0 chipset based Ethernet cards. If you have a
1469 network (Ethernet) card of this type, say Y and read the 1470 network (Ethernet) card of this type, say Y and read the
@@ -1477,7 +1478,7 @@ config CS89x0
1477config CS89x0_NONISA_IRQ 1478config CS89x0_NONISA_IRQ
1478 def_bool y 1479 def_bool y
1479 depends on CS89x0 != n 1480 depends on CS89x0 != n
1480 depends on MACH_IXDP2351 || ARCH_IXDP2X01 || ARCH_PNX010X || MACH_MX31ADS 1481 depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS
1481 1482
1482config TC35815 1483config TC35815
1483 tristate "TOSHIBA TC35815 Ethernet support" 1484 tristate "TOSHIBA TC35815 Ethernet support"
@@ -1659,6 +1660,7 @@ config R6040
1659 depends on NET_PCI && PCI 1660 depends on NET_PCI && PCI
1660 select CRC32 1661 select CRC32
1661 select MII 1662 select MII
1663 select PHYLIB
1662 help 1664 help
1663 This is a driver for the R6040 Fast Ethernet MACs found in the 1665 This is a driver for the R6040 Fast Ethernet MACs found in the
1664 the RDC R-321x System-on-chips. 1666 the RDC R-321x System-on-chips.
@@ -1748,11 +1750,12 @@ config TLAN
1748 Please email feedback to <torben.mathiasen@compaq.com>. 1750 Please email feedback to <torben.mathiasen@compaq.com>.
1749 1751
1750config KS8842 1752config KS8842
1751 tristate "Micrel KSZ8842" 1753 tristate "Micrel KSZ8841/42 with generic bus interface"
1752 depends on HAS_IOMEM 1754 depends on HAS_IOMEM && DMA_ENGINE
1753 help 1755 help
1754 This platform driver is for Micrel KSZ8842 / KS8842 1756 This platform driver is for KSZ8841(1-port) / KS8842(2-port)
1755 2-port ethernet switch chip (managed, VLAN, QoS). 1757 ethernet switch chip (managed, VLAN, QoS) from Micrel or
1758 Timberdale(FPGA).
1756 1759
1757config KS8851 1760config KS8851
1758 tristate "Micrel KS8851 SPI" 1761 tristate "Micrel KS8851 SPI"
@@ -2601,6 +2604,29 @@ config CHELSIO_T4
2601 To compile this driver as a module choose M here; the module 2604 To compile this driver as a module choose M here; the module
2602 will be called cxgb4. 2605 will be called cxgb4.
2603 2606
2607config CHELSIO_T4VF_DEPENDS
2608 tristate
2609 depends on PCI && INET
2610 default y
2611
2612config CHELSIO_T4VF
2613 tristate "Chelsio Communications T4 Virtual Function Ethernet support"
2614 depends on CHELSIO_T4VF_DEPENDS
2615 help
2616 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
2617 adapters with PCI-E SR-IOV Virtual Functions.
2618
2619 For general information about Chelsio and our products, visit
2620 our website at <http://www.chelsio.com>.
2621
2622 For customer support, please visit our customer support page at
2623 <http://www.chelsio.com/support.htm>.
2624
2625 Please send feedback to <linux-bugs@chelsio.com>.
2626
2627 To compile this driver as a module choose M here; the module
2628 will be called cxgb4vf.
2629
2604config EHEA 2630config EHEA
2605 tristate "eHEA Ethernet support" 2631 tristate "eHEA Ethernet support"
2606 depends on IBMEBUS && INET && SPARSEMEM 2632 depends on IBMEBUS && INET && SPARSEMEM
@@ -2614,7 +2640,6 @@ config EHEA
2614config ENIC 2640config ENIC
2615 tristate "Cisco VIC Ethernet NIC Support" 2641 tristate "Cisco VIC Ethernet NIC Support"
2616 depends on PCI && INET 2642 depends on PCI && INET
2617 select INET_LRO
2618 help 2643 help
2619 This enables the support for the Cisco VIC Ethernet card. 2644 This enables the support for the Cisco VIC Ethernet card.
2620 2645
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0a0512ae77da..56e8c27f77ce 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP1000) += ipg.o
20obj-$(CONFIG_CHELSIO_T1) += chelsio/ 20obj-$(CONFIG_CHELSIO_T1) += chelsio/
21obj-$(CONFIG_CHELSIO_T3) += cxgb3/ 21obj-$(CONFIG_CHELSIO_T3) += cxgb3/
22obj-$(CONFIG_CHELSIO_T4) += cxgb4/ 22obj-$(CONFIG_CHELSIO_T4) += cxgb4/
23obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
23obj-$(CONFIG_EHEA) += ehea/ 24obj-$(CONFIG_EHEA) += ehea/
24obj-$(CONFIG_CAN) += can/ 25obj-$(CONFIG_CAN) += can/
25obj-$(CONFIG_BONDING) += bonding/ 26obj-$(CONFIG_BONDING) += bonding/
@@ -83,8 +84,7 @@ obj-$(CONFIG_FEALNX) += fealnx.o
83obj-$(CONFIG_TIGON3) += tg3.o 84obj-$(CONFIG_TIGON3) += tg3.o
84obj-$(CONFIG_BNX2) += bnx2.o 85obj-$(CONFIG_BNX2) += bnx2.o
85obj-$(CONFIG_CNIC) += cnic.o 86obj-$(CONFIG_CNIC) += cnic.o
86obj-$(CONFIG_BNX2X) += bnx2x.o 87obj-$(CONFIG_BNX2X) += bnx2x/
87bnx2x-objs := bnx2x_main.o bnx2x_link.o
88spidernet-y += spider_net.o spider_net_ethtool.o 88spidernet-y += spider_net.o spider_net_ethtool.o
89obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o 89obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
90obj-$(CONFIG_GELIC_NET) += ps3_gelic.o 90obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
@@ -275,7 +275,7 @@ obj-$(CONFIG_USB_USBNET) += usb/
275obj-$(CONFIG_USB_ZD1201) += usb/ 275obj-$(CONFIG_USB_ZD1201) += usb/
276obj-$(CONFIG_USB_IPHETH) += usb/ 276obj-$(CONFIG_USB_IPHETH) += usb/
277 277
278obj-y += wireless/ 278obj-$(CONFIG_WLAN) += wireless/
279obj-$(CONFIG_NET_TULIP) += tulip/ 279obj-$(CONFIG_NET_TULIP) += tulip/
280obj-$(CONFIG_HAMRADIO) += hamradio/ 280obj-$(CONFIG_HAMRADIO) += hamradio/
281obj-$(CONFIG_IRDA) += irda/ 281obj-$(CONFIG_IRDA) += irda/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3b79c6cf21a3..9bb405bd664e 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -218,12 +218,6 @@ static struct devprobe2 isa_probes[] __initdata = {
218#ifdef CONFIG_EL1 /* 3c501 */ 218#ifdef CONFIG_EL1 /* 3c501 */
219 {el1_probe, 0}, 219 {el1_probe, 0},
220#endif 220#endif
221#ifdef CONFIG_WAVELAN /* WaveLAN */
222 {wavelan_probe, 0},
223#endif
224#ifdef CONFIG_ARLAN /* Aironet */
225 {arlan_probe, 0},
226#endif
227#ifdef CONFIG_EL16 /* 3c507 */ 221#ifdef CONFIG_EL16 /* 3c507 */
228 {el16_probe, 0}, 222 {el16_probe, 0},
229#endif 223#endif
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index b9115a776fdd..5181e9322119 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -211,7 +211,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
211 retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); 211 retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
212 if (retval) { 212 if (retval) {
213 printk (" nothing! Unable to get IRQ %d.\n", dev->irq); 213 printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
214 goto out1; 214 goto out;
215 } 215 }
216 216
217 printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]); 217 printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]);
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 79636ee35829..0362c8d31a08 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -80,7 +80,7 @@ static struct net_device * __init ipddp_init(void)
80 if (version_printed++ == 0) 80 if (version_printed++ == 0)
81 printk(version); 81 printk(version);
82 82
83 /* Initalize the device structure. */ 83 /* Initialize the device structure. */
84 dev->netdev_ops = &ipddp_netdev_ops; 84 dev->netdev_ops = &ipddp_netdev_ops;
85 85
86 dev->type = ARPHRD_IPDDP; /* IP over DDP tunnel */ 86 dev->type = ARPHRD_IPDDP; /* IP over DDP tunnel */
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 355797f70048..42fce91b71fc 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -37,69 +37,6 @@
37 37
38#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n" 38#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
39 39
40
41static void rx(struct net_device *dev, int bufnum,
42 struct archdr *pkthdr, int length);
43static int build_header(struct sk_buff *skb,
44 struct net_device *dev,
45 unsigned short type,
46 uint8_t daddr);
47static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
48 int bufnum);
49static int ack_tx(struct net_device *dev, int acked);
50
51
52static struct ArcProto capmode_proto =
53{
54 'r',
55 XMTU,
56 0,
57 rx,
58 build_header,
59 prepare_tx,
60 NULL,
61 ack_tx
62};
63
64
65static void arcnet_cap_init(void)
66{
67 int count;
68
69 for (count = 1; count <= 8; count++)
70 if (arc_proto_map[count] == arc_proto_default)
71 arc_proto_map[count] = &capmode_proto;
72
73 /* for cap mode, we only set the bcast proto if there's no better one */
74 if (arc_bcast_proto == arc_proto_default)
75 arc_bcast_proto = &capmode_proto;
76
77 arc_proto_default = &capmode_proto;
78 arc_raw_proto = &capmode_proto;
79}
80
81
82#ifdef MODULE
83
84static int __init capmode_module_init(void)
85{
86 printk(VERSION);
87 arcnet_cap_init();
88 return 0;
89}
90
91static void __exit capmode_module_exit(void)
92{
93 arcnet_unregister_proto(&capmode_proto);
94}
95module_init(capmode_module_init);
96module_exit(capmode_module_exit);
97
98MODULE_LICENSE("GPL");
99#endif /* MODULE */
100
101
102
103/* packet receiver */ 40/* packet receiver */
104static void rx(struct net_device *dev, int bufnum, 41static void rx(struct net_device *dev, int bufnum,
105 struct archdr *pkthdr, int length) 42 struct archdr *pkthdr, int length)
@@ -231,65 +168,107 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
231 BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n", 168 BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
232 length,ofs); 169 length,ofs);
233 170
234 // Copy the arcnet-header + the protocol byte down: 171 /* Copy the arcnet-header + the protocol byte down: */
235 lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE); 172 lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
236 lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto, 173 lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
237 sizeof(pkt->soft.cap.proto)); 174 sizeof(pkt->soft.cap.proto));
238 175
239 // Skip the extra integer we have written into it as a cookie 176 /* Skip the extra integer we have written into it as a cookie
240 // but write the rest of the message: 177 but write the rest of the message: */
241 lp->hw.copy_to_card(dev, bufnum, ofs+1, 178 lp->hw.copy_to_card(dev, bufnum, ofs+1,
242 ((unsigned char*)&pkt->soft.cap.mes),length-1); 179 ((unsigned char*)&pkt->soft.cap.mes),length-1);
243 180
244 lp->lastload_dest = hard->dest; 181 lp->lastload_dest = hard->dest;
245 182
246 return 1; /* done */ 183 return 1; /* done */
247} 184}
248 185
249
250static int ack_tx(struct net_device *dev, int acked) 186static int ack_tx(struct net_device *dev, int acked)
251{ 187{
252 struct arcnet_local *lp = netdev_priv(dev); 188 struct arcnet_local *lp = netdev_priv(dev);
253 struct sk_buff *ackskb; 189 struct sk_buff *ackskb;
254 struct archdr *ackpkt; 190 struct archdr *ackpkt;
255 int length=sizeof(struct arc_cap); 191 int length=sizeof(struct arc_cap);
256 192
257 BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n", 193 BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
258 lp->outgoing.skb->protocol, acked); 194 lp->outgoing.skb->protocol, acked);
259 195
260 BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx"); 196 BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
261 197
262 /* Now alloc a skb to send back up through the layers: */ 198 /* Now alloc a skb to send back up through the layers: */
263 ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC); 199 ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
264 if (ackskb == NULL) { 200 if (ackskb == NULL) {
265 BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n"); 201 BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
266 goto free_outskb; 202 goto free_outskb;
267 } 203 }
204
205 skb_put(ackskb, length + ARC_HDR_SIZE );
206 ackskb->dev = dev;
207
208 skb_reset_mac_header(ackskb);
209 ackpkt = (struct archdr *)skb_mac_header(ackskb);
210 /* skb_pull(ackskb, ARC_HDR_SIZE); */
268 211
269 skb_put(ackskb, length + ARC_HDR_SIZE ); 212 skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
270 ackskb->dev = dev; 213 ARC_HDR_SIZE + sizeof(struct arc_cap));
214 ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
215 ackpkt->soft.cap.mes.ack=acked;
271 216
272 skb_reset_mac_header(ackskb); 217 BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
273 ackpkt = (struct archdr *)skb_mac_header(ackskb); 218 *((int*)&ackpkt->soft.cap.cookie[0]));
274 /* skb_pull(ackskb, ARC_HDR_SIZE); */
275 219
220 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
276 221
277 skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, 222 BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
278 ARC_HDR_SIZE + sizeof(struct arc_cap)); 223 netif_rx(ackskb);
279 ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
280 ackpkt->soft.cap.mes.ack=acked;
281 224
282 BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", 225free_outskb:
283 *((int*)&ackpkt->soft.cap.cookie[0])); 226 dev_kfree_skb_irq(lp->outgoing.skb);
227 lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
284 228
285 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); 229 return 0;
230}
286 231
287 BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); 232static struct ArcProto capmode_proto =
288 netif_rx(ackskb); 233{
234 'r',
235 XMTU,
236 0,
237 rx,
238 build_header,
239 prepare_tx,
240 NULL,
241 ack_tx
242};
289 243
290 free_outskb: 244static void arcnet_cap_init(void)
291 dev_kfree_skb_irq(lp->outgoing.skb); 245{
292 lp->outgoing.proto = NULL; /* We are always finished when in this protocol */ 246 int count;
293 247
294 return 0; 248 for (count = 1; count <= 8; count++)
249 if (arc_proto_map[count] == arc_proto_default)
250 arc_proto_map[count] = &capmode_proto;
251
252 /* for cap mode, we only set the bcast proto if there's no better one */
253 if (arc_bcast_proto == arc_proto_default)
254 arc_bcast_proto = &capmode_proto;
255
256 arc_proto_default = &capmode_proto;
257 arc_raw_proto = &capmode_proto;
295} 258}
259
260static int __init capmode_module_init(void)
261{
262 printk(VERSION);
263 arcnet_cap_init();
264 return 0;
265}
266
267static void __exit capmode_module_exit(void)
268{
269 arcnet_unregister_proto(&capmode_proto);
270}
271module_init(capmode_module_init);
272module_exit(capmode_module_exit);
273
274MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 0402da30a4ed..37272827ee55 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -90,14 +90,14 @@ static int __init com20020isa_probe(struct net_device *dev)
90 outb(0, _INTMASK); 90 outb(0, _INTMASK);
91 dev->irq = probe_irq_off(airqmask); 91 dev->irq = probe_irq_off(airqmask);
92 92
93 if (dev->irq <= 0) { 93 if ((int)dev->irq <= 0) {
94 BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed first time\n"); 94 BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed first time\n");
95 airqmask = probe_irq_on(); 95 airqmask = probe_irq_on();
96 outb(NORXflag, _INTMASK); 96 outb(NORXflag, _INTMASK);
97 udelay(5); 97 udelay(5);
98 outb(0, _INTMASK); 98 outb(0, _INTMASK);
99 dev->irq = probe_irq_off(airqmask); 99 dev->irq = probe_irq_off(airqmask);
100 if (dev->irq <= 0) { 100 if ((int)dev->irq <= 0) {
101 BUGMSG(D_NORMAL, "Autoprobe IRQ failed.\n"); 101 BUGMSG(D_NORMAL, "Autoprobe IRQ failed.\n");
102 err = -ENODEV; 102 err = -ENODEV;
103 goto out; 103 goto out;
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 4cb401813b7e..eb27976dab37 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -213,7 +213,7 @@ static int __init com90io_probe(struct net_device *dev)
213 outb(0, _INTMASK); 213 outb(0, _INTMASK);
214 dev->irq = probe_irq_off(airqmask); 214 dev->irq = probe_irq_off(airqmask);
215 215
216 if (dev->irq <= 0) { 216 if ((int)dev->irq <= 0) {
217 BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed\n"); 217 BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed\n");
218 goto err_out; 218 goto err_out;
219 } 219 }
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 24df0325090c..4f1cc7164ad9 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -738,6 +738,17 @@ static void eth_set_mcast_list(struct net_device *dev)
738 struct netdev_hw_addr *ha; 738 struct netdev_hw_addr *ha;
739 u8 diffs[ETH_ALEN], *addr; 739 u8 diffs[ETH_ALEN], *addr;
740 int i; 740 int i;
741 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
742
743 if (dev->flags & IFF_ALLMULTI) {
744 for (i = 0; i < ETH_ALEN; i++) {
745 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
746 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
747 }
748 __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
749 &port->regs->rx_control[0]);
750 return;
751 }
741 752
742 if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) { 753 if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
743 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, 754 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
@@ -771,7 +782,8 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
771 782
772 if (!netif_running(dev)) 783 if (!netif_running(dev))
773 return -EINVAL; 784 return -EINVAL;
774 return phy_mii_ioctl(port->phydev, if_mii(req), cmd); 785
786 return phy_mii_ioctl(port->phydev, req, cmd);
775} 787}
776 788
777/* ethtool support */ 789/* ethtool support */
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 2e852463382b..4545d5a06c24 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -822,6 +822,9 @@ static int w90p910_ether_open(struct net_device *dev)
822 w90p910_set_global_maccmd(dev); 822 w90p910_set_global_maccmd(dev);
823 w90p910_enable_rx(dev, 1); 823 w90p910_enable_rx(dev, 1);
824 824
825 clk_enable(ether->rmiiclk);
826 clk_enable(ether->clk);
827
825 ether->rx_packets = 0x0; 828 ether->rx_packets = 0x0;
826 ether->rx_bytes = 0x0; 829 ether->rx_bytes = 0x0;
827 830
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 93185f5f09ac..89876897a6fe 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -811,10 +811,8 @@ static int net_close(struct net_device *dev)
811 /* No statistic counters on the chip to update. */ 811 /* No statistic counters on the chip to update. */
812 812
813 /* Disable the IRQ on boards of fmv18x where it is feasible. */ 813 /* Disable the IRQ on boards of fmv18x where it is feasible. */
814 if (lp->jumpered) { 814 if (lp->jumpered)
815 outb(0x00, ioaddr + IOCONFIG1); 815 outb(0x00, ioaddr + IOCONFIG1);
816 free_irq(dev->irq, dev);
817 }
818 816
819 /* Power-down the chip. Green, green, green! */ 817 /* Power-down the chip. Green, green, green! */
820 outb(0x00, ioaddr + CONFIG_1); 818 outb(0x00, ioaddr + CONFIG_1);
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 84ae905bf732..52abbbdf8a08 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -73,7 +73,8 @@
73#define FULL_DUPLEX 2 73#define FULL_DUPLEX 2
74 74
75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) 75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
76#define MAX_JUMBO_FRAME_SIZE (9*1024) 76#define MAX_JUMBO_FRAME_SIZE (6*1024)
77#define MAX_TSO_FRAME_SIZE (7*1024)
77#define MAX_TX_OFFLOAD_THRESH (9*1024) 78#define MAX_TX_OFFLOAD_THRESH (9*1024)
78 79
79#define AT_MAX_RECEIVE_QUEUE 4 80#define AT_MAX_RECEIVE_QUEUE 4
@@ -87,10 +88,11 @@
87#define AT_MAX_INT_WORK 5 88#define AT_MAX_INT_WORK 5
88#define AT_TWSI_EEPROM_TIMEOUT 100 89#define AT_TWSI_EEPROM_TIMEOUT 100
89#define AT_HW_MAX_IDLE_DELAY 10 90#define AT_HW_MAX_IDLE_DELAY 10
90#define AT_SUSPEND_LINK_TIMEOUT 28 91#define AT_SUSPEND_LINK_TIMEOUT 100
91 92
92#define AT_ASPM_L0S_TIMER 6 93#define AT_ASPM_L0S_TIMER 6
93#define AT_ASPM_L1_TIMER 12 94#define AT_ASPM_L1_TIMER 12
95#define AT_LCKDET_TIMER 12
94 96
95#define ATL1C_PCIE_L0S_L1_DISABLE 0x01 97#define ATL1C_PCIE_L0S_L1_DISABLE 0x01
96#define ATL1C_PCIE_PHY_RESET 0x02 98#define ATL1C_PCIE_PHY_RESET 0x02
@@ -316,6 +318,7 @@ enum atl1c_nic_type {
316 athr_l2c_b, 318 athr_l2c_b,
317 athr_l2c_b2, 319 athr_l2c_b2,
318 athr_l1d, 320 athr_l1d,
321 athr_l1d_2,
319}; 322};
320 323
321enum atl1c_trans_queue { 324enum atl1c_trans_queue {
@@ -392,6 +395,8 @@ struct atl1c_hw {
392 u16 subsystem_id; 395 u16 subsystem_id;
393 u16 subsystem_vendor_id; 396 u16 subsystem_vendor_id;
394 u8 revision_id; 397 u8 revision_id;
398 u16 phy_id1;
399 u16 phy_id2;
395 400
396 u32 intr_mask; 401 u32 intr_mask;
397 u8 dmaw_dly_cnt; 402 u8 dmaw_dly_cnt;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index f1389d664a21..d8501f060957 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -37,6 +37,9 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
37 if (data & TWSI_DEBUG_DEV_EXIST) 37 if (data & TWSI_DEBUG_DEV_EXIST)
38 return 1; 38 return 1;
39 39
40 AT_READ_REG(hw, REG_MASTER_CTRL, &data);
41 if (data & MASTER_CTRL_OTP_SEL)
42 return 1;
40 return 0; 43 return 0;
41} 44}
42 45
@@ -69,6 +72,8 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
69 u32 i; 72 u32 i;
70 u32 otp_ctrl_data; 73 u32 otp_ctrl_data;
71 u32 twsi_ctrl_data; 74 u32 twsi_ctrl_data;
75 u32 ltssm_ctrl_data;
76 u32 wol_data;
72 u8 eth_addr[ETH_ALEN]; 77 u8 eth_addr[ETH_ALEN];
73 u16 phy_data; 78 u16 phy_data;
74 bool raise_vol = false; 79 bool raise_vol = false;
@@ -104,6 +109,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
104 udelay(20); 109 udelay(20);
105 raise_vol = true; 110 raise_vol = true;
106 } 111 }
112 /* close open bit of ReadOnly*/
113 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
114 ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
115 AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
116
117 /* clear any WOL settings */
118 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
119 AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
120
107 121
108 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); 122 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
109 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; 123 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -119,17 +133,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
119 } 133 }
120 /* Disable OTP_CLK */ 134 /* Disable OTP_CLK */
121 if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { 135 if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
122 if (otp_ctrl_data & OTP_CTRL_CLK_EN) { 136 otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
123 otp_ctrl_data &= ~OTP_CTRL_CLK_EN; 137 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
124 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); 138 msleep(1);
125 AT_WRITE_FLUSH(hw);
126 msleep(1);
127 }
128 } 139 }
129 if (raise_vol) { 140 if (raise_vol) {
130 if (hw->nic_type == athr_l2c_b || 141 if (hw->nic_type == athr_l2c_b ||
131 hw->nic_type == athr_l2c_b2 || 142 hw->nic_type == athr_l2c_b2 ||
132 hw->nic_type == athr_l1d) { 143 hw->nic_type == athr_l1d ||
144 hw->nic_type == athr_l1d_2) {
133 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); 145 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
134 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) 146 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
135 goto out; 147 goto out;
@@ -456,14 +468,22 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
456 468
457 if (hw->nic_type == athr_l2c_b || 469 if (hw->nic_type == athr_l2c_b ||
458 hw->nic_type == athr_l2c_b2 || 470 hw->nic_type == athr_l2c_b2 ||
459 hw->nic_type == athr_l1d) { 471 hw->nic_type == athr_l1d ||
472 hw->nic_type == athr_l1d_2) {
460 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); 473 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
461 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); 474 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
462 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7); 475 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
463 msleep(20); 476 msleep(20);
464 } 477 }
465 478 if (hw->nic_type == athr_l1d) {
466 /*Enable PHY LinkChange Interrupt */ 479 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
480 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
481 }
482 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
483 || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) {
484 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
485 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
486 }
467 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); 487 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
468 if (err) { 488 if (err) {
469 if (netif_msg_hw(adapter)) 489 if (netif_msg_hw(adapter))
@@ -482,12 +502,10 @@ int atl1c_phy_init(struct atl1c_hw *hw)
482 struct pci_dev *pdev = adapter->pdev; 502 struct pci_dev *pdev = adapter->pdev;
483 int ret_val; 503 int ret_val;
484 u16 mii_bmcr_data = BMCR_RESET; 504 u16 mii_bmcr_data = BMCR_RESET;
485 u16 phy_id1, phy_id2;
486 505
487 if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) || 506 if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
488 (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) { 507 (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
489 if (netif_msg_link(adapter)) 508 dev_err(&pdev->dev, "Error get phy ID\n");
490 dev_err(&pdev->dev, "Error get phy ID\n");
491 return -1; 509 return -1;
492 } 510 }
493 switch (hw->media_type) { 511 switch (hw->media_type) {
@@ -572,6 +590,65 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
572 return 0; 590 return 0;
573} 591}
574 592
593int atl1c_phy_power_saving(struct atl1c_hw *hw)
594{
595 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
596 struct pci_dev *pdev = adapter->pdev;
597 int ret = 0;
598 u16 autoneg_advertised = ADVERTISED_10baseT_Half;
599 u16 save_autoneg_advertised;
600 u16 phy_data;
601 u16 mii_lpa_data;
602 u16 speed = SPEED_0;
603 u16 duplex = FULL_DUPLEX;
604 int i;
605
606 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
607 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
608 if (phy_data & BMSR_LSTATUS) {
609 atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data);
610 if (mii_lpa_data & LPA_10FULL)
611 autoneg_advertised = ADVERTISED_10baseT_Full;
612 else if (mii_lpa_data & LPA_10HALF)
613 autoneg_advertised = ADVERTISED_10baseT_Half;
614 else if (mii_lpa_data & LPA_100HALF)
615 autoneg_advertised = ADVERTISED_100baseT_Half;
616 else if (mii_lpa_data & LPA_100FULL)
617 autoneg_advertised = ADVERTISED_100baseT_Full;
618
619 save_autoneg_advertised = hw->autoneg_advertised;
620 hw->phy_configured = false;
621 hw->autoneg_advertised = autoneg_advertised;
622 if (atl1c_restart_autoneg(hw) != 0) {
623 dev_dbg(&pdev->dev, "phy autoneg failed\n");
624 ret = -1;
625 }
626 hw->autoneg_advertised = save_autoneg_advertised;
627
628 if (mii_lpa_data) {
629 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
630 mdelay(100);
631 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
632 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
633 if (phy_data & BMSR_LSTATUS) {
634 if (atl1c_get_speed_and_duplex(hw, &speed,
635 &duplex) != 0)
636 dev_dbg(&pdev->dev,
637 "get speed and duplex failed\n");
638 break;
639 }
640 }
641 }
642 } else {
643 speed = SPEED_10;
644 duplex = HALF_DUPLEX;
645 }
646 adapter->link_speed = speed;
647 adapter->link_duplex = duplex;
648
649 return ret;
650}
651
575int atl1c_restart_autoneg(struct atl1c_hw *hw) 652int atl1c_restart_autoneg(struct atl1c_hw *hw)
576{ 653{
577 int err = 0; 654 int err = 0;
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 1eeb3ed9f0cb..3dd675979aa1 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -42,7 +42,7 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
42int atl1c_phy_init(struct atl1c_hw *hw); 42int atl1c_phy_init(struct atl1c_hw *hw);
43int atl1c_check_eeprom_exist(struct atl1c_hw *hw); 43int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
44int atl1c_restart_autoneg(struct atl1c_hw *hw); 44int atl1c_restart_autoneg(struct atl1c_hw *hw);
45 45int atl1c_phy_power_saving(struct atl1c_hw *hw);
46/* register definition */ 46/* register definition */
47#define REG_DEVICE_CAP 0x5C 47#define REG_DEVICE_CAP 0x5C
48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
@@ -120,6 +120,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
120#define REG_PCIE_PHYMISC 0x1000 120#define REG_PCIE_PHYMISC 0x1000
121#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 121#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
122 122
123#define REG_PCIE_PHYMISC2 0x1004
124#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3
125#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16
126#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3
127#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18
128
123#define REG_TWSI_DEBUG 0x1108 129#define REG_TWSI_DEBUG 0x1108
124#define TWSI_DEBUG_DEV_EXIST 0x20000000 130#define TWSI_DEBUG_DEV_EXIST 0x20000000
125 131
@@ -150,24 +156,28 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
150#define PM_CTRL_ASPM_L0S_EN 0x00001000 156#define PM_CTRL_ASPM_L0S_EN 0x00001000
151#define PM_CTRL_CLK_SWH_L1 0x00002000 157#define PM_CTRL_CLK_SWH_L1 0x00002000
152#define PM_CTRL_CLK_PWM_VER1_1 0x00004000 158#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
153#define PM_CTRL_PCIE_RECV 0x00008000 159#define PM_CTRL_RCVR_WT_TIMER 0x00008000
154#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF 160#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
155#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 161#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
156#define PM_CTRL_PM_REQ_TIMER_MASK 0xF 162#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
157#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 163#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
158#define PM_CTRL_LCKDET_TIMER_MASK 0x3F 164#define PM_CTRL_LCKDET_TIMER_MASK 0xF
159#define PM_CTRL_LCKDET_TIMER_SHIFT 24 165#define PM_CTRL_LCKDET_TIMER_SHIFT 24
160#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 166#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
161#define PM_CTRL_SA_DLY_EN 0x20000000 167#define PM_CTRL_SA_DLY_EN 0x20000000
162#define PM_CTRL_MAC_ASPM_CHK 0x40000000 168#define PM_CTRL_MAC_ASPM_CHK 0x40000000
163#define PM_CTRL_HOTRST 0x80000000 169#define PM_CTRL_HOTRST 0x80000000
164 170
171#define REG_LTSSM_ID_CTRL 0x12FC
172#define LTSSM_ID_EN_WRO 0x1000
165/* Selene Master Control Register */ 173/* Selene Master Control Register */
166#define REG_MASTER_CTRL 0x1400 174#define REG_MASTER_CTRL 0x1400
167#define MASTER_CTRL_SOFT_RST 0x1 175#define MASTER_CTRL_SOFT_RST 0x1
168#define MASTER_CTRL_TEST_MODE_MASK 0x3 176#define MASTER_CTRL_TEST_MODE_MASK 0x3
169#define MASTER_CTRL_TEST_MODE_SHIFT 2 177#define MASTER_CTRL_TEST_MODE_SHIFT 2
170#define MASTER_CTRL_BERT_START 0x10 178#define MASTER_CTRL_BERT_START 0x10
179#define MASTER_CTRL_OOB_DIS_OFF 0x40
180#define MASTER_CTRL_SA_TIMER_EN 0x80
171#define MASTER_CTRL_MTIMER_EN 0x100 181#define MASTER_CTRL_MTIMER_EN 0x100
172#define MASTER_CTRL_MANUAL_INT 0x200 182#define MASTER_CTRL_MANUAL_INT 0x200
173#define MASTER_CTRL_TX_ITIMER_EN 0x400 183#define MASTER_CTRL_TX_ITIMER_EN 0x400
@@ -220,6 +230,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
220 GPHY_CTRL_PWDOWN_HW |\ 230 GPHY_CTRL_PWDOWN_HW |\
221 GPHY_CTRL_PHY_IDDQ) 231 GPHY_CTRL_PHY_IDDQ)
222 232
233#define GPHY_CTRL_POWER_SAVING ( \
234 GPHY_CTRL_SEL_ANA_RST |\
235 GPHY_CTRL_HIB_EN |\
236 GPHY_CTRL_HIB_PULSE |\
237 GPHY_CTRL_PWDOWN_HW |\
238 GPHY_CTRL_PHY_IDDQ)
223/* Block IDLE Status Register */ 239/* Block IDLE Status Register */
224#define REG_IDLE_STATUS 0x1410 240#define REG_IDLE_STATUS 0x1410
225#define IDLE_STATUS_MASK 0x00FF 241#define IDLE_STATUS_MASK 0x00FF
@@ -287,6 +303,14 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
287#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal 303#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal
288 * comes from Analog SerDes */ 304 * comes from Analog SerDes */
289#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ 305#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */
306#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE
307#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3
308#define SERDES_OVCLK_18_25 0x0
309#define SERDES_OVCLK_12_18 0x1
310#define SERDES_OVCLK_0_4 0x2
311#define SERDES_OVCLK_4_12 0x3
312#define SERDES_MAC_CLK_SLOWDOWN 0x20000
313#define SERDES_PYH_CLK_SLOWDOWN 0x40000
290 314
291/* MAC Control Register */ 315/* MAC Control Register */
292#define REG_MAC_CTRL 0x1480 316#define REG_MAC_CTRL 0x1480
@@ -693,6 +717,21 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
693#define REG_MAC_TX_STATUS_BIN 0x1760 717#define REG_MAC_TX_STATUS_BIN 0x1760
694#define REG_MAC_TX_STATUS_END 0x17c0 718#define REG_MAC_TX_STATUS_END 0x17c0
695 719
720#define REG_CLK_GATING_CTRL 0x1814
721#define CLK_GATING_DMAW_EN 0x0001
722#define CLK_GATING_DMAR_EN 0x0002
723#define CLK_GATING_TXQ_EN 0x0004
724#define CLK_GATING_RXQ_EN 0x0008
725#define CLK_GATING_TXMAC_EN 0x0010
726#define CLK_GATING_RXMAC_EN 0x0020
727
728#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\
729 CLK_GATING_DMAR_EN |\
730 CLK_GATING_TXQ_EN |\
731 CLK_GATING_RXQ_EN |\
732 CLK_GATING_TXMAC_EN|\
733 CLK_GATING_RXMAC_EN)
734
696/* DEBUG ADDR */ 735/* DEBUG ADDR */
697#define REG_DEBUG_DATA0 0x1900 736#define REG_DEBUG_DATA0 0x1900
698#define REG_DEBUG_DATA1 0x1904 737#define REG_DEBUG_DATA1 0x1904
@@ -734,6 +773,10 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
734 773
735#define MII_PHYSID1 0x02 774#define MII_PHYSID1 0x02
736#define MII_PHYSID2 0x03 775#define MII_PHYSID2 0x03
776#define L1D_MPW_PHYID1 0xD01C /* V7 */
777#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
778#define L1D_MPW_PHYID3 0xD01E /* V8 */
779
737 780
738/* Autoneg Advertisement Register */ 781/* Autoneg Advertisement Register */
739#define MII_ADVERTISE 0x04 782#define MII_ADVERTISE 0x04
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1c3c046d5f34..c7b8ef507ebd 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
21 21
22#include "atl1c.h" 22#include "atl1c.h"
23 23
24#define ATL1C_DRV_VERSION "1.0.0.2-NAPI" 24#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
25char atl1c_driver_name[] = "atl1c"; 25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION; 26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
@@ -29,7 +29,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
29#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ 29#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
30#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ 30#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
31#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ 31#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
32 32#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
33#define L2CB_V10 0xc0 33#define L2CB_V10 0xc0
34#define L2CB_V11 0xc1 34#define L2CB_V11 0xc1
35 35
@@ -97,7 +97,28 @@ static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
97 97
98static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 98static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
99 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; 99 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
100static void atl1c_pcie_patch(struct atl1c_hw *hw)
101{
102 u32 data;
100 103
104 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
105 data |= PCIE_PHYMISC_FORCE_RCV_DET;
106 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
107
108 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
109 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
110
111 data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
112 PCIE_PHYMISC2_SERDES_CDR_SHIFT);
113 data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
114 data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
115 PCIE_PHYMISC2_SERDES_TH_SHIFT);
116 data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
117 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
118 }
119}
120
121/* FIXME: no need any more ? */
101/* 122/*
102 * atl1c_init_pcie - init PCIE module 123 * atl1c_init_pcie - init PCIE module
103 */ 124 */
@@ -127,6 +148,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
127 data &= ~PCIE_UC_SERVRITY_FCP; 148 data &= ~PCIE_UC_SERVRITY_FCP;
128 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); 149 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
129 150
151 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
152 data &= ~LTSSM_ID_EN_WRO;
153 AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
154
155 atl1c_pcie_patch(hw);
130 if (flag & ATL1C_PCIE_L0S_L1_DISABLE) 156 if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
131 atl1c_disable_l0s_l1(hw); 157 atl1c_disable_l0s_l1(hw);
132 if (flag & ATL1C_PCIE_PHY_RESET) 158 if (flag & ATL1C_PCIE_PHY_RESET)
@@ -135,7 +161,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
135 AT_WRITE_REG(hw, REG_GPHY_CTRL, 161 AT_WRITE_REG(hw, REG_GPHY_CTRL,
136 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); 162 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
137 163
138 msleep(1); 164 msleep(5);
139} 165}
140 166
141/* 167/*
@@ -159,6 +185,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
159{ 185{
160 atomic_inc(&adapter->irq_sem); 186 atomic_inc(&adapter->irq_sem);
161 AT_WRITE_REG(&adapter->hw, REG_IMR, 0); 187 AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
188 AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
162 AT_WRITE_FLUSH(&adapter->hw); 189 AT_WRITE_FLUSH(&adapter->hw);
163 synchronize_irq(adapter->pdev->irq); 190 synchronize_irq(adapter->pdev->irq);
164} 191}
@@ -231,15 +258,15 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
231 258
232 if ((phy_data & BMSR_LSTATUS) == 0) { 259 if ((phy_data & BMSR_LSTATUS) == 0) {
233 /* link down */ 260 /* link down */
234 if (netif_carrier_ok(netdev)) { 261 hw->hibernate = true;
235 hw->hibernate = true; 262 if (atl1c_stop_mac(hw) != 0)
236 if (atl1c_stop_mac(hw) != 0) 263 if (netif_msg_hw(adapter))
237 if (netif_msg_hw(adapter)) 264 dev_warn(&pdev->dev, "stop mac failed\n");
238 dev_warn(&pdev->dev, 265 atl1c_set_aspm(hw, false);
239 "stop mac failed\n");
240 atl1c_set_aspm(hw, false);
241 }
242 netif_carrier_off(netdev); 266 netif_carrier_off(netdev);
267 netif_stop_queue(netdev);
268 atl1c_phy_reset(hw);
269 atl1c_phy_init(&adapter->hw);
243 } else { 270 } else {
244 /* Link Up */ 271 /* Link Up */
245 hw->hibernate = false; 272 hw->hibernate = false;
@@ -308,6 +335,7 @@ static void atl1c_common_task(struct work_struct *work)
308 netdev = adapter->netdev; 335 netdev = adapter->netdev;
309 336
310 if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { 337 if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
338 adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
311 netif_device_detach(netdev); 339 netif_device_detach(netdev);
312 atl1c_down(adapter); 340 atl1c_down(adapter);
313 atl1c_up(adapter); 341 atl1c_up(adapter);
@@ -315,8 +343,11 @@ static void atl1c_common_task(struct work_struct *work)
315 return; 343 return;
316 } 344 }
317 345
318 if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) 346 if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
347 adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
319 atl1c_check_link_status(adapter); 348 atl1c_check_link_status(adapter);
349 }
350 return;
320} 351}
321 352
322 353
@@ -476,6 +507,13 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
476 netdev->mtu = new_mtu; 507 netdev->mtu = new_mtu;
477 adapter->hw.max_frame_size = new_mtu; 508 adapter->hw.max_frame_size = new_mtu;
478 atl1c_set_rxbufsize(adapter, netdev); 509 atl1c_set_rxbufsize(adapter, netdev);
510 if (new_mtu > MAX_TSO_FRAME_SIZE) {
511 adapter->netdev->features &= ~NETIF_F_TSO;
512 adapter->netdev->features &= ~NETIF_F_TSO6;
513 } else {
514 adapter->netdev->features |= NETIF_F_TSO;
515 adapter->netdev->features |= NETIF_F_TSO6;
516 }
479 atl1c_down(adapter); 517 atl1c_down(adapter);
480 atl1c_up(adapter); 518 atl1c_up(adapter);
481 clear_bit(__AT_RESETTING, &adapter->flags); 519 clear_bit(__AT_RESETTING, &adapter->flags);
@@ -613,6 +651,9 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
613 case PCI_DEVICE_ID_ATHEROS_L1D: 651 case PCI_DEVICE_ID_ATHEROS_L1D:
614 hw->nic_type = athr_l1d; 652 hw->nic_type = athr_l1d;
615 break; 653 break;
654 case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
655 hw->nic_type = athr_l1d_2;
656 break;
616 default: 657 default:
617 break; 658 break;
618 } 659 }
@@ -627,9 +668,7 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
627 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data); 668 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
628 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 669 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
629 670
630 hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ | 671 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
631 ATL1C_INTR_MODRT_ENABLE |
632 ATL1C_RX_IPV6_CHKSUM |
633 ATL1C_TXQ_MODE_ENHANCE; 672 ATL1C_TXQ_MODE_ENHANCE;
634 if (link_ctrl_data & LINK_CTRL_L0S_EN) 673 if (link_ctrl_data & LINK_CTRL_L0S_EN)
635 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; 674 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
@@ -637,12 +676,12 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
637 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; 676 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
638 if (link_ctrl_data & LINK_CTRL_EXT_SYNC) 677 if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
639 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC; 678 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
679 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
640 680
641 if (hw->nic_type == athr_l1c || 681 if (hw->nic_type == athr_l1c ||
642 hw->nic_type == athr_l1d) { 682 hw->nic_type == athr_l1d ||
643 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; 683 hw->nic_type == athr_l1d_2)
644 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; 684 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
645 }
646 return 0; 685 return 0;
647} 686}
648/* 687/*
@@ -657,6 +696,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
657{ 696{
658 struct atl1c_hw *hw = &adapter->hw; 697 struct atl1c_hw *hw = &adapter->hw;
659 struct pci_dev *pdev = adapter->pdev; 698 struct pci_dev *pdev = adapter->pdev;
699 u32 revision;
700
660 701
661 adapter->wol = 0; 702 adapter->wol = 0;
662 adapter->link_speed = SPEED_0; 703 adapter->link_speed = SPEED_0;
@@ -669,7 +710,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
669 hw->device_id = pdev->device; 710 hw->device_id = pdev->device;
670 hw->subsystem_vendor_id = pdev->subsystem_vendor; 711 hw->subsystem_vendor_id = pdev->subsystem_vendor;
671 hw->subsystem_id = pdev->subsystem_device; 712 hw->subsystem_id = pdev->subsystem_device;
672 713 AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
714 hw->revision_id = revision & 0xFF;
673 /* before link up, we assume hibernate is true */ 715 /* before link up, we assume hibernate is true */
674 hw->hibernate = true; 716 hw->hibernate = true;
675 hw->media_type = MEDIA_TYPE_AUTO_SENSOR; 717 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
@@ -974,6 +1016,7 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
974 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb; 1016 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
975 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb; 1017 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
976 int i; 1018 int i;
1019 u32 data;
977 1020
978 /* TPD */ 1021 /* TPD */
979 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, 1022 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
@@ -1017,6 +1060,23 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1017 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32)); 1060 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1018 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO, 1061 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
1019 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK)); 1062 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
1063 if (hw->nic_type == athr_l2c_b) {
1064 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1065 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
1066 AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
1067 AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
1068 AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
1069 AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
1070 AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/
1071 AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/
1072 }
1073 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
1074 /* Power Saving for L2c_B */
1075 AT_READ_REG(hw, REG_SERDES_LOCK, &data);
1076 data |= SERDES_MAC_CLK_SLOWDOWN;
1077 data |= SERDES_PYH_CLK_SLOWDOWN;
1078 AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
1079 }
1020 /* Load all of base address above */ 1080 /* Load all of base address above */
1021 AT_WRITE_REG(hw, REG_LOAD_PTR, 1); 1081 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
1022} 1082}
@@ -1029,6 +1089,7 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1029 u16 tx_offload_thresh; 1089 u16 tx_offload_thresh;
1030 u32 txq_ctrl_data; 1090 u32 txq_ctrl_data;
1031 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ 1091 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
1092 u32 max_pay_load_data;
1032 1093
1033 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; 1094 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
1034 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; 1095 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
@@ -1046,8 +1107,11 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1046 TXQ_NUM_TPD_BURST_SHIFT; 1107 TXQ_NUM_TPD_BURST_SHIFT;
1047 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) 1108 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
1048 txq_ctrl_data |= TXQ_CTRL_ENH_MODE; 1109 txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
1049 txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] & 1110 max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
1050 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; 1111 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
1112 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
1113 max_pay_load_data >>= 1;
1114 txq_ctrl_data |= max_pay_load_data;
1051 1115
1052 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); 1116 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1053} 1117}
@@ -1078,7 +1142,7 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1078 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) << 1142 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
1079 RSS_HASH_BITS_SHIFT; 1143 RSS_HASH_BITS_SHIFT;
1080 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON) 1144 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
1081 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M & 1145 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
1082 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT; 1146 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
1083 1147
1084 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); 1148 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
@@ -1198,21 +1262,23 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1198{ 1262{
1199 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1263 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
1200 struct pci_dev *pdev = adapter->pdev; 1264 struct pci_dev *pdev = adapter->pdev;
1201 int ret; 1265 u32 master_ctrl_data = 0;
1202 1266
1203 AT_WRITE_REG(hw, REG_IMR, 0); 1267 AT_WRITE_REG(hw, REG_IMR, 0);
1204 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT); 1268 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
1205 1269
1206 ret = atl1c_stop_mac(hw); 1270 atl1c_stop_mac(hw);
1207 if (ret)
1208 return ret;
1209 /* 1271 /*
1210 * Issue Soft Reset to the MAC. This will reset the chip's 1272 * Issue Soft Reset to the MAC. This will reset the chip's
1211 * transmit, receive, DMA. It will not effect 1273 * transmit, receive, DMA. It will not effect
1212 * the current PCI configuration. The global reset bit is self- 1274 * the current PCI configuration. The global reset bit is self-
1213 * clearing, and should clear within a microsecond. 1275 * clearing, and should clear within a microsecond.
1214 */ 1276 */
1215 AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); 1277 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1278 master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
1279 AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
1280 & 0xFFFF));
1281
1216 AT_WRITE_FLUSH(hw); 1282 AT_WRITE_FLUSH(hw);
1217 msleep(10); 1283 msleep(10);
1218 /* Wait at least 10ms for All module to be Idle */ 1284 /* Wait at least 10ms for All module to be Idle */
@@ -1253,42 +1319,39 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1253{ 1319{
1254 u32 pm_ctrl_data; 1320 u32 pm_ctrl_data;
1255 u32 link_ctrl_data; 1321 u32 link_ctrl_data;
1322 u32 link_l1_timer = 0xF;
1256 1323
1257 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1324 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1258 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 1325 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
1259 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1260 1326
1327 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1261 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1328 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1262 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1329 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1263 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << 1330 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
1264 PM_CTRL_LCKDET_TIMER_SHIFT); 1331 PM_CTRL_LCKDET_TIMER_SHIFT);
1265 1332 pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
1266 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1267 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1268 pm_ctrl_data |= PM_CTRL_RBER_EN;
1269 pm_ctrl_data |= PM_CTRL_SDES_EN;
1270 1333
1271 if (hw->nic_type == athr_l2c_b || 1334 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1272 hw->nic_type == athr_l1d || 1335 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1273 hw->nic_type == athr_l2c_b2) {
1274 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; 1336 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
1275 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { 1337 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
1276 if (hw->nic_type == athr_l2c_b && 1338 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
1277 hw->revision_id == L2CB_V10)
1278 link_ctrl_data |= LINK_CTRL_EXT_SYNC; 1339 link_ctrl_data |= LINK_CTRL_EXT_SYNC;
1279 } 1340 }
1280 1341
1281 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); 1342 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
1282 1343
1283 pm_ctrl_data |= PM_CTRL_PCIE_RECV; 1344 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
1284 pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT; 1345 pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
1285 pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S; 1346 PM_CTRL_PM_REQ_TIMER_SHIFT);
1347 pm_ctrl_data |= AT_ASPM_L1_TIMER <<
1348 PM_CTRL_PM_REQ_TIMER_SHIFT;
1286 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN; 1349 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
1287 pm_ctrl_data &= ~PM_CTRL_HOTRST; 1350 pm_ctrl_data &= ~PM_CTRL_HOTRST;
1288 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT; 1351 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1289 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1; 1352 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
1290 } 1353 }
1291 1354 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1292 if (linkup) { 1355 if (linkup) {
1293 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1356 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1294 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1357 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
@@ -1297,27 +1360,26 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1297 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) 1360 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1298 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN; 1361 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1299 1362
1300 if (hw->nic_type == athr_l2c_b || 1363 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1301 hw->nic_type == athr_l1d || 1364 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1302 hw->nic_type == athr_l2c_b2) {
1303 if (hw->nic_type == athr_l2c_b) 1365 if (hw->nic_type == athr_l2c_b)
1304 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) 1366 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
1305 pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN; 1367 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1306 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; 1368 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1307 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; 1369 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1308 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; 1370 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1309 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; 1371 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1310 if (hw->adapter->link_speed == SPEED_100 || 1372 if (hw->adapter->link_speed == SPEED_100 ||
1311 hw->adapter->link_speed == SPEED_1000) { 1373 hw->adapter->link_speed == SPEED_1000) {
1312 pm_ctrl_data &= 1374 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1313 ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1375 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1314 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1376 if (hw->nic_type == athr_l2c_b)
1315 if (hw->nic_type == athr_l1d) 1377 link_l1_timer = 7;
1316 pm_ctrl_data |= 0xF << 1378 else if (hw->nic_type == athr_l2c_b2 ||
1317 PM_CTRL_L1_ENTRY_TIMER_SHIFT; 1379 hw->nic_type == athr_l1d_2)
1318 else 1380 link_l1_timer = 4;
1319 pm_ctrl_data |= 7 << 1381 pm_ctrl_data |= link_l1_timer <<
1320 PM_CTRL_L1_ENTRY_TIMER_SHIFT; 1382 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1321 } 1383 }
1322 } else { 1384 } else {
1323 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; 1385 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
@@ -1326,24 +1388,12 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1326 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; 1388 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1327 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1389 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1328 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1390 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1329 }
1330 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
1331 if (hw->adapter->link_speed == SPEED_10)
1332 if (hw->nic_type == athr_l1d)
1333 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
1334 else
1335 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
1336 else if (hw->adapter->link_speed == SPEED_100)
1337 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
1338 else
1339 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
1340 1391
1392 }
1341 } else { 1393 } else {
1342 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1343 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; 1394 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1344 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1395 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1345 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; 1396 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1346
1347 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; 1397 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1348 1398
1349 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) 1399 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
@@ -1351,8 +1401,9 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1351 else 1401 else
1352 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1402 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1353 } 1403 }
1354
1355 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); 1404 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1405
1406 return;
1356} 1407}
1357 1408
1358static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) 1409static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
@@ -1391,7 +1442,8 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
1391 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; 1442 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
1392 1443
1393 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; 1444 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
1394 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) { 1445 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
1446 hw->nic_type == athr_l1d_2) {
1395 mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW; 1447 mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
1396 mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32; 1448 mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
1397 } 1449 }
@@ -1409,6 +1461,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1409 struct atl1c_hw *hw = &adapter->hw; 1461 struct atl1c_hw *hw = &adapter->hw;
1410 u32 master_ctrl_data = 0; 1462 u32 master_ctrl_data = 0;
1411 u32 intr_modrt_data; 1463 u32 intr_modrt_data;
1464 u32 data;
1412 1465
1413 /* clear interrupt status */ 1466 /* clear interrupt status */
1414 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); 1467 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
@@ -1418,6 +1471,15 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1418 * HW will enable self to assert interrupt event to system after 1471 * HW will enable self to assert interrupt event to system after
1419 * waiting x-time for software to notify it accept interrupt. 1472 * waiting x-time for software to notify it accept interrupt.
1420 */ 1473 */
1474
1475 data = CLK_GATING_EN_ALL;
1476 if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
1477 if (hw->nic_type == athr_l2c_b)
1478 data &= ~CLK_GATING_RXMAC_EN;
1479 } else
1480 data = 0;
1481 AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
1482
1421 AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, 1483 AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
1422 hw->ict & INT_RETRIG_TIMER_MASK); 1484 hw->ict & INT_RETRIG_TIMER_MASK);
1423 1485
@@ -1436,6 +1498,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1436 if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) 1498 if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
1437 master_ctrl_data |= MASTER_CTRL_INT_RDCLR; 1499 master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
1438 1500
1501 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1439 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); 1502 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1440 1503
1441 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { 1504 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
@@ -1624,11 +1687,9 @@ static irqreturn_t atl1c_intr(int irq, void *data)
1624 "atl1c hardware error (status = 0x%x)\n", 1687 "atl1c hardware error (status = 0x%x)\n",
1625 status & ISR_ERROR); 1688 status & ISR_ERROR);
1626 /* reset MAC */ 1689 /* reset MAC */
1627 hw->intr_mask &= ~ISR_ERROR;
1628 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1629 adapter->work_event |= ATL1C_WORK_EVENT_RESET; 1690 adapter->work_event |= ATL1C_WORK_EVENT_RESET;
1630 schedule_work(&adapter->common_task); 1691 schedule_work(&adapter->common_task);
1631 break; 1692 return IRQ_HANDLED;
1632 } 1693 }
1633 1694
1634 if (status & ISR_OVER) 1695 if (status & ISR_OVER)
@@ -2303,7 +2364,6 @@ void atl1c_down(struct atl1c_adapter *adapter)
2303 napi_disable(&adapter->napi); 2364 napi_disable(&adapter->napi);
2304 atl1c_irq_disable(adapter); 2365 atl1c_irq_disable(adapter);
2305 atl1c_free_irq(adapter); 2366 atl1c_free_irq(adapter);
2306 AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
2307 /* reset MAC to disable all RX/TX */ 2367 /* reset MAC to disable all RX/TX */
2308 atl1c_reset_mac(&adapter->hw); 2368 atl1c_reset_mac(&adapter->hw);
2309 msleep(1); 2369 msleep(1);
@@ -2387,79 +2447,68 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2387 struct net_device *netdev = pci_get_drvdata(pdev); 2447 struct net_device *netdev = pci_get_drvdata(pdev);
2388 struct atl1c_adapter *adapter = netdev_priv(netdev); 2448 struct atl1c_adapter *adapter = netdev_priv(netdev);
2389 struct atl1c_hw *hw = &adapter->hw; 2449 struct atl1c_hw *hw = &adapter->hw;
2390 u32 ctrl; 2450 u32 mac_ctrl_data = 0;
2391 u32 mac_ctrl_data; 2451 u32 master_ctrl_data = 0;
2392 u32 master_ctrl_data;
2393 u32 wol_ctrl_data = 0; 2452 u32 wol_ctrl_data = 0;
2394 u16 mii_bmsr_data; 2453 u16 mii_intr_status_data = 0;
2395 u16 save_autoneg_advertised;
2396 u16 mii_intr_status_data;
2397 u32 wufc = adapter->wol; 2454 u32 wufc = adapter->wol;
2398 u32 i;
2399 int retval = 0; 2455 int retval = 0;
2400 2456
2457 atl1c_disable_l0s_l1(hw);
2401 if (netif_running(netdev)) { 2458 if (netif_running(netdev)) {
2402 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); 2459 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2403 atl1c_down(adapter); 2460 atl1c_down(adapter);
2404 } 2461 }
2405 netif_device_detach(netdev); 2462 netif_device_detach(netdev);
2406 atl1c_disable_l0s_l1(hw);
2407 retval = pci_save_state(pdev); 2463 retval = pci_save_state(pdev);
2408 if (retval) 2464 if (retval)
2409 return retval; 2465 return retval;
2466
2467 if (wufc)
2468 if (atl1c_phy_power_saving(hw) != 0)
2469 dev_dbg(&pdev->dev, "phy power saving failed");
2470
2471 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
2472 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
2473
2474 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
2475 mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
2476 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2477 MAC_CTRL_PRMLEN_MASK) <<
2478 MAC_CTRL_PRMLEN_SHIFT);
2479 mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
2480 mac_ctrl_data &= ~MAC_CTRL_DUPLX;
2481
2410 if (wufc) { 2482 if (wufc) {
2411 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); 2483 mac_ctrl_data |= MAC_CTRL_RX_EN;
2412 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; 2484 if (adapter->link_speed == SPEED_1000 ||
2413 2485 adapter->link_speed == SPEED_0) {
2414 /* get link status */ 2486 mac_ctrl_data |= atl1c_mac_speed_1000 <<
2415 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2487 MAC_CTRL_SPEED_SHIFT;
2416 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2488 mac_ctrl_data |= MAC_CTRL_DUPLX;
2417 save_autoneg_advertised = hw->autoneg_advertised; 2489 } else
2418 hw->autoneg_advertised = ADVERTISED_10baseT_Half; 2490 mac_ctrl_data |= atl1c_mac_speed_10_100 <<
2419 if (atl1c_restart_autoneg(hw) != 0) 2491 MAC_CTRL_SPEED_SHIFT;
2420 if (netif_msg_link(adapter)) 2492
2421 dev_warn(&pdev->dev, "phy autoneg failed\n"); 2493 if (adapter->link_duplex == DUPLEX_FULL)
2422 hw->phy_configured = false; /* re-init PHY when resume */ 2494 mac_ctrl_data |= MAC_CTRL_DUPLX;
2423 hw->autoneg_advertised = save_autoneg_advertised; 2495
2424 /* turn on magic packet wol */ 2496 /* turn on magic packet wol */
2425 if (wufc & AT_WUFC_MAG) 2497 if (wufc & AT_WUFC_MAG)
2426 wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; 2498 wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2427 2499
2428 if (wufc & AT_WUFC_LNKC) { 2500 if (wufc & AT_WUFC_LNKC) {
2429 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2430 msleep(100);
2431 atl1c_read_phy_reg(hw, MII_BMSR,
2432 (u16 *)&mii_bmsr_data);
2433 if (mii_bmsr_data & BMSR_LSTATUS)
2434 break;
2435 }
2436 if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2437 if (netif_msg_link(adapter))
2438 dev_warn(&pdev->dev,
2439 "%s: Link may change"
2440 "when suspend\n",
2441 atl1c_driver_name);
2442 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; 2501 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2443 /* only link up can wake up */ 2502 /* only link up can wake up */
2444 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { 2503 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
2445 if (netif_msg_link(adapter)) 2504 dev_dbg(&pdev->dev, "%s: read write phy "
2446 dev_err(&pdev->dev, 2505 "register failed.\n",
2447 "%s: read write phy " 2506 atl1c_driver_name);
2448 "register failed.\n",
2449 atl1c_driver_name);
2450 goto wol_dis;
2451 } 2507 }
2452 } 2508 }
2453 /* clear phy interrupt */ 2509 /* clear phy interrupt */
2454 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data); 2510 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
2455 /* Config MAC Ctrl register */ 2511 /* Config MAC Ctrl register */
2456 mac_ctrl_data = MAC_CTRL_RX_EN;
2457 /* set to 10/100M halt duplex */
2458 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2459 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2460 MAC_CTRL_PRMLEN_MASK) <<
2461 MAC_CTRL_PRMLEN_SHIFT);
2462
2463 if (adapter->vlgrp) 2512 if (adapter->vlgrp)
2464 mac_ctrl_data |= MAC_CTRL_RMV_VLAN; 2513 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
2465 2514
@@ -2467,37 +2516,30 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2467 if (wufc & AT_WUFC_MAG) 2516 if (wufc & AT_WUFC_MAG)
2468 mac_ctrl_data |= MAC_CTRL_BC_EN; 2517 mac_ctrl_data |= MAC_CTRL_BC_EN;
2469 2518
2470 if (netif_msg_hw(adapter)) 2519 dev_dbg(&pdev->dev,
2471 dev_dbg(&pdev->dev, 2520 "%s: suspend MAC=0x%x\n",
2472 "%s: suspend MAC=0x%x\n", 2521 atl1c_driver_name, mac_ctrl_data);
2473 atl1c_driver_name, mac_ctrl_data);
2474 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); 2522 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2475 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); 2523 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2476 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 2524 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2477 2525
2478 /* pcie patch */ 2526 /* pcie patch */
2479 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); 2527 device_set_wakeup_enable(&pdev->dev, 1);
2480 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2481 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2482 2528
2483 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 2529 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
2484 goto suspend_exit; 2530 GPHY_CTRL_EXT_RESET);
2531 pci_prepare_to_sleep(pdev);
2532 } else {
2533 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
2534 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
2535 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2536 mac_ctrl_data |= MAC_CTRL_DUPLX;
2537 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2538 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2539 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2540 hw->phy_configured = false; /* re-init PHY when resume */
2541 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2485 } 2542 }
2486wol_dis:
2487
2488 /* WOL disabled */
2489 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2490
2491 /* pcie patch */
2492 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
2493 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2494 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2495
2496 atl1c_phy_disable(hw);
2497 hw->phy_configured = false; /* re-init PHY when resume */
2498
2499 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2500suspend_exit:
2501 2543
2502 pci_disable_device(pdev); 2544 pci_disable_device(pdev);
2503 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2545 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -2516,9 +2558,19 @@ static int atl1c_resume(struct pci_dev *pdev)
2516 pci_enable_wake(pdev, PCI_D3cold, 0); 2558 pci_enable_wake(pdev, PCI_D3cold, 0);
2517 2559
2518 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2560 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2561 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
2562 ATL1C_PCIE_PHY_RESET);
2519 2563
2520 atl1c_phy_reset(&adapter->hw); 2564 atl1c_phy_reset(&adapter->hw);
2521 atl1c_reset_mac(&adapter->hw); 2565 atl1c_reset_mac(&adapter->hw);
2566 atl1c_phy_init(&adapter->hw);
2567
2568#if 0
2569 AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
2570 pm_data &= ~PM_CTRLSTAT_PME_EN;
2571 AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
2572#endif
2573
2522 netif_device_attach(netdev); 2574 netif_device_attach(netdev);
2523 if (netif_running(netdev)) 2575 if (netif_running(netdev))
2524 atl1c_up(adapter); 2576 atl1c_up(adapter);
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 146372fd6683..9c0ddb273ac8 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -436,8 +436,8 @@ struct rx_free_desc {
436 __le16 buf_len; /* Size of the receive buffer in host memory */ 436 __le16 buf_len; /* Size of the receive buffer in host memory */
437 u16 coalese; /* Update consumer index to host after the 437 u16 coalese; /* Update consumer index to host after the
438 * reception of this frame */ 438 * reception of this frame */
439 /* __attribute__ ((packed)) is required */ 439 /* __packed is required */
440} __attribute__ ((packed)); 440} __packed;
441 441
442/* 442/*
443 * The L1 transmit packet descriptor is comprised of four 32-bit words. 443 * The L1 transmit packet descriptor is comprised of four 32-bit words.
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index ece6128bef14..15ae6df2ff00 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -104,14 +104,6 @@ MODULE_VERSION(DRV_VERSION);
104 * complete immediately. 104 * complete immediately.
105 */ 105 */
106 106
107/* These addresses are only used if yamon doesn't tell us what
108 * the mac address is, and the mac address is not passed on the
109 * command line.
110 */
111static unsigned char au1000_mac_addr[6] __devinitdata = {
112 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
113};
114
115struct au1000_private *au_macs[NUM_ETH_INTERFACES]; 107struct au1000_private *au_macs[NUM_ETH_INTERFACES];
116 108
117/* 109/*
@@ -978,7 +970,7 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
978 if (!aup->phy_dev) 970 if (!aup->phy_dev)
979 return -EINVAL; /* PHY not controllable */ 971 return -EINVAL; /* PHY not controllable */
980 972
981 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); 973 return phy_mii_ioctl(aup->phy_dev, rq, cmd);
982} 974}
983 975
984static const struct net_device_ops au1000_netdev_ops = { 976static const struct net_device_ops au1000_netdev_ops = {
@@ -1002,7 +994,6 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1002 db_dest_t *pDB, *pDBfree; 994 db_dest_t *pDB, *pDBfree;
1003 int irq, i, err = 0; 995 int irq, i, err = 0;
1004 struct resource *base, *macen; 996 struct resource *base, *macen;
1005 char ethaddr[6];
1006 997
1007 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 998 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1008 if (!base) { 999 if (!base) {
@@ -1079,24 +1070,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1079 } 1070 }
1080 aup->mac_id = pdev->id; 1071 aup->mac_id = pdev->id;
1081 1072
1082 if (pdev->id == 0) { 1073 if (pdev->id == 0)
1083 if (prom_get_ethernet_addr(ethaddr) == 0)
1084 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1085 else {
1086 netdev_info(dev, "No MAC address found\n");
1087 /* Use the hard coded MAC addresses */
1088 }
1089
1090 au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); 1074 au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1091 } else if (pdev->id == 1) 1075 else if (pdev->id == 1)
1092 au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); 1076 au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1093 1077
1094 /* 1078 /* set a random MAC now in case platform_data doesn't provide one */
1095 * Assign to the Ethernet ports two consecutive MAC addresses 1079 random_ether_addr(dev->dev_addr);
1096 * to match those that are printed on their stickers
1097 */
1098 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1099 dev->dev_addr[5] += pdev->id;
1100 1080
1101 *aup->enable = 0; 1081 *aup->enable = 0;
1102 aup->mac_enabled = 0; 1082 aup->mac_enabled = 0;
@@ -1106,6 +1086,9 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1106 dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n"); 1086 dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
1107 aup->phy1_search_mac0 = 1; 1087 aup->phy1_search_mac0 = 1;
1108 } else { 1088 } else {
1089 if (is_valid_ether_addr(pd->mac))
1090 memcpy(dev->dev_addr, pd->mac, 6);
1091
1109 aup->phy_static_config = pd->phy_static_config; 1092 aup->phy_static_config = pd->phy_static_config;
1110 aup->phy_search_highest_addr = pd->phy_search_highest_addr; 1093 aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1111 aup->phy1_search_mac0 = pd->phy1_search_mac0; 1094 aup->phy1_search_mac0 = pd->phy1_search_mac0;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 55c9958043c4..20e946b1e744 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -481,8 +481,10 @@ static int ax_open(struct net_device *dev)
481 return ret; 481 return ret;
482 482
483 ret = ax_ei_open(dev); 483 ret = ax_ei_open(dev);
484 if (ret) 484 if (ret) {
485 free_irq(dev->irq, dev);
485 return ret; 486 return ret;
487 }
486 488
487 /* turn the phy on (if turned off) */ 489 /* turn the phy on (if turned off) */
488 490
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 293f9c16e786..37617abc1647 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *);
135 135
136static void b44_init_hw(struct b44 *, int); 136static void b44_init_hw(struct b44 *, int);
137 137
138static int dma_desc_align_mask;
139static int dma_desc_sync_size; 138static int dma_desc_sync_size;
140static int instance; 139static int instance;
141 140
@@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
150 unsigned long offset, 149 unsigned long offset,
151 enum dma_data_direction dir) 150 enum dma_data_direction dir)
152{ 151{
153 ssb_dma_sync_single_range_for_device(sdev, dma_base, 152 dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
154 offset & dma_desc_align_mask, 153 dma_desc_sync_size, dir);
155 dma_desc_sync_size, dir);
156} 154}
157 155
158static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, 156static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
160 unsigned long offset, 158 unsigned long offset,
161 enum dma_data_direction dir) 159 enum dma_data_direction dir)
162{ 160{
163 ssb_dma_sync_single_range_for_cpu(sdev, dma_base, 161 dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
164 offset & dma_desc_align_mask, 162 dma_desc_sync_size, dir);
165 dma_desc_sync_size, dir);
166} 163}
167 164
168static inline unsigned long br32(const struct b44 *bp, unsigned long reg) 165static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp)
608 605
609 BUG_ON(skb == NULL); 606 BUG_ON(skb == NULL);
610 607
611 ssb_dma_unmap_single(bp->sdev, 608 dma_unmap_single(bp->sdev->dma_dev,
612 rp->mapping, 609 rp->mapping,
613 skb->len, 610 skb->len,
614 DMA_TO_DEVICE); 611 DMA_TO_DEVICE);
615 rp->skb = NULL; 612 rp->skb = NULL;
616 dev_kfree_skb_irq(skb); 613 dev_kfree_skb_irq(skb);
617 } 614 }
@@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
648 if (skb == NULL) 645 if (skb == NULL)
649 return -ENOMEM; 646 return -ENOMEM;
650 647
651 mapping = ssb_dma_map_single(bp->sdev, skb->data, 648 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
652 RX_PKT_BUF_SZ, 649 RX_PKT_BUF_SZ,
653 DMA_FROM_DEVICE); 650 DMA_FROM_DEVICE);
654 651
655 /* Hardware bug work-around, the chip is unable to do PCI DMA 652 /* Hardware bug work-around, the chip is unable to do PCI DMA
656 to/from anything above 1GB :-( */ 653 to/from anything above 1GB :-( */
657 if (ssb_dma_mapping_error(bp->sdev, mapping) || 654 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
658 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { 655 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
659 /* Sigh... */ 656 /* Sigh... */
660 if (!ssb_dma_mapping_error(bp->sdev, mapping)) 657 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
661 ssb_dma_unmap_single(bp->sdev, mapping, 658 dma_unmap_single(bp->sdev->dma_dev, mapping,
662 RX_PKT_BUF_SZ, DMA_FROM_DEVICE); 659 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
663 dev_kfree_skb_any(skb); 660 dev_kfree_skb_any(skb);
664 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); 661 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
665 if (skb == NULL) 662 if (skb == NULL)
666 return -ENOMEM; 663 return -ENOMEM;
667 mapping = ssb_dma_map_single(bp->sdev, skb->data, 664 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
668 RX_PKT_BUF_SZ, 665 RX_PKT_BUF_SZ,
669 DMA_FROM_DEVICE); 666 DMA_FROM_DEVICE);
670 if (ssb_dma_mapping_error(bp->sdev, mapping) || 667 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
671 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { 668 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
672 if (!ssb_dma_mapping_error(bp->sdev, mapping)) 669 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
673 ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); 670 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
674 dev_kfree_skb_any(skb); 671 dev_kfree_skb_any(skb);
675 return -ENOMEM; 672 return -ENOMEM;
676 } 673 }
@@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
745 dest_idx * sizeof(*dest_desc), 742 dest_idx * sizeof(*dest_desc),
746 DMA_BIDIRECTIONAL); 743 DMA_BIDIRECTIONAL);
747 744
748 ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping, 745 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
749 RX_PKT_BUF_SZ, 746 RX_PKT_BUF_SZ,
750 DMA_FROM_DEVICE); 747 DMA_FROM_DEVICE);
751} 748}
752 749
753static int b44_rx(struct b44 *bp, int budget) 750static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int budget)
767 struct rx_header *rh; 764 struct rx_header *rh;
768 u16 len; 765 u16 len;
769 766
770 ssb_dma_sync_single_for_cpu(bp->sdev, map, 767 dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
771 RX_PKT_BUF_SZ, 768 RX_PKT_BUF_SZ,
772 DMA_FROM_DEVICE); 769 DMA_FROM_DEVICE);
773 rh = (struct rx_header *) skb->data; 770 rh = (struct rx_header *) skb->data;
774 len = le16_to_cpu(rh->len); 771 len = le16_to_cpu(rh->len);
775 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || 772 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int budget)
801 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); 798 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
802 if (skb_size < 0) 799 if (skb_size < 0)
803 goto drop_it; 800 goto drop_it;
804 ssb_dma_unmap_single(bp->sdev, map, 801 dma_unmap_single(bp->sdev->dma_dev, map,
805 skb_size, DMA_FROM_DEVICE); 802 skb_size, DMA_FROM_DEVICE);
806 /* Leave out rx_header */ 803 /* Leave out rx_header */
807 skb_put(skb, len + RX_PKT_OFFSET); 804 skb_put(skb, len + RX_PKT_OFFSET);
808 skb_pull(skb, RX_PKT_OFFSET); 805 skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
954 goto err_out; 951 goto err_out;
955 } 952 }
956 953
957 mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE); 954 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
958 if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) { 955 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
959 struct sk_buff *bounce_skb; 956 struct sk_buff *bounce_skb;
960 957
961 /* Chip can't handle DMA to/from >1GB, use bounce buffer */ 958 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
962 if (!ssb_dma_mapping_error(bp->sdev, mapping)) 959 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
963 ssb_dma_unmap_single(bp->sdev, mapping, len, 960 dma_unmap_single(bp->sdev->dma_dev, mapping, len,
964 DMA_TO_DEVICE); 961 DMA_TO_DEVICE);
965 962
966 bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA); 963 bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
967 if (!bounce_skb) 964 if (!bounce_skb)
968 goto err_out; 965 goto err_out;
969 966
970 mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data, 967 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
971 len, DMA_TO_DEVICE); 968 len, DMA_TO_DEVICE);
972 if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) { 969 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
973 if (!ssb_dma_mapping_error(bp->sdev, mapping)) 970 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
974 ssb_dma_unmap_single(bp->sdev, mapping, 971 dma_unmap_single(bp->sdev->dma_dev, mapping,
975 len, DMA_TO_DEVICE); 972 len, DMA_TO_DEVICE);
976 dev_kfree_skb_any(bounce_skb); 973 dev_kfree_skb_any(bounce_skb);
977 goto err_out; 974 goto err_out;
@@ -1068,8 +1065,8 @@ static void b44_free_rings(struct b44 *bp)
1068 1065
1069 if (rp->skb == NULL) 1066 if (rp->skb == NULL)
1070 continue; 1067 continue;
1071 ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ, 1068 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1072 DMA_FROM_DEVICE); 1069 DMA_FROM_DEVICE);
1073 dev_kfree_skb_any(rp->skb); 1070 dev_kfree_skb_any(rp->skb);
1074 rp->skb = NULL; 1071 rp->skb = NULL;
1075 } 1072 }
@@ -1080,8 +1077,8 @@ static void b44_free_rings(struct b44 *bp)
1080 1077
1081 if (rp->skb == NULL) 1078 if (rp->skb == NULL)
1082 continue; 1079 continue;
1083 ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len, 1080 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1084 DMA_TO_DEVICE); 1081 DMA_TO_DEVICE);
1085 dev_kfree_skb_any(rp->skb); 1082 dev_kfree_skb_any(rp->skb);
1086 rp->skb = NULL; 1083 rp->skb = NULL;
1087 } 1084 }
@@ -1103,14 +1100,12 @@ static void b44_init_rings(struct b44 *bp)
1103 memset(bp->tx_ring, 0, B44_TX_RING_BYTES); 1100 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1104 1101
1105 if (bp->flags & B44_FLAG_RX_RING_HACK) 1102 if (bp->flags & B44_FLAG_RX_RING_HACK)
1106 ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma, 1103 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1107 DMA_TABLE_BYTES, 1104 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1108 DMA_BIDIRECTIONAL);
1109 1105
1110 if (bp->flags & B44_FLAG_TX_RING_HACK) 1106 if (bp->flags & B44_FLAG_TX_RING_HACK)
1111 ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma, 1107 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1112 DMA_TABLE_BYTES, 1108 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1113 DMA_TO_DEVICE);
1114 1109
1115 for (i = 0; i < bp->rx_pending; i++) { 1110 for (i = 0; i < bp->rx_pending; i++) {
1116 if (b44_alloc_rx_skb(bp, -1, i) < 0) 1111 if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1125,23 @@ static void b44_free_consistent(struct b44 *bp)
1130 bp->tx_buffers = NULL; 1125 bp->tx_buffers = NULL;
1131 if (bp->rx_ring) { 1126 if (bp->rx_ring) {
1132 if (bp->flags & B44_FLAG_RX_RING_HACK) { 1127 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1133 ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma, 1128 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1134 DMA_TABLE_BYTES, 1129 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1135 DMA_BIDIRECTIONAL);
1136 kfree(bp->rx_ring); 1130 kfree(bp->rx_ring);
1137 } else 1131 } else
1138 ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES, 1132 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1139 bp->rx_ring, bp->rx_ring_dma, 1133 bp->rx_ring, bp->rx_ring_dma);
1140 GFP_KERNEL);
1141 bp->rx_ring = NULL; 1134 bp->rx_ring = NULL;
1142 bp->flags &= ~B44_FLAG_RX_RING_HACK; 1135 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1143 } 1136 }
1144 if (bp->tx_ring) { 1137 if (bp->tx_ring) {
1145 if (bp->flags & B44_FLAG_TX_RING_HACK) { 1138 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1146 ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma, 1139 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1147 DMA_TABLE_BYTES, 1140 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1148 DMA_TO_DEVICE);
1149 kfree(bp->tx_ring); 1141 kfree(bp->tx_ring);
1150 } else 1142 } else
1151 ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES, 1143 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1152 bp->tx_ring, bp->tx_ring_dma, 1144 bp->tx_ring, bp->tx_ring_dma);
1153 GFP_KERNEL);
1154 bp->tx_ring = NULL; 1145 bp->tx_ring = NULL;
1155 bp->flags &= ~B44_FLAG_TX_RING_HACK; 1146 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1156 } 1147 }
@@ -1175,7 +1166,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1175 goto out_err; 1166 goto out_err;
1176 1167
1177 size = DMA_TABLE_BYTES; 1168 size = DMA_TABLE_BYTES;
1178 bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp); 1169 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1170 &bp->rx_ring_dma, gfp);
1179 if (!bp->rx_ring) { 1171 if (!bp->rx_ring) {
1180 /* Allocation may have failed due to pci_alloc_consistent 1172 /* Allocation may have failed due to pci_alloc_consistent
1181 insisting on use of GFP_DMA, which is more restrictive 1173 insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1179,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1187 if (!rx_ring) 1179 if (!rx_ring)
1188 goto out_err; 1180 goto out_err;
1189 1181
1190 rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring, 1182 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1191 DMA_TABLE_BYTES, 1183 DMA_TABLE_BYTES,
1192 DMA_BIDIRECTIONAL); 1184 DMA_BIDIRECTIONAL);
1193 1185
1194 if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) || 1186 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1195 rx_ring_dma + size > DMA_BIT_MASK(30)) { 1187 rx_ring_dma + size > DMA_BIT_MASK(30)) {
1196 kfree(rx_ring); 1188 kfree(rx_ring);
1197 goto out_err; 1189 goto out_err;
@@ -1202,7 +1194,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1202 bp->flags |= B44_FLAG_RX_RING_HACK; 1194 bp->flags |= B44_FLAG_RX_RING_HACK;
1203 } 1195 }
1204 1196
1205 bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp); 1197 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1198 &bp->tx_ring_dma, gfp);
1206 if (!bp->tx_ring) { 1199 if (!bp->tx_ring) {
1207 /* Allocation may have failed due to ssb_dma_alloc_consistent 1200 /* Allocation may have failed due to ssb_dma_alloc_consistent
1208 insisting on use of GFP_DMA, which is more restrictive 1201 insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1207,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1214 if (!tx_ring) 1207 if (!tx_ring)
1215 goto out_err; 1208 goto out_err;
1216 1209
1217 tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring, 1210 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1218 DMA_TABLE_BYTES, 1211 DMA_TABLE_BYTES,
1219 DMA_TO_DEVICE); 1212 DMA_TO_DEVICE);
1220 1213
1221 if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) || 1214 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1222 tx_ring_dma + size > DMA_BIT_MASK(30)) { 1215 tx_ring_dma + size > DMA_BIT_MASK(30)) {
1223 kfree(tx_ring); 1216 kfree(tx_ring);
1224 goto out_err; 1217 goto out_err;
@@ -2176,12 +2169,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2176 "Failed to powerup the bus\n"); 2169 "Failed to powerup the bus\n");
2177 goto err_out_free_dev; 2170 goto err_out_free_dev;
2178 } 2171 }
2179 err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30)); 2172
2180 if (err) { 2173 if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
2174 dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
2181 dev_err(sdev->dev, 2175 dev_err(sdev->dev,
2182 "Required 30BIT DMA mask unsupported by the system\n"); 2176 "Required 30BIT DMA mask unsupported by the system\n");
2183 goto err_out_powerdown; 2177 goto err_out_powerdown;
2184 } 2178 }
2179
2185 err = b44_get_invariants(bp); 2180 err = b44_get_invariants(bp);
2186 if (err) { 2181 if (err) {
2187 dev_err(sdev->dev, 2182 dev_err(sdev->dev,
@@ -2344,7 +2339,6 @@ static int __init b44_init(void)
2344 int err; 2339 int err;
2345 2340
2346 /* Setup paramaters for syncing RX/TX DMA descriptors */ 2341 /* Setup paramaters for syncing RX/TX DMA descriptors */
2347 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2348 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); 2342 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2349 2343
2350 err = b44_pci_init(); 2344 err = b44_pci_init();
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index faf5add894d7..0d2c5da08937 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1496,7 +1496,7 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1496 if (priv->has_phy) { 1496 if (priv->has_phy) {
1497 if (!priv->phydev) 1497 if (!priv->phydev)
1498 return -ENODEV; 1498 return -ENODEV;
1499 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 1499 return phy_mii_ioctl(priv->phydev, rq, cmd);
1500 } else { 1500 } else {
1501 struct mii_if_info mii; 1501 struct mii_if_info mii;
1502 1502
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b46be490cd2a..99197bd54da5 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -33,7 +33,7 @@
33 33
34#include "be_hw.h" 34#include "be_hw.h"
35 35
36#define DRV_VER "2.102.147u" 36#define DRV_VER "2.103.175u"
37#define DRV_NAME "be2net" 37#define DRV_NAME "be2net"
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -220,7 +220,16 @@ struct be_rx_obj {
220 struct be_rx_page_info page_info_tbl[RX_Q_LEN]; 220 struct be_rx_page_info page_info_tbl[RX_Q_LEN];
221}; 221};
222 222
223struct be_vf_cfg {
224 unsigned char vf_mac_addr[ETH_ALEN];
225 u32 vf_if_handle;
226 u32 vf_pmac_id;
227 u16 vf_vlan_tag;
228 u32 vf_tx_rate;
229};
230
223#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */ 231#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
232#define BE_INVALID_PMAC_ID 0xffffffff
224struct be_adapter { 233struct be_adapter {
225 struct pci_dev *pdev; 234 struct pci_dev *pdev;
226 struct net_device *netdev; 235 struct net_device *netdev;
@@ -276,23 +285,26 @@ struct be_adapter {
276 u32 port_num; 285 u32 port_num;
277 bool promiscuous; 286 bool promiscuous;
278 bool wol; 287 bool wol;
279 u32 cap; 288 u32 function_mode;
280 u32 rx_fc; /* Rx flow control */ 289 u32 rx_fc; /* Rx flow control */
281 u32 tx_fc; /* Tx flow control */ 290 u32 tx_fc; /* Tx flow control */
291 bool ue_detected;
292 bool stats_ioctl_sent;
282 int link_speed; 293 int link_speed;
283 u8 port_type; 294 u8 port_type;
284 u8 transceiver; 295 u8 transceiver;
296 u8 autoneg;
285 u8 generation; /* BladeEngine ASIC generation */ 297 u8 generation; /* BladeEngine ASIC generation */
286 u32 flash_status; 298 u32 flash_status;
287 struct completion flash_compl; 299 struct completion flash_compl;
288 300
289 bool sriov_enabled; 301 bool sriov_enabled;
290 u32 vf_if_handle[BE_MAX_VF]; 302 struct be_vf_cfg vf_cfg[BE_MAX_VF];
291 u32 vf_pmac_id[BE_MAX_VF];
292 u8 base_eq_id; 303 u8 base_eq_id;
304 u8 is_virtfn;
293}; 305};
294 306
295#define be_physfn(adapter) (!adapter->pdev->is_virtfn) 307#define be_physfn(adapter) (!adapter->is_virtfn)
296 308
297/* BladeEngine Generation numbers */ 309/* BladeEngine Generation numbers */
298#define BE_GEN2 2 310#define BE_GEN2 2
@@ -392,6 +404,15 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
392 return val; 404 return val;
393} 405}
394 406
407static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
408{
409 u8 data;
410
411 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
412 pci_read_config_byte(adapter->pdev, 0xFE, &data);
413 adapter->is_virtfn = (data != 0xAA);
414}
415
395extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 416extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
396 u16 num_popped); 417 u16 num_popped);
397extern void be_link_status_update(struct be_adapter *adapter, bool link_up); 418extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index b9ad799c719f..3d305494a606 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -25,6 +25,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
25 25
26 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 26 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 27 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28
29 wmb();
28 iowrite32(val, adapter->db + DB_MCCQ_OFFSET); 30 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
29} 31}
30 32
@@ -73,8 +75,10 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
73 be_dws_le_to_cpu(&resp->hw_stats, 75 be_dws_le_to_cpu(&resp->hw_stats,
74 sizeof(resp->hw_stats)); 76 sizeof(resp->hw_stats));
75 netdev_stats_update(adapter); 77 netdev_stats_update(adapter);
78 adapter->stats_ioctl_sent = false;
76 } 79 }
77 } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) { 80 } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
81 (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
78 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 82 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
79 CQE_STATUS_EXTD_MASK; 83 CQE_STATUS_EXTD_MASK;
80 dev_warn(&adapter->pdev->dev, 84 dev_warn(&adapter->pdev->dev,
@@ -186,7 +190,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
186 190
187static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 191static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
188{ 192{
189 int cnt = 0, wait = 5; 193 int msecs = 0;
190 u32 ready; 194 u32 ready;
191 195
192 do { 196 do {
@@ -201,15 +205,15 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
201 if (ready) 205 if (ready)
202 break; 206 break;
203 207
204 if (cnt > 4000000) { 208 if (msecs > 4000) {
205 dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); 209 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
210 be_dump_ue(adapter);
206 return -1; 211 return -1;
207 } 212 }
208 213
209 if (cnt > 50) 214 set_current_state(TASK_INTERRUPTIBLE);
210 wait = 200; 215 schedule_timeout(msecs_to_jiffies(1));
211 cnt += wait; 216 msecs++;
212 udelay(wait);
213 } while (true); 217 } while (true);
214 218
215 return 0; 219 return 0;
@@ -948,6 +952,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
948 sge->len = cpu_to_le32(nonemb_cmd->size); 952 sge->len = cpu_to_le32(nonemb_cmd->size);
949 953
950 be_mcc_notify(adapter); 954 be_mcc_notify(adapter);
955 adapter->stats_ioctl_sent = true;
951 956
952err: 957err:
953 spin_unlock_bh(&adapter->mcc_lock); 958 spin_unlock_bh(&adapter->mcc_lock);
@@ -1256,7 +1261,7 @@ err:
1256} 1261}
1257 1262
1258/* Uses mbox */ 1263/* Uses mbox */
1259int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap) 1264int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
1260{ 1265{
1261 struct be_mcc_wrb *wrb; 1266 struct be_mcc_wrb *wrb;
1262 struct be_cmd_req_query_fw_cfg *req; 1267 struct be_cmd_req_query_fw_cfg *req;
@@ -1277,7 +1282,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
1277 if (!status) { 1282 if (!status) {
1278 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1283 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1279 *port_num = le32_to_cpu(resp->phys_port); 1284 *port_num = le32_to_cpu(resp->phys_port);
1280 *cap = le32_to_cpu(resp->function_cap); 1285 *mode = le32_to_cpu(resp->function_mode);
1281 } 1286 }
1282 1287
1283 spin_unlock(&adapter->mbox_lock); 1288 spin_unlock(&adapter->mbox_lock);
@@ -1694,3 +1699,71 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1694 spin_unlock_bh(&adapter->mcc_lock); 1699 spin_unlock_bh(&adapter->mcc_lock);
1695 return status; 1700 return status;
1696} 1701}
1702
1703int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
1704{
1705 struct be_mcc_wrb *wrb;
1706 struct be_cmd_req_get_phy_info *req;
1707 struct be_sge *sge;
1708 int status;
1709
1710 spin_lock_bh(&adapter->mcc_lock);
1711
1712 wrb = wrb_from_mccq(adapter);
1713 if (!wrb) {
1714 status = -EBUSY;
1715 goto err;
1716 }
1717
1718 req = cmd->va;
1719 sge = nonembedded_sgl(wrb);
1720
1721 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1722 OPCODE_COMMON_GET_PHY_DETAILS);
1723
1724 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1725 OPCODE_COMMON_GET_PHY_DETAILS,
1726 sizeof(*req));
1727
1728 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1729 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1730 sge->len = cpu_to_le32(cmd->size);
1731
1732 status = be_mcc_notify_wait(adapter);
1733err:
1734 spin_unlock_bh(&adapter->mcc_lock);
1735 return status;
1736}
1737
1738int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1739{
1740 struct be_mcc_wrb *wrb;
1741 struct be_cmd_req_set_qos *req;
1742 int status;
1743
1744 spin_lock_bh(&adapter->mcc_lock);
1745
1746 wrb = wrb_from_mccq(adapter);
1747 if (!wrb) {
1748 status = -EBUSY;
1749 goto err;
1750 }
1751
1752 req = embedded_payload(wrb);
1753
1754 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1755 OPCODE_COMMON_SET_QOS);
1756
1757 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1758 OPCODE_COMMON_SET_QOS, sizeof(*req));
1759
1760 req->hdr.domain = domain;
1761 req->valid_bits = BE_QOS_BITS_NIC;
1762 req->max_bps_nic = bps;
1763
1764 status = be_mcc_notify_wait(adapter);
1765
1766err:
1767 spin_unlock_bh(&adapter->mcc_lock);
1768 return status;
1769}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 763dc199e337..bdc10a28cfda 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
124#define OPCODE_COMMON_CQ_CREATE 12 124#define OPCODE_COMMON_CQ_CREATE 12
125#define OPCODE_COMMON_EQ_CREATE 13 125#define OPCODE_COMMON_EQ_CREATE 13
126#define OPCODE_COMMON_MCC_CREATE 21 126#define OPCODE_COMMON_MCC_CREATE 21
127#define OPCODE_COMMON_SET_QOS 28
127#define OPCODE_COMMON_SEEPROM_READ 30 128#define OPCODE_COMMON_SEEPROM_READ 30
128#define OPCODE_COMMON_NTWK_RX_FILTER 34 129#define OPCODE_COMMON_NTWK_RX_FILTER 34
129#define OPCODE_COMMON_GET_FW_VERSION 35 130#define OPCODE_COMMON_GET_FW_VERSION 35
@@ -144,6 +145,7 @@ struct be_mcc_mailbox {
144#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 145#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
145#define OPCODE_COMMON_GET_BEACON_STATE 70 146#define OPCODE_COMMON_GET_BEACON_STATE 70
146#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 147#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
148#define OPCODE_COMMON_GET_PHY_DETAILS 102
147 149
148#define OPCODE_ETH_ACPI_CONFIG 2 150#define OPCODE_ETH_ACPI_CONFIG 2
149#define OPCODE_ETH_PROMISCUOUS 3 151#define OPCODE_ETH_PROMISCUOUS 3
@@ -747,7 +749,7 @@ struct be_cmd_resp_query_fw_cfg {
747 u32 be_config_number; 749 u32 be_config_number;
748 u32 asic_revision; 750 u32 asic_revision;
749 u32 phys_port; 751 u32 phys_port;
750 u32 function_cap; 752 u32 function_mode;
751 u32 rsvd[26]; 753 u32 rsvd[26];
752}; 754};
753 755
@@ -869,6 +871,46 @@ struct be_cmd_resp_seeprom_read {
869 u8 seeprom_data[BE_READ_SEEPROM_LEN]; 871 u8 seeprom_data[BE_READ_SEEPROM_LEN];
870}; 872};
871 873
874enum {
875 PHY_TYPE_CX4_10GB = 0,
876 PHY_TYPE_XFP_10GB,
877 PHY_TYPE_SFP_1GB,
878 PHY_TYPE_SFP_PLUS_10GB,
879 PHY_TYPE_KR_10GB,
880 PHY_TYPE_KX4_10GB,
881 PHY_TYPE_BASET_10GB,
882 PHY_TYPE_BASET_1GB,
883 PHY_TYPE_DISABLED = 255
884};
885
886struct be_cmd_req_get_phy_info {
887 struct be_cmd_req_hdr hdr;
888 u8 rsvd0[24];
889};
890struct be_cmd_resp_get_phy_info {
891 struct be_cmd_req_hdr hdr;
892 u16 phy_type;
893 u16 interface_type;
894 u32 misc_params;
895 u32 future_use[4];
896};
897
898/*********************** Set QOS ***********************/
899
900#define BE_QOS_BITS_NIC 1
901
902struct be_cmd_req_set_qos {
903 struct be_cmd_req_hdr hdr;
904 u32 valid_bits;
905 u32 max_bps_nic;
906 u32 rsvd[7];
907};
908
909struct be_cmd_resp_set_qos {
910 struct be_cmd_resp_hdr hdr;
911 u32 rsvd;
912};
913
872extern int be_pci_fnum_get(struct be_adapter *adapter); 914extern int be_pci_fnum_get(struct be_adapter *adapter);
873extern int be_cmd_POST(struct be_adapter *adapter); 915extern int be_cmd_POST(struct be_adapter *adapter);
874extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 916extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -947,4 +989,8 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
947 struct be_dma_mem *nonemb_cmd); 989 struct be_dma_mem *nonemb_cmd);
948extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 990extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
949 u8 loopback_type, u8 enable); 991 u8 loopback_type, u8 enable);
992extern int be_cmd_get_phy_info(struct be_adapter *adapter,
993 struct be_dma_mem *cmd);
994extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
995extern void be_dump_ue(struct be_adapter *adapter);
950 996
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 200e98515909..cd16243c7c36 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -314,15 +314,19 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
314static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 314static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
315{ 315{
316 struct be_adapter *adapter = netdev_priv(netdev); 316 struct be_adapter *adapter = netdev_priv(netdev);
317 u8 mac_speed = 0, connector = 0; 317 struct be_dma_mem phy_cmd;
318 struct be_cmd_resp_get_phy_info *resp;
319 u8 mac_speed = 0;
318 u16 link_speed = 0; 320 u16 link_speed = 0;
319 bool link_up = false; 321 bool link_up = false;
320 int status; 322 int status;
323 u16 intf_type;
321 324
322 if (adapter->link_speed < 0) { 325 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
323 status = be_cmd_link_status_query(adapter, &link_up, 326 status = be_cmd_link_status_query(adapter, &link_up,
324 &mac_speed, &link_speed); 327 &mac_speed, &link_speed);
325 328
329 be_link_status_update(adapter, link_up);
326 /* link_speed is in units of 10 Mbps */ 330 /* link_speed is in units of 10 Mbps */
327 if (link_speed) { 331 if (link_speed) {
328 ecmd->speed = link_speed*10; 332 ecmd->speed = link_speed*10;
@@ -337,40 +341,57 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
337 } 341 }
338 } 342 }
339 343
340 status = be_cmd_read_port_type(adapter, adapter->port_num, 344 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
341 &connector); 345 phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
346 &phy_cmd.dma);
347 if (!phy_cmd.va) {
348 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
349 return -ENOMEM;
350 }
351 status = be_cmd_get_phy_info(adapter, &phy_cmd);
342 if (!status) { 352 if (!status) {
343 switch (connector) { 353 resp = (struct be_cmd_resp_get_phy_info *) phy_cmd.va;
344 case 7: 354 intf_type = le16_to_cpu(resp->interface_type);
355
356 switch (intf_type) {
357 case PHY_TYPE_XFP_10GB:
358 case PHY_TYPE_SFP_1GB:
359 case PHY_TYPE_SFP_PLUS_10GB:
345 ecmd->port = PORT_FIBRE; 360 ecmd->port = PORT_FIBRE;
346 ecmd->transceiver = XCVR_EXTERNAL;
347 break;
348 case 0:
349 ecmd->port = PORT_TP;
350 ecmd->transceiver = XCVR_EXTERNAL;
351 break; 361 break;
352 default: 362 default:
353 ecmd->port = PORT_TP; 363 ecmd->port = PORT_TP;
354 ecmd->transceiver = XCVR_INTERNAL;
355 break; 364 break;
356 } 365 }
357 } else { 366
358 ecmd->port = PORT_AUI; 367 switch (intf_type) {
368 case PHY_TYPE_KR_10GB:
369 case PHY_TYPE_KX4_10GB:
370 ecmd->autoneg = AUTONEG_ENABLE;
359 ecmd->transceiver = XCVR_INTERNAL; 371 ecmd->transceiver = XCVR_INTERNAL;
372 break;
373 default:
374 ecmd->autoneg = AUTONEG_DISABLE;
375 ecmd->transceiver = XCVR_EXTERNAL;
376 break;
377 }
360 } 378 }
361 379
362 /* Save for future use */ 380 /* Save for future use */
363 adapter->link_speed = ecmd->speed; 381 adapter->link_speed = ecmd->speed;
364 adapter->port_type = ecmd->port; 382 adapter->port_type = ecmd->port;
365 adapter->transceiver = ecmd->transceiver; 383 adapter->transceiver = ecmd->transceiver;
384 adapter->autoneg = ecmd->autoneg;
385 pci_free_consistent(adapter->pdev, phy_cmd.size,
386 phy_cmd.va, phy_cmd.dma);
366 } else { 387 } else {
367 ecmd->speed = adapter->link_speed; 388 ecmd->speed = adapter->link_speed;
368 ecmd->port = adapter->port_type; 389 ecmd->port = adapter->port_type;
369 ecmd->transceiver = adapter->transceiver; 390 ecmd->transceiver = adapter->transceiver;
391 ecmd->autoneg = adapter->autoneg;
370 } 392 }
371 393
372 ecmd->duplex = DUPLEX_FULL; 394 ecmd->duplex = DUPLEX_FULL;
373 ecmd->autoneg = AUTONEG_DISABLE;
374 ecmd->phy_address = adapter->port_num; 395 ecmd->phy_address = adapter->port_num;
375 switch (ecmd->port) { 396 switch (ecmd->port) {
376 case PORT_FIBRE: 397 case PORT_FIBRE:
@@ -384,6 +405,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
384 break; 405 break;
385 } 406 }
386 407
408 if (ecmd->autoneg) {
409 ecmd->supported |= SUPPORTED_1000baseT_Full;
410 ecmd->supported |= SUPPORTED_Autoneg;
411 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
412 ADVERTISED_1000baseT_Full);
413 }
414
387 return 0; 415 return 0;
388} 416}
389 417
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 063026de4957..5d38046402b2 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -52,10 +52,20 @@
52 */ 52 */
53#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ 53#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
54 54
55/********* Power managment (WOL) **********/ 55/********* Power management (WOL) **********/
56#define PCICFG_PM_CONTROL_OFFSET 0x44 56#define PCICFG_PM_CONTROL_OFFSET 0x44
57#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */ 57#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
58 58
59/********* Online Control Registers *******/
60#define PCICFG_ONLINE0 0xB0
61#define PCICFG_ONLINE1 0xB4
62
63/********* UE Status and Mask Registers ***/
64#define PCICFG_UE_STATUS_LOW 0xA0
65#define PCICFG_UE_STATUS_HIGH 0xA4
66#define PCICFG_UE_STATUS_LOW_MASK 0xA8
67#define PCICFG_UE_STATUS_HI_MASK 0xAC
68
59/********* ISR0 Register offset **********/ 69/********* ISR0 Register offset **********/
60#define CEV_ISR0_OFFSET 0xC18 70#define CEV_ISR0_OFFSET 0xC18
61#define CEV_ISR_SIZE 4 71#define CEV_ISR_SIZE 4
@@ -192,7 +202,7 @@ struct amap_eth_hdr_wrb {
192 u8 event; 202 u8 event;
193 u8 crc; 203 u8 crc;
194 u8 forward; 204 u8 forward;
195 u8 ipsec; 205 u8 lso6;
196 u8 mgmt; 206 u8 mgmt;
197 u8 ipcs; 207 u8 ipcs;
198 u8 udpcs; 208 u8 udpcs;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 54b14272f333..74e146f470c6 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -40,6 +40,76 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { 0 } 40 { 0 }
41}; 41};
42MODULE_DEVICE_TABLE(pci, be_dev_ids); 42MODULE_DEVICE_TABLE(pci, be_dev_ids);
43/* UE Status Low CSR */
44static char *ue_status_low_desc[] = {
45 "CEV",
46 "CTX",
47 "DBUF",
48 "ERX",
49 "Host",
50 "MPU",
51 "NDMA",
52 "PTC ",
53 "RDMA ",
54 "RXF ",
55 "RXIPS ",
56 "RXULP0 ",
57 "RXULP1 ",
58 "RXULP2 ",
59 "TIM ",
60 "TPOST ",
61 "TPRE ",
62 "TXIPS ",
63 "TXULP0 ",
64 "TXULP1 ",
65 "UC ",
66 "WDMA ",
67 "TXULP2 ",
68 "HOST1 ",
69 "P0_OB_LINK ",
70 "P1_OB_LINK ",
71 "HOST_GPIO ",
72 "MBOX ",
73 "AXGMAC0",
74 "AXGMAC1",
75 "JTAG",
76 "MPU_INTPEND"
77};
78/* UE Status High CSR */
79static char *ue_status_hi_desc[] = {
80 "LPCMEMHOST",
81 "MGMT_MAC",
82 "PCS0ONLINE",
83 "MPU_IRAM",
84 "PCS1ONLINE",
85 "PCTL0",
86 "PCTL1",
87 "PMEM",
88 "RR",
89 "TXPB",
90 "RXPP",
91 "XAUI",
92 "TXP",
93 "ARM",
94 "IPC",
95 "HOST2",
96 "HOST3",
97 "HOST4",
98 "HOST5",
99 "HOST6",
100 "HOST7",
101 "HOST8",
102 "HOST9",
103 "NETC"
104 "Unknown",
105 "Unknown",
106 "Unknown",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown"
112};
43 113
44static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 114static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
45{ 115{
@@ -89,6 +159,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
89 u32 val = 0; 159 u32 val = 0;
90 val |= qid & DB_RQ_RING_ID_MASK; 160 val |= qid & DB_RQ_RING_ID_MASK;
91 val |= posted << DB_RQ_NUM_POSTED_SHIFT; 161 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
162
163 wmb();
92 iowrite32(val, adapter->db + DB_RQ_OFFSET); 164 iowrite32(val, adapter->db + DB_RQ_OFFSET);
93} 165}
94 166
@@ -97,6 +169,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
97 u32 val = 0; 169 u32 val = 0;
98 val |= qid & DB_TXULP_RING_ID_MASK; 170 val |= qid & DB_TXULP_RING_ID_MASK;
99 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; 171 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
172
173 wmb();
100 iowrite32(val, adapter->db + DB_TXULP1_OFFSET); 174 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
101} 175}
102 176
@@ -373,10 +447,12 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
373 447
374 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); 448 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
375 449
376 if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) { 450 if (skb_is_gso(skb)) {
377 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); 451 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
378 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, 452 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
379 hdr, skb_shinfo(skb)->gso_size); 453 hdr, skb_shinfo(skb)->gso_size);
454 if (skb_is_gso_v6(skb))
455 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
380 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 456 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
381 if (is_tcp_pkt(skb)) 457 if (is_tcp_pkt(skb))
382 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 458 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -546,11 +622,18 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
546 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. 622 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
547 * If the user configures more, place BE in vlan promiscuous mode. 623 * If the user configures more, place BE in vlan promiscuous mode.
548 */ 624 */
549static int be_vid_config(struct be_adapter *adapter) 625static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
550{ 626{
551 u16 vtag[BE_NUM_VLANS_SUPPORTED]; 627 u16 vtag[BE_NUM_VLANS_SUPPORTED];
552 u16 ntags = 0, i; 628 u16 ntags = 0, i;
553 int status = 0; 629 int status = 0;
630 u32 if_handle;
631
632 if (vf) {
633 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
634 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
635 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
636 }
554 637
555 if (adapter->vlans_added <= adapter->max_vlans) { 638 if (adapter->vlans_added <= adapter->max_vlans) {
556 /* Construct VLAN Table to give to HW */ 639 /* Construct VLAN Table to give to HW */
@@ -566,6 +649,7 @@ static int be_vid_config(struct be_adapter *adapter)
566 status = be_cmd_vlan_config(adapter, adapter->if_handle, 649 status = be_cmd_vlan_config(adapter, adapter->if_handle,
567 NULL, 0, 1, 1); 650 NULL, 0, 1, 1);
568 } 651 }
652
569 return status; 653 return status;
570} 654}
571 655
@@ -586,27 +670,28 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
586{ 670{
587 struct be_adapter *adapter = netdev_priv(netdev); 671 struct be_adapter *adapter = netdev_priv(netdev);
588 672
673 adapter->vlans_added++;
589 if (!be_physfn(adapter)) 674 if (!be_physfn(adapter))
590 return; 675 return;
591 676
592 adapter->vlan_tag[vid] = 1; 677 adapter->vlan_tag[vid] = 1;
593 adapter->vlans_added++;
594 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 678 if (adapter->vlans_added <= (adapter->max_vlans + 1))
595 be_vid_config(adapter); 679 be_vid_config(adapter, false, 0);
596} 680}
597 681
598static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) 682static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
599{ 683{
600 struct be_adapter *adapter = netdev_priv(netdev); 684 struct be_adapter *adapter = netdev_priv(netdev);
601 685
686 adapter->vlans_added--;
687 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
688
602 if (!be_physfn(adapter)) 689 if (!be_physfn(adapter))
603 return; 690 return;
604 691
605 adapter->vlan_tag[vid] = 0; 692 adapter->vlan_tag[vid] = 0;
606 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
607 adapter->vlans_added--;
608 if (adapter->vlans_added <= adapter->max_vlans) 693 if (adapter->vlans_added <= adapter->max_vlans)
609 be_vid_config(adapter); 694 be_vid_config(adapter, false, 0);
610} 695}
611 696
612static void be_set_multicast_list(struct net_device *netdev) 697static void be_set_multicast_list(struct net_device *netdev)
@@ -650,14 +735,93 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
650 if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) 735 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
651 return -EINVAL; 736 return -EINVAL;
652 737
653 status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf], 738 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
654 adapter->vf_pmac_id[vf]); 739 status = be_cmd_pmac_del(adapter,
740 adapter->vf_cfg[vf].vf_if_handle,
741 adapter->vf_cfg[vf].vf_pmac_id);
655 742
656 status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf], 743 status = be_cmd_pmac_add(adapter, mac,
657 &adapter->vf_pmac_id[vf]); 744 adapter->vf_cfg[vf].vf_if_handle,
658 if (!status) 745 &adapter->vf_cfg[vf].vf_pmac_id);
746
747 if (status)
659 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 748 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
660 mac, vf); 749 mac, vf);
750 else
751 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
752
753 return status;
754}
755
756static int be_get_vf_config(struct net_device *netdev, int vf,
757 struct ifla_vf_info *vi)
758{
759 struct be_adapter *adapter = netdev_priv(netdev);
760
761 if (!adapter->sriov_enabled)
762 return -EPERM;
763
764 if (vf >= num_vfs)
765 return -EINVAL;
766
767 vi->vf = vf;
768 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
769 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
770 vi->qos = 0;
771 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
772
773 return 0;
774}
775
776static int be_set_vf_vlan(struct net_device *netdev,
777 int vf, u16 vlan, u8 qos)
778{
779 struct be_adapter *adapter = netdev_priv(netdev);
780 int status = 0;
781
782 if (!adapter->sriov_enabled)
783 return -EPERM;
784
785 if ((vf >= num_vfs) || (vlan > 4095))
786 return -EINVAL;
787
788 if (vlan) {
789 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
790 adapter->vlans_added++;
791 } else {
792 adapter->vf_cfg[vf].vf_vlan_tag = 0;
793 adapter->vlans_added--;
794 }
795
796 status = be_vid_config(adapter, true, vf);
797
798 if (status)
799 dev_info(&adapter->pdev->dev,
800 "VLAN %d config on VF %d failed\n", vlan, vf);
801 return status;
802}
803
804static int be_set_vf_tx_rate(struct net_device *netdev,
805 int vf, int rate)
806{
807 struct be_adapter *adapter = netdev_priv(netdev);
808 int status = 0;
809
810 if (!adapter->sriov_enabled)
811 return -EPERM;
812
813 if ((vf >= num_vfs) || (rate < 0))
814 return -EINVAL;
815
816 if (rate > 10000)
817 rate = 10000;
818
819 adapter->vf_cfg[vf].vf_tx_rate = rate;
820 status = be_cmd_set_qos(adapter, rate / 10, vf);
821
822 if (status)
823 dev_info(&adapter->pdev->dev,
824 "tx rate %d on VF %d failed\n", rate, vf);
661 return status; 825 return status;
662} 826}
663 827
@@ -869,7 +1033,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
869 1033
870 /* vlanf could be wrongly set in some cards. 1034 /* vlanf could be wrongly set in some cards.
871 * ignore if vtm is not set */ 1035 * ignore if vtm is not set */
872 if ((adapter->cap & 0x400) && !vtm) 1036 if ((adapter->function_mode & 0x400) && !vtm)
873 vlanf = 0; 1037 vlanf = 0;
874 1038
875 if (unlikely(vlanf)) { 1039 if (unlikely(vlanf)) {
@@ -909,7 +1073,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
909 1073
910 /* vlanf could be wrongly set in some cards. 1074 /* vlanf could be wrongly set in some cards.
911 * ignore if vtm is not set */ 1075 * ignore if vtm is not set */
912 if ((adapter->cap & 0x400) && !vtm) 1076 if ((adapter->function_mode & 0x400) && !vtm)
913 vlanf = 0; 1077 vlanf = 0;
914 1078
915 skb = napi_get_frags(&eq_obj->napi); 1079 skb = napi_get_frags(&eq_obj->napi);
@@ -971,6 +1135,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
971 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) 1135 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
972 return NULL; 1136 return NULL;
973 1137
1138 rmb();
974 be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); 1139 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
975 1140
976 queue_tail_inc(&adapter->rx_obj.cq); 1141 queue_tail_inc(&adapter->rx_obj.cq);
@@ -1064,6 +1229,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1064 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) 1229 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1065 return NULL; 1230 return NULL;
1066 1231
1232 rmb();
1067 be_dws_le_to_cpu(txcp, sizeof(*txcp)); 1233 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1068 1234
1069 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; 1235 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
@@ -1111,6 +1277,7 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1111 if (!eqe->evt) 1277 if (!eqe->evt)
1112 return NULL; 1278 return NULL;
1113 1279
1280 rmb();
1114 eqe->evt = le32_to_cpu(eqe->evt); 1281 eqe->evt = le32_to_cpu(eqe->evt);
1115 queue_tail_inc(&eq_obj->q); 1282 queue_tail_inc(&eq_obj->q);
1116 return eqe; 1283 return eqe;
@@ -1576,12 +1743,66 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1576 return 1; 1743 return 1;
1577} 1744}
1578 1745
1746static inline bool be_detect_ue(struct be_adapter *adapter)
1747{
1748 u32 online0 = 0, online1 = 0;
1749
1750 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1751
1752 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1753
1754 if (!online0 || !online1) {
1755 adapter->ue_detected = true;
1756 dev_err(&adapter->pdev->dev,
1757 "UE Detected!! online0=%d online1=%d\n",
1758 online0, online1);
1759 return true;
1760 }
1761
1762 return false;
1763}
1764
1765void be_dump_ue(struct be_adapter *adapter)
1766{
1767 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1768 u32 i;
1769
1770 pci_read_config_dword(adapter->pdev,
1771 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1772 pci_read_config_dword(adapter->pdev,
1773 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1774 pci_read_config_dword(adapter->pdev,
1775 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1776 pci_read_config_dword(adapter->pdev,
1777 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1778
1779 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1780 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1781
1782 if (ue_status_lo) {
1783 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1784 if (ue_status_lo & 1)
1785 dev_err(&adapter->pdev->dev,
1786 "UE: %s bit set\n", ue_status_low_desc[i]);
1787 }
1788 }
1789 if (ue_status_hi) {
1790 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1791 if (ue_status_hi & 1)
1792 dev_err(&adapter->pdev->dev,
1793 "UE: %s bit set\n", ue_status_hi_desc[i]);
1794 }
1795 }
1796
1797}
1798
1579static void be_worker(struct work_struct *work) 1799static void be_worker(struct work_struct *work)
1580{ 1800{
1581 struct be_adapter *adapter = 1801 struct be_adapter *adapter =
1582 container_of(work, struct be_adapter, work.work); 1802 container_of(work, struct be_adapter, work.work);
1583 1803
1584 be_cmd_get_stats(adapter, &adapter->stats.cmd); 1804 if (!adapter->stats_ioctl_sent)
1805 be_cmd_get_stats(adapter, &adapter->stats.cmd);
1585 1806
1586 /* Set EQ delay */ 1807 /* Set EQ delay */
1587 be_rx_eqd_update(adapter); 1808 be_rx_eqd_update(adapter);
@@ -1593,6 +1814,10 @@ static void be_worker(struct work_struct *work)
1593 adapter->rx_post_starved = false; 1814 adapter->rx_post_starved = false;
1594 be_post_rx_frags(adapter); 1815 be_post_rx_frags(adapter);
1595 } 1816 }
1817 if (!adapter->ue_detected) {
1818 if (be_detect_ue(adapter))
1819 be_dump_ue(adapter);
1820 }
1596 1821
1597 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1822 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1598} 1823}
@@ -1620,9 +1845,11 @@ static void be_msix_enable(struct be_adapter *adapter)
1620 1845
1621static void be_sriov_enable(struct be_adapter *adapter) 1846static void be_sriov_enable(struct be_adapter *adapter)
1622{ 1847{
1848 be_check_sriov_fn_type(adapter);
1623#ifdef CONFIG_PCI_IOV 1849#ifdef CONFIG_PCI_IOV
1624 int status;
1625 if (be_physfn(adapter) && num_vfs) { 1850 if (be_physfn(adapter) && num_vfs) {
1851 int status;
1852
1626 status = pci_enable_sriov(adapter->pdev, num_vfs); 1853 status = pci_enable_sriov(adapter->pdev, num_vfs);
1627 adapter->sriov_enabled = status ? false : true; 1854 adapter->sriov_enabled = status ? false : true;
1628 } 1855 }
@@ -1735,6 +1962,44 @@ done:
1735 adapter->isr_registered = false; 1962 adapter->isr_registered = false;
1736} 1963}
1737 1964
1965static int be_close(struct net_device *netdev)
1966{
1967 struct be_adapter *adapter = netdev_priv(netdev);
1968 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1969 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1970 int vec;
1971
1972 cancel_delayed_work_sync(&adapter->work);
1973
1974 be_async_mcc_disable(adapter);
1975
1976 netif_stop_queue(netdev);
1977 netif_carrier_off(netdev);
1978 adapter->link_up = false;
1979
1980 be_intr_set(adapter, false);
1981
1982 if (adapter->msix_enabled) {
1983 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1984 synchronize_irq(vec);
1985 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1986 synchronize_irq(vec);
1987 } else {
1988 synchronize_irq(netdev->irq);
1989 }
1990 be_irq_unregister(adapter);
1991
1992 napi_disable(&rx_eq->napi);
1993 napi_disable(&tx_eq->napi);
1994
1995 /* Wait for all pending tx completions to arrive so that
1996 * all tx skbs are freed.
1997 */
1998 be_tx_compl_clean(adapter);
1999
2000 return 0;
2001}
2002
1738static int be_open(struct net_device *netdev) 2003static int be_open(struct net_device *netdev)
1739{ 2004{
1740 struct be_adapter *adapter = netdev_priv(netdev); 2005 struct be_adapter *adapter = netdev_priv(netdev);
@@ -1765,27 +2030,29 @@ static int be_open(struct net_device *netdev)
1765 /* Now that interrupts are on we can process async mcc */ 2030 /* Now that interrupts are on we can process async mcc */
1766 be_async_mcc_enable(adapter); 2031 be_async_mcc_enable(adapter);
1767 2032
2033 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2034
1768 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, 2035 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1769 &link_speed); 2036 &link_speed);
1770 if (status) 2037 if (status)
1771 goto ret_sts; 2038 goto err;
1772 be_link_status_update(adapter, link_up); 2039 be_link_status_update(adapter, link_up);
1773 2040
1774 if (be_physfn(adapter))
1775 status = be_vid_config(adapter);
1776 if (status)
1777 goto ret_sts;
1778
1779 if (be_physfn(adapter)) { 2041 if (be_physfn(adapter)) {
2042 status = be_vid_config(adapter, false, 0);
2043 if (status)
2044 goto err;
2045
1780 status = be_cmd_set_flow_control(adapter, 2046 status = be_cmd_set_flow_control(adapter,
1781 adapter->tx_fc, adapter->rx_fc); 2047 adapter->tx_fc, adapter->rx_fc);
1782 if (status) 2048 if (status)
1783 goto ret_sts; 2049 goto err;
1784 } 2050 }
1785 2051
1786 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); 2052 return 0;
1787ret_sts: 2053err:
1788 return status; 2054 be_close(adapter->netdev);
2055 return -EIO;
1789} 2056}
1790 2057
1791static int be_setup_wol(struct be_adapter *adapter, bool enable) 2058static int be_setup_wol(struct be_adapter *adapter, bool enable)
@@ -1853,13 +2120,15 @@ static int be_setup(struct be_adapter *adapter)
1853 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED 2120 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
1854 | BE_IF_FLAGS_BROADCAST; 2121 | BE_IF_FLAGS_BROADCAST;
1855 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2122 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1856 mac, true, &adapter->vf_if_handle[vf], 2123 mac, true,
2124 &adapter->vf_cfg[vf].vf_if_handle,
1857 NULL, vf+1); 2125 NULL, vf+1);
1858 if (status) { 2126 if (status) {
1859 dev_err(&adapter->pdev->dev, 2127 dev_err(&adapter->pdev->dev,
1860 "Interface Create failed for VF %d\n", vf); 2128 "Interface Create failed for VF %d\n", vf);
1861 goto if_destroy; 2129 goto if_destroy;
1862 } 2130 }
2131 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
1863 vf++; 2132 vf++;
1864 } 2133 }
1865 } else if (!be_physfn(adapter)) { 2134 } else if (!be_physfn(adapter)) {
@@ -1893,8 +2162,9 @@ tx_qs_destroy:
1893 be_tx_queues_destroy(adapter); 2162 be_tx_queues_destroy(adapter);
1894if_destroy: 2163if_destroy:
1895 for (vf = 0; vf < num_vfs; vf++) 2164 for (vf = 0; vf < num_vfs; vf++)
1896 if (adapter->vf_if_handle[vf]) 2165 if (adapter->vf_cfg[vf].vf_if_handle)
1897 be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]); 2166 be_cmd_if_destroy(adapter,
2167 adapter->vf_cfg[vf].vf_if_handle);
1898 be_cmd_if_destroy(adapter, adapter->if_handle); 2168 be_cmd_if_destroy(adapter, adapter->if_handle);
1899do_none: 2169do_none:
1900 return status; 2170 return status;
@@ -1913,43 +2183,6 @@ static int be_clear(struct be_adapter *adapter)
1913 return 0; 2183 return 0;
1914} 2184}
1915 2185
1916static int be_close(struct net_device *netdev)
1917{
1918 struct be_adapter *adapter = netdev_priv(netdev);
1919 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1920 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1921 int vec;
1922
1923 cancel_delayed_work_sync(&adapter->work);
1924
1925 be_async_mcc_disable(adapter);
1926
1927 netif_stop_queue(netdev);
1928 netif_carrier_off(netdev);
1929 adapter->link_up = false;
1930
1931 be_intr_set(adapter, false);
1932
1933 if (adapter->msix_enabled) {
1934 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1935 synchronize_irq(vec);
1936 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1937 synchronize_irq(vec);
1938 } else {
1939 synchronize_irq(netdev->irq);
1940 }
1941 be_irq_unregister(adapter);
1942
1943 napi_disable(&rx_eq->napi);
1944 napi_disable(&tx_eq->napi);
1945
1946 /* Wait for all pending tx completions to arrive so that
1947 * all tx skbs are freed.
1948 */
1949 be_tx_compl_clean(adapter);
1950
1951 return 0;
1952}
1953 2186
1954#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 2187#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
1955char flash_cookie[2][16] = {"*** SE FLAS", 2188char flash_cookie[2][16] = {"*** SE FLAS",
@@ -2174,7 +2407,10 @@ static struct net_device_ops be_netdev_ops = {
2174 .ndo_vlan_rx_register = be_vlan_register, 2407 .ndo_vlan_rx_register = be_vlan_register,
2175 .ndo_vlan_rx_add_vid = be_vlan_add_vid, 2408 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2176 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, 2409 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2177 .ndo_set_vf_mac = be_set_vf_mac 2410 .ndo_set_vf_mac = be_set_vf_mac,
2411 .ndo_set_vf_vlan = be_set_vf_vlan,
2412 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2413 .ndo_get_vf_config = be_get_vf_config
2178}; 2414};
2179 2415
2180static void be_netdev_init(struct net_device *netdev) 2416static void be_netdev_init(struct net_device *netdev)
@@ -2183,7 +2419,7 @@ static void be_netdev_init(struct net_device *netdev)
2183 2419
2184 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2420 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2185 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2421 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2186 NETIF_F_GRO; 2422 NETIF_F_GRO | NETIF_F_TSO6;
2187 2423
2188 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; 2424 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2189 2425
@@ -2393,7 +2629,7 @@ static int be_get_config(struct be_adapter *adapter)
2393 return status; 2629 return status;
2394 2630
2395 status = be_cmd_query_fw_cfg(adapter, 2631 status = be_cmd_query_fw_cfg(adapter,
2396 &adapter->port_num, &adapter->cap); 2632 &adapter->port_num, &adapter->function_mode);
2397 if (status) 2633 if (status)
2398 return status; 2634 return status;
2399 2635
@@ -2413,7 +2649,7 @@ static int be_get_config(struct be_adapter *adapter)
2413 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 2649 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2414 } 2650 }
2415 2651
2416 if (adapter->cap & 0x400) 2652 if (adapter->function_mode & 0x400)
2417 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4; 2653 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2418 else 2654 else
2419 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; 2655 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 368f33313fb6..012613fde3f4 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -922,61 +922,73 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
922# define bfin_tx_hwtstamp(dev, skb) 922# define bfin_tx_hwtstamp(dev, skb)
923#endif 923#endif
924 924
925static void adjust_tx_list(void) 925static inline void _tx_reclaim_skb(void)
926{
927 do {
928 tx_list_head->desc_a.config &= ~DMAEN;
929 tx_list_head->status.status_word = 0;
930 if (tx_list_head->skb) {
931 dev_kfree_skb(tx_list_head->skb);
932 tx_list_head->skb = NULL;
933 }
934 tx_list_head = tx_list_head->next;
935
936 } while (tx_list_head->status.status_word != 0);
937}
938
939static void tx_reclaim_skb(struct bfin_mac_local *lp)
926{ 940{
927 int timeout_cnt = MAX_TIMEOUT_CNT; 941 int timeout_cnt = MAX_TIMEOUT_CNT;
928 942
929 if (tx_list_head->status.status_word != 0 && 943 if (tx_list_head->status.status_word != 0)
930 current_tx_ptr != tx_list_head) { 944 _tx_reclaim_skb();
931 goto adjust_head; /* released something, just return; */
932 }
933 945
934 /* 946 if (current_tx_ptr->next == tx_list_head) {
935 * if nothing released, check wait condition
936 * current's next can not be the head,
937 * otherwise the dma will not stop as we want
938 */
939 if (current_tx_ptr->next->next == tx_list_head) {
940 while (tx_list_head->status.status_word == 0) { 947 while (tx_list_head->status.status_word == 0) {
948 /* slow down polling to avoid too many queue stop. */
941 udelay(10); 949 udelay(10);
942 if (tx_list_head->status.status_word != 0 || 950 /* reclaim skb if DMA is not running. */
943 !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) { 951 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
944 goto adjust_head; 952 break;
945 } 953 if (timeout_cnt-- < 0)
946 if (timeout_cnt-- < 0) {
947 printk(KERN_ERR DRV_NAME
948 ": wait for adjust tx list head timeout\n");
949 break; 954 break;
950 }
951 }
952 if (tx_list_head->status.status_word != 0) {
953 goto adjust_head;
954 } 955 }
956
957 if (timeout_cnt >= 0)
958 _tx_reclaim_skb();
959 else
960 netif_stop_queue(lp->ndev);
955 } 961 }
956 962
957 return; 963 if (current_tx_ptr->next != tx_list_head &&
964 netif_queue_stopped(lp->ndev))
965 netif_wake_queue(lp->ndev);
966
967 if (tx_list_head != current_tx_ptr) {
968 /* shorten the timer interval if tx queue is stopped */
969 if (netif_queue_stopped(lp->ndev))
970 lp->tx_reclaim_timer.expires =
971 jiffies + (TX_RECLAIM_JIFFIES >> 4);
972 else
973 lp->tx_reclaim_timer.expires =
974 jiffies + TX_RECLAIM_JIFFIES;
975
976 mod_timer(&lp->tx_reclaim_timer,
977 lp->tx_reclaim_timer.expires);
978 }
958 979
959adjust_head:
960 do {
961 tx_list_head->desc_a.config &= ~DMAEN;
962 tx_list_head->status.status_word = 0;
963 if (tx_list_head->skb) {
964 dev_kfree_skb(tx_list_head->skb);
965 tx_list_head->skb = NULL;
966 } else {
967 printk(KERN_ERR DRV_NAME
968 ": no sk_buff in a transmitted frame!\n");
969 }
970 tx_list_head = tx_list_head->next;
971 } while (tx_list_head->status.status_word != 0 &&
972 current_tx_ptr != tx_list_head);
973 return; 980 return;
981}
974 982
983static void tx_reclaim_skb_timeout(unsigned long lp)
984{
985 tx_reclaim_skb((struct bfin_mac_local *)lp);
975} 986}
976 987
977static int bfin_mac_hard_start_xmit(struct sk_buff *skb, 988static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
978 struct net_device *dev) 989 struct net_device *dev)
979{ 990{
991 struct bfin_mac_local *lp = netdev_priv(dev);
980 u16 *data; 992 u16 *data;
981 u32 data_align = (unsigned long)(skb->data) & 0x3; 993 u32 data_align = (unsigned long)(skb->data) & 0x3;
982 union skb_shared_tx *shtx = skb_tx(skb); 994 union skb_shared_tx *shtx = skb_tx(skb);
@@ -1009,8 +1021,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1009 skb->len); 1021 skb->len);
1010 current_tx_ptr->desc_a.start_addr = 1022 current_tx_ptr->desc_a.start_addr =
1011 (u32)current_tx_ptr->packet; 1023 (u32)current_tx_ptr->packet;
1012 if (current_tx_ptr->status.status_word != 0)
1013 current_tx_ptr->status.status_word = 0;
1014 blackfin_dcache_flush_range( 1024 blackfin_dcache_flush_range(
1015 (u32)current_tx_ptr->packet, 1025 (u32)current_tx_ptr->packet,
1016 (u32)(current_tx_ptr->packet + skb->len + 2)); 1026 (u32)(current_tx_ptr->packet + skb->len + 2));
@@ -1022,6 +1032,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1022 */ 1032 */
1023 SSYNC(); 1033 SSYNC();
1024 1034
1035 /* always clear status buffer before start tx dma */
1036 current_tx_ptr->status.status_word = 0;
1037
1025 /* enable this packet's dma */ 1038 /* enable this packet's dma */
1026 current_tx_ptr->desc_a.config |= DMAEN; 1039 current_tx_ptr->desc_a.config |= DMAEN;
1027 1040
@@ -1037,13 +1050,14 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1037 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); 1050 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1038 1051
1039out: 1052out:
1040 adjust_tx_list();
1041
1042 bfin_tx_hwtstamp(dev, skb); 1053 bfin_tx_hwtstamp(dev, skb);
1043 1054
1044 current_tx_ptr = current_tx_ptr->next; 1055 current_tx_ptr = current_tx_ptr->next;
1045 dev->stats.tx_packets++; 1056 dev->stats.tx_packets++;
1046 dev->stats.tx_bytes += (skb->len); 1057 dev->stats.tx_bytes += (skb->len);
1058
1059 tx_reclaim_skb(lp);
1060
1047 return NETDEV_TX_OK; 1061 return NETDEV_TX_OK;
1048} 1062}
1049 1063
@@ -1167,8 +1181,11 @@ real_rx:
1167#ifdef CONFIG_NET_POLL_CONTROLLER 1181#ifdef CONFIG_NET_POLL_CONTROLLER
1168static void bfin_mac_poll(struct net_device *dev) 1182static void bfin_mac_poll(struct net_device *dev)
1169{ 1183{
1184 struct bfin_mac_local *lp = netdev_priv(dev);
1185
1170 disable_irq(IRQ_MAC_RX); 1186 disable_irq(IRQ_MAC_RX);
1171 bfin_mac_interrupt(IRQ_MAC_RX, dev); 1187 bfin_mac_interrupt(IRQ_MAC_RX, dev);
1188 tx_reclaim_skb(lp);
1172 enable_irq(IRQ_MAC_RX); 1189 enable_irq(IRQ_MAC_RX);
1173} 1190}
1174#endif /* CONFIG_NET_POLL_CONTROLLER */ 1191#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -1232,12 +1249,27 @@ static int bfin_mac_enable(void)
1232/* Our watchdog timed out. Called by the networking layer */ 1249/* Our watchdog timed out. Called by the networking layer */
1233static void bfin_mac_timeout(struct net_device *dev) 1250static void bfin_mac_timeout(struct net_device *dev)
1234{ 1251{
1252 struct bfin_mac_local *lp = netdev_priv(dev);
1253
1235 pr_debug("%s: %s\n", dev->name, __func__); 1254 pr_debug("%s: %s\n", dev->name, __func__);
1236 1255
1237 bfin_mac_disable(); 1256 bfin_mac_disable();
1238 1257
1239 /* reset tx queue */ 1258 del_timer(&lp->tx_reclaim_timer);
1240 tx_list_tail = tx_list_head->next; 1259
1260 /* reset tx queue and free skb */
1261 while (tx_list_head != current_tx_ptr) {
1262 tx_list_head->desc_a.config &= ~DMAEN;
1263 tx_list_head->status.status_word = 0;
1264 if (tx_list_head->skb) {
1265 dev_kfree_skb(tx_list_head->skb);
1266 tx_list_head->skb = NULL;
1267 }
1268 tx_list_head = tx_list_head->next;
1269 }
1270
1271 if (netif_queue_stopped(lp->ndev))
1272 netif_wake_queue(lp->ndev);
1241 1273
1242 bfin_mac_enable(); 1274 bfin_mac_enable();
1243 1275
@@ -1430,6 +1462,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1430 SET_NETDEV_DEV(ndev, &pdev->dev); 1462 SET_NETDEV_DEV(ndev, &pdev->dev);
1431 platform_set_drvdata(pdev, ndev); 1463 platform_set_drvdata(pdev, ndev);
1432 lp = netdev_priv(ndev); 1464 lp = netdev_priv(ndev);
1465 lp->ndev = ndev;
1433 1466
1434 /* Grab the MAC address in the MAC */ 1467 /* Grab the MAC address in the MAC */
1435 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); 1468 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
@@ -1485,6 +1518,10 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1485 ndev->netdev_ops = &bfin_mac_netdev_ops; 1518 ndev->netdev_ops = &bfin_mac_netdev_ops;
1486 ndev->ethtool_ops = &bfin_mac_ethtool_ops; 1519 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1487 1520
1521 init_timer(&lp->tx_reclaim_timer);
1522 lp->tx_reclaim_timer.data = (unsigned long)lp;
1523 lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
1524
1488 spin_lock_init(&lp->lock); 1525 spin_lock_init(&lp->lock);
1489 1526
1490 /* now, enable interrupts */ 1527 /* now, enable interrupts */
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 1ae7b82ceeee..04e4050df18b 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -13,9 +13,12 @@
13#include <linux/net_tstamp.h> 13#include <linux/net_tstamp.h>
14#include <linux/clocksource.h> 14#include <linux/clocksource.h>
15#include <linux/timecompare.h> 15#include <linux/timecompare.h>
16#include <linux/timer.h>
16 17
17#define BFIN_MAC_CSUM_OFFLOAD 18#define BFIN_MAC_CSUM_OFFLOAD
18 19
20#define TX_RECLAIM_JIFFIES (HZ / 5)
21
19struct dma_descriptor { 22struct dma_descriptor {
20 struct dma_descriptor *next_dma_desc; 23 struct dma_descriptor *next_dma_desc;
21 unsigned long start_addr; 24 unsigned long start_addr;
@@ -68,6 +71,8 @@ struct bfin_mac_local {
68 71
69 int wol; /* Wake On Lan */ 72 int wol; /* Wake On Lan */
70 int irq_wake_requested; 73 int irq_wake_requested;
74 struct timer_list tx_reclaim_timer;
75 struct net_device *ndev;
71 76
72 /* MII and PHY stuffs */ 77 /* MII and PHY stuffs */
73 int old_link; /* used by bf537_adjust_link */ 78 int old_link; /* used by bf537_adjust_link */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 117432222a09..e6a803f1c507 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
58#include "bnx2_fw.h" 58#include "bnx2_fw.h"
59 59
60#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
61#define DRV_MODULE_VERSION "2.0.15" 61#define DRV_MODULE_VERSION "2.0.17"
62#define DRV_MODULE_RELDATE "May 4, 2010" 62#define DRV_MODULE_RELDATE "July 18, 2010"
63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
@@ -253,7 +253,8 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253{ 253{
254 u32 diff; 254 u32 diff;
255 255
256 smp_mb(); 256 /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257 barrier();
257 258
258 /* The ring uses 256 indices for 255 entries, one of them 259 /* The ring uses 256 indices for 255 entries, one of them
259 * needs to be skipped. 260 * needs to be skipped.
@@ -692,9 +693,9 @@ bnx2_free_tx_mem(struct bnx2 *bp)
692 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; 693 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
693 694
694 if (txr->tx_desc_ring) { 695 if (txr->tx_desc_ring) {
695 pci_free_consistent(bp->pdev, TXBD_RING_SIZE, 696 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
696 txr->tx_desc_ring, 697 txr->tx_desc_ring,
697 txr->tx_desc_mapping); 698 txr->tx_desc_mapping);
698 txr->tx_desc_ring = NULL; 699 txr->tx_desc_ring = NULL;
699 } 700 }
700 kfree(txr->tx_buf_ring); 701 kfree(txr->tx_buf_ring);
@@ -714,9 +715,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
714 715
715 for (j = 0; j < bp->rx_max_ring; j++) { 716 for (j = 0; j < bp->rx_max_ring; j++) {
716 if (rxr->rx_desc_ring[j]) 717 if (rxr->rx_desc_ring[j])
717 pci_free_consistent(bp->pdev, RXBD_RING_SIZE, 718 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
718 rxr->rx_desc_ring[j], 719 rxr->rx_desc_ring[j],
719 rxr->rx_desc_mapping[j]); 720 rxr->rx_desc_mapping[j]);
720 rxr->rx_desc_ring[j] = NULL; 721 rxr->rx_desc_ring[j] = NULL;
721 } 722 }
722 vfree(rxr->rx_buf_ring); 723 vfree(rxr->rx_buf_ring);
@@ -724,9 +725,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
724 725
725 for (j = 0; j < bp->rx_max_pg_ring; j++) { 726 for (j = 0; j < bp->rx_max_pg_ring; j++) {
726 if (rxr->rx_pg_desc_ring[j]) 727 if (rxr->rx_pg_desc_ring[j])
727 pci_free_consistent(bp->pdev, RXBD_RING_SIZE, 728 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
728 rxr->rx_pg_desc_ring[j], 729 rxr->rx_pg_desc_ring[j],
729 rxr->rx_pg_desc_mapping[j]); 730 rxr->rx_pg_desc_mapping[j]);
730 rxr->rx_pg_desc_ring[j] = NULL; 731 rxr->rx_pg_desc_ring[j] = NULL;
731 } 732 }
732 vfree(rxr->rx_pg_ring); 733 vfree(rxr->rx_pg_ring);
@@ -748,8 +749,8 @@ bnx2_alloc_tx_mem(struct bnx2 *bp)
748 return -ENOMEM; 749 return -ENOMEM;
749 750
750 txr->tx_desc_ring = 751 txr->tx_desc_ring =
751 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE, 752 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
752 &txr->tx_desc_mapping); 753 &txr->tx_desc_mapping, GFP_KERNEL);
753 if (txr->tx_desc_ring == NULL) 754 if (txr->tx_desc_ring == NULL)
754 return -ENOMEM; 755 return -ENOMEM;
755 } 756 }
@@ -776,8 +777,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
776 777
777 for (j = 0; j < bp->rx_max_ring; j++) { 778 for (j = 0; j < bp->rx_max_ring; j++) {
778 rxr->rx_desc_ring[j] = 779 rxr->rx_desc_ring[j] =
779 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE, 780 dma_alloc_coherent(&bp->pdev->dev,
780 &rxr->rx_desc_mapping[j]); 781 RXBD_RING_SIZE,
782 &rxr->rx_desc_mapping[j],
783 GFP_KERNEL);
781 if (rxr->rx_desc_ring[j] == NULL) 784 if (rxr->rx_desc_ring[j] == NULL)
782 return -ENOMEM; 785 return -ENOMEM;
783 786
@@ -795,8 +798,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
795 798
796 for (j = 0; j < bp->rx_max_pg_ring; j++) { 799 for (j = 0; j < bp->rx_max_pg_ring; j++) {
797 rxr->rx_pg_desc_ring[j] = 800 rxr->rx_pg_desc_ring[j] =
798 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE, 801 dma_alloc_coherent(&bp->pdev->dev,
799 &rxr->rx_pg_desc_mapping[j]); 802 RXBD_RING_SIZE,
803 &rxr->rx_pg_desc_mapping[j],
804 GFP_KERNEL);
800 if (rxr->rx_pg_desc_ring[j] == NULL) 805 if (rxr->rx_pg_desc_ring[j] == NULL)
801 return -ENOMEM; 806 return -ENOMEM;
802 807
@@ -816,16 +821,16 @@ bnx2_free_mem(struct bnx2 *bp)
816 821
817 for (i = 0; i < bp->ctx_pages; i++) { 822 for (i = 0; i < bp->ctx_pages; i++) {
818 if (bp->ctx_blk[i]) { 823 if (bp->ctx_blk[i]) {
819 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE, 824 dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
820 bp->ctx_blk[i], 825 bp->ctx_blk[i],
821 bp->ctx_blk_mapping[i]); 826 bp->ctx_blk_mapping[i]);
822 bp->ctx_blk[i] = NULL; 827 bp->ctx_blk[i] = NULL;
823 } 828 }
824 } 829 }
825 if (bnapi->status_blk.msi) { 830 if (bnapi->status_blk.msi) {
826 pci_free_consistent(bp->pdev, bp->status_stats_size, 831 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
827 bnapi->status_blk.msi, 832 bnapi->status_blk.msi,
828 bp->status_blk_mapping); 833 bp->status_blk_mapping);
829 bnapi->status_blk.msi = NULL; 834 bnapi->status_blk.msi = NULL;
830 bp->stats_blk = NULL; 835 bp->stats_blk = NULL;
831 } 836 }
@@ -846,8 +851,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
846 bp->status_stats_size = status_blk_size + 851 bp->status_stats_size = status_blk_size +
847 sizeof(struct statistics_block); 852 sizeof(struct statistics_block);
848 853
849 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size, 854 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
850 &bp->status_blk_mapping); 855 &bp->status_blk_mapping, GFP_KERNEL);
851 if (status_blk == NULL) 856 if (status_blk == NULL)
852 goto alloc_mem_err; 857 goto alloc_mem_err;
853 858
@@ -860,7 +865,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
860 bnapi->hw_rx_cons_ptr = 865 bnapi->hw_rx_cons_ptr =
861 &bnapi->status_blk.msi->status_rx_quick_consumer_index0; 866 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
862 if (bp->flags & BNX2_FLAG_MSIX_CAP) { 867 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
863 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) { 868 for (i = 1; i < bp->irq_nvecs; i++) {
864 struct status_block_msix *sblk; 869 struct status_block_msix *sblk;
865 870
866 bnapi = &bp->bnx2_napi[i]; 871 bnapi = &bp->bnx2_napi[i];
@@ -885,9 +890,10 @@ bnx2_alloc_mem(struct bnx2 *bp)
885 if (bp->ctx_pages == 0) 890 if (bp->ctx_pages == 0)
886 bp->ctx_pages = 1; 891 bp->ctx_pages = 1;
887 for (i = 0; i < bp->ctx_pages; i++) { 892 for (i = 0; i < bp->ctx_pages; i++) {
888 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev, 893 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
889 BCM_PAGE_SIZE, 894 BCM_PAGE_SIZE,
890 &bp->ctx_blk_mapping[i]); 895 &bp->ctx_blk_mapping[i],
896 GFP_KERNEL);
891 if (bp->ctx_blk[i] == NULL) 897 if (bp->ctx_blk[i] == NULL)
892 goto alloc_mem_err; 898 goto alloc_mem_err;
893 } 899 }
@@ -1446,7 +1452,8 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
1446static void 1452static void
1447bnx2_enable_forced_2g5(struct bnx2 *bp) 1453bnx2_enable_forced_2g5(struct bnx2 *bp)
1448{ 1454{
1449 u32 bmcr; 1455 u32 uninitialized_var(bmcr);
1456 int err;
1450 1457
1451 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 1458 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1452 return; 1459 return;
@@ -1456,22 +1463,28 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1456 1463
1457 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1458 MII_BNX2_BLK_ADDR_SERDES_DIG); 1465 MII_BNX2_BLK_ADDR_SERDES_DIG);
1459 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); 1466 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1460 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; 1467 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1461 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G; 1468 val |= MII_BNX2_SD_MISC1_FORCE |
1462 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); 1469 MII_BNX2_SD_MISC1_FORCE_2_5G;
1470 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1471 }
1463 1472
1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1473 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1465 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1474 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1466 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1475 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1467 1476
1468 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1477 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1469 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1478 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1470 bmcr |= BCM5708S_BMCR_FORCE_2500; 1479 if (!err)
1480 bmcr |= BCM5708S_BMCR_FORCE_2500;
1471 } else { 1481 } else {
1472 return; 1482 return;
1473 } 1483 }
1474 1484
1485 if (err)
1486 return;
1487
1475 if (bp->autoneg & AUTONEG_SPEED) { 1488 if (bp->autoneg & AUTONEG_SPEED) {
1476 bmcr &= ~BMCR_ANENABLE; 1489 bmcr &= ~BMCR_ANENABLE;
1477 if (bp->req_duplex == DUPLEX_FULL) 1490 if (bp->req_duplex == DUPLEX_FULL)
@@ -1483,7 +1496,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1483static void 1496static void
1484bnx2_disable_forced_2g5(struct bnx2 *bp) 1497bnx2_disable_forced_2g5(struct bnx2 *bp)
1485{ 1498{
1486 u32 bmcr; 1499 u32 uninitialized_var(bmcr);
1500 int err;
1487 1501
1488 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 1502 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1489 return; 1503 return;
@@ -1493,21 +1507,26 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
1493 1507
1494 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1508 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1495 MII_BNX2_BLK_ADDR_SERDES_DIG); 1509 MII_BNX2_BLK_ADDR_SERDES_DIG);
1496 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); 1510 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1497 val &= ~MII_BNX2_SD_MISC1_FORCE; 1511 val &= ~MII_BNX2_SD_MISC1_FORCE;
1498 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); 1512 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1513 }
1499 1514
1500 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1515 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1501 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1516 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1502 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1517 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1503 1518
1504 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1519 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1505 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1520 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1506 bmcr &= ~BCM5708S_BMCR_FORCE_2500; 1521 if (!err)
1522 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1507 } else { 1523 } else {
1508 return; 1524 return;
1509 } 1525 }
1510 1526
1527 if (err)
1528 return;
1529
1511 if (bp->autoneg & AUTONEG_SPEED) 1530 if (bp->autoneg & AUTONEG_SPEED)
1512 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART; 1531 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1513 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); 1532 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
@@ -2651,19 +2670,19 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2651} 2670}
2652 2671
2653static inline int 2672static inline int
2654bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) 2673bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2655{ 2674{
2656 dma_addr_t mapping; 2675 dma_addr_t mapping;
2657 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; 2676 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2658 struct rx_bd *rxbd = 2677 struct rx_bd *rxbd =
2659 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; 2678 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2660 struct page *page = alloc_page(GFP_ATOMIC); 2679 struct page *page = alloc_page(gfp);
2661 2680
2662 if (!page) 2681 if (!page)
2663 return -ENOMEM; 2682 return -ENOMEM;
2664 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE, 2683 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2665 PCI_DMA_FROMDEVICE); 2684 PCI_DMA_FROMDEVICE);
2666 if (pci_dma_mapping_error(bp->pdev, mapping)) { 2685 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2667 __free_page(page); 2686 __free_page(page);
2668 return -EIO; 2687 return -EIO;
2669 } 2688 }
@@ -2684,15 +2703,15 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2684 if (!page) 2703 if (!page)
2685 return; 2704 return;
2686 2705
2687 pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE, 2706 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2688 PCI_DMA_FROMDEVICE); 2707 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2689 2708
2690 __free_page(page); 2709 __free_page(page);
2691 rx_pg->page = NULL; 2710 rx_pg->page = NULL;
2692} 2711}
2693 2712
2694static inline int 2713static inline int
2695bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) 2714bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2696{ 2715{
2697 struct sk_buff *skb; 2716 struct sk_buff *skb;
2698 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; 2717 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
@@ -2700,7 +2719,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2700 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; 2719 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2701 unsigned long align; 2720 unsigned long align;
2702 2721
2703 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 2722 skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2704 if (skb == NULL) { 2723 if (skb == NULL) {
2705 return -ENOMEM; 2724 return -ENOMEM;
2706 } 2725 }
@@ -2708,9 +2727,9 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2708 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) 2727 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2709 skb_reserve(skb, BNX2_RX_ALIGN - align); 2728 skb_reserve(skb, BNX2_RX_ALIGN - align);
2710 2729
2711 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, 2730 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2712 PCI_DMA_FROMDEVICE); 2731 PCI_DMA_FROMDEVICE);
2713 if (pci_dma_mapping_error(bp->pdev, mapping)) { 2732 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2714 dev_kfree_skb(skb); 2733 dev_kfree_skb(skb);
2715 return -EIO; 2734 return -EIO;
2716 } 2735 }
@@ -2816,7 +2835,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2816 } 2835 }
2817 } 2836 }
2818 2837
2819 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping), 2838 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2820 skb_headlen(skb), PCI_DMA_TODEVICE); 2839 skb_headlen(skb), PCI_DMA_TODEVICE);
2821 2840
2822 tx_buf->skb = NULL; 2841 tx_buf->skb = NULL;
@@ -2825,7 +2844,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2825 for (i = 0; i < last; i++) { 2844 for (i = 0; i < last; i++) {
2826 sw_cons = NEXT_TX_BD(sw_cons); 2845 sw_cons = NEXT_TX_BD(sw_cons);
2827 2846
2828 pci_unmap_page(bp->pdev, 2847 dma_unmap_page(&bp->pdev->dev,
2829 dma_unmap_addr( 2848 dma_unmap_addr(
2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], 2849 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2831 mapping), 2850 mapping),
@@ -2932,7 +2951,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2932 cons_rx_buf = &rxr->rx_buf_ring[cons]; 2951 cons_rx_buf = &rxr->rx_buf_ring[cons];
2933 prod_rx_buf = &rxr->rx_buf_ring[prod]; 2952 prod_rx_buf = &rxr->rx_buf_ring[prod];
2934 2953
2935 pci_dma_sync_single_for_device(bp->pdev, 2954 dma_sync_single_for_device(&bp->pdev->dev,
2936 dma_unmap_addr(cons_rx_buf, mapping), 2955 dma_unmap_addr(cons_rx_buf, mapping),
2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 2956 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2938 2957
@@ -2961,7 +2980,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2961 int err; 2980 int err;
2962 u16 prod = ring_idx & 0xffff; 2981 u16 prod = ring_idx & 0xffff;
2963 2982
2964 err = bnx2_alloc_rx_skb(bp, rxr, prod); 2983 err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
2965 if (unlikely(err)) { 2984 if (unlikely(err)) {
2966 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod); 2985 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2967 if (hdr_len) { 2986 if (hdr_len) {
@@ -2974,7 +2993,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2974 } 2993 }
2975 2994
2976 skb_reserve(skb, BNX2_RX_OFFSET); 2995 skb_reserve(skb, BNX2_RX_OFFSET);
2977 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size, 2996 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
2978 PCI_DMA_FROMDEVICE); 2997 PCI_DMA_FROMDEVICE);
2979 2998
2980 if (hdr_len == 0) { 2999 if (hdr_len == 0) {
@@ -3026,7 +3045,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
3026 rx_pg->page = NULL; 3045 rx_pg->page = NULL;
3027 3046
3028 err = bnx2_alloc_rx_page(bp, rxr, 3047 err = bnx2_alloc_rx_page(bp, rxr,
3029 RX_PG_RING_IDX(pg_prod)); 3048 RX_PG_RING_IDX(pg_prod),
3049 GFP_ATOMIC);
3030 if (unlikely(err)) { 3050 if (unlikely(err)) {
3031 rxr->rx_pg_cons = pg_cons; 3051 rxr->rx_pg_cons = pg_cons;
3032 rxr->rx_pg_prod = pg_prod; 3052 rxr->rx_pg_prod = pg_prod;
@@ -3035,7 +3055,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
3035 return err; 3055 return err;
3036 } 3056 }
3037 3057
3038 pci_unmap_page(bp->pdev, mapping_old, 3058 dma_unmap_page(&bp->pdev->dev, mapping_old,
3039 PAGE_SIZE, PCI_DMA_FROMDEVICE); 3059 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3040 3060
3041 frag_size -= frag_len; 3061 frag_size -= frag_len;
@@ -3106,7 +3126,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3106 3126
3107 dma_addr = dma_unmap_addr(rx_buf, mapping); 3127 dma_addr = dma_unmap_addr(rx_buf, mapping);
3108 3128
3109 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, 3129 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3110 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, 3130 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3111 PCI_DMA_FROMDEVICE); 3131 PCI_DMA_FROMDEVICE);
3112 3132
@@ -3206,6 +3226,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3206 L2_FHDR_ERRORS_UDP_XSUM)) == 0)) 3226 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3207 skb->ip_summed = CHECKSUM_UNNECESSARY; 3227 skb->ip_summed = CHECKSUM_UNNECESSARY;
3208 } 3228 }
3229 if ((bp->dev->features & NETIF_F_RXHASH) &&
3230 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3231 L2_FHDR_STATUS_USE_RXHASH))
3232 skb->rxhash = rx_hdr->l2_fhdr_hash;
3209 3233
3210 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); 3234 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3211 3235
@@ -5162,7 +5186,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5162 5186
5163 ring_prod = prod = rxr->rx_pg_prod; 5187 ring_prod = prod = rxr->rx_pg_prod;
5164 for (i = 0; i < bp->rx_pg_ring_size; i++) { 5188 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5165 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) { 5189 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5166 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n", 5190 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5167 ring_num, i, bp->rx_pg_ring_size); 5191 ring_num, i, bp->rx_pg_ring_size);
5168 break; 5192 break;
@@ -5174,7 +5198,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5174 5198
5175 ring_prod = prod = rxr->rx_prod; 5199 ring_prod = prod = rxr->rx_prod;
5176 for (i = 0; i < bp->rx_ring_size; i++) { 5200 for (i = 0; i < bp->rx_ring_size; i++) {
5177 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) { 5201 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5178 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", 5202 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5179 ring_num, i, bp->rx_ring_size); 5203 ring_num, i, bp->rx_ring_size);
5180 break; 5204 break;
@@ -5320,7 +5344,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5320 continue; 5344 continue;
5321 } 5345 }
5322 5346
5323 pci_unmap_single(bp->pdev, 5347 dma_unmap_single(&bp->pdev->dev,
5324 dma_unmap_addr(tx_buf, mapping), 5348 dma_unmap_addr(tx_buf, mapping),
5325 skb_headlen(skb), 5349 skb_headlen(skb),
5326 PCI_DMA_TODEVICE); 5350 PCI_DMA_TODEVICE);
@@ -5331,7 +5355,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5331 j++; 5355 j++;
5332 for (k = 0; k < last; k++, j++) { 5356 for (k = 0; k < last; k++, j++) {
5333 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5357 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5334 pci_unmap_page(bp->pdev, 5358 dma_unmap_page(&bp->pdev->dev,
5335 dma_unmap_addr(tx_buf, mapping), 5359 dma_unmap_addr(tx_buf, mapping),
5336 skb_shinfo(skb)->frags[k].size, 5360 skb_shinfo(skb)->frags[k].size,
5337 PCI_DMA_TODEVICE); 5361 PCI_DMA_TODEVICE);
@@ -5361,7 +5385,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
5361 if (skb == NULL) 5385 if (skb == NULL)
5362 continue; 5386 continue;
5363 5387
5364 pci_unmap_single(bp->pdev, 5388 dma_unmap_single(&bp->pdev->dev,
5365 dma_unmap_addr(rx_buf, mapping), 5389 dma_unmap_addr(rx_buf, mapping),
5366 bp->rx_buf_use_size, 5390 bp->rx_buf_use_size,
5367 PCI_DMA_FROMDEVICE); 5391 PCI_DMA_FROMDEVICE);
@@ -5714,9 +5738,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5714 for (i = 14; i < pkt_size; i++) 5738 for (i = 14; i < pkt_size; i++)
5715 packet[i] = (unsigned char) (i & 0xff); 5739 packet[i] = (unsigned char) (i & 0xff);
5716 5740
5717 map = pci_map_single(bp->pdev, skb->data, pkt_size, 5741 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5718 PCI_DMA_TODEVICE); 5742 PCI_DMA_TODEVICE);
5719 if (pci_dma_mapping_error(bp->pdev, map)) { 5743 if (dma_mapping_error(&bp->pdev->dev, map)) {
5720 dev_kfree_skb(skb); 5744 dev_kfree_skb(skb);
5721 return -EIO; 5745 return -EIO;
5722 } 5746 }
@@ -5754,7 +5778,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5754 5778
5755 udelay(5); 5779 udelay(5);
5756 5780
5757 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); 5781 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5758 dev_kfree_skb(skb); 5782 dev_kfree_skb(skb);
5759 5783
5760 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) 5784 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -5771,7 +5795,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5771 rx_hdr = rx_buf->desc; 5795 rx_hdr = rx_buf->desc;
5772 skb_reserve(rx_skb, BNX2_RX_OFFSET); 5796 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5773 5797
5774 pci_dma_sync_single_for_cpu(bp->pdev, 5798 dma_sync_single_for_cpu(&bp->pdev->dev,
5775 dma_unmap_addr(rx_buf, mapping), 5799 dma_unmap_addr(rx_buf, mapping),
5776 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 5800 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5777 5801
@@ -6129,7 +6153,7 @@ bnx2_free_irq(struct bnx2 *bp)
6129static void 6153static void
6130bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) 6154bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6131{ 6155{
6132 int i, rc; 6156 int i, total_vecs, rc;
6133 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; 6157 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6134 struct net_device *dev = bp->dev; 6158 struct net_device *dev = bp->dev;
6135 const int len = sizeof(bp->irq_tbl[0].name); 6159 const int len = sizeof(bp->irq_tbl[0].name);
@@ -6148,13 +6172,29 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6148 msix_ent[i].vector = 0; 6172 msix_ent[i].vector = 0;
6149 } 6173 }
6150 6174
6151 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC); 6175 total_vecs = msix_vecs;
6176#ifdef BCM_CNIC
6177 total_vecs++;
6178#endif
6179 rc = -ENOSPC;
6180 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6181 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6182 if (rc <= 0)
6183 break;
6184 if (rc > 0)
6185 total_vecs = rc;
6186 }
6187
6152 if (rc != 0) 6188 if (rc != 0)
6153 return; 6189 return;
6154 6190
6191 msix_vecs = total_vecs;
6192#ifdef BCM_CNIC
6193 msix_vecs--;
6194#endif
6155 bp->irq_nvecs = msix_vecs; 6195 bp->irq_nvecs = msix_vecs;
6156 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; 6196 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6157 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 6197 for (i = 0; i < total_vecs; i++) {
6158 bp->irq_tbl[i].vector = msix_ent[i].vector; 6198 bp->irq_tbl[i].vector = msix_ent[i].vector;
6159 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i); 6199 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6160 bp->irq_tbl[i].handler = bnx2_msi_1shot; 6200 bp->irq_tbl[i].handler = bnx2_msi_1shot;
@@ -6172,7 +6212,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6172 bp->irq_nvecs = 1; 6212 bp->irq_nvecs = 1;
6173 bp->irq_tbl[0].vector = bp->pdev->irq; 6213 bp->irq_tbl[0].vector = bp->pdev->irq;
6174 6214
6175 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1) 6215 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6176 bnx2_enable_msix(bp, msix_vecs); 6216 bnx2_enable_msix(bp, msix_vecs);
6177 6217
6178 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && 6218 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
@@ -6296,9 +6336,14 @@ static void
6296bnx2_dump_state(struct bnx2 *bp) 6336bnx2_dump_state(struct bnx2 *bp)
6297{ 6337{
6298 struct net_device *dev = bp->dev; 6338 struct net_device *dev = bp->dev;
6299 u32 mcp_p0, mcp_p1; 6339 u32 mcp_p0, mcp_p1, val1, val2;
6300 6340
6301 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem)); 6341 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6342 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6343 atomic_read(&bp->intr_sem), val1);
6344 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6345 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6346 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6302 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", 6347 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6303 REG_RD(bp, BNX2_EMAC_TX_STATUS), 6348 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6304 REG_RD(bp, BNX2_EMAC_RX_STATUS)); 6349 REG_RD(bp, BNX2_EMAC_RX_STATUS));
@@ -6434,8 +6479,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6434 } else 6479 } else
6435 mss = 0; 6480 mss = 0;
6436 6481
6437 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); 6482 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6438 if (pci_dma_mapping_error(bp->pdev, mapping)) { 6483 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6439 dev_kfree_skb(skb); 6484 dev_kfree_skb(skb);
6440 return NETDEV_TX_OK; 6485 return NETDEV_TX_OK;
6441 } 6486 }
@@ -6463,9 +6508,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6463 txbd = &txr->tx_desc_ring[ring_prod]; 6508 txbd = &txr->tx_desc_ring[ring_prod];
6464 6509
6465 len = frag->size; 6510 len = frag->size;
6466 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, 6511 mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
6467 len, PCI_DMA_TODEVICE); 6512 len, PCI_DMA_TODEVICE);
6468 if (pci_dma_mapping_error(bp->pdev, mapping)) 6513 if (dma_mapping_error(&bp->pdev->dev, mapping))
6469 goto dma_error; 6514 goto dma_error;
6470 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, 6515 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6471 mapping); 6516 mapping);
@@ -6490,6 +6535,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6490 6535
6491 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { 6536 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6492 netif_tx_stop_queue(txq); 6537 netif_tx_stop_queue(txq);
6538
6539 /* netif_tx_stop_queue() must be done before checking
6540 * tx index in bnx2_tx_avail() below, because in
6541 * bnx2_tx_int(), we update tx index before checking for
6542 * netif_tx_queue_stopped().
6543 */
6544 smp_mb();
6493 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh) 6545 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6494 netif_tx_wake_queue(txq); 6546 netif_tx_wake_queue(txq);
6495 } 6547 }
@@ -6504,7 +6556,7 @@ dma_error:
6504 ring_prod = TX_RING_IDX(prod); 6556 ring_prod = TX_RING_IDX(prod);
6505 tx_buf = &txr->tx_buf_ring[ring_prod]; 6557 tx_buf = &txr->tx_buf_ring[ring_prod];
6506 tx_buf->skb = NULL; 6558 tx_buf->skb = NULL;
6507 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping), 6559 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6508 skb_headlen(skb), PCI_DMA_TODEVICE); 6560 skb_headlen(skb), PCI_DMA_TODEVICE);
6509 6561
6510 /* unmap remaining mapped pages */ 6562 /* unmap remaining mapped pages */
@@ -6512,7 +6564,7 @@ dma_error:
6512 prod = NEXT_TX_BD(prod); 6564 prod = NEXT_TX_BD(prod);
6513 ring_prod = TX_RING_IDX(prod); 6565 ring_prod = TX_RING_IDX(prod);
6514 tx_buf = &txr->tx_buf_ring[ring_prod]; 6566 tx_buf = &txr->tx_buf_ring[ring_prod];
6515 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping), 6567 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6516 skb_shinfo(skb)->frags[i].size, 6568 skb_shinfo(skb)->frags[i].size,
6517 PCI_DMA_TODEVICE); 6569 PCI_DMA_TODEVICE);
6518 } 6570 }
@@ -6567,36 +6619,25 @@ bnx2_save_stats(struct bnx2 *bp)
6567 temp_stats[i] += hw_stats[i]; 6619 temp_stats[i] += hw_stats[i];
6568} 6620}
6569 6621
6570#define GET_64BIT_NET_STATS64(ctr) \ 6622#define GET_64BIT_NET_STATS64(ctr) \
6571 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \ 6623 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6572 (unsigned long) (ctr##_lo)
6573
6574#define GET_64BIT_NET_STATS32(ctr) \
6575 (ctr##_lo)
6576 6624
6577#if (BITS_PER_LONG == 64)
6578#define GET_64BIT_NET_STATS(ctr) \ 6625#define GET_64BIT_NET_STATS(ctr) \
6579 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \ 6626 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6580 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr) 6627 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6581#else
6582#define GET_64BIT_NET_STATS(ctr) \
6583 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6584 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6585#endif
6586 6628
6587#define GET_32BIT_NET_STATS(ctr) \ 6629#define GET_32BIT_NET_STATS(ctr) \
6588 (unsigned long) (bp->stats_blk->ctr + \ 6630 (unsigned long) (bp->stats_blk->ctr + \
6589 bp->temp_stats_blk->ctr) 6631 bp->temp_stats_blk->ctr)
6590 6632
6591static struct net_device_stats * 6633static struct rtnl_link_stats64 *
6592bnx2_get_stats(struct net_device *dev) 6634bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6593{ 6635{
6594 struct bnx2 *bp = netdev_priv(dev); 6636 struct bnx2 *bp = netdev_priv(dev);
6595 struct net_device_stats *net_stats = &dev->stats;
6596 6637
6597 if (bp->stats_blk == NULL) { 6638 if (bp->stats_blk == NULL)
6598 return net_stats; 6639 return net_stats;
6599 } 6640
6600 net_stats->rx_packets = 6641 net_stats->rx_packets =
6601 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + 6642 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6602 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) + 6643 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
@@ -6614,7 +6655,7 @@ bnx2_get_stats(struct net_device *dev)
6614 GET_64BIT_NET_STATS(stat_IfHCOutOctets); 6655 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6615 6656
6616 net_stats->multicast = 6657 net_stats->multicast =
6617 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts); 6658 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6618 6659
6619 net_stats->collisions = 6660 net_stats->collisions =
6620 GET_32BIT_NET_STATS(stat_EtherStatsCollisions); 6661 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
@@ -7545,6 +7586,12 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data)
7545 return (ethtool_op_set_tx_csum(dev, data)); 7586 return (ethtool_op_set_tx_csum(dev, data));
7546} 7587}
7547 7588
7589static int
7590bnx2_set_flags(struct net_device *dev, u32 data)
7591{
7592 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7593}
7594
7548static const struct ethtool_ops bnx2_ethtool_ops = { 7595static const struct ethtool_ops bnx2_ethtool_ops = {
7549 .get_settings = bnx2_get_settings, 7596 .get_settings = bnx2_get_settings,
7550 .set_settings = bnx2_set_settings, 7597 .set_settings = bnx2_set_settings,
@@ -7574,6 +7621,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
7574 .phys_id = bnx2_phys_id, 7621 .phys_id = bnx2_phys_id,
7575 .get_ethtool_stats = bnx2_get_ethtool_stats, 7622 .get_ethtool_stats = bnx2_get_ethtool_stats,
7576 .get_sset_count = bnx2_get_sset_count, 7623 .get_sset_count = bnx2_get_sset_count,
7624 .set_flags = bnx2_set_flags,
7625 .get_flags = ethtool_op_get_flags,
7577}; 7626};
7578 7627
7579/* Called with rtnl_lock */ 7628/* Called with rtnl_lock */
@@ -8259,7 +8308,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8259 .ndo_open = bnx2_open, 8308 .ndo_open = bnx2_open,
8260 .ndo_start_xmit = bnx2_start_xmit, 8309 .ndo_start_xmit = bnx2_start_xmit,
8261 .ndo_stop = bnx2_close, 8310 .ndo_stop = bnx2_close,
8262 .ndo_get_stats = bnx2_get_stats, 8311 .ndo_get_stats64 = bnx2_get_stats64,
8263 .ndo_set_rx_mode = bnx2_set_rx_mode, 8312 .ndo_set_rx_mode = bnx2_set_rx_mode,
8264 .ndo_do_ioctl = bnx2_ioctl, 8313 .ndo_do_ioctl = bnx2_ioctl,
8265 .ndo_validate_addr = eth_validate_addr, 8314 .ndo_validate_addr = eth_validate_addr,
@@ -8320,7 +8369,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8320 memcpy(dev->dev_addr, bp->mac_addr, 6); 8369 memcpy(dev->dev_addr, bp->mac_addr, 6);
8321 memcpy(dev->perm_addr, bp->mac_addr, 6); 8370 memcpy(dev->perm_addr, bp->mac_addr, 6);
8322 8371
8323 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO; 8372 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
8373 NETIF_F_RXHASH;
8324 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG); 8374 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8325 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8375 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8326 dev->features |= NETIF_F_IPV6_CSUM; 8376 dev->features |= NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index ddaa3fc99876..2104c1005d02 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -295,6 +295,9 @@ struct l2_fhdr {
295 #define L2_FHDR_ERRORS_TCP_XSUM (1<<28) 295 #define L2_FHDR_ERRORS_TCP_XSUM (1<<28)
296 #define L2_FHDR_ERRORS_UDP_XSUM (1<<31) 296 #define L2_FHDR_ERRORS_UDP_XSUM (1<<31)
297 297
298 #define L2_FHDR_STATUS_USE_RXHASH \
299 (L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH)
300
298 u32 l2_fhdr_hash; 301 u32 l2_fhdr_hash;
299#if defined(__BIG_ENDIAN) 302#if defined(__BIG_ENDIAN)
300 u16 l2_fhdr_pkt_len; 303 u16 l2_fhdr_pkt_len;
@@ -6634,9 +6637,12 @@ struct flash_spec {
6634 6637
6635#define BNX2_MAX_MSIX_HW_VEC 9 6638#define BNX2_MAX_MSIX_HW_VEC 9
6636#define BNX2_MAX_MSIX_VEC 9 6639#define BNX2_MAX_MSIX_VEC 9
6637#define BNX2_BASE_VEC 0 6640#ifdef BCM_CNIC
6638#define BNX2_TX_VEC 1 6641#define BNX2_MIN_MSIX_VEC 2
6639#define BNX2_TX_INT_NUM (BNX2_TX_VEC << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT) 6642#else
6643#define BNX2_MIN_MSIX_VEC 1
6644#endif
6645
6640 6646
6641struct bnx2_irq { 6647struct bnx2_irq {
6642 irq_handler_t handler; 6648 irq_handler_t handler;
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
new file mode 100644
index 000000000000..084afce89ae9
--- /dev/null
+++ b/drivers/net/bnx2x/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for Broadcom 10-Gigabit ethernet driver
3#
4
5obj-$(CONFIG_BNX2X) += bnx2x.o
6
7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index bb0872a63315..53af9c93e75c 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,6 +20,10 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.52.53-3"
24#define DRV_MODULE_RELDATE "2010/18/04"
25#define BNX2X_BC_VER 0x040200
26
23#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
24#define BCM_VLAN 1 28#define BCM_VLAN 1
25#endif 29#endif
@@ -32,7 +36,7 @@
32 36
33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
34#define BCM_CNIC 1 38#define BCM_CNIC 1
35#include "cnic_if.h" 39#include "../cnic_if.h"
36#endif 40#endif
37 41
38 42
@@ -45,10 +49,12 @@
45#endif 49#endif
46 50
47#include <linux/mdio.h> 51#include <linux/mdio.h>
52#include <linux/pci.h>
48#include "bnx2x_reg.h" 53#include "bnx2x_reg.h"
49#include "bnx2x_fw_defs.h" 54#include "bnx2x_fw_defs.h"
50#include "bnx2x_hsi.h" 55#include "bnx2x_hsi.h"
51#include "bnx2x_link.h" 56#include "bnx2x_link.h"
57#include "bnx2x_stats.h"
52 58
53/* error/debug prints */ 59/* error/debug prints */
54 60
@@ -106,6 +112,7 @@ do { \
106 dev_info(&bp->pdev->dev, __fmt, ##__args); \ 112 dev_info(&bp->pdev->dev, __fmt, ##__args); \
107} while (0) 113} while (0)
108 114
115void bnx2x_panic_dump(struct bnx2x *bp);
109 116
110#ifdef BNX2X_STOP_ON_ERROR 117#ifdef BNX2X_STOP_ON_ERROR
111#define bnx2x_panic() do { \ 118#define bnx2x_panic() do { \
@@ -248,43 +255,6 @@ union db_prod {
248#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) 255#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
249 256
250 257
251struct bnx2x_eth_q_stats {
252 u32 total_bytes_received_hi;
253 u32 total_bytes_received_lo;
254 u32 total_bytes_transmitted_hi;
255 u32 total_bytes_transmitted_lo;
256 u32 total_unicast_packets_received_hi;
257 u32 total_unicast_packets_received_lo;
258 u32 total_multicast_packets_received_hi;
259 u32 total_multicast_packets_received_lo;
260 u32 total_broadcast_packets_received_hi;
261 u32 total_broadcast_packets_received_lo;
262 u32 total_unicast_packets_transmitted_hi;
263 u32 total_unicast_packets_transmitted_lo;
264 u32 total_multicast_packets_transmitted_hi;
265 u32 total_multicast_packets_transmitted_lo;
266 u32 total_broadcast_packets_transmitted_hi;
267 u32 total_broadcast_packets_transmitted_lo;
268 u32 valid_bytes_received_hi;
269 u32 valid_bytes_received_lo;
270
271 u32 error_bytes_received_hi;
272 u32 error_bytes_received_lo;
273 u32 etherstatsoverrsizepkts_hi;
274 u32 etherstatsoverrsizepkts_lo;
275 u32 no_buff_discard_hi;
276 u32 no_buff_discard_lo;
277
278 u32 driver_xoff;
279 u32 rx_err_discard_pkt;
280 u32 rx_skb_alloc_failed;
281 u32 hw_csum_err;
282};
283
284#define BNX2X_NUM_Q_STATS 13
285#define Q_STATS_OFFSET32(stat_name) \
286 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
287
288struct bnx2x_fastpath { 258struct bnx2x_fastpath {
289 259
290 struct napi_struct napi; 260 struct napi_struct napi;
@@ -593,27 +563,6 @@ struct bnx2x_common {
593 563
594/* port */ 564/* port */
595 565
596struct nig_stats {
597 u32 brb_discard;
598 u32 brb_packet;
599 u32 brb_truncate;
600 u32 flow_ctrl_discard;
601 u32 flow_ctrl_octets;
602 u32 flow_ctrl_packet;
603 u32 mng_discard;
604 u32 mng_octet_inp;
605 u32 mng_octet_out;
606 u32 mng_packet_inp;
607 u32 mng_packet_out;
608 u32 pbf_octets;
609 u32 pbf_packet;
610 u32 safc_inp;
611 u32 egress_mac_pkt0_lo;
612 u32 egress_mac_pkt0_hi;
613 u32 egress_mac_pkt1_lo;
614 u32 egress_mac_pkt1_hi;
615};
616
617struct bnx2x_port { 566struct bnx2x_port {
618 u32 pmf; 567 u32 pmf;
619 568
@@ -641,156 +590,6 @@ struct bnx2x_port {
641/* end of port */ 590/* end of port */
642 591
643 592
644enum bnx2x_stats_event {
645 STATS_EVENT_PMF = 0,
646 STATS_EVENT_LINK_UP,
647 STATS_EVENT_UPDATE,
648 STATS_EVENT_STOP,
649 STATS_EVENT_MAX
650};
651
652enum bnx2x_stats_state {
653 STATS_STATE_DISABLED = 0,
654 STATS_STATE_ENABLED,
655 STATS_STATE_MAX
656};
657
658struct bnx2x_eth_stats {
659 u32 total_bytes_received_hi;
660 u32 total_bytes_received_lo;
661 u32 total_bytes_transmitted_hi;
662 u32 total_bytes_transmitted_lo;
663 u32 total_unicast_packets_received_hi;
664 u32 total_unicast_packets_received_lo;
665 u32 total_multicast_packets_received_hi;
666 u32 total_multicast_packets_received_lo;
667 u32 total_broadcast_packets_received_hi;
668 u32 total_broadcast_packets_received_lo;
669 u32 total_unicast_packets_transmitted_hi;
670 u32 total_unicast_packets_transmitted_lo;
671 u32 total_multicast_packets_transmitted_hi;
672 u32 total_multicast_packets_transmitted_lo;
673 u32 total_broadcast_packets_transmitted_hi;
674 u32 total_broadcast_packets_transmitted_lo;
675 u32 valid_bytes_received_hi;
676 u32 valid_bytes_received_lo;
677
678 u32 error_bytes_received_hi;
679 u32 error_bytes_received_lo;
680 u32 etherstatsoverrsizepkts_hi;
681 u32 etherstatsoverrsizepkts_lo;
682 u32 no_buff_discard_hi;
683 u32 no_buff_discard_lo;
684
685 u32 rx_stat_ifhcinbadoctets_hi;
686 u32 rx_stat_ifhcinbadoctets_lo;
687 u32 tx_stat_ifhcoutbadoctets_hi;
688 u32 tx_stat_ifhcoutbadoctets_lo;
689 u32 rx_stat_dot3statsfcserrors_hi;
690 u32 rx_stat_dot3statsfcserrors_lo;
691 u32 rx_stat_dot3statsalignmenterrors_hi;
692 u32 rx_stat_dot3statsalignmenterrors_lo;
693 u32 rx_stat_dot3statscarriersenseerrors_hi;
694 u32 rx_stat_dot3statscarriersenseerrors_lo;
695 u32 rx_stat_falsecarriererrors_hi;
696 u32 rx_stat_falsecarriererrors_lo;
697 u32 rx_stat_etherstatsundersizepkts_hi;
698 u32 rx_stat_etherstatsundersizepkts_lo;
699 u32 rx_stat_dot3statsframestoolong_hi;
700 u32 rx_stat_dot3statsframestoolong_lo;
701 u32 rx_stat_etherstatsfragments_hi;
702 u32 rx_stat_etherstatsfragments_lo;
703 u32 rx_stat_etherstatsjabbers_hi;
704 u32 rx_stat_etherstatsjabbers_lo;
705 u32 rx_stat_maccontrolframesreceived_hi;
706 u32 rx_stat_maccontrolframesreceived_lo;
707 u32 rx_stat_bmac_xpf_hi;
708 u32 rx_stat_bmac_xpf_lo;
709 u32 rx_stat_bmac_xcf_hi;
710 u32 rx_stat_bmac_xcf_lo;
711 u32 rx_stat_xoffstateentered_hi;
712 u32 rx_stat_xoffstateentered_lo;
713 u32 rx_stat_xonpauseframesreceived_hi;
714 u32 rx_stat_xonpauseframesreceived_lo;
715 u32 rx_stat_xoffpauseframesreceived_hi;
716 u32 rx_stat_xoffpauseframesreceived_lo;
717 u32 tx_stat_outxonsent_hi;
718 u32 tx_stat_outxonsent_lo;
719 u32 tx_stat_outxoffsent_hi;
720 u32 tx_stat_outxoffsent_lo;
721 u32 tx_stat_flowcontroldone_hi;
722 u32 tx_stat_flowcontroldone_lo;
723 u32 tx_stat_etherstatscollisions_hi;
724 u32 tx_stat_etherstatscollisions_lo;
725 u32 tx_stat_dot3statssinglecollisionframes_hi;
726 u32 tx_stat_dot3statssinglecollisionframes_lo;
727 u32 tx_stat_dot3statsmultiplecollisionframes_hi;
728 u32 tx_stat_dot3statsmultiplecollisionframes_lo;
729 u32 tx_stat_dot3statsdeferredtransmissions_hi;
730 u32 tx_stat_dot3statsdeferredtransmissions_lo;
731 u32 tx_stat_dot3statsexcessivecollisions_hi;
732 u32 tx_stat_dot3statsexcessivecollisions_lo;
733 u32 tx_stat_dot3statslatecollisions_hi;
734 u32 tx_stat_dot3statslatecollisions_lo;
735 u32 tx_stat_etherstatspkts64octets_hi;
736 u32 tx_stat_etherstatspkts64octets_lo;
737 u32 tx_stat_etherstatspkts65octetsto127octets_hi;
738 u32 tx_stat_etherstatspkts65octetsto127octets_lo;
739 u32 tx_stat_etherstatspkts128octetsto255octets_hi;
740 u32 tx_stat_etherstatspkts128octetsto255octets_lo;
741 u32 tx_stat_etherstatspkts256octetsto511octets_hi;
742 u32 tx_stat_etherstatspkts256octetsto511octets_lo;
743 u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
744 u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
745 u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
746 u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
747 u32 tx_stat_etherstatspktsover1522octets_hi;
748 u32 tx_stat_etherstatspktsover1522octets_lo;
749 u32 tx_stat_bmac_2047_hi;
750 u32 tx_stat_bmac_2047_lo;
751 u32 tx_stat_bmac_4095_hi;
752 u32 tx_stat_bmac_4095_lo;
753 u32 tx_stat_bmac_9216_hi;
754 u32 tx_stat_bmac_9216_lo;
755 u32 tx_stat_bmac_16383_hi;
756 u32 tx_stat_bmac_16383_lo;
757 u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
758 u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
759 u32 tx_stat_bmac_ufl_hi;
760 u32 tx_stat_bmac_ufl_lo;
761
762 u32 pause_frames_received_hi;
763 u32 pause_frames_received_lo;
764 u32 pause_frames_sent_hi;
765 u32 pause_frames_sent_lo;
766
767 u32 etherstatspkts1024octetsto1522octets_hi;
768 u32 etherstatspkts1024octetsto1522octets_lo;
769 u32 etherstatspktsover1522octets_hi;
770 u32 etherstatspktsover1522octets_lo;
771
772 u32 brb_drop_hi;
773 u32 brb_drop_lo;
774 u32 brb_truncate_hi;
775 u32 brb_truncate_lo;
776
777 u32 mac_filter_discard;
778 u32 xxoverflow_discard;
779 u32 brb_truncate_discard;
780 u32 mac_discard;
781
782 u32 driver_xoff;
783 u32 rx_err_discard_pkt;
784 u32 rx_skb_alloc_failed;
785 u32 hw_csum_err;
786
787 u32 nig_timer_max;
788};
789
790#define BNX2X_NUM_STATS 43
791#define STATS_OFFSET32(stat_name) \
792 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
793
794 593
795#ifdef BCM_CNIC 594#ifdef BCM_CNIC
796#define MAX_CONTEXT 15 595#define MAX_CONTEXT 15
@@ -1006,6 +805,8 @@ struct bnx2x {
1006 805
1007 int multi_mode; 806 int multi_mode;
1008 int num_queues; 807 int num_queues;
808 int disable_tpa;
809 int int_mode;
1009 810
1010 u32 rx_mode; 811 u32 rx_mode;
1011#define BNX2X_RX_MODE_NONE 0 812#define BNX2X_RX_MODE_NONE 0
@@ -1134,6 +935,10 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
1134void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); 935void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
1135void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 936void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
1136 u32 addr, u32 len); 937 u32 addr, u32 len);
938void bnx2x_calc_fc_adv(struct bnx2x *bp);
939int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
940 u32 data_hi, u32 data_lo, int common);
941void bnx2x_update_coalesce(struct bnx2x *bp);
1137 942
1138static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 943static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1139 int wait) 944 int wait)
@@ -1375,6 +1180,18 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1375#define BNX2X_VPD_LEN 128 1180#define BNX2X_VPD_LEN 128
1376#define VENDOR_ID_LEN 4 1181#define VENDOR_ID_LEN 4
1377 1182
1183#ifdef BNX2X_MAIN
1184#define BNX2X_EXTERN
1185#else
1186#define BNX2X_EXTERN extern
1187#endif
1188
1189BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
1190
1378/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */ 1191/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1379 1192
1193extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1194
1195void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1196
1380#endif /* bnx2x.h */ 1197#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
new file mode 100644
index 000000000000..02bf710629a3
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,2252 @@
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18
19#include <linux/etherdevice.h>
20#include <linux/ip.h>
21#include <linux/ipv6.h>
22#include <net/ip6_checksum.h>
23#include "bnx2x_cmn.h"
24
25#ifdef BCM_VLAN
26#include <linux/if_vlan.h>
27#endif
28
29static int bnx2x_poll(struct napi_struct *napi, int budget);
30
31/* free skb in the packet ring at pos idx
32 * return idx of last bd freed
33 */
34static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
35 u16 idx)
36{
37 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
38 struct eth_tx_start_bd *tx_start_bd;
39 struct eth_tx_bd *tx_data_bd;
40 struct sk_buff *skb = tx_buf->skb;
41 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
42 int nbd;
43
44 /* prefetch skb end pointer to speedup dev_kfree_skb() */
45 prefetch(&skb->end);
46
47 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
48 idx, tx_buf, skb);
49
50 /* unmap first bd */
51 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
52 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
53 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
54 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
55
56 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
57#ifdef BNX2X_STOP_ON_ERROR
58 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
59 BNX2X_ERR("BAD nbd!\n");
60 bnx2x_panic();
61 }
62#endif
63 new_cons = nbd + tx_buf->first_bd;
64
65 /* Get the next bd */
66 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
67
68 /* Skip a parse bd... */
69 --nbd;
70 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
71
72 /* ...and the TSO split header bd since they have no mapping */
73 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
74 --nbd;
75 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
76 }
77
78 /* now free frags */
79 while (nbd > 0) {
80
81 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
82 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
83 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
84 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
85 if (--nbd)
86 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
87 }
88
89 /* release skb */
90 WARN_ON(!skb);
91 dev_kfree_skb(skb);
92 tx_buf->first_bd = 0;
93 tx_buf->skb = NULL;
94
95 return new_cons;
96}
97
98int bnx2x_tx_int(struct bnx2x_fastpath *fp)
99{
100 struct bnx2x *bp = fp->bp;
101 struct netdev_queue *txq;
102 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
103
104#ifdef BNX2X_STOP_ON_ERROR
105 if (unlikely(bp->panic))
106 return -1;
107#endif
108
109 txq = netdev_get_tx_queue(bp->dev, fp->index);
110 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
111 sw_cons = fp->tx_pkt_cons;
112
113 while (sw_cons != hw_cons) {
114 u16 pkt_cons;
115
116 pkt_cons = TX_BD(sw_cons);
117
118 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
119
120 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
121 hw_cons, sw_cons, pkt_cons);
122
123/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
124 rmb();
125 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
126 }
127*/
128 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
129 sw_cons++;
130 }
131
132 fp->tx_pkt_cons = sw_cons;
133 fp->tx_bd_cons = bd_cons;
134
135 /* Need to make the tx_bd_cons update visible to start_xmit()
136 * before checking for netif_tx_queue_stopped(). Without the
137 * memory barrier, there is a small possibility that
138 * start_xmit() will miss it and cause the queue to be stopped
139 * forever.
140 */
141 smp_mb();
142
143 /* TBD need a thresh? */
144 if (unlikely(netif_tx_queue_stopped(txq))) {
145 /* Taking tx_lock() is needed to prevent reenabling the queue
146 * while it's empty. This could have happen if rx_action() gets
147 * suspended in bnx2x_tx_int() after the condition before
148 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
149 *
150 * stops the queue->sees fresh tx_bd_cons->releases the queue->
151 * sends some packets consuming the whole queue again->
152 * stops the queue
153 */
154
155 __netif_tx_lock(txq, smp_processor_id());
156
157 if ((netif_tx_queue_stopped(txq)) &&
158 (bp->state == BNX2X_STATE_OPEN) &&
159 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
160 netif_tx_wake_queue(txq);
161
162 __netif_tx_unlock(txq);
163 }
164 return 0;
165}
166
167static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
168 u16 idx)
169{
170 u16 last_max = fp->last_max_sge;
171
172 if (SUB_S16(idx, last_max) > 0)
173 fp->last_max_sge = idx;
174}
175
176static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
177 struct eth_fast_path_rx_cqe *fp_cqe)
178{
179 struct bnx2x *bp = fp->bp;
180 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
181 le16_to_cpu(fp_cqe->len_on_bd)) >>
182 SGE_PAGE_SHIFT;
183 u16 last_max, last_elem, first_elem;
184 u16 delta = 0;
185 u16 i;
186
187 if (!sge_len)
188 return;
189
190 /* First mark all used pages */
191 for (i = 0; i < sge_len; i++)
192 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
193
194 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
195 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
196
197 /* Here we assume that the last SGE index is the biggest */
198 prefetch((void *)(fp->sge_mask));
199 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
200
201 last_max = RX_SGE(fp->last_max_sge);
202 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
203 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
204
205 /* If ring is not full */
206 if (last_elem + 1 != first_elem)
207 last_elem++;
208
209 /* Now update the prod */
210 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
211 if (likely(fp->sge_mask[i]))
212 break;
213
214 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
215 delta += RX_SGE_MASK_ELEM_SZ;
216 }
217
218 if (delta > 0) {
219 fp->rx_sge_prod += delta;
220 /* clear page-end entries */
221 bnx2x_clear_sge_mask_next_elems(fp);
222 }
223
224 DP(NETIF_MSG_RX_STATUS,
225 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
226 fp->last_max_sge, fp->rx_sge_prod);
227}
228
229static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
230 struct sk_buff *skb, u16 cons, u16 prod)
231{
232 struct bnx2x *bp = fp->bp;
233 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
234 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
235 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
236 dma_addr_t mapping;
237
238 /* move empty skb from pool to prod and map it */
239 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
240 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
241 bp->rx_buf_size, DMA_FROM_DEVICE);
242 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
243
244 /* move partial skb from cons to pool (don't unmap yet) */
245 fp->tpa_pool[queue] = *cons_rx_buf;
246
247 /* mark bin state as start - print error if current state != stop */
248 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
249 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
250
251 fp->tpa_state[queue] = BNX2X_TPA_START;
252
253 /* point prod_bd to new skb */
254 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
255 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
256
257#ifdef BNX2X_STOP_ON_ERROR
258 fp->tpa_queue_used |= (1 << queue);
259#ifdef _ASM_GENERIC_INT_L64_H
260 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
261#else
262 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
263#endif
264 fp->tpa_queue_used);
265#endif
266}
267
268static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
269 struct sk_buff *skb,
270 struct eth_fast_path_rx_cqe *fp_cqe,
271 u16 cqe_idx)
272{
273 struct sw_rx_page *rx_pg, old_rx_pg;
274 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
275 u32 i, frag_len, frag_size, pages;
276 int err;
277 int j;
278
279 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
280 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
281
282 /* This is needed in order to enable forwarding support */
283 if (frag_size)
284 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
285 max(frag_size, (u32)len_on_bd));
286
287#ifdef BNX2X_STOP_ON_ERROR
288 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
289 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
290 pages, cqe_idx);
291 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
292 fp_cqe->pkt_len, len_on_bd);
293 bnx2x_panic();
294 return -EINVAL;
295 }
296#endif
297
298 /* Run through the SGL and compose the fragmented skb */
299 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
300 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
301
302 /* FW gives the indices of the SGE as if the ring is an array
303 (meaning that "next" element will consume 2 indices) */
304 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
305 rx_pg = &fp->rx_page_ring[sge_idx];
306 old_rx_pg = *rx_pg;
307
308 /* If we fail to allocate a substitute page, we simply stop
309 where we are and drop the whole packet */
310 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
311 if (unlikely(err)) {
312 fp->eth_q_stats.rx_skb_alloc_failed++;
313 return err;
314 }
315
316 /* Unmap the page as we r going to pass it to the stack */
317 dma_unmap_page(&bp->pdev->dev,
318 dma_unmap_addr(&old_rx_pg, mapping),
319 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
320
321 /* Add one frag and update the appropriate fields in the skb */
322 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
323
324 skb->data_len += frag_len;
325 skb->truesize += frag_len;
326 skb->len += frag_len;
327
328 frag_size -= frag_len;
329 }
330
331 return 0;
332}
333
334static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
335 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
336 u16 cqe_idx)
337{
338 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
339 struct sk_buff *skb = rx_buf->skb;
340 /* alloc new skb */
341 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
342
343 /* Unmap skb in the pool anyway, as we are going to change
344 pool entry status to BNX2X_TPA_STOP even if new skb allocation
345 fails. */
346 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
347 bp->rx_buf_size, DMA_FROM_DEVICE);
348
349 if (likely(new_skb)) {
350 /* fix ip xsum and give it to the stack */
351 /* (no need to map the new skb) */
352#ifdef BCM_VLAN
353 int is_vlan_cqe =
354 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
355 PARSING_FLAGS_VLAN);
356 int is_not_hwaccel_vlan_cqe =
357 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
358#endif
359
360 prefetch(skb);
361 prefetch(((char *)(skb)) + 128);
362
363#ifdef BNX2X_STOP_ON_ERROR
364 if (pad + len > bp->rx_buf_size) {
365 BNX2X_ERR("skb_put is about to fail... "
366 "pad %d len %d rx_buf_size %d\n",
367 pad, len, bp->rx_buf_size);
368 bnx2x_panic();
369 return;
370 }
371#endif
372
373 skb_reserve(skb, pad);
374 skb_put(skb, len);
375
376 skb->protocol = eth_type_trans(skb, bp->dev);
377 skb->ip_summed = CHECKSUM_UNNECESSARY;
378
379 {
380 struct iphdr *iph;
381
382 iph = (struct iphdr *)skb->data;
383#ifdef BCM_VLAN
384 /* If there is no Rx VLAN offloading -
385 take VLAN tag into an account */
386 if (unlikely(is_not_hwaccel_vlan_cqe))
387 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
388#endif
389 iph->check = 0;
390 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
391 }
392
393 if (!bnx2x_fill_frag_skb(bp, fp, skb,
394 &cqe->fast_path_cqe, cqe_idx)) {
395#ifdef BCM_VLAN
396 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
397 (!is_not_hwaccel_vlan_cqe))
398 vlan_gro_receive(&fp->napi, bp->vlgrp,
399 le16_to_cpu(cqe->fast_path_cqe.
400 vlan_tag), skb);
401 else
402#endif
403 napi_gro_receive(&fp->napi, skb);
404 } else {
405 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
406 " - dropping packet!\n");
407 dev_kfree_skb(skb);
408 }
409
410
411 /* put new skb in bin */
412 fp->tpa_pool[queue].skb = new_skb;
413
414 } else {
415 /* else drop the packet and keep the buffer in the bin */
416 DP(NETIF_MSG_RX_STATUS,
417 "Failed to allocate new skb - dropping packet!\n");
418 fp->eth_q_stats.rx_skb_alloc_failed++;
419 }
420
421 fp->tpa_state[queue] = BNX2X_TPA_STOP;
422}
423
424/* Set Toeplitz hash value in the skb using the value from the
425 * CQE (calculated by HW).
426 */
427static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
428 struct sk_buff *skb)
429{
430 /* Set Toeplitz hash from CQE */
431 if ((bp->dev->features & NETIF_F_RXHASH) &&
432 (cqe->fast_path_cqe.status_flags &
433 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
434 skb->rxhash =
435 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
436}
437
438int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
439{
440 struct bnx2x *bp = fp->bp;
441 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
442 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
443 int rx_pkt = 0;
444
445#ifdef BNX2X_STOP_ON_ERROR
446 if (unlikely(bp->panic))
447 return 0;
448#endif
449
450 /* CQ "next element" is of the size of the regular element,
451 that's why it's ok here */
452 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
453 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
454 hw_comp_cons++;
455
456 bd_cons = fp->rx_bd_cons;
457 bd_prod = fp->rx_bd_prod;
458 bd_prod_fw = bd_prod;
459 sw_comp_cons = fp->rx_comp_cons;
460 sw_comp_prod = fp->rx_comp_prod;
461
462 /* Memory barrier necessary as speculative reads of the rx
463 * buffer can be ahead of the index in the status block
464 */
465 rmb();
466
467 DP(NETIF_MSG_RX_STATUS,
468 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
469 fp->index, hw_comp_cons, sw_comp_cons);
470
471 while (sw_comp_cons != hw_comp_cons) {
472 struct sw_rx_bd *rx_buf = NULL;
473 struct sk_buff *skb;
474 union eth_rx_cqe *cqe;
475 u8 cqe_fp_flags;
476 u16 len, pad;
477
478 comp_ring_cons = RCQ_BD(sw_comp_cons);
479 bd_prod = RX_BD(bd_prod);
480 bd_cons = RX_BD(bd_cons);
481
482 /* Prefetch the page containing the BD descriptor
483 at producer's index. It will be needed when new skb is
484 allocated */
485 prefetch((void *)(PAGE_ALIGN((unsigned long)
486 (&fp->rx_desc_ring[bd_prod])) -
487 PAGE_SIZE + 1));
488
489 cqe = &fp->rx_comp_ring[comp_ring_cons];
490 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
491
492 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
493 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
494 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
495 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
496 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
497 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
498
499 /* is this a slowpath msg? */
500 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
501 bnx2x_sp_event(fp, cqe);
502 goto next_cqe;
503
504 /* this is an rx packet */
505 } else {
506 rx_buf = &fp->rx_buf_ring[bd_cons];
507 skb = rx_buf->skb;
508 prefetch(skb);
509 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
510 pad = cqe->fast_path_cqe.placement_offset;
511
512 /* If CQE is marked both TPA_START and TPA_END
513 it is a non-TPA CQE */
514 if ((!fp->disable_tpa) &&
515 (TPA_TYPE(cqe_fp_flags) !=
516 (TPA_TYPE_START | TPA_TYPE_END))) {
517 u16 queue = cqe->fast_path_cqe.queue_index;
518
519 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
520 DP(NETIF_MSG_RX_STATUS,
521 "calling tpa_start on queue %d\n",
522 queue);
523
524 bnx2x_tpa_start(fp, queue, skb,
525 bd_cons, bd_prod);
526
527 /* Set Toeplitz hash for an LRO skb */
528 bnx2x_set_skb_rxhash(bp, cqe, skb);
529
530 goto next_rx;
531 }
532
533 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
534 DP(NETIF_MSG_RX_STATUS,
535 "calling tpa_stop on queue %d\n",
536 queue);
537
538 if (!BNX2X_RX_SUM_FIX(cqe))
539 BNX2X_ERR("STOP on none TCP "
540 "data\n");
541
542 /* This is a size of the linear data
543 on this skb */
544 len = le16_to_cpu(cqe->fast_path_cqe.
545 len_on_bd);
546 bnx2x_tpa_stop(bp, fp, queue, pad,
547 len, cqe, comp_ring_cons);
548#ifdef BNX2X_STOP_ON_ERROR
549 if (bp->panic)
550 return 0;
551#endif
552
553 bnx2x_update_sge_prod(fp,
554 &cqe->fast_path_cqe);
555 goto next_cqe;
556 }
557 }
558
559 dma_sync_single_for_device(&bp->pdev->dev,
560 dma_unmap_addr(rx_buf, mapping),
561 pad + RX_COPY_THRESH,
562 DMA_FROM_DEVICE);
563 prefetch(((char *)(skb)) + 128);
564
565 /* is this an error packet? */
566 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
567 DP(NETIF_MSG_RX_ERR,
568 "ERROR flags %x rx packet %u\n",
569 cqe_fp_flags, sw_comp_cons);
570 fp->eth_q_stats.rx_err_discard_pkt++;
571 goto reuse_rx;
572 }
573
574 /* Since we don't have a jumbo ring
575 * copy small packets if mtu > 1500
576 */
577 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
578 (len <= RX_COPY_THRESH)) {
579 struct sk_buff *new_skb;
580
581 new_skb = netdev_alloc_skb(bp->dev,
582 len + pad);
583 if (new_skb == NULL) {
584 DP(NETIF_MSG_RX_ERR,
585 "ERROR packet dropped "
586 "because of alloc failure\n");
587 fp->eth_q_stats.rx_skb_alloc_failed++;
588 goto reuse_rx;
589 }
590
591 /* aligned copy */
592 skb_copy_from_linear_data_offset(skb, pad,
593 new_skb->data + pad, len);
594 skb_reserve(new_skb, pad);
595 skb_put(new_skb, len);
596
597 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
598
599 skb = new_skb;
600
601 } else
602 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
603 dma_unmap_single(&bp->pdev->dev,
604 dma_unmap_addr(rx_buf, mapping),
605 bp->rx_buf_size,
606 DMA_FROM_DEVICE);
607 skb_reserve(skb, pad);
608 skb_put(skb, len);
609
610 } else {
611 DP(NETIF_MSG_RX_ERR,
612 "ERROR packet dropped because "
613 "of alloc failure\n");
614 fp->eth_q_stats.rx_skb_alloc_failed++;
615reuse_rx:
616 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
617 goto next_rx;
618 }
619
620 skb->protocol = eth_type_trans(skb, bp->dev);
621
622 /* Set Toeplitz hash for a none-LRO skb */
623 bnx2x_set_skb_rxhash(bp, cqe, skb);
624
625 skb->ip_summed = CHECKSUM_NONE;
626 if (bp->rx_csum) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 else
630 fp->eth_q_stats.hw_csum_err++;
631 }
632 }
633
634 skb_record_rx_queue(skb, fp->index);
635
636#ifdef BCM_VLAN
637 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
638 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639 PARSING_FLAGS_VLAN))
640 vlan_gro_receive(&fp->napi, bp->vlgrp,
641 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
642 else
643#endif
644 napi_gro_receive(&fp->napi, skb);
645
646
647next_rx:
648 rx_buf->skb = NULL;
649
650 bd_cons = NEXT_RX_IDX(bd_cons);
651 bd_prod = NEXT_RX_IDX(bd_prod);
652 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
653 rx_pkt++;
654next_cqe:
655 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
656 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657
658 if (rx_pkt == budget)
659 break;
660 } /* while */
661
662 fp->rx_bd_cons = bd_cons;
663 fp->rx_bd_prod = bd_prod_fw;
664 fp->rx_comp_cons = sw_comp_cons;
665 fp->rx_comp_prod = sw_comp_prod;
666
667 /* Update producers */
668 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
669 fp->rx_sge_prod);
670
671 fp->rx_pkt += rx_pkt;
672 fp->rx_calls++;
673
674 return rx_pkt;
675}
676
/*
 * MSI-X fastpath interrupt handler - one vector per RX/TX queue pair.
 * Disables further IGU interrupts for this status block and defers all
 * actual RX/TX processing to the queue's NAPI context.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	/* Mask this SB's interrupts; NAPI poll re-enables them when done */
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
706
707
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	/* Serialize PHY access within this driver instance first... */
	mutex_lock(&bp->port.phy_mutex);

	/* ...then against the other port/function through the HW MDIO
	 * lock, on boards that require it.  Release order in
	 * bnx2x_release_phy_lock() is the mirror image.
	 */
	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
716
/*
 * Release the PHY locks taken by bnx2x_acquire_phy_lock() in reverse
 * order: HW MDIO lock (if held) first, then the local mutex.
 */
void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
724
725void bnx2x_link_report(struct bnx2x *bp)
726{
727 if (bp->flags & MF_FUNC_DIS) {
728 netif_carrier_off(bp->dev);
729 netdev_err(bp->dev, "NIC Link is Down\n");
730 return;
731 }
732
733 if (bp->link_vars.link_up) {
734 u16 line_speed;
735
736 if (bp->state == BNX2X_STATE_OPEN)
737 netif_carrier_on(bp->dev);
738 netdev_info(bp->dev, "NIC Link is Up, ");
739
740 line_speed = bp->link_vars.line_speed;
741 if (IS_E1HMF(bp)) {
742 u16 vn_max_rate;
743
744 vn_max_rate =
745 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
746 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
747 if (vn_max_rate < line_speed)
748 line_speed = vn_max_rate;
749 }
750 pr_cont("%d Mbps ", line_speed);
751
752 if (bp->link_vars.duplex == DUPLEX_FULL)
753 pr_cont("full duplex");
754 else
755 pr_cont("half duplex");
756
757 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
758 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
759 pr_cont(", receive ");
760 if (bp->link_vars.flow_ctrl &
761 BNX2X_FLOW_CTRL_TX)
762 pr_cont("& transmit ");
763 } else {
764 pr_cont(", transmit ");
765 }
766 pr_cont("flow control ON");
767 }
768 pr_cont("\n");
769
770 } else { /* link_down */
771 netif_carrier_off(bp->dev);
772 netdev_err(bp->dev, "NIC Link is Down\n");
773 }
774}
775
776void bnx2x_init_rx_rings(struct bnx2x *bp)
777{
778 int func = BP_FUNC(bp);
779 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
780 ETH_MAX_AGGREGATION_QUEUES_E1H;
781 u16 ring_prod, cqe_ring_prod;
782 int i, j;
783
784 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
785 DP(NETIF_MSG_IFUP,
786 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
787
788 if (bp->flags & TPA_ENABLE_FLAG) {
789
790 for_each_queue(bp, j) {
791 struct bnx2x_fastpath *fp = &bp->fp[j];
792
793 for (i = 0; i < max_agg_queues; i++) {
794 fp->tpa_pool[i].skb =
795 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
796 if (!fp->tpa_pool[i].skb) {
797 BNX2X_ERR("Failed to allocate TPA "
798 "skb pool for queue[%d] - "
799 "disabling TPA on this "
800 "queue!\n", j);
801 bnx2x_free_tpa_pool(bp, fp, i);
802 fp->disable_tpa = 1;
803 break;
804 }
805 dma_unmap_addr_set((struct sw_rx_bd *)
806 &bp->fp->tpa_pool[i],
807 mapping, 0);
808 fp->tpa_state[i] = BNX2X_TPA_STOP;
809 }
810 }
811 }
812
813 for_each_queue(bp, j) {
814 struct bnx2x_fastpath *fp = &bp->fp[j];
815
816 fp->rx_bd_cons = 0;
817 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
818 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
819
820 /* "next page" elements initialization */
821 /* SGE ring */
822 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
823 struct eth_rx_sge *sge;
824
825 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
826 sge->addr_hi =
827 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
828 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
829 sge->addr_lo =
830 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
831 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
832 }
833
834 bnx2x_init_sge_ring_bit_mask(fp);
835
836 /* RX BD ring */
837 for (i = 1; i <= NUM_RX_RINGS; i++) {
838 struct eth_rx_bd *rx_bd;
839
840 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
841 rx_bd->addr_hi =
842 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
843 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
844 rx_bd->addr_lo =
845 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
846 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
847 }
848
849 /* CQ ring */
850 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
851 struct eth_rx_cqe_next_page *nextpg;
852
853 nextpg = (struct eth_rx_cqe_next_page *)
854 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
855 nextpg->addr_hi =
856 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
857 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
858 nextpg->addr_lo =
859 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
860 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
861 }
862
863 /* Allocate SGEs and initialize the ring elements */
864 for (i = 0, ring_prod = 0;
865 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
866
867 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
868 BNX2X_ERR("was only able to allocate "
869 "%d rx sges\n", i);
870 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
871 /* Cleanup already allocated elements */
872 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
873 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
874 fp->disable_tpa = 1;
875 ring_prod = 0;
876 break;
877 }
878 ring_prod = NEXT_SGE_IDX(ring_prod);
879 }
880 fp->rx_sge_prod = ring_prod;
881
882 /* Allocate BDs and initialize BD ring */
883 fp->rx_comp_cons = 0;
884 cqe_ring_prod = ring_prod = 0;
885 for (i = 0; i < bp->rx_ring_size; i++) {
886 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
887 BNX2X_ERR("was only able to allocate "
888 "%d rx skbs on queue[%d]\n", i, j);
889 fp->eth_q_stats.rx_skb_alloc_failed++;
890 break;
891 }
892 ring_prod = NEXT_RX_IDX(ring_prod);
893 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
894 WARN_ON(ring_prod <= i);
895 }
896
897 fp->rx_bd_prod = ring_prod;
898 /* must not have more available CQEs than BDs */
899 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
900 cqe_ring_prod);
901 fp->rx_pkt = fp->rx_calls = 0;
902
903 /* Warning!
904 * this will generate an interrupt (to the TSTORM)
905 * must only be done after chip is initialized
906 */
907 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
908 fp->rx_sge_prod);
909 if (j != 0)
910 continue;
911
912 REG_WR(bp, BAR_USTRORM_INTMEM +
913 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
914 U64_LO(fp->rx_comp_mapping));
915 REG_WR(bp, BAR_USTRORM_INTMEM +
916 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
917 U64_HI(fp->rx_comp_mapping));
918 }
919}
920static void bnx2x_free_tx_skbs(struct bnx2x *bp)
921{
922 int i;
923
924 for_each_queue(bp, i) {
925 struct bnx2x_fastpath *fp = &bp->fp[i];
926
927 u16 bd_cons = fp->tx_bd_cons;
928 u16 sw_prod = fp->tx_pkt_prod;
929 u16 sw_cons = fp->tx_pkt_cons;
930
931 while (sw_cons != sw_prod) {
932 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
933 sw_cons++;
934 }
935 }
936}
937
938static void bnx2x_free_rx_skbs(struct bnx2x *bp)
939{
940 int i, j;
941
942 for_each_queue(bp, j) {
943 struct bnx2x_fastpath *fp = &bp->fp[j];
944
945 for (i = 0; i < NUM_RX_BD; i++) {
946 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
947 struct sk_buff *skb = rx_buf->skb;
948
949 if (skb == NULL)
950 continue;
951
952 dma_unmap_single(&bp->pdev->dev,
953 dma_unmap_addr(rx_buf, mapping),
954 bp->rx_buf_size, DMA_FROM_DEVICE);
955
956 rx_buf->skb = NULL;
957 dev_kfree_skb(skb);
958 }
959 if (!fp->disable_tpa)
960 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
961 ETH_MAX_AGGREGATION_QUEUES_E1 :
962 ETH_MAX_AGGREGATION_QUEUES_E1H);
963 }
964}
965
/* Free all driver-owned skbs: TX packets first, then RX buffers
 * (including each queue's TPA pool).
 */
void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
971
972static void bnx2x_free_msix_irqs(struct bnx2x *bp)
973{
974 int i, offset = 1;
975
976 free_irq(bp->msix_table[0].vector, bp->dev);
977 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
978 bp->msix_table[0].vector);
979
980#ifdef BCM_CNIC
981 offset++;
982#endif
983 for_each_queue(bp, i) {
984 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
985 "state %x\n", i, bp->msix_table[i + offset].vector,
986 bnx2x_fp(bp, i, state));
987
988 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
989 }
990}
991
992void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
993{
994 if (bp->flags & USING_MSIX_FLAG) {
995 if (!disable_only)
996 bnx2x_free_msix_irqs(bp);
997 pci_disable_msix(bp->pdev);
998 bp->flags &= ~USING_MSIX_FLAG;
999
1000 } else if (bp->flags & USING_MSI_FLAG) {
1001 if (!disable_only)
1002 free_irq(bp->pdev->irq, bp->dev);
1003 pci_disable_msi(bp->pdev);
1004 bp->flags &= ~USING_MSI_FLAG;
1005
1006 } else if (!disable_only)
1007 free_irq(bp->pdev->irq, bp->dev);
1008}
1009
/*
 * Populate the MSI-X table (slowpath entry, optional CNIC entry, one
 * entry per fastpath queue) and enable MSI-X.  If the full vector count
 * cannot be granted, retry with what the platform offers and shrink the
 * number of queues accordingly.
 *
 * Returns 0 on success, otherwise the pci_enable_msix() error code.
 */
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* A positive rc from pci_enable_msix() is the number of
		 * vectors that could be allocated - retry with that count.
		 */
		/* vectors available for FP */
		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}

		bp->num_queues = min(bp->num_queues, fp_vec);

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
				   bp->num_queues);
	} else if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
1066
1067static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1068{
1069 int i, rc, offset = 1;
1070
1071 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1072 bp->dev->name, bp->dev);
1073 if (rc) {
1074 BNX2X_ERR("request sp irq failed\n");
1075 return -EBUSY;
1076 }
1077
1078#ifdef BCM_CNIC
1079 offset++;
1080#endif
1081 for_each_queue(bp, i) {
1082 struct bnx2x_fastpath *fp = &bp->fp[i];
1083 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1084 bp->dev->name, i);
1085
1086 rc = request_irq(bp->msix_table[i + offset].vector,
1087 bnx2x_msix_fp_int, 0, fp->name, fp);
1088 if (rc) {
1089 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1090 bnx2x_free_msix_irqs(bp);
1091 return -EBUSY;
1092 }
1093
1094 fp->state = BNX2X_FP_STATE_IRQ;
1095 }
1096
1097 i = BNX2X_NUM_QUEUES(bp);
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 " ... fp[%d] %d\n",
1100 bp->msix_table[0].vector,
1101 0, bp->msix_table[offset].vector,
1102 i - 1, bp->msix_table[offset + i - 1].vector);
1103
1104 return 0;
1105}
1106
1107static int bnx2x_enable_msi(struct bnx2x *bp)
1108{
1109 int rc;
1110
1111 rc = pci_enable_msi(bp->pdev);
1112 if (rc) {
1113 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1114 return -1;
1115 }
1116 bp->flags |= USING_MSI_FLAG;
1117
1118 return 0;
1119}
1120
1121static int bnx2x_req_irq(struct bnx2x *bp)
1122{
1123 unsigned long flags;
1124 int rc;
1125
1126 if (bp->flags & USING_MSI_FLAG)
1127 flags = 0;
1128 else
1129 flags = IRQF_SHARED;
1130
1131 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132 bp->dev->name, bp->dev);
1133 if (!rc)
1134 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1135
1136 return rc;
1137}
1138
1139static void bnx2x_napi_enable(struct bnx2x *bp)
1140{
1141 int i;
1142
1143 for_each_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi));
1145}
1146
1147static void bnx2x_napi_disable(struct bnx2x *bp)
1148{
1149 int i;
1150
1151 for_each_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi));
1153}
1154
/*
 * Counterpart of bnx2x_netif_stop(): decrement intr_sem and, only on
 * the transition to zero (i.e. the last outstanding "stop" is undone),
 * re-enable NAPI, HW interrupts and the TX queues.
 */
void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
1171
/*
 * Quiesce the interface in order: synchronously disable interrupts
 * (@disable_hw is forwarded to bnx2x_int_disable_sync), stop NAPI
 * polling, then freeze the TX path.
 */
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
1178static int bnx2x_set_num_queues(struct bnx2x *bp)
1179{
1180 int rc = 0;
1181
1182 switch (bp->int_mode) {
1183 case INT_MODE_INTx:
1184 case INT_MODE_MSI:
1185 bp->num_queues = 1;
1186 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1187 break;
1188 default:
1189 /* Set number of queues according to bp->multi_mode value */
1190 bnx2x_set_num_queues_msix(bp);
1191
1192 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1193 bp->num_queues);
1194
1195 /* if we can't use MSI-X we only need one fp,
1196 * so try to enable MSI-X with the requested number of fp's
1197 * and fallback to MSI or legacy INTx with one fp
1198 */
1199 rc = bnx2x_enable_msix(bp);
1200 if (rc)
1201 /* failed to enable MSI-X */
1202 bp->num_queues = 1;
1203 break;
1204 }
1205 bp->dev->real_num_tx_queues = bp->num_queues;
1206 return rc;
1207}
1208
/* must be called with rtnl_lock */
/*
 * Bring the NIC up: decide queue/IRQ configuration, allocate memory,
 * negotiate the load type with the MCP (or emulate it when there is no
 * MCP), initialize the HW and firmware state, set MAC addresses, start
 * the fast path and the periodic timer.
 *
 * @load_mode: LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG - controls how the
 *             TX queues and RX filter are (re)started at the end.
 * Error paths unwind in reverse order via the load_error* labels.
 */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* rc is inspected later (MSI-X fallback) even when nonzero */
	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* No MCP: emulate its decision with per-port load counts */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* First loader of the port/common becomes the PMF */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	return 0;

	/* Error unwinding: each label undoes everything initialized after
	 * the corresponding point of failure, in reverse order.
	 */
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
1459
/* must be called with rtnl_lock */
/*
 * Bring the NIC down: stop RX filtering, interrupts, NAPI and TX,
 * notify firmware, release IRQs, clean the chip (unless unloading for
 * recovery) and free all driver resources.
 *
 * @unload_mode: forwarded to the chip cleanup; UNLOAD_RECOVERY skips
 *               chip cleanup and resets the MCP mailbox sequence.
 */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	/* NOTE(review): presumably a final driver-pulse write so the MCP
	 * does not flag a missed heartbeat during unload - confirm against
	 * the firmware interface spec.
	 */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is on going recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
/*
 * Move the device between PCI power states via the PM control register.
 * Only PCI_D0 and PCI_D3hot are supported; anything else is -EINVAL.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* Clear the power-state field (-> D0) and clear any pending
		 * PME status (write-one-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		/* Power-state field value 3 == D3hot */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
1570
1571
1572
1573/*
1574 * net_device service functions
1575 */
1576
/*
 * NAPI poll handler for one fastpath queue: drain TX completions and up
 * to @budget RX packets per iteration, and only complete NAPI after
 * re-checking the status block (see the rmb() comment below) so no work
 * is left pending with interrupts re-enabled.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		/* TX completions are not budgeted by NAPI */
		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
1635
1636
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * @tx_buf:  SW ring entry of the packet; flagged BNX2X_TSO_SPLIT_BD so
 *           the unmap path knows the data BD shares the header mapping
 * @tx_bd:   in/out - on entry the start BD covering headers+data, on
 *           exit it points at the newly created data BD
 * @hlen:    number of header bytes to keep in the first BD
 * @bd_prod: current BD producer index; the advanced value is returned
 * @nbd:     total BD count to record in the first BD
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* data BD reuses the header BD's mapping, offset past the headers */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
1686
/*
 * Adjust a checksum by the contribution of 'fix' bytes adjacent to the
 * transport header, then fold, complement and byte-swap it.
 * NOTE(review): sign convention inferred from the t_header - fix /
 * t_header usage - positive fix subtracts bytes preceding the header,
 * negative fix adds bytes starting at it; confirm with the caller.
 */
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
1699
1700static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1701{
1702 u32 rc;
1703
1704 if (skb->ip_summed != CHECKSUM_PARTIAL)
1705 rc = XMIT_PLAIN;
1706
1707 else {
1708 if (skb->protocol == htons(ETH_P_IPV6)) {
1709 rc = XMIT_CSUM_V6;
1710 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1711 rc |= XMIT_CSUM_TCP;
1712
1713 } else {
1714 rc = XMIT_CSUM_V4;
1715 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1716 rc |= XMIT_CSUM_TCP;
1717 }
1718 }
1719
1720 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1721 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1722
1723 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1724 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1725
1726 return rc;
1727}
1728
1729#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1730/* check if packet requires linearization (packet is too fragmented)
1731 no need to check fragmentation if page size > 8K (there will be no
1732 violation to FW restrictions) */
/*
 * Decide whether @skb must be linearized before transmission because it
 * has more fragments than the chip can fetch per packet.  For LSO
 * packets a sliding window over the fragment list verifies that every
 * run of (MAX_FETCH_BD - 3) BDs carries at least one MSS worth of data;
 * any window below lso_mss forces linearization.
 * Returns 1 if linearization is required, 0 otherwise.
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				/* Slide: add the fragment entering the window,
				 * later drop the one leaving it.
				 */
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
1808#endif
1809
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 *
 * ndo_start_xmit handler: builds the BD chain for one skb (start BD,
 * parsing BD, optional TSO-split header BD, one BD per fragment), maps
 * the buffers for DMA and rings the TX doorbell.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	/* worst case: one BD per frag + start BD, PBD and last BD */
	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
					ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		/* PBD lengths are in 16-bit words, hence the /2 */
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		/* back to bytes for the TSO-split check below */
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		/* if payload shares the first BD with the headers, split
		 * it off into a separate BD (++nbd accounts for it) */
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		/* remember the first frag BD: it carries total_pkt_bytes */
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		/* re-check: the completion path may have freed BDs meanwhile */
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
2103/* called with rtnl_lock */
2104int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2105{
2106 struct sockaddr *addr = p;
2107 struct bnx2x *bp = netdev_priv(dev);
2108
2109 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2110 return -EINVAL;
2111
2112 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2113 if (netif_running(dev)) {
2114 if (CHIP_IS_E1(bp))
2115 bnx2x_set_eth_mac_addr_e1(bp, 1);
2116 else
2117 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2118 }
2119
2120 return 0;
2121}
2122
2123/* called with rtnl_lock */
2124int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2125{
2126 struct bnx2x *bp = netdev_priv(dev);
2127 int rc = 0;
2128
2129 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2130 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2131 return -EAGAIN;
2132 }
2133
2134 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2135 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2136 return -EINVAL;
2137
2138 /* This does not race with packet allocation
2139 * because the actual alloc size is
2140 * only updated as part of load
2141 */
2142 dev->mtu = new_mtu;
2143
2144 if (netif_running(dev)) {
2145 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2146 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2147 }
2148
2149 return rc;
2150}
2151
/* ndo_tx_timeout handler: invoked by the networking core when a TX queue
 * has been stopped for too long; schedules the driver's reset task to
 * recover the device.
 */
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
2163
#ifdef BCM_VLAN
/* called with rtnl_lock
 * Stores the VLAN group and mirrors the netdev's HW VLAN TX/RX feature
 * bits into bp->flags, then pushes the client config to FW if running.
 */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 vlan_flags = 0;

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	if (dev->features & NETIF_F_HW_VLAN_TX)
		vlan_flags |= HW_VLAN_TX_FLAG;
	if (dev->features & NETIF_F_HW_VLAN_RX)
		vlan_flags |= HW_VLAN_RX_FLAG;

	bp->flags = (bp->flags & ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG)) |
		    vlan_flags;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif
2187int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2188{
2189 struct net_device *dev = pci_get_drvdata(pdev);
2190 struct bnx2x *bp;
2191
2192 if (!dev) {
2193 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2194 return -ENODEV;
2195 }
2196 bp = netdev_priv(dev);
2197
2198 rtnl_lock();
2199
2200 pci_save_state(pdev);
2201
2202 if (!netif_running(dev)) {
2203 rtnl_unlock();
2204 return 0;
2205 }
2206
2207 netif_device_detach(dev);
2208
2209 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2210
2211 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2212
2213 rtnl_unlock();
2214
2215 return 0;
2216}
2217
2218int bnx2x_resume(struct pci_dev *pdev)
2219{
2220 struct net_device *dev = pci_get_drvdata(pdev);
2221 struct bnx2x *bp;
2222 int rc;
2223
2224 if (!dev) {
2225 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2226 return -ENODEV;
2227 }
2228 bp = netdev_priv(dev);
2229
2230 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2231 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2232 return -EAGAIN;
2233 }
2234
2235 rtnl_lock();
2236
2237 pci_restore_state(pdev);
2238
2239 if (!netif_running(dev)) {
2240 rtnl_unlock();
2241 return 0;
2242 }
2243
2244 bnx2x_set_power_state(bp, PCI_D0);
2245 netif_device_attach(dev);
2246
2247 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2248
2249 rtnl_unlock();
2250
2251 return rc;
2252}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
new file mode 100644
index 000000000000..d1979b1a7ed2
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,652 @@
1/* bnx2x_cmn.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#ifndef BNX2X_CMN_H
18#define BNX2X_CMN_H
19
20#include <linux/types.h>
21#include <linux/netdevice.h>
22
23
24#include "bnx2x.h"
25
26
27/*********************** Interfaces ****************************
28 * Functions that need to be implemented by each driver version
29 */
30
31/**
32 * Initialize link parameters structure variables.
33 *
34 * @param bp
35 * @param load_mode
36 *
37 * @return u8
38 */
39u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
40
41/**
42 * Configure hw according to link parameters structure.
43 *
44 * @param bp
45 */
46void bnx2x_link_set(struct bnx2x *bp);
47
48/**
49 * Query link status
50 *
51 * @param bp
52 *
53 * @return 0 - link is UP
54 */
55u8 bnx2x_link_test(struct bnx2x *bp);
56
57/**
58 * Handles link status change
59 *
60 * @param bp
61 */
62void bnx2x__link_status_update(struct bnx2x *bp);
63
64/**
65 * MSI-X slowpath interrupt handler
66 *
67 * @param irq
68 * @param dev_instance
69 *
70 * @return irqreturn_t
71 */
72irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
73
74/**
75 * non MSI-X interrupt handler
76 *
77 * @param irq
78 * @param dev_instance
79 *
80 * @return irqreturn_t
81 */
82irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
83#ifdef BCM_CNIC
84
85/**
86 * Send command to cnic driver
87 *
88 * @param bp
89 * @param cmd
90 */
91int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
92
93/**
94 * Provides cnic information for proper interrupt handling
95 *
96 * @param bp
97 */
98void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
99#endif
100
101/**
102 * Enable HW interrupts.
103 *
104 * @param bp
105 */
106void bnx2x_int_enable(struct bnx2x *bp);
107
108/**
109 * Disable interrupts. This function ensures that there are no
110 * ISRs or SP DPCs (sp_task) are running after it returns.
111 *
112 * @param bp
113 * @param disable_hw if true, disable HW interrupts.
114 */
115void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
116
117/**
118 * Init HW blocks according to current initialization stage:
119 * COMMON, PORT or FUNCTION.
120 *
121 * @param bp
122 * @param load_code: COMMON, PORT or FUNCTION
123 *
124 * @return int
125 */
126int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
127
128/**
129 * Init driver internals:
130 * - rings
131 * - status blocks
132 * - etc.
133 *
134 * @param bp
135 * @param load_code COMMON, PORT or FUNCTION
136 */
137void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
138
139/**
140 * Allocate driver's memory.
141 *
142 * @param bp
143 *
144 * @return int
145 */
146int bnx2x_alloc_mem(struct bnx2x *bp);
147
148/**
149 * Release driver's memory.
150 *
151 * @param bp
152 */
153void bnx2x_free_mem(struct bnx2x *bp);
154
155/**
156 * Bring up a leading (the first) eth Client.
157 *
158 * @param bp
159 *
160 * @return int
161 */
162int bnx2x_setup_leading(struct bnx2x *bp);
163
164/**
165 * Setup non-leading eth Client.
166 *
167 * @param bp
168 * @param fp
169 *
170 * @return int
171 */
172int bnx2x_setup_multi(struct bnx2x *bp, int index);
173
174/**
 * Set number of queues according to mode and number of available
176 * msi-x vectors
177 *
178 * @param bp
179 *
180 */
181void bnx2x_set_num_queues_msix(struct bnx2x *bp);
182
183/**
184 * Cleanup chip internals:
185 * - Cleanup MAC configuration.
186 * - Close clients.
187 * - etc.
188 *
189 * @param bp
190 * @param unload_mode
191 */
192void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
193
194/**
195 * Acquire HW lock.
196 *
197 * @param bp
198 * @param resource Resource bit which was locked
199 *
200 * @return int
201 */
202int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
203
204/**
205 * Release HW lock.
206 *
207 * @param bp driver handle
208 * @param resource Resource bit which was locked
209 *
210 * @return int
211 */
212int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
213
214/**
215 * Configure eth MAC address in the HW according to the value in
216 * netdev->dev_addr for 57711
217 *
218 * @param bp driver handle
219 * @param set
220 */
221void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
222
223/**
224 * Configure eth MAC address in the HW according to the value in
225 * netdev->dev_addr for 57710
226 *
227 * @param bp driver handle
228 * @param set
229 */
230void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
231
232#ifdef BCM_CNIC
233/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
235 * MAC(s). The function will wait until the ramrod completion
236 * returns.
237 *
238 * @param bp driver handle
239 * @param set set or clear the CAM entry
240 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
242 */
243int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
244#endif
245
246/**
247 * Initialize status block in FW and HW
248 *
249 * @param bp driver handle
250 * @param sb host_status_block
251 * @param dma_addr_t mapping
252 * @param int sb_id
253 */
254void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
255 dma_addr_t mapping, int sb_id);
256
257/**
258 * Reconfigure FW/HW according to dev->flags rx mode
259 *
260 * @param dev net_device
261 *
262 */
263void bnx2x_set_rx_mode(struct net_device *dev);
264
265/**
266 * Configure MAC filtering rules in a FW.
267 *
268 * @param bp driver handle
269 */
270void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
271
272/* Parity errors related */
273void bnx2x_inc_load_cnt(struct bnx2x *bp);
274u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
275bool bnx2x_chk_parity_attn(struct bnx2x *bp);
276bool bnx2x_reset_is_done(struct bnx2x *bp);
277void bnx2x_disable_close_the_gate(struct bnx2x *bp);
278
279/**
280 * Perform statistics handling according to event
281 *
282 * @param bp driver handle
 * @param event bnx2x_stats_event
284 */
285void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
286
287/**
 * Configures FW with client parameters (like HW VLAN removal)
289 * for each active client.
290 *
291 * @param bp
292 */
293void bnx2x_set_client_config(struct bnx2x *bp);
294
295/**
296 * Handle sp events
297 *
298 * @param fp fastpath handle for the event
299 * @param rr_cqe eth_rx_cqe
300 */
301void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
302
303
/* Cache the latest C-storm and U-storm indices from the fastpath status
 * block into the fastpath structure, so they can be compared against the
 * driver's own consumer indices.
 */
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
312
/* Publish new RX BD, CQE and SGE producer values to the FW by writing
 * the ustorm_eth_rx_producers structure, one 32-bit word at a time, into
 * USTORM internal memory for this client.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
347
348
349
/* Write a status-block acknowledgement to the HC command register:
 * builds an igu_ack_register word from the SB id, storm id, new index,
 * interrupt-mode op and update flag, and writes it as one 32-bit access.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
/* Read (and thereby acknowledge) the interrupt status from the HC
 * SIMD mask register for this port; returns the raw register value.
 */
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
383
384/*
385 * fast path service functions
386 */
387
388static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
389{
390 /* Tell compiler that consumer and producer can change */
391 barrier();
392 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
393}
394
/* Number of TX BDs still available on this fastpath ring, accounting for
 * the "next-page" BD entries that can never hold data.
 */
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
416
417static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
418{
419 u16 hw_cons;
420
421 /* Tell compiler that status block fields can change */
422 barrier();
423 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
424 return hw_cons != fp->tx_pkt_cons;
425}
426
427static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
429{
430 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
431 struct page *page = sw_buf->page;
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
433
434 /* Skip "next page" elements */
435 if (!page)
436 return;
437
438 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
439 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
440 __free_pages(page, PAGES_PER_SGE_SHIFT);
441
442 sw_buf->page = NULL;
443 sge->addr_hi = 0;
444 sge->addr_lo = 0;
445}
446
/* Free the first 'last' SGE ring entries of this fastpath. */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
455
456static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
457 struct bnx2x_fastpath *fp, u16 index)
458{
459 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
460 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
461 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
462 dma_addr_t mapping;
463
464 if (unlikely(page == NULL))
465 return -ENOMEM;
466
467 mapping = dma_map_page(&bp->pdev->dev, page, 0,
468 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
469 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
470 __free_pages(page, PAGES_PER_SGE_SHIFT);
471 return -ENOMEM;
472 }
473
474 sw_buf->page = page;
475 dma_unmap_addr_set(sw_buf, mapping, mapping);
476
477 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
478 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
479
480 return 0;
481}
482static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
483 struct bnx2x_fastpath *fp, u16 index)
484{
485 struct sk_buff *skb;
486 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
487 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
488 dma_addr_t mapping;
489
490 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
491 if (unlikely(skb == NULL))
492 return -ENOMEM;
493
494 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
495 DMA_FROM_DEVICE);
496 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
497 dev_kfree_skb(skb);
498 return -ENOMEM;
499 }
500
501 rx_buf->skb = skb;
502 dma_unmap_addr_set(rx_buf, mapping, mapping);
503
504 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
505 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
506
507 return 0;
508}
509
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 *
 * The existing skb, its DMA mapping and the hardware BD are copied from
 * the consumer slot to the producer slot so the buffer can be reused.
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
533
534static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
535{
536 int i, j;
537
538 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
539 int idx = RX_SGE_CNT * i - 1;
540
541 for (j = 0; j < 2; j++) {
542 SGE_MASK_CLEAR_BIT(fp, idx);
543 idx--;
544 }
545 }
546}
547
/* Initialize the SGE mask: all bits set, except for the per-page "next"
 * elements which are cleared via bnx2x_clear_sge_mask_next_elems().
 */
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
/* Release the first 'last' skbs of the TPA pool. Bins still in
 * BNX2X_TPA_START state hold a live DMA mapping that is unmapped first;
 * empty bins are logged and skipped.
 */
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
583
584
/* Initialize the TX rings of every queue: chain the per-page "next BD"
 * entries to the following page of the descriptor ring, reset the
 * doorbell data and zero all producer/consumer indices and counters.
 */
static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			/* point at the next page (wraps to page 0 for i ==
			 * NUM_TX_RINGS via the modulo) */
			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
616static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
617{
618 u16 rx_cons_sb;
619
620 /* Tell compiler that status block fields can change */
621 barrier();
622 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
623 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
624 rx_cons_sb++;
625 return (fp->rx_comp_cons != rx_cons_sb);
626}
627
628/* HW Lock for shared dual port PHYs */
629void bnx2x_acquire_phy_lock(struct bnx2x *bp);
630void bnx2x_release_phy_lock(struct bnx2x *bp);
631
632void bnx2x_link_report(struct bnx2x *bp);
633int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
634int bnx2x_tx_int(struct bnx2x_fastpath *fp);
635void bnx2x_init_rx_rings(struct bnx2x *bp);
636netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
637
638int bnx2x_change_mac_addr(struct net_device *dev, void *p);
639void bnx2x_tx_timeout(struct net_device *dev);
640void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
641void bnx2x_netif_start(struct bnx2x *bp);
642void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
643void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
644int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
645int bnx2x_resume(struct pci_dev *pdev);
646void bnx2x_free_skbs(struct bnx2x *bp);
647int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
648int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
649int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
650int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
651
652#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index 3bb9a91bb3f7..3bb9a91bb3f7 100644
--- a/drivers/net/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 000000000000..8b75b05e34c5
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,1971 @@
1/* bnx2x_ethtool.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#include <linux/ethtool.h>
18#include <linux/netdevice.h>
19#include <linux/types.h>
20#include <linux/sched.h>
21#include <linux/crc32.h>
22
23
24#include "bnx2x.h"
25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h"
27
28
/* ethtool .get_settings handler: report supported/advertised link modes,
 * current speed/duplex, port type, PHY address and autoneg state. */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	/* Report a real speed/duplex only when the device is open, the
	 * function is not disabled (multi-function) and the link is up */
	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* In E1H multi-function mode cap the reported speed
			 * by this vnic's configured max bandwidth (the MF
			 * config field is in 100Mbps units) */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		/* Link down: speed/duplex unknown */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		/* Map the external PHY type to an ethtool port type */
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* Interrupt coalescing is not configured via this interface */
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
109
/* ethtool .set_settings handler: configure autoneg or a forced speed/duplex.
 * In E1H multi-function mode link parameters are owned elsewhere, so the
 * request is silently accepted without effect.  Returns 0 or -EINVAL. */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	/* Link configuration cannot be changed per-function in MF mode */
	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported;
		 * each case validates the combination against the port's
		 * supported mask before building the advertising word */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* Apply the new configuration immediately if the device is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
260
261#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
262#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
263
264static int bnx2x_get_regs_len(struct net_device *dev)
265{
266 struct bnx2x *bp = netdev_priv(dev);
267 int regdump_len = 0;
268 int i;
269
270 if (CHIP_IS_E1(bp)) {
271 for (i = 0; i < REGS_COUNT; i++)
272 if (IS_E1_ONLINE(reg_addrs[i].info))
273 regdump_len += reg_addrs[i].size;
274
275 for (i = 0; i < WREGS_COUNT_E1; i++)
276 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
277 regdump_len += wreg_addrs_e1[i].size *
278 (1 + wreg_addrs_e1[i].read_regs_count);
279
280 } else { /* E1H */
281 for (i = 0; i < REGS_COUNT; i++)
282 if (IS_E1H_ONLINE(reg_addrs[i].info))
283 regdump_len += reg_addrs[i].size;
284
285 for (i = 0; i < WREGS_COUNT_E1H; i++)
286 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
287 regdump_len += wreg_addrs_e1h[i].size *
288 (1 + wreg_addrs_e1h[i].read_regs_count);
289 }
290 regdump_len *= 4;
291 regdump_len += sizeof(struct dump_hdr);
292
293 return regdump_len;
294}
295
/* ethtool .get_regs handler: fill the user buffer with a dump_hdr followed
 * by the values of all online registers for the running chip. */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	/* Zero the whole buffer; anything not written below stays zero */
	memset(p, 0, regs->len);

	/* Registers can only be read while the device is up */
	if (!netif_running(bp->dev))
		return;

	/* Build the dump header (hdr_size is in dwords, excluding itself) */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	/* NOTE(review): bnx2x_get_regs_len() also counts the wreg_addrs_e1*
	 * blocks, which are not dumped here; the tail of the buffer is left
	 * zeroed — confirm this is intended. */
	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
335
336#define PHY_FW_VER_LEN 10
337
338static void bnx2x_get_drvinfo(struct net_device *dev,
339 struct ethtool_drvinfo *info)
340{
341 struct bnx2x *bp = netdev_priv(dev);
342 u8 phy_fw_ver[PHY_FW_VER_LEN];
343
344 strcpy(info->driver, DRV_MODULE_NAME);
345 strcpy(info->version, DRV_MODULE_VERSION);
346
347 phy_fw_ver[0] = '\0';
348 if (bp->port.pmf) {
349 bnx2x_acquire_phy_lock(bp);
350 bnx2x_get_ext_phy_fw_version(&bp->link_params,
351 (bp->state != BNX2X_STATE_CLOSED),
352 phy_fw_ver, PHY_FW_VER_LEN);
353 bnx2x_release_phy_lock(bp);
354 }
355
356 strncpy(info->fw_version, bp->fw_ver, 32);
357 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
358 "bc %d.%d.%d%s%s",
359 (bp->common.bc_ver & 0xff0000) >> 16,
360 (bp->common.bc_ver & 0xff00) >> 8,
361 (bp->common.bc_ver & 0xff),
362 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
363 strcpy(info->bus_info, pci_name(bp->pdev));
364 info->n_stats = BNX2X_NUM_STATS;
365 info->testinfo_len = BNX2X_NUM_TESTS;
366 info->eedump_len = bp->common.flash_size;
367 info->regdump_len = bnx2x_get_regs_len(dev);
368}
369
370static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
371{
372 struct bnx2x *bp = netdev_priv(dev);
373
374 if (bp->flags & NO_WOL_FLAG) {
375 wol->supported = 0;
376 wol->wolopts = 0;
377 } else {
378 wol->supported = WAKE_MAGIC;
379 if (bp->wol)
380 wol->wolopts = WAKE_MAGIC;
381 else
382 wol->wolopts = 0;
383 }
384 memset(&wol->sopass, 0, sizeof(wol->sopass));
385}
386
387static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
388{
389 struct bnx2x *bp = netdev_priv(dev);
390
391 if (wol->wolopts & ~WAKE_MAGIC)
392 return -EINVAL;
393
394 if (wol->wolopts & WAKE_MAGIC) {
395 if (bp->flags & NO_WOL_FLAG)
396 return -EINVAL;
397
398 bp->wol = 1;
399 } else
400 bp->wol = 0;
401
402 return 0;
403}
404
405static u32 bnx2x_get_msglevel(struct net_device *dev)
406{
407 struct bnx2x *bp = netdev_priv(dev);
408
409 return bp->msg_enable;
410}
411
412static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
413{
414 struct bnx2x *bp = netdev_priv(dev);
415
416 if (capable(CAP_NET_ADMIN))
417 bp->msg_enable = level;
418}
419
420static int bnx2x_nway_reset(struct net_device *dev)
421{
422 struct bnx2x *bp = netdev_priv(dev);
423
424 if (!bp->port.pmf)
425 return 0;
426
427 if (netif_running(dev)) {
428 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
429 bnx2x_link_set(bp);
430 }
431
432 return 0;
433}
434
435static u32 bnx2x_get_link(struct net_device *dev)
436{
437 struct bnx2x *bp = netdev_priv(dev);
438
439 if (bp->flags & MF_FUNC_DIS)
440 return 0;
441
442 return bp->link_vars.link_up;
443}
444
445static int bnx2x_get_eeprom_len(struct net_device *dev)
446{
447 struct bnx2x *bp = netdev_priv(dev);
448
449 return bp->common.flash_size;
450}
451
/* Acquire per-port software arbitration over the NVRAM interface by setting
 * the request bit and polling for the grant bit.  Returns 0 on success,
 * -EBUSY on timeout. */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll until this port's arbitration grant bit is set */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
482
/* Release this port's software arbitration over the NVRAM interface by
 * setting the clear-request bit and polling until the grant bit drops.
 * Returns 0 on success, -EBUSY on timeout. */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until this port's grant bit is cleared */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
513
/* Enable read and write access to NVRAM through the MCP register window.
 * Caller must already hold the NVRAM arbitration lock. */
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}
525
/* Disable NVRAM access through the MCP register window; counterpart of
 * bnx2x_enable_nvram_access(). */
static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
537
/* Read one dword from NVRAM at @offset into @ret_val.  The value is stored
 * big-endian so the ethtool byte view matches NVRAM byte order.  @cmd_flags
 * may carry FIRST/LAST markers for multi-dword sequences.  Caller must hold
 * the NVRAM lock with access enabled.  Returns 0 or -EBUSY on timeout. */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
582
/* Read @buf_size bytes from NVRAM at @offset into @ret_buf.  Offset and size
 * must be dword-aligned and non-zero and must fit within the flash.  Handles
 * lock acquisition/release and access enable/disable around the transfer.
 * Returns 0 on success or a negative errno. */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s); the FIRST flag is only set on the initial
	 * dword, the LAST flag only on the final one below */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
637
638static int bnx2x_get_eeprom(struct net_device *dev,
639 struct ethtool_eeprom *eeprom, u8 *eebuf)
640{
641 struct bnx2x *bp = netdev_priv(dev);
642 int rc;
643
644 if (!netif_running(dev))
645 return -EAGAIN;
646
647 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
648 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
649 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
650 eeprom->len, eeprom->len);
651
652 /* parameters already validated in ethtool_get_eeprom */
653
654 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
655
656 return rc;
657}
658
/* Write one dword @val to NVRAM at @offset.  @cmd_flags may carry
 * FIRST/LAST page markers.  Caller must hold the NVRAM lock with access
 * enabled.  Returns 0 or -EBUSY on timeout. */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion (val is reused as the polled status here) */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
698
699#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
700
/* Write a single byte to NVRAM using read-modify-write of the enclosing
 * dword.  @buf_size is expected to be 1 (see bnx2x_nvram_write). */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the dword that contains the target byte */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* Splice the new byte into the dword.
		 * NOTE(review): these host-order shifts are applied to the
		 * big-endian value returned by bnx2x_nvram_read_dword, before
		 * the be32_to_cpu conversion below — verify the byte selected
		 * by BYTE_OFFSET() is the intended one on both endiannesses. */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
746
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.  A single-byte
 * write is delegated to bnx2x_nvram_write1(); otherwise offset and size must
 * be dword-aligned.  FIRST/LAST command flags mark NVRAM page boundaries for
 * the controller.  Returns 0 on success or a negative errno. */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword or at the end of a flash page;
		 * FIRST again at the start of each new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
807
/* ethtool .set_eeprom handler.  Magic values in the 0x504859xx range drive
 * external-PHY firmware upgrade phases (restricted to the PMF); any other
 * magic results in a plain NVRAM write. */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		/* NOTE(review): ASCII 'PHYC' would be 0x50485943; the value
		 * checked here (0x53985943) does not match the comment —
		 * confirm which is intended. */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
882static int bnx2x_get_coalesce(struct net_device *dev,
883 struct ethtool_coalesce *coal)
884{
885 struct bnx2x *bp = netdev_priv(dev);
886
887 memset(coal, 0, sizeof(struct ethtool_coalesce));
888
889 coal->rx_coalesce_usecs = bp->rx_ticks;
890 coal->tx_coalesce_usecs = bp->tx_ticks;
891
892 return 0;
893}
894
895static int bnx2x_set_coalesce(struct net_device *dev,
896 struct ethtool_coalesce *coal)
897{
898 struct bnx2x *bp = netdev_priv(dev);
899
900 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
901 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
902 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
903
904 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
905 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
906 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
907
908 if (netif_running(dev))
909 bnx2x_update_coalesce(bp);
910
911 return 0;
912}
913
914static void bnx2x_get_ringparam(struct net_device *dev,
915 struct ethtool_ringparam *ering)
916{
917 struct bnx2x *bp = netdev_priv(dev);
918
919 ering->rx_max_pending = MAX_RX_AVAIL;
920 ering->rx_mini_max_pending = 0;
921 ering->rx_jumbo_max_pending = 0;
922
923 ering->rx_pending = bp->rx_ring_size;
924 ering->rx_mini_pending = 0;
925 ering->rx_jumbo_pending = 0;
926
927 ering->tx_max_pending = MAX_TX_AVAIL;
928 ering->tx_pending = bp->tx_ring_size;
929}
930
931static int bnx2x_set_ringparam(struct net_device *dev,
932 struct ethtool_ringparam *ering)
933{
934 struct bnx2x *bp = netdev_priv(dev);
935 int rc = 0;
936
937 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
938 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
939 return -EAGAIN;
940 }
941
942 if ((ering->rx_pending > MAX_RX_AVAIL) ||
943 (ering->tx_pending > MAX_TX_AVAIL) ||
944 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
945 return -EINVAL;
946
947 bp->rx_ring_size = ering->rx_pending;
948 bp->tx_ring_size = ering->tx_pending;
949
950 if (netif_running(dev)) {
951 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
952 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
953 }
954
955 return rc;
956}
957
/* ethtool .get_pauseparam handler: report whether pause is autonegotiated
 * and the current Rx/Tx pause state from the link variables. */
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Pause counts as autonegotiated only when both flow control and
	 * line speed are set to autoneg */
	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
976
/* ethtool .set_pauseparam handler: configure Rx/Tx flow control and whether
 * it is autonegotiated.  Ignored in multi-function mode. */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* Neither direction requested: force flow control off */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* Pause autoneg only applies when speed is autonegotiated */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	/* Apply immediately if the device is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
1020
/* ethtool .set_flags handler: toggle LRO (hardware TPA) and Rx hashing.
 * A change to LRO requires reloading the NIC.  Rejected during parity
 * recovery; only LRO and RXHASH may be changed via this entry point. */
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
		return -EINVAL;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!bp->disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		/* LRO turned off (or Rx CSUM disabled): drop TPA */
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (data & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;

	/* LRO change takes effect only after reloading the NIC */
	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
1063
1064static u32 bnx2x_get_rx_csum(struct net_device *dev)
1065{
1066 struct bnx2x *bp = netdev_priv(dev);
1067
1068 return bp->rx_csum;
1069}
1070
/* ethtool .set_rx_csum handler.  Disabling Rx checksum also forces LRO
 * (TPA) off via bnx2x_set_flags(), since aggregated packets depend on
 * hardware checksum validation. */
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
1093
1094static int bnx2x_set_tso(struct net_device *dev, u32 data)
1095{
1096 if (data) {
1097 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
1098 dev->features |= NETIF_F_TSO6;
1099 } else {
1100 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
1101 dev->features &= ~NETIF_F_TSO6;
1102 }
1103
1104 return 0;
1105}
1106
/* Names of the ethtool self-test results, returned via get_strings().
 * Order must match the result slots filled in by the self-test code
 * (BNX2X_NUM_TESTS entries). */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
1118
/* Self-test: for every register in reg_tbl, write 0x00000000 and then
 * 0xffffffff (masked to the register's writable bits), read back and verify,
 * restoring the original value each time.  offset1 is the per-port stride
 * added to offset0.  Returns 0 on success, -ENODEV on any mismatch or if
 * the interface is down. */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* base register address */
		u32 offset1;	/* per-port stride */
		u32 mask;	/* writable/verifiable bits */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		/* sentinel terminating the table */
		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
1215
1216static int bnx2x_test_memory(struct bnx2x *bp)
1217{
1218 int i, j, rc = -ENODEV;
1219 u32 val;
1220 static const struct {
1221 u32 offset;
1222 int size;
1223 } mem_tbl[] = {
1224 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
1225 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
1226 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
1227 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
1228 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
1229 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
1230 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
1231
1232 { 0xffffffff, 0 }
1233 };
1234 static const struct {
1235 char *name;
1236 u32 offset;
1237 u32 e1_mask;
1238 u32 e1h_mask;
1239 } prty_tbl[] = {
1240 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
1241 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
1242 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
1243 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
1244 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
1245 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
1246
1247 { NULL, 0xffffffff, 0, 0 }
1248 };
1249
1250 if (!netif_running(bp->dev))
1251 return rc;
1252
1253 /* Go through all the memories */
1254 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
1255 for (j = 0; j < mem_tbl[i].size; j++)
1256 REG_RD(bp, mem_tbl[i].offset + j*4);
1257
1258 /* Check the parity status */
1259 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1260 val = REG_RD(bp, prty_tbl[i].offset);
1261 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1262 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
1263 DP(NETIF_MSG_HW,
1264 "%s is 0x%x\n", prty_tbl[i].name, val);
1265 goto test_mem_exit;
1266 }
1267 }
1268
1269 rc = 0;
1270
1271test_mem_exit:
1272 return rc;
1273}
1274
1275static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
1276{
1277 int cnt = 1000;
1278
1279 if (link_up)
1280 while (bnx2x_link_test(bp) && cnt--)
1281 msleep(10);
1282}
1283
/*
 * Send one self-addressed frame through the requested loopback path on
 * queue 0 and verify it is echoed back intact on the RX side.
 *
 * @loopback_mode: BNX2X_PHY_LOOPBACK (link must already be configured
 *                 LOOPBACK_XGXS_10) or BNX2X_MAC_LOOPBACK (re-inits the
 *                 PHY into BMAC loopback here).
 * @link_up:       not referenced in this function's body.
 *
 * Returns 0 when the frame round-trips byte-for-byte, -EINVAL for an
 * unsupported mode, -ENOMEM on skb allocation failure, -ENODEV for any
 * TX/RX/payload mismatch.
 *
 * NOTE(review): assumes the caller has quiesced the interface and holds
 * the PHY lock (see bnx2x_test_loopback) -- confirm before reusing.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];	/* loopback uses queue 0 */
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];	/* for both directions */
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dest MAC = our own address, zeroed
	 * source MAC, 0x77 filler for the rest of the header, then a
	 * counting byte pattern as payload (verified on RX below) */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet: snapshot TX/RX consumer indices first
	 * so we can tell whether exactly one packet made the round trip */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* fill the start BD: DMA address, length and flags */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* BDs must be visible to the chip before the doorbell rings */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the frame back */
	udelay(100);

	/* exactly one packet must have completed on TX... */
	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* ...and exactly one must have arrived on RX */
	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* the CQE must be a fast-path completion without error flags */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the counting payload byte-for-byte */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* advance the RX BD and completion rings past the consumed entry */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
1418
1419static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
1420{
1421 int rc = 0, res;
1422
1423 if (BP_NOMCP(bp))
1424 return rc;
1425
1426 if (!netif_running(bp->dev))
1427 return BNX2X_LOOPBACK_FAILED;
1428
1429 bnx2x_netif_stop(bp, 1);
1430 bnx2x_acquire_phy_lock(bp);
1431
1432 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
1433 if (res) {
1434 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
1435 rc |= BNX2X_PHY_LOOPBACK_FAILED;
1436 }
1437
1438 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
1439 if (res) {
1440 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
1441 rc |= BNX2X_MAC_LOOPBACK_FAILED;
1442 }
1443
1444 bnx2x_release_phy_lock(bp);
1445 bnx2x_netif_start(bp);
1446
1447 return rc;
1448}
1449
/* CRC-32 residue: the value ether_crc_le() yields over any block whose
 * trailing bytes contain its own (complemented, little-endian) CRC */
#define CRC32_RESIDUAL 0xdebb20e3
1451
1452static int bnx2x_test_nvram(struct bnx2x *bp)
1453{
1454 static const struct {
1455 int offset;
1456 int size;
1457 } nvram_tbl[] = {
1458 { 0, 0x14 }, /* bootstrap */
1459 { 0x14, 0xec }, /* dir */
1460 { 0x100, 0x350 }, /* manuf_info */
1461 { 0x450, 0xf0 }, /* feature_info */
1462 { 0x640, 0x64 }, /* upgrade_key_info */
1463 { 0x6a4, 0x64 },
1464 { 0x708, 0x70 }, /* manuf_key_info */
1465 { 0x778, 0x70 },
1466 { 0, 0 }
1467 };
1468 __be32 buf[0x350 / 4];
1469 u8 *data = (u8 *)buf;
1470 int i, rc;
1471 u32 magic, crc;
1472
1473 if (BP_NOMCP(bp))
1474 return 0;
1475
1476 rc = bnx2x_nvram_read(bp, 0, data, 4);
1477 if (rc) {
1478 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
1479 goto test_nvram_exit;
1480 }
1481
1482 magic = be32_to_cpu(buf[0]);
1483 if (magic != 0x669955aa) {
1484 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
1485 rc = -ENODEV;
1486 goto test_nvram_exit;
1487 }
1488
1489 for (i = 0; nvram_tbl[i].size; i++) {
1490
1491 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
1492 nvram_tbl[i].size);
1493 if (rc) {
1494 DP(NETIF_MSG_PROBE,
1495 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
1496 goto test_nvram_exit;
1497 }
1498
1499 crc = ether_crc_le(nvram_tbl[i].size, data);
1500 if (crc != CRC32_RESIDUAL) {
1501 DP(NETIF_MSG_PROBE,
1502 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
1503 rc = -ENODEV;
1504 goto test_nvram_exit;
1505 }
1506 }
1507
1508test_nvram_exit:
1509 return rc;
1510}
1511
/*
 * Interrupt test: post a zero-length SET_MAC ramrod on the slow path and
 * wait for its completion handler to clear bp->set_mac_pending, proving
 * that slow-path interrupts are being delivered.  Returns 0 on success,
 * -ENODEV if the device is down or the ramrod does not complete within
 * ~100ms (10 x 10ms polls).
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* length 0: a harmless no-op command whose completion still
	 * generates a slow-path event */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* mark the ramrod pending before posting so the completion
	 * handler's decrement cannot race ahead of us */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll up to 10 x 10ms for completion.
		 * NOTE(review): smp_rmb() is placed after the read of
		 * set_mac_pending rather than before it, which looks
		 * inverted for a read barrier -- confirm intent. */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;	/* ramrod never completed */
	}

	return rc;
}
1547
/*
 * ethtool .self_test handler.  Runs the offline tests (registers, memory,
 * loopback -- only when ETH_TEST_FL_OFFLINE is requested, and never in
 * E1H multi-function mode) followed by the online tests (nvram,
 * interrupt, and link on the PMF).  Per-test results land in buf[] in
 * the order of bnx2x_tests_str_arr (0 = pass); any failure also sets
 * ETH_TEST_FL_FAILED in etest->flags.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* refuse to run while parity-error recovery is in progress */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* reload the chip in diagnostic mode for the offline tests */
		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] carries the loopback failure bitmask directly */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* reload in normal mode to resume operation */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* link test is only meaningful on the port management function */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
1623
/* Per-queue ethtool statistics table.  'offset' indexes into the queue's
 * eth_q_stats structure in 32-bit words (see Q_STATS_OFFSET32 usage in
 * bnx2x_get_ethtool_stats), 'size' is the counter width in bytes (4 or
 * 8; 8-byte counters are read as hi/lo word pairs), and 'string' is a
 * printf template where %d is filled with the queue index. */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%d]: tx_bcast_packets" }
};
1653
/* Global ethtool statistics table.  'offset' indexes bp->eth_stats in
 * 32-bit words (via STATS_OFFSET32), 'size' is the counter width in
 * bytes (4 or 8), and 'flags' says whether the counter is meaningful
 * per-port, per-function, or both -- used to filter port-only stats in
 * E1H multi-function mode (see IS_PORT_STAT/IS_FUNC_STAT below). */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
1751
/* A stat is port-only when its flags carry PORT but not FUNC */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
/* A stat is per-function when its flags carry FUNC (possibly with PORT) */
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In E1H multi-function mode port-wide stats are hidden unless the
 * BNX2X_MSG_STATS debug flag is set in msg_enable */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
1757
1758static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
1759{
1760 struct bnx2x *bp = netdev_priv(dev);
1761 int i, num_stats;
1762
1763 switch (stringset) {
1764 case ETH_SS_STATS:
1765 if (is_multi(bp)) {
1766 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
1767 if (!IS_E1HMF_MODE_STAT(bp))
1768 num_stats += BNX2X_NUM_STATS;
1769 } else {
1770 if (IS_E1HMF_MODE_STAT(bp)) {
1771 num_stats = 0;
1772 for (i = 0; i < BNX2X_NUM_STATS; i++)
1773 if (IS_FUNC_STAT(i))
1774 num_stats++;
1775 } else
1776 num_stats = BNX2X_NUM_STATS;
1777 }
1778 return num_stats;
1779
1780 case ETH_SS_TEST:
1781 return BNX2X_NUM_TESTS;
1782
1783 default:
1784 return -EINVAL;
1785 }
1786}
1787
1788static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1789{
1790 struct bnx2x *bp = netdev_priv(dev);
1791 int i, j, k;
1792
1793 switch (stringset) {
1794 case ETH_SS_STATS:
1795 if (is_multi(bp)) {
1796 k = 0;
1797 for_each_queue(bp, i) {
1798 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
1799 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
1800 bnx2x_q_stats_arr[j].string, i);
1801 k += BNX2X_NUM_Q_STATS;
1802 }
1803 if (IS_E1HMF_MODE_STAT(bp))
1804 break;
1805 for (j = 0; j < BNX2X_NUM_STATS; j++)
1806 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
1807 bnx2x_stats_arr[j].string);
1808 } else {
1809 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
1810 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
1811 continue;
1812 strcpy(buf + j*ETH_GSTRING_LEN,
1813 bnx2x_stats_arr[i].string);
1814 j++;
1815 }
1816 }
1817 break;
1818
1819 case ETH_SS_TEST:
1820 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
1821 break;
1822 }
1823}
1824
/*
 * ethtool .get_ethtool_stats handler: copy counters into buf in the
 * exact order advertised by bnx2x_get_strings.  Counters are read out
 * of the stats structures word-wise: size 4 => single u32, size 8 =>
 * hi/lo u32 pair combined with HILO_U64, size 0 => placeholder zero.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		/* per-queue stats first, BNX2X_NUM_Q_STATS per queue */
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		/* global stats follow, unless hidden in E1H MF mode */
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		/* single queue: globals only, with the same port-stat
		 * filtering as bnx2x_get_strings (j tracks the output
		 * slot separately from the table index i) */
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
1896
1897static int bnx2x_phys_id(struct net_device *dev, u32 data)
1898{
1899 struct bnx2x *bp = netdev_priv(dev);
1900 int i;
1901
1902 if (!netif_running(dev))
1903 return 0;
1904
1905 if (!bp->port.pmf)
1906 return 0;
1907
1908 if (data == 0)
1909 data = 2;
1910
1911 for (i = 0; i < (data * 2); i++) {
1912 if ((i % 2) == 0)
1913 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
1914 SPEED_1000);
1915 else
1916 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
1917
1918 msleep_interruptible(500);
1919 if (signal_pending(current))
1920 break;
1921 }
1922
1923 if (bp->link_vars.link_up)
1924 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
1925 bp->link_vars.line_speed);
1926
1927 return 0;
1928}
1929
/* ethtool operations table for bnx2x devices; installed on the netdev
 * by bnx2x_set_ethtool_ops() below */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
1967
/* Attach the bnx2x ethtool operations to a newly created netdev */
void bnx2x_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
}
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 08d71bf438d6..08d71bf438d6 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
diff --git a/drivers/net/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index 3f5ee5d7cc2a..3f5ee5d7cc2a 100644
--- a/drivers/net/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index fd1f29e0317d..fd1f29e0317d 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 65b26cbfe3e7..65b26cbfe3e7 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 2b1363a6fe78..2b1363a6fe78 100644
--- a/drivers/net/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index ff70be898765..0383e3066313 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -4266,14 +4266,16 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4266 MDIO_PMA_REG_10G_CTRL2, 0x0008); 4266 MDIO_PMA_REG_10G_CTRL2, 0x0008);
4267 } 4267 }
4268 4268
4269 /* Set 2-wire transfer rate to 400Khz since 100Khz 4269 /* Set 2-wire transfer rate of SFP+ module EEPROM
4270 is not operational */ 4270 * to 100Khz since some DACs(direct attached cables) do
4271 * not work at 400Khz.
4272 */
4271 bnx2x_cl45_write(bp, params->port, 4273 bnx2x_cl45_write(bp, params->port,
4272 ext_phy_type, 4274 ext_phy_type,
4273 ext_phy_addr, 4275 ext_phy_addr,
4274 MDIO_PMA_DEVAD, 4276 MDIO_PMA_DEVAD,
4275 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, 4277 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4276 0xa101); 4278 0xa001);
4277 4279
4278 /* Set TX PreEmphasis if needed */ 4280 /* Set TX PreEmphasis if needed */
4279 if ((params->feature_config_flags & 4281 if ((params->feature_config_flags &
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 40c2981de8ed..40c2981de8ed 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 46167c081727..b4ec2b02a465 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -51,15 +51,12 @@
51#include <linux/io.h> 51#include <linux/io.h>
52#include <linux/stringify.h> 52#include <linux/stringify.h>
53 53
54 54#define BNX2X_MAIN
55#include "bnx2x.h" 55#include "bnx2x.h"
56#include "bnx2x_init.h" 56#include "bnx2x_init.h"
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_cmn.h"
59 59
60#define DRV_MODULE_VERSION "1.52.53-2"
61#define DRV_MODULE_RELDATE "2010/21/07"
62#define BNX2X_BC_VER 0x040200
63 60
64#include <linux/firmware.h> 61#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h" 62#include "bnx2x_fw_file_hdr.h"
@@ -121,8 +118,6 @@ static int debug;
121module_param(debug, int, 0); 118module_param(debug, int, 0);
122MODULE_PARM_DESC(debug, " Default debug msglevel"); 119MODULE_PARM_DESC(debug, " Default debug msglevel");
123 120
124static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
125
126static struct workqueue_struct *bnx2x_wq; 121static struct workqueue_struct *bnx2x_wq;
127 122
128enum bnx2x_board_type { 123enum bnx2x_board_type {
@@ -177,7 +172,7 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
177 return val; 172 return val;
178} 173}
179 174
180static const u32 dmae_reg_go_c[] = { 175const u32 dmae_reg_go_c[] = {
181 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, 176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, 177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, 178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
@@ -185,8 +180,7 @@ static const u32 dmae_reg_go_c[] = {
185}; 180};
186 181
187/* copy command into DMAE command memory and set DMAE command go */ 182/* copy command into DMAE command memory and set DMAE command go */
188static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, 183void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
189 int idx)
190{ 184{
191 u32 cmd_offset; 185 u32 cmd_offset;
192 int i; 186 int i;
@@ -541,7 +535,7 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
541 pr_err("end of fw dump\n"); 535 pr_err("end of fw dump\n");
542} 536}
543 537
544static void bnx2x_panic_dump(struct bnx2x *bp) 538void bnx2x_panic_dump(struct bnx2x *bp)
545{ 539{
546 int i; 540 int i;
547 u16 j, start, end; 541 u16 j, start, end;
@@ -654,7 +648,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
654 BNX2X_ERR("end crash dump -----------------\n"); 648 BNX2X_ERR("end crash dump -----------------\n");
655} 649}
656 650
657static void bnx2x_int_enable(struct bnx2x *bp) 651void bnx2x_int_enable(struct bnx2x *bp)
658{ 652{
659 int port = BP_PORT(bp); 653 int port = BP_PORT(bp);
660 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -736,7 +730,7 @@ static void bnx2x_int_disable(struct bnx2x *bp)
736 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 730 BNX2X_ERR("BUG! proper val not read from IGU!\n");
737} 731}
738 732
739static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 733void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
740{ 734{
741 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
742 int i, offset; 736 int i, offset;
@@ -806,235 +800,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
806 return false; 800 return false;
807} 801}
808 802
809static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810 u8 storm, u16 index, u8 op, u8 update)
811{
812 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813 COMMAND_REG_INT_ACK);
814 struct igu_ack_register igu_ack;
815
816 igu_ack.status_block_index = index;
817 igu_ack.sb_id_and_flags =
818 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
822
823 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824 (*(u32 *)&igu_ack), hc_addr);
825 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
826
827 /* Make sure that ACK is written */
828 mmiowb();
829 barrier();
830}
831
832static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
833{
834 struct host_status_block *fpsb = fp->status_blk;
835
836 barrier(); /* status block is written to by the chip */
837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
839}
840
841static u16 bnx2x_ack_int(struct bnx2x *bp)
842{
843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
846
847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
848 result, hc_addr);
849
850 return result;
851}
852
853
854/*
855 * fast path service functions
856 */
857
858static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
859{
860 /* Tell compiler that consumer and producer can change */
861 barrier();
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
863}
864
865/* free skb in the packet ring at pos idx
866 * return idx of last bd freed
867 */
868static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
869 u16 idx)
870{
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
874 struct sk_buff *skb = tx_buf->skb;
875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
876 int nbd;
877
878 /* prefetch skb end pointer to speedup dev_kfree_skb() */
879 prefetch(&skb->end);
880
881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
882 idx, tx_buf, skb);
883
884 /* unmap first bd */
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
889
890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891#ifdef BNX2X_STOP_ON_ERROR
892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893 BNX2X_ERR("BAD nbd!\n");
894 bnx2x_panic();
895 }
896#endif
897 new_cons = nbd + tx_buf->first_bd;
898
899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
901
902 /* Skip a parse bd... */
903 --nbd;
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
905
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
908 --nbd;
909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
910 }
911
912 /* now free frags */
913 while (nbd > 0) {
914
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
919 if (--nbd)
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
921 }
922
923 /* release skb */
924 WARN_ON(!skb);
925 dev_kfree_skb(skb);
926 tx_buf->first_bd = 0;
927 tx_buf->skb = NULL;
928
929 return new_cons;
930}
931
932static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
933{
934 s16 used;
935 u16 prod;
936 u16 cons;
937
938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
940
941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
944
945#ifdef BNX2X_STOP_ON_ERROR
946 WARN_ON(used < 0);
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
949#endif
950
951 return (s16)(fp->bp->tx_ring_size) - used;
952}
953
954static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
955{
956 u16 hw_cons;
957
958 /* Tell compiler that status block fields can change */
959 barrier();
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
962}
963
964static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
965{
966 struct bnx2x *bp = fp->bp;
967 struct netdev_queue *txq;
968 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
969
970#ifdef BNX2X_STOP_ON_ERROR
971 if (unlikely(bp->panic))
972 return -1;
973#endif
974
975 txq = netdev_get_tx_queue(bp->dev, fp->index);
976 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977 sw_cons = fp->tx_pkt_cons;
978
979 while (sw_cons != hw_cons) {
980 u16 pkt_cons;
981
982 pkt_cons = TX_BD(sw_cons);
983
984 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
985
986 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
987 hw_cons, sw_cons, pkt_cons);
988
989/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
990 rmb();
991 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
992 }
993*/
994 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
995 sw_cons++;
996 }
997
998 fp->tx_pkt_cons = sw_cons;
999 fp->tx_bd_cons = bd_cons;
1000
1001 /* Need to make the tx_bd_cons update visible to start_xmit()
1002 * before checking for netif_tx_queue_stopped(). Without the
1003 * memory barrier, there is a small possibility that
1004 * start_xmit() will miss it and cause the queue to be stopped
1005 * forever.
1006 */
1007 smp_mb();
1008
1009 /* TBD need a thresh? */
1010 if (unlikely(netif_tx_queue_stopped(txq))) {
1011 /* Taking tx_lock() is needed to prevent reenabling the queue
1012 * while it's empty. This could have happen if rx_action() gets
1013 * suspended in bnx2x_tx_int() after the condition before
1014 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1015 *
1016 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017 * sends some packets consuming the whole queue again->
1018 * stops the queue
1019 */
1020
1021 __netif_tx_lock(txq, smp_processor_id());
1022
1023 if ((netif_tx_queue_stopped(txq)) &&
1024 (bp->state == BNX2X_STATE_OPEN) &&
1025 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026 netif_tx_wake_queue(txq);
1027
1028 __netif_tx_unlock(txq);
1029 }
1030 return 0;
1031}
1032 803
1033#ifdef BCM_CNIC 804#ifdef BCM_CNIC
1034static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); 805static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1035#endif 806#endif
1036 807
1037static void bnx2x_sp_event(struct bnx2x_fastpath *fp, 808void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038 union eth_rx_cqe *rr_cqe) 809 union eth_rx_cqe *rr_cqe)
1039{ 810{
1040 struct bnx2x *bp = fp->bp; 811 struct bnx2x *bp = fp->bp;
@@ -1118,703 +889,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1118 mb(); /* force bnx2x_wait_ramrod() to see the change */ 889 mb(); /* force bnx2x_wait_ramrod() to see the change */
1119} 890}
1120 891
1121static inline void bnx2x_free_rx_sge(struct bnx2x *bp, 892irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1122 struct bnx2x_fastpath *fp, u16 index)
1123{
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1127
1128 /* Skip "next page" elements */
1129 if (!page)
1130 return;
1131
1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1135
1136 sw_buf->page = NULL;
1137 sge->addr_hi = 0;
1138 sge->addr_lo = 0;
1139}
1140
1141static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, int last)
1143{
1144 int i;
1145
1146 for (i = 0; i < last; i++)
1147 bnx2x_free_rx_sge(bp, fp, i);
1148}
1149
1150static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1152{
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1156 dma_addr_t mapping;
1157
1158 if (unlikely(page == NULL))
1159 return -ENOMEM;
1160
1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1165 return -ENOMEM;
1166 }
1167
1168 sw_buf->page = page;
1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
1170
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1173
1174 return 0;
1175}
1176
1177static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1179{
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1183 dma_addr_t mapping;
1184
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1187 return -ENOMEM;
1188
1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1190 DMA_FROM_DEVICE);
1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1192 dev_kfree_skb(skb);
1193 return -ENOMEM;
1194 }
1195
1196 rx_buf->skb = skb;
1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
1198
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1201
1202 return 0;
1203}
1204
1205/* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod
1207 * we are not creating a new mapping,
1208 * so there is no need to check for dma_mapping_error().
1209 */
1210static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1212{
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1218
1219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
1222
1223 prod_rx_buf->skb = cons_rx_buf->skb;
1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
1226 *prod_bd = *cons_bd;
1227}
1228
1229static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1230 u16 idx)
1231{
1232 u16 last_max = fp->last_max_sge;
1233
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
1236}
1237
1238static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1239{
1240 int i, j;
1241
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1244
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1247 idx--;
1248 }
1249 }
1250}
1251
1252static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253 struct eth_fast_path_rx_cqe *fp_cqe)
1254{
1255 struct bnx2x *bp = fp->bp;
1256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257 le16_to_cpu(fp_cqe->len_on_bd)) >>
1258 SGE_PAGE_SHIFT;
1259 u16 last_max, last_elem, first_elem;
1260 u16 delta = 0;
1261 u16 i;
1262
1263 if (!sge_len)
1264 return;
1265
1266 /* First mark all used pages */
1267 for (i = 0; i < sge_len; i++)
1268 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1269
1270 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1272
1273 /* Here we assume that the last SGE index is the biggest */
1274 prefetch((void *)(fp->sge_mask));
1275 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1276
1277 last_max = RX_SGE(fp->last_max_sge);
1278 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1280
1281 /* If ring is not full */
1282 if (last_elem + 1 != first_elem)
1283 last_elem++;
1284
1285 /* Now update the prod */
1286 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287 if (likely(fp->sge_mask[i]))
1288 break;
1289
1290 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291 delta += RX_SGE_MASK_ELEM_SZ;
1292 }
1293
1294 if (delta > 0) {
1295 fp->rx_sge_prod += delta;
1296 /* clear page-end entries */
1297 bnx2x_clear_sge_mask_next_elems(fp);
1298 }
1299
1300 DP(NETIF_MSG_RX_STATUS,
1301 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1302 fp->last_max_sge, fp->rx_sge_prod);
1303}
1304
1305static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1306{
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1310
1311 /* Clear the two last indices in the page to 1:
1312 these are the indices that correspond to the "next" element,
1313 hence will never be indicated and should be removed from
1314 the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
1316}
1317
1318static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319 struct sk_buff *skb, u16 cons, u16 prod)
1320{
1321 struct bnx2x *bp = fp->bp;
1322 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1325 dma_addr_t mapping;
1326
1327 /* move empty skb from pool to prod and map it */
1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1332
1333 /* move partial skb from cons to pool (don't unmap yet) */
1334 fp->tpa_pool[queue] = *cons_rx_buf;
1335
1336 /* mark bin state as start - print error if current state != stop */
1337 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1339
1340 fp->tpa_state[queue] = BNX2X_TPA_START;
1341
1342 /* point prod_bd to new skb */
1343 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1345
1346#ifdef BNX2X_STOP_ON_ERROR
1347 fp->tpa_queue_used |= (1 << queue);
1348#ifdef _ASM_GENERIC_INT_L64_H
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1350#else
1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1352#endif
1353 fp->tpa_queue_used);
1354#endif
1355}
1356
1357static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358 struct sk_buff *skb,
1359 struct eth_fast_path_rx_cqe *fp_cqe,
1360 u16 cqe_idx)
1361{
1362 struct sw_rx_page *rx_pg, old_rx_pg;
1363 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364 u32 i, frag_len, frag_size, pages;
1365 int err;
1366 int j;
1367
1368 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1370
1371 /* This is needed in order to enable forwarding support */
1372 if (frag_size)
1373 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374 max(frag_size, (u32)len_on_bd));
1375
1376#ifdef BNX2X_STOP_ON_ERROR
1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1379 pages, cqe_idx);
1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1381 fp_cqe->pkt_len, len_on_bd);
1382 bnx2x_panic();
1383 return -EINVAL;
1384 }
1385#endif
1386
1387 /* Run through the SGL and compose the fragmented skb */
1388 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1390
1391 /* FW gives the indices of the SGE as if the ring is an array
1392 (meaning that "next" element will consume 2 indices) */
1393 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394 rx_pg = &fp->rx_page_ring[sge_idx];
1395 old_rx_pg = *rx_pg;
1396
1397 /* If we fail to allocate a substitute page, we simply stop
1398 where we are and drop the whole packet */
1399 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400 if (unlikely(err)) {
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1402 return err;
1403 }
1404
1405 /* Unmap the page as we r going to pass it to the stack */
1406 dma_unmap_page(&bp->pdev->dev,
1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1409
1410 /* Add one frag and update the appropriate fields in the skb */
1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1412
1413 skb->data_len += frag_len;
1414 skb->truesize += frag_len;
1415 skb->len += frag_len;
1416
1417 frag_size -= frag_len;
1418 }
1419
1420 return 0;
1421}
1422
1423static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425 u16 cqe_idx)
1426{
1427 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428 struct sk_buff *skb = rx_buf->skb;
1429 /* alloc new skb */
1430 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432 /* Unmap skb in the pool anyway, as we are going to change
1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434 fails. */
1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436 bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438 if (likely(new_skb)) {
1439 /* fix ip xsum and give it to the stack */
1440 /* (no need to map the new skb) */
1441#ifdef BCM_VLAN
1442 int is_vlan_cqe =
1443 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444 PARSING_FLAGS_VLAN);
1445 int is_not_hwaccel_vlan_cqe =
1446 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447#endif
1448
1449 prefetch(skb);
1450 prefetch(((char *)(skb)) + 128);
1451
1452#ifdef BNX2X_STOP_ON_ERROR
1453 if (pad + len > bp->rx_buf_size) {
1454 BNX2X_ERR("skb_put is about to fail... "
1455 "pad %d len %d rx_buf_size %d\n",
1456 pad, len, bp->rx_buf_size);
1457 bnx2x_panic();
1458 return;
1459 }
1460#endif
1461
1462 skb_reserve(skb, pad);
1463 skb_put(skb, len);
1464
1465 skb->protocol = eth_type_trans(skb, bp->dev);
1466 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468 {
1469 struct iphdr *iph;
1470
1471 iph = (struct iphdr *)skb->data;
1472#ifdef BCM_VLAN
1473 /* If there is no Rx VLAN offloading -
1474 take VLAN tag into an account */
1475 if (unlikely(is_not_hwaccel_vlan_cqe))
1476 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477#endif
1478 iph->check = 0;
1479 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480 }
1481
1482 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483 &cqe->fast_path_cqe, cqe_idx)) {
1484#ifdef BCM_VLAN
1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486 (!is_not_hwaccel_vlan_cqe))
1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488 le16_to_cpu(cqe->fast_path_cqe.
1489 vlan_tag), skb);
1490 else
1491#endif
1492 napi_gro_receive(&fp->napi, skb);
1493 } else {
1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495 " - dropping packet!\n");
1496 dev_kfree_skb(skb);
1497 }
1498
1499
1500 /* put new skb in bin */
1501 fp->tpa_pool[queue].skb = new_skb;
1502
1503 } else {
1504 /* else drop the packet and keep the buffer in the bin */
1505 DP(NETIF_MSG_RX_STATUS,
1506 "Failed to allocate new skb - dropping packet!\n");
1507 fp->eth_q_stats.rx_skb_alloc_failed++;
1508 }
1509
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511}
1512
1513static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1516 u16 rx_sge_prod)
1517{
1518 struct ustorm_eth_rx_producers rx_prods = {0};
1519 int i;
1520
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1525
1526 /*
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1529 * is updated.
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW will
1532 * assumes BDs must have buffers.
1533 */
1534 wmb();
1535
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539 ((u32 *)&rx_prods)[i]);
1540
1541 mmiowb(); /* keep prod updates ordered */
1542
1543 DP(NETIF_MSG_RX_STATUS,
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546}
1547
1548static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549{
1550 struct bnx2x *bp = fp->bp;
1551 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553 int rx_pkt = 0;
1554
1555#ifdef BNX2X_STOP_ON_ERROR
1556 if (unlikely(bp->panic))
1557 return 0;
1558#endif
1559
1560 /* CQ "next element" is of the size of the regular element,
1561 that's why it's ok here */
1562 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564 hw_comp_cons++;
1565
1566 bd_cons = fp->rx_bd_cons;
1567 bd_prod = fp->rx_bd_prod;
1568 bd_prod_fw = bd_prod;
1569 sw_comp_cons = fp->rx_comp_cons;
1570 sw_comp_prod = fp->rx_comp_prod;
1571
1572 /* Memory barrier necessary as speculative reads of the rx
1573 * buffer can be ahead of the index in the status block
1574 */
1575 rmb();
1576
1577 DP(NETIF_MSG_RX_STATUS,
1578 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1579 fp->index, hw_comp_cons, sw_comp_cons);
1580
1581 while (sw_comp_cons != hw_comp_cons) {
1582 struct sw_rx_bd *rx_buf = NULL;
1583 struct sk_buff *skb;
1584 union eth_rx_cqe *cqe;
1585 u8 cqe_fp_flags, cqe_fp_status_flags;
1586 u16 len, pad;
1587
1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589 bd_prod = RX_BD(bd_prod);
1590 bd_cons = RX_BD(bd_cons);
1591
1592 /* Prefetch the page containing the BD descriptor
1593 at producer's index. It will be needed when new skb is
1594 allocated */
1595 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596 (&fp->rx_desc_ring[bd_prod])) -
1597 PAGE_SIZE + 1));
1598
1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1602
1603 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1604 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1605 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1606 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1607 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1608 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1609
1610 /* is this a slowpath msg? */
1611 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1612 bnx2x_sp_event(fp, cqe);
1613 goto next_cqe;
1614
1615 /* this is an rx packet */
1616 } else {
1617 rx_buf = &fp->rx_buf_ring[bd_cons];
1618 skb = rx_buf->skb;
1619 prefetch(skb);
1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621 pad = cqe->fast_path_cqe.placement_offset;
1622
1623 /* If CQE is marked both TPA_START and TPA_END
1624 it is a non-TPA CQE */
1625 if ((!fp->disable_tpa) &&
1626 (TPA_TYPE(cqe_fp_flags) !=
1627 (TPA_TYPE_START | TPA_TYPE_END))) {
1628 u16 queue = cqe->fast_path_cqe.queue_index;
1629
1630 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631 DP(NETIF_MSG_RX_STATUS,
1632 "calling tpa_start on queue %d\n",
1633 queue);
1634
1635 bnx2x_tpa_start(fp, queue, skb,
1636 bd_cons, bd_prod);
1637 goto next_rx;
1638 }
1639
1640 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641 DP(NETIF_MSG_RX_STATUS,
1642 "calling tpa_stop on queue %d\n",
1643 queue);
1644
1645 if (!BNX2X_RX_SUM_FIX(cqe))
1646 BNX2X_ERR("STOP on none TCP "
1647 "data\n");
1648
1649 /* This is a size of the linear data
1650 on this skb */
1651 len = le16_to_cpu(cqe->fast_path_cqe.
1652 len_on_bd);
1653 bnx2x_tpa_stop(bp, fp, queue, pad,
1654 len, cqe, comp_ring_cons);
1655#ifdef BNX2X_STOP_ON_ERROR
1656 if (bp->panic)
1657 return 0;
1658#endif
1659
1660 bnx2x_update_sge_prod(fp,
1661 &cqe->fast_path_cqe);
1662 goto next_cqe;
1663 }
1664 }
1665
1666 dma_sync_single_for_device(&bp->pdev->dev,
1667 dma_unmap_addr(rx_buf, mapping),
1668 pad + RX_COPY_THRESH,
1669 DMA_FROM_DEVICE);
1670 prefetch(((char *)(skb)) + 128);
1671
1672 /* is this an error packet? */
1673 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1674 DP(NETIF_MSG_RX_ERR,
1675 "ERROR flags %x rx packet %u\n",
1676 cqe_fp_flags, sw_comp_cons);
1677 fp->eth_q_stats.rx_err_discard_pkt++;
1678 goto reuse_rx;
1679 }
1680
1681 /* Since we don't have a jumbo ring
1682 * copy small packets if mtu > 1500
1683 */
1684 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1685 (len <= RX_COPY_THRESH)) {
1686 struct sk_buff *new_skb;
1687
1688 new_skb = netdev_alloc_skb(bp->dev,
1689 len + pad);
1690 if (new_skb == NULL) {
1691 DP(NETIF_MSG_RX_ERR,
1692 "ERROR packet dropped "
1693 "because of alloc failure\n");
1694 fp->eth_q_stats.rx_skb_alloc_failed++;
1695 goto reuse_rx;
1696 }
1697
1698 /* aligned copy */
1699 skb_copy_from_linear_data_offset(skb, pad,
1700 new_skb->data + pad, len);
1701 skb_reserve(new_skb, pad);
1702 skb_put(new_skb, len);
1703
1704 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1705
1706 skb = new_skb;
1707
1708 } else
1709 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1710 dma_unmap_single(&bp->pdev->dev,
1711 dma_unmap_addr(rx_buf, mapping),
1712 bp->rx_buf_size,
1713 DMA_FROM_DEVICE);
1714 skb_reserve(skb, pad);
1715 skb_put(skb, len);
1716
1717 } else {
1718 DP(NETIF_MSG_RX_ERR,
1719 "ERROR packet dropped because "
1720 "of alloc failure\n");
1721 fp->eth_q_stats.rx_skb_alloc_failed++;
1722reuse_rx:
1723 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1724 goto next_rx;
1725 }
1726
1727 skb->protocol = eth_type_trans(skb, bp->dev);
1728
1729 if ((bp->dev->features & NETIF_F_RXHASH) &&
1730 (cqe_fp_status_flags &
1731 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732 skb->rxhash = le32_to_cpu(
1733 cqe->fast_path_cqe.rss_hash_result);
1734
1735 skb->ip_summed = CHECKSUM_NONE;
1736 if (bp->rx_csum) {
1737 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1738 skb->ip_summed = CHECKSUM_UNNECESSARY;
1739 else
1740 fp->eth_q_stats.hw_csum_err++;
1741 }
1742 }
1743
1744 skb_record_rx_queue(skb, fp->index);
1745
1746#ifdef BCM_VLAN
1747 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1748 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1749 PARSING_FLAGS_VLAN))
1750 vlan_gro_receive(&fp->napi, bp->vlgrp,
1751 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1752 else
1753#endif
1754 napi_gro_receive(&fp->napi, skb);
1755
1756
1757next_rx:
1758 rx_buf->skb = NULL;
1759
1760 bd_cons = NEXT_RX_IDX(bd_cons);
1761 bd_prod = NEXT_RX_IDX(bd_prod);
1762 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1763 rx_pkt++;
1764next_cqe:
1765 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1766 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1767
1768 if (rx_pkt == budget)
1769 break;
1770 } /* while */
1771
1772 fp->rx_bd_cons = bd_cons;
1773 fp->rx_bd_prod = bd_prod_fw;
1774 fp->rx_comp_cons = sw_comp_cons;
1775 fp->rx_comp_prod = sw_comp_prod;
1776
1777 /* Update producers */
1778 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1779 fp->rx_sge_prod);
1780
1781 fp->rx_pkt += rx_pkt;
1782 fp->rx_calls++;
1783
1784 return rx_pkt;
1785}
1786
1787static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1788{
1789 struct bnx2x_fastpath *fp = fp_cookie;
1790 struct bnx2x *bp = fp->bp;
1791
1792 /* Return here if interrupt is disabled */
1793 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1794 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1795 return IRQ_HANDLED;
1796 }
1797
1798 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1799 fp->index, fp->sb_id);
1800 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1801
1802#ifdef BNX2X_STOP_ON_ERROR
1803 if (unlikely(bp->panic))
1804 return IRQ_HANDLED;
1805#endif
1806
1807 /* Handle Rx and Tx according to MSI-X vector */
1808 prefetch(fp->rx_cons_sb);
1809 prefetch(fp->tx_cons_sb);
1810 prefetch(&fp->status_blk->u_status_block.status_block_index);
1811 prefetch(&fp->status_blk->c_status_block.status_block_index);
1812 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1813
1814 return IRQ_HANDLED;
1815}
1816
1817static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1818{ 893{
1819 struct bnx2x *bp = netdev_priv(dev_instance); 894 struct bnx2x *bp = netdev_priv(dev_instance);
1820 u16 status = bnx2x_ack_int(bp); 895 u16 status = bnx2x_ack_int(bp);
@@ -1888,7 +963,6 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1888 963
1889/* end of fast path */ 964/* end of fast path */
1890 965
1891static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1892 966
1893/* Link */ 967/* Link */
1894 968
@@ -1896,7 +970,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1896 * General service functions 970 * General service functions
1897 */ 971 */
1898 972
1899static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) 973int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1900{ 974{
1901 u32 lock_status; 975 u32 lock_status;
1902 u32 resource_bit = (1 << resource); 976 u32 resource_bit = (1 << resource);
@@ -1941,7 +1015,7 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1941 return -EAGAIN; 1015 return -EAGAIN;
1942} 1016}
1943 1017
1944static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) 1018int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1945{ 1019{
1946 u32 lock_status; 1020 u32 lock_status;
1947 u32 resource_bit = (1 << resource); 1021 u32 resource_bit = (1 << resource);
@@ -1977,22 +1051,6 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1977 return 0; 1051 return 0;
1978} 1052}
1979 1053
1980/* HW Lock for shared dual port PHYs */
1981static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1982{
1983 mutex_lock(&bp->port.phy_mutex);
1984
1985 if (bp->port.need_hw_lock)
1986 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1987}
1988
1989static void bnx2x_release_phy_lock(struct bnx2x *bp)
1990{
1991 if (bp->port.need_hw_lock)
1992 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1993
1994 mutex_unlock(&bp->port.phy_mutex);
1995}
1996 1054
1997int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 1055int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1998{ 1056{
@@ -2169,7 +1227,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2169 return 0; 1227 return 0;
2170} 1228}
2171 1229
2172static void bnx2x_calc_fc_adv(struct bnx2x *bp) 1230void bnx2x_calc_fc_adv(struct bnx2x *bp)
2173{ 1231{
2174 switch (bp->link_vars.ieee_fc & 1232 switch (bp->link_vars.ieee_fc &
2175 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
@@ -2194,58 +1252,8 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2194 } 1252 }
2195} 1253}
2196 1254
2197static void bnx2x_link_report(struct bnx2x *bp)
2198{
2199 if (bp->flags & MF_FUNC_DIS) {
2200 netif_carrier_off(bp->dev);
2201 netdev_err(bp->dev, "NIC Link is Down\n");
2202 return;
2203 }
2204 1255
2205 if (bp->link_vars.link_up) { 1256u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2206 u16 line_speed;
2207
2208 if (bp->state == BNX2X_STATE_OPEN)
2209 netif_carrier_on(bp->dev);
2210 netdev_info(bp->dev, "NIC Link is Up, ");
2211
2212 line_speed = bp->link_vars.line_speed;
2213 if (IS_E1HMF(bp)) {
2214 u16 vn_max_rate;
2215
2216 vn_max_rate =
2217 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2218 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219 if (vn_max_rate < line_speed)
2220 line_speed = vn_max_rate;
2221 }
2222 pr_cont("%d Mbps ", line_speed);
2223
2224 if (bp->link_vars.duplex == DUPLEX_FULL)
2225 pr_cont("full duplex");
2226 else
2227 pr_cont("half duplex");
2228
2229 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2230 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2231 pr_cont(", receive ");
2232 if (bp->link_vars.flow_ctrl &
2233 BNX2X_FLOW_CTRL_TX)
2234 pr_cont("& transmit ");
2235 } else {
2236 pr_cont(", transmit ");
2237 }
2238 pr_cont("flow control ON");
2239 }
2240 pr_cont("\n");
2241
2242 } else { /* link_down */
2243 netif_carrier_off(bp->dev);
2244 netdev_err(bp->dev, "NIC Link is Down\n");
2245 }
2246}
2247
2248static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2249{ 1257{
2250 if (!BP_NOMCP(bp)) { 1258 if (!BP_NOMCP(bp)) {
2251 u8 rc; 1259 u8 rc;
@@ -2280,7 +1288,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2280 return -EINVAL; 1288 return -EINVAL;
2281} 1289}
2282 1290
2283static void bnx2x_link_set(struct bnx2x *bp) 1291void bnx2x_link_set(struct bnx2x *bp)
2284{ 1292{
2285 if (!BP_NOMCP(bp)) { 1293 if (!BP_NOMCP(bp)) {
2286 bnx2x_acquire_phy_lock(bp); 1294 bnx2x_acquire_phy_lock(bp);
@@ -2302,7 +1310,7 @@ static void bnx2x__link_reset(struct bnx2x *bp)
2302 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 1310 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2303} 1311}
2304 1312
2305static u8 bnx2x_link_test(struct bnx2x *bp) 1313u8 bnx2x_link_test(struct bnx2x *bp)
2306{ 1314{
2307 u8 rc = 0; 1315 u8 rc = 0;
2308 1316
@@ -2534,7 +1542,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2534 } 1542 }
2535} 1543}
2536 1544
2537static void bnx2x__link_status_update(struct bnx2x *bp) 1545void bnx2x__link_status_update(struct bnx2x *bp)
2538{ 1546{
2539 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) 1547 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2540 return; 1548 return;
@@ -2615,9 +1623,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2615 return rc; 1623 return rc;
2616} 1624}
2617 1625
2618static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2619static void bnx2x_set_rx_mode(struct net_device *dev);
2620
2621static void bnx2x_e1h_disable(struct bnx2x *bp) 1626static void bnx2x_e1h_disable(struct bnx2x *bp)
2622{ 1627{
2623 int port = BP_PORT(bp); 1628 int port = BP_PORT(bp);
@@ -2745,7 +1750,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2745} 1750}
2746 1751
2747/* the slow path queue is odd since completions arrive on the fastpath ring */ 1752/* the slow path queue is odd since completions arrive on the fastpath ring */
2748static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 1753int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2749 u32 data_hi, u32 data_lo, int common) 1754 u32 data_hi, u32 data_lo, int common)
2750{ 1755{
2751 struct eth_spe *spe; 1756 struct eth_spe *spe;
@@ -3157,10 +2162,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3157 } 2162 }
3158} 2163}
3159 2164
3160static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3162
3163
3164#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 2165#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3165#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ 2166#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3166#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 2167#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
@@ -3194,7 +2195,7 @@ static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3194/* 2195/*
3195 * should be run under rtnl lock 2196 * should be run under rtnl lock
3196 */ 2197 */
3197static inline bool bnx2x_reset_is_done(struct bnx2x *bp) 2198bool bnx2x_reset_is_done(struct bnx2x *bp)
3198{ 2199{
3199 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2200 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 2201 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
@@ -3204,7 +2205,7 @@ static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3204/* 2205/*
3205 * should be run under rtnl lock 2206 * should be run under rtnl lock
3206 */ 2207 */
3207static inline void bnx2x_inc_load_cnt(struct bnx2x *bp) 2208inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208{ 2209{
3209 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2210 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210 2211
@@ -3219,7 +2220,7 @@ static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3219/* 2220/*
3220 * should be run under rtnl lock 2221 * should be run under rtnl lock
3221 */ 2222 */
3222static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp) 2223u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223{ 2224{
3224 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2225 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225 2226
@@ -3437,7 +2438,7 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3437 return false; 2438 return false;
3438} 2439}
3439 2440
3440static bool bnx2x_chk_parity_attn(struct bnx2x *bp) 2441bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3441{ 2442{
3442 struct attn_route attn; 2443 struct attn_route attn;
3443 int port = BP_PORT(bp); 2444 int port = BP_PORT(bp);
@@ -3615,7 +2616,7 @@ static void bnx2x_sp_task(struct work_struct *work)
3615 IGU_INT_ENABLE, 1); 2616 IGU_INT_ENABLE, 1);
3616} 2617}
3617 2618
3618static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 2619irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3619{ 2620{
3620 struct net_device *dev = dev_instance; 2621 struct net_device *dev = dev_instance;
3621 struct bnx2x *bp = netdev_priv(dev); 2622 struct bnx2x *bp = netdev_priv(dev);
@@ -3651,1398 +2652,6 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3651 2652
3652/* end of slow path */ 2653/* end of slow path */
3653 2654
3654/* Statistics */
3655
3656/****************************************************************************
3657* Macros
3658****************************************************************************/
3659
3660/* sum[hi:lo] += add[hi:lo] */
3661#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3662 do { \
3663 s_lo += a_lo; \
3664 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3665 } while (0)
3666
3667/* difference = minuend - subtrahend */
3668#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3669 do { \
3670 if (m_lo < s_lo) { \
3671 /* underflow */ \
3672 d_hi = m_hi - s_hi; \
3673 if (d_hi > 0) { \
3674 /* we can 'loan' 1 */ \
3675 d_hi--; \
3676 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3677 } else { \
3678 /* m_hi <= s_hi */ \
3679 d_hi = 0; \
3680 d_lo = 0; \
3681 } \
3682 } else { \
3683 /* m_lo >= s_lo */ \
3684 if (m_hi < s_hi) { \
3685 d_hi = 0; \
3686 d_lo = 0; \
3687 } else { \
3688 /* m_hi >= s_hi */ \
3689 d_hi = m_hi - s_hi; \
3690 d_lo = m_lo - s_lo; \
3691 } \
3692 } \
3693 } while (0)
3694
3695#define UPDATE_STAT64(s, t) \
3696 do { \
3697 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3698 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3699 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3700 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3701 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3702 pstats->mac_stx[1].t##_lo, diff.lo); \
3703 } while (0)
3704
3705#define UPDATE_STAT64_NIG(s, t) \
3706 do { \
3707 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3708 diff.lo, new->s##_lo, old->s##_lo); \
3709 ADD_64(estats->t##_hi, diff.hi, \
3710 estats->t##_lo, diff.lo); \
3711 } while (0)
3712
3713/* sum[hi:lo] += add */
3714#define ADD_EXTEND_64(s_hi, s_lo, a) \
3715 do { \
3716 s_lo += a; \
3717 s_hi += (s_lo < a) ? 1 : 0; \
3718 } while (0)
3719
3720#define UPDATE_EXTEND_STAT(s) \
3721 do { \
3722 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3723 pstats->mac_stx[1].s##_lo, \
3724 new->s); \
3725 } while (0)
3726
3727#define UPDATE_EXTEND_TSTAT(s, t) \
3728 do { \
3729 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3730 old_tclient->s = tclient->s; \
3731 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732 } while (0)
3733
3734#define UPDATE_EXTEND_USTAT(s, t) \
3735 do { \
3736 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3737 old_uclient->s = uclient->s; \
3738 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739 } while (0)
3740
3741#define UPDATE_EXTEND_XSTAT(s, t) \
3742 do { \
3743 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3744 old_xclient->s = xclient->s; \
3745 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746 } while (0)
3747
3748/* minuend -= subtrahend */
3749#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3750 do { \
3751 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3752 } while (0)
3753
3754/* minuend[hi:lo] -= subtrahend */
3755#define SUB_EXTEND_64(m_hi, m_lo, s) \
3756 do { \
3757 SUB_64(m_hi, 0, m_lo, s); \
3758 } while (0)
3759
3760#define SUB_EXTEND_USTAT(s, t) \
3761 do { \
3762 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3763 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3764 } while (0)
3765
3766/*
3767 * General service functions
3768 */
3769
3770static inline long bnx2x_hilo(u32 *hiref)
3771{
3772 u32 lo = *(hiref + 1);
3773#if (BITS_PER_LONG == 64)
3774 u32 hi = *hiref;
3775
3776 return HILO_U64(hi, lo);
3777#else
3778 return lo;
3779#endif
3780}
3781
3782/*
3783 * Init service functions
3784 */
3785
3786static void bnx2x_storm_stats_post(struct bnx2x *bp)
3787{
3788 if (!bp->stats_pending) {
3789 struct eth_query_ramrod_data ramrod_data = {0};
3790 int i, rc;
3791
3792 spin_lock_bh(&bp->stats_lock);
3793
3794 ramrod_data.drv_counter = bp->stats_counter++;
3795 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3796 for_each_queue(bp, i)
3797 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3798
3799 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3800 ((u32 *)&ramrod_data)[1],
3801 ((u32 *)&ramrod_data)[0], 0);
3802 if (rc == 0) {
3803 /* stats ramrod has it's own slot on the spq */
3804 bp->spq_left++;
3805 bp->stats_pending = 1;
3806 }
3807
3808 spin_unlock_bh(&bp->stats_lock);
3809 }
3810}
3811
3812static void bnx2x_hw_stats_post(struct bnx2x *bp)
3813{
3814 struct dmae_command *dmae = &bp->stats_dmae;
3815 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3816
3817 *stats_comp = DMAE_COMP_VAL;
3818 if (CHIP_REV_IS_SLOW(bp))
3819 return;
3820
3821 /* loader */
3822 if (bp->executer_idx) {
3823 int loader_idx = PMF_DMAE_C(bp);
3824
3825 memset(dmae, 0, sizeof(struct dmae_command));
3826
3827 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3828 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3829 DMAE_CMD_DST_RESET |
3830#ifdef __BIG_ENDIAN
3831 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3832#else
3833 DMAE_CMD_ENDIANITY_DW_SWAP |
3834#endif
3835 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3836 DMAE_CMD_PORT_0) |
3837 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3838 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3839 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3840 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3841 sizeof(struct dmae_command) *
3842 (loader_idx + 1)) >> 2;
3843 dmae->dst_addr_hi = 0;
3844 dmae->len = sizeof(struct dmae_command) >> 2;
3845 if (CHIP_IS_E1(bp))
3846 dmae->len--;
3847 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3848 dmae->comp_addr_hi = 0;
3849 dmae->comp_val = 1;
3850
3851 *stats_comp = 0;
3852 bnx2x_post_dmae(bp, dmae, loader_idx);
3853
3854 } else if (bp->func_stx) {
3855 *stats_comp = 0;
3856 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3857 }
3858}
3859
3860static int bnx2x_stats_comp(struct bnx2x *bp)
3861{
3862 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3863 int cnt = 10;
3864
3865 might_sleep();
3866 while (*stats_comp != DMAE_COMP_VAL) {
3867 if (!cnt) {
3868 BNX2X_ERR("timeout waiting for stats finished\n");
3869 break;
3870 }
3871 cnt--;
3872 msleep(1);
3873 }
3874 return 1;
3875}
3876
3877/*
3878 * Statistics service functions
3879 */
3880
3881static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3882{
3883 struct dmae_command *dmae;
3884 u32 opcode;
3885 int loader_idx = PMF_DMAE_C(bp);
3886 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3887
3888 /* sanity */
3889 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3890 BNX2X_ERR("BUG!\n");
3891 return;
3892 }
3893
3894 bp->executer_idx = 0;
3895
3896 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3897 DMAE_CMD_C_ENABLE |
3898 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3899#ifdef __BIG_ENDIAN
3900 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3901#else
3902 DMAE_CMD_ENDIANITY_DW_SWAP |
3903#endif
3904 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3905 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3906
3907 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3908 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3909 dmae->src_addr_lo = bp->port.port_stx >> 2;
3910 dmae->src_addr_hi = 0;
3911 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3912 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3913 dmae->len = DMAE_LEN32_RD_MAX;
3914 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3915 dmae->comp_addr_hi = 0;
3916 dmae->comp_val = 1;
3917
3918 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3919 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3920 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3921 dmae->src_addr_hi = 0;
3922 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3923 DMAE_LEN32_RD_MAX * 4);
3924 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3925 DMAE_LEN32_RD_MAX * 4);
3926 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3927 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3928 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3929 dmae->comp_val = DMAE_COMP_VAL;
3930
3931 *stats_comp = 0;
3932 bnx2x_hw_stats_post(bp);
3933 bnx2x_stats_comp(bp);
3934}
3935
3936static void bnx2x_port_stats_init(struct bnx2x *bp)
3937{
3938 struct dmae_command *dmae;
3939 int port = BP_PORT(bp);
3940 int vn = BP_E1HVN(bp);
3941 u32 opcode;
3942 int loader_idx = PMF_DMAE_C(bp);
3943 u32 mac_addr;
3944 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3945
3946 /* sanity */
3947 if (!bp->link_vars.link_up || !bp->port.pmf) {
3948 BNX2X_ERR("BUG!\n");
3949 return;
3950 }
3951
3952 bp->executer_idx = 0;
3953
3954 /* MCP */
3955 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3956 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3957 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3958#ifdef __BIG_ENDIAN
3959 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3960#else
3961 DMAE_CMD_ENDIANITY_DW_SWAP |
3962#endif
3963 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3964 (vn << DMAE_CMD_E1HVN_SHIFT));
3965
3966 if (bp->port.port_stx) {
3967
3968 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3969 dmae->opcode = opcode;
3970 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3971 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3972 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3973 dmae->dst_addr_hi = 0;
3974 dmae->len = sizeof(struct host_port_stats) >> 2;
3975 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3976 dmae->comp_addr_hi = 0;
3977 dmae->comp_val = 1;
3978 }
3979
3980 if (bp->func_stx) {
3981
3982 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3983 dmae->opcode = opcode;
3984 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3985 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3986 dmae->dst_addr_lo = bp->func_stx >> 2;
3987 dmae->dst_addr_hi = 0;
3988 dmae->len = sizeof(struct host_func_stats) >> 2;
3989 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3990 dmae->comp_addr_hi = 0;
3991 dmae->comp_val = 1;
3992 }
3993
3994 /* MAC */
3995 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3996 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3997 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3998#ifdef __BIG_ENDIAN
3999 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4000#else
4001 DMAE_CMD_ENDIANITY_DW_SWAP |
4002#endif
4003 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4004 (vn << DMAE_CMD_E1HVN_SHIFT));
4005
4006 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4007
4008 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4009 NIG_REG_INGRESS_BMAC0_MEM);
4010
4011 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4012 BIGMAC_REGISTER_TX_STAT_GTBYT */
4013 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4014 dmae->opcode = opcode;
4015 dmae->src_addr_lo = (mac_addr +
4016 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4017 dmae->src_addr_hi = 0;
4018 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4019 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4020 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4021 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4022 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4023 dmae->comp_addr_hi = 0;
4024 dmae->comp_val = 1;
4025
4026 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4027 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4028 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4029 dmae->opcode = opcode;
4030 dmae->src_addr_lo = (mac_addr +
4031 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4032 dmae->src_addr_hi = 0;
4033 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4034 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4035 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4036 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4037 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4038 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4039 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4040 dmae->comp_addr_hi = 0;
4041 dmae->comp_val = 1;
4042
4043 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4044
4045 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4046
4047 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4048 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4049 dmae->opcode = opcode;
4050 dmae->src_addr_lo = (mac_addr +
4051 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4052 dmae->src_addr_hi = 0;
4053 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4054 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4055 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4056 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4057 dmae->comp_addr_hi = 0;
4058 dmae->comp_val = 1;
4059
4060 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4061 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4062 dmae->opcode = opcode;
4063 dmae->src_addr_lo = (mac_addr +
4064 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4065 dmae->src_addr_hi = 0;
4066 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4067 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4068 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4069 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4070 dmae->len = 1;
4071 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4072 dmae->comp_addr_hi = 0;
4073 dmae->comp_val = 1;
4074
4075 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4076 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4077 dmae->opcode = opcode;
4078 dmae->src_addr_lo = (mac_addr +
4079 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4080 dmae->src_addr_hi = 0;
4081 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4082 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4083 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4084 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4085 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4086 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4087 dmae->comp_addr_hi = 0;
4088 dmae->comp_val = 1;
4089 }
4090
4091 /* NIG */
4092 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4093 dmae->opcode = opcode;
4094 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4095 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4096 dmae->src_addr_hi = 0;
4097 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4098 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4099 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4100 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4101 dmae->comp_addr_hi = 0;
4102 dmae->comp_val = 1;
4103
4104 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4105 dmae->opcode = opcode;
4106 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4107 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4108 dmae->src_addr_hi = 0;
4109 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4110 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4111 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4112 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4113 dmae->len = (2*sizeof(u32)) >> 2;
4114 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4115 dmae->comp_addr_hi = 0;
4116 dmae->comp_val = 1;
4117
4118 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4119 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4120 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4121 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4122#ifdef __BIG_ENDIAN
4123 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4124#else
4125 DMAE_CMD_ENDIANITY_DW_SWAP |
4126#endif
4127 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4128 (vn << DMAE_CMD_E1HVN_SHIFT));
4129 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4130 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4131 dmae->src_addr_hi = 0;
4132 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4133 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4134 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4135 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4136 dmae->len = (2*sizeof(u32)) >> 2;
4137 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4138 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4139 dmae->comp_val = DMAE_COMP_VAL;
4140
4141 *stats_comp = 0;
4142}
4143
4144static void bnx2x_func_stats_init(struct bnx2x *bp)
4145{
4146 struct dmae_command *dmae = &bp->stats_dmae;
4147 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4148
4149 /* sanity */
4150 if (!bp->func_stx) {
4151 BNX2X_ERR("BUG!\n");
4152 return;
4153 }
4154
4155 bp->executer_idx = 0;
4156 memset(dmae, 0, sizeof(struct dmae_command));
4157
4158 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4159 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4160 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4161#ifdef __BIG_ENDIAN
4162 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4163#else
4164 DMAE_CMD_ENDIANITY_DW_SWAP |
4165#endif
4166 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4167 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4168 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4169 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4170 dmae->dst_addr_lo = bp->func_stx >> 2;
4171 dmae->dst_addr_hi = 0;
4172 dmae->len = sizeof(struct host_func_stats) >> 2;
4173 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4174 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4175 dmae->comp_val = DMAE_COMP_VAL;
4176
4177 *stats_comp = 0;
4178}
4179
4180static void bnx2x_stats_start(struct bnx2x *bp)
4181{
4182 if (bp->port.pmf)
4183 bnx2x_port_stats_init(bp);
4184
4185 else if (bp->func_stx)
4186 bnx2x_func_stats_init(bp);
4187
4188 bnx2x_hw_stats_post(bp);
4189 bnx2x_storm_stats_post(bp);
4190}
4191
4192static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4193{
4194 bnx2x_stats_comp(bp);
4195 bnx2x_stats_pmf_update(bp);
4196 bnx2x_stats_start(bp);
4197}
4198
4199static void bnx2x_stats_restart(struct bnx2x *bp)
4200{
4201 bnx2x_stats_comp(bp);
4202 bnx2x_stats_start(bp);
4203}
4204
4205static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4206{
4207 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4208 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4209 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4210 struct {
4211 u32 lo;
4212 u32 hi;
4213 } diff;
4214
4215 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4216 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4217 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4218 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4219 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4220 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4221 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4222 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4223 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4224 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4225 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4226 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4227 UPDATE_STAT64(tx_stat_gt127,
4228 tx_stat_etherstatspkts65octetsto127octets);
4229 UPDATE_STAT64(tx_stat_gt255,
4230 tx_stat_etherstatspkts128octetsto255octets);
4231 UPDATE_STAT64(tx_stat_gt511,
4232 tx_stat_etherstatspkts256octetsto511octets);
4233 UPDATE_STAT64(tx_stat_gt1023,
4234 tx_stat_etherstatspkts512octetsto1023octets);
4235 UPDATE_STAT64(tx_stat_gt1518,
4236 tx_stat_etherstatspkts1024octetsto1522octets);
4237 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4238 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4239 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4240 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4241 UPDATE_STAT64(tx_stat_gterr,
4242 tx_stat_dot3statsinternalmactransmiterrors);
4243 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4244
4245 estats->pause_frames_received_hi =
4246 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4247 estats->pause_frames_received_lo =
4248 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4249
4250 estats->pause_frames_sent_hi =
4251 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4252 estats->pause_frames_sent_lo =
4253 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4254}
4255
4256static void bnx2x_emac_stats_update(struct bnx2x *bp)
4257{
4258 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4259 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4260 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4261
4262 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4263 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4264 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4265 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4266 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4267 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4268 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4269 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4270 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4271 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4272 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4273 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4274 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4275 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4276 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4277 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4278 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4279 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4280 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4281 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4282 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4283 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4284 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4285 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4286 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4287 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4288 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4289 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4290 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4291 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4292 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4293
4294 estats->pause_frames_received_hi =
4295 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4296 estats->pause_frames_received_lo =
4297 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4298 ADD_64(estats->pause_frames_received_hi,
4299 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4300 estats->pause_frames_received_lo,
4301 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4302
4303 estats->pause_frames_sent_hi =
4304 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4305 estats->pause_frames_sent_lo =
4306 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4307 ADD_64(estats->pause_frames_sent_hi,
4308 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4309 estats->pause_frames_sent_lo,
4310 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4311}
4312
4313static int bnx2x_hw_stats_update(struct bnx2x *bp)
4314{
4315 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4316 struct nig_stats *old = &(bp->port.old_nig_stats);
4317 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4318 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4319 struct {
4320 u32 lo;
4321 u32 hi;
4322 } diff;
4323
4324 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4325 bnx2x_bmac_stats_update(bp);
4326
4327 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4328 bnx2x_emac_stats_update(bp);
4329
4330 else { /* unreached */
4331 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4332 return -1;
4333 }
4334
4335 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4336 new->brb_discard - old->brb_discard);
4337 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4338 new->brb_truncate - old->brb_truncate);
4339
4340 UPDATE_STAT64_NIG(egress_mac_pkt0,
4341 etherstatspkts1024octetsto1522octets);
4342 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4343
4344 memcpy(old, new, sizeof(struct nig_stats));
4345
4346 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4347 sizeof(struct mac_stx));
4348 estats->brb_drop_hi = pstats->brb_drop_hi;
4349 estats->brb_drop_lo = pstats->brb_drop_lo;
4350
4351 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4352
4353 if (!BP_NOMCP(bp)) {
4354 u32 nig_timer_max =
4355 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4356 if (nig_timer_max != estats->nig_timer_max) {
4357 estats->nig_timer_max = nig_timer_max;
4358 BNX2X_ERR("NIG timer max (%u)\n",
4359 estats->nig_timer_max);
4360 }
4361 }
4362
4363 return 0;
4364}
4365
4366static int bnx2x_storm_stats_update(struct bnx2x *bp)
4367{
4368 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4369 struct tstorm_per_port_stats *tport =
4370 &stats->tstorm_common.port_statistics;
4371 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4372 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4373 int i;
4374 u16 cur_stats_counter;
4375
4376 /* Make sure we use the value of the counter
4377 * used for sending the last stats ramrod.
4378 */
4379 spin_lock_bh(&bp->stats_lock);
4380 cur_stats_counter = bp->stats_counter - 1;
4381 spin_unlock_bh(&bp->stats_lock);
4382
4383 memcpy(&(fstats->total_bytes_received_hi),
4384 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4385 sizeof(struct host_func_stats) - 2*sizeof(u32));
4386 estats->error_bytes_received_hi = 0;
4387 estats->error_bytes_received_lo = 0;
4388 estats->etherstatsoverrsizepkts_hi = 0;
4389 estats->etherstatsoverrsizepkts_lo = 0;
4390 estats->no_buff_discard_hi = 0;
4391 estats->no_buff_discard_lo = 0;
4392
4393 for_each_queue(bp, i) {
4394 struct bnx2x_fastpath *fp = &bp->fp[i];
4395 int cl_id = fp->cl_id;
4396 struct tstorm_per_client_stats *tclient =
4397 &stats->tstorm_common.client_statistics[cl_id];
4398 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4399 struct ustorm_per_client_stats *uclient =
4400 &stats->ustorm_common.client_statistics[cl_id];
4401 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4402 struct xstorm_per_client_stats *xclient =
4403 &stats->xstorm_common.client_statistics[cl_id];
4404 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4405 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4406 u32 diff;
4407
4408 /* are storm stats valid? */
4409 if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
4410 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4411 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4412 i, xclient->stats_counter, cur_stats_counter + 1);
4413 return -1;
4414 }
4415 if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
4416 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4417 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4418 i, tclient->stats_counter, cur_stats_counter + 1);
4419 return -2;
4420 }
4421 if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
4422 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4423 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4424 i, uclient->stats_counter, cur_stats_counter + 1);
4425 return -4;
4426 }
4427
4428 qstats->total_bytes_received_hi =
4429 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4430 qstats->total_bytes_received_lo =
4431 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4432
4433 ADD_64(qstats->total_bytes_received_hi,
4434 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4435 qstats->total_bytes_received_lo,
4436 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4437
4438 ADD_64(qstats->total_bytes_received_hi,
4439 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4440 qstats->total_bytes_received_lo,
4441 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4442
4443 SUB_64(qstats->total_bytes_received_hi,
4444 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4445 qstats->total_bytes_received_lo,
4446 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4447
4448 SUB_64(qstats->total_bytes_received_hi,
4449 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4450 qstats->total_bytes_received_lo,
4451 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4452
4453 SUB_64(qstats->total_bytes_received_hi,
4454 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4455 qstats->total_bytes_received_lo,
4456 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4457
4458 qstats->valid_bytes_received_hi =
4459 qstats->total_bytes_received_hi;
4460 qstats->valid_bytes_received_lo =
4461 qstats->total_bytes_received_lo;
4462
4463 qstats->error_bytes_received_hi =
4464 le32_to_cpu(tclient->rcv_error_bytes.hi);
4465 qstats->error_bytes_received_lo =
4466 le32_to_cpu(tclient->rcv_error_bytes.lo);
4467
4468 ADD_64(qstats->total_bytes_received_hi,
4469 qstats->error_bytes_received_hi,
4470 qstats->total_bytes_received_lo,
4471 qstats->error_bytes_received_lo);
4472
4473 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4474 total_unicast_packets_received);
4475 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4476 total_multicast_packets_received);
4477 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4478 total_broadcast_packets_received);
4479 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4480 etherstatsoverrsizepkts);
4481 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4482
4483 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4484 total_unicast_packets_received);
4485 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4486 total_multicast_packets_received);
4487 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4488 total_broadcast_packets_received);
4489 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4490 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4491 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4492
4493 qstats->total_bytes_transmitted_hi =
4494 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4495 qstats->total_bytes_transmitted_lo =
4496 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4497
4498 ADD_64(qstats->total_bytes_transmitted_hi,
4499 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4500 qstats->total_bytes_transmitted_lo,
4501 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4502
4503 ADD_64(qstats->total_bytes_transmitted_hi,
4504 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4505 qstats->total_bytes_transmitted_lo,
4506 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4507
4508 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4509 total_unicast_packets_transmitted);
4510 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4511 total_multicast_packets_transmitted);
4512 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4513 total_broadcast_packets_transmitted);
4514
4515 old_tclient->checksum_discard = tclient->checksum_discard;
4516 old_tclient->ttl0_discard = tclient->ttl0_discard;
4517
4518 ADD_64(fstats->total_bytes_received_hi,
4519 qstats->total_bytes_received_hi,
4520 fstats->total_bytes_received_lo,
4521 qstats->total_bytes_received_lo);
4522 ADD_64(fstats->total_bytes_transmitted_hi,
4523 qstats->total_bytes_transmitted_hi,
4524 fstats->total_bytes_transmitted_lo,
4525 qstats->total_bytes_transmitted_lo);
4526 ADD_64(fstats->total_unicast_packets_received_hi,
4527 qstats->total_unicast_packets_received_hi,
4528 fstats->total_unicast_packets_received_lo,
4529 qstats->total_unicast_packets_received_lo);
4530 ADD_64(fstats->total_multicast_packets_received_hi,
4531 qstats->total_multicast_packets_received_hi,
4532 fstats->total_multicast_packets_received_lo,
4533 qstats->total_multicast_packets_received_lo);
4534 ADD_64(fstats->total_broadcast_packets_received_hi,
4535 qstats->total_broadcast_packets_received_hi,
4536 fstats->total_broadcast_packets_received_lo,
4537 qstats->total_broadcast_packets_received_lo);
4538 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4539 qstats->total_unicast_packets_transmitted_hi,
4540 fstats->total_unicast_packets_transmitted_lo,
4541 qstats->total_unicast_packets_transmitted_lo);
4542 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4543 qstats->total_multicast_packets_transmitted_hi,
4544 fstats->total_multicast_packets_transmitted_lo,
4545 qstats->total_multicast_packets_transmitted_lo);
4546 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4547 qstats->total_broadcast_packets_transmitted_hi,
4548 fstats->total_broadcast_packets_transmitted_lo,
4549 qstats->total_broadcast_packets_transmitted_lo);
4550 ADD_64(fstats->valid_bytes_received_hi,
4551 qstats->valid_bytes_received_hi,
4552 fstats->valid_bytes_received_lo,
4553 qstats->valid_bytes_received_lo);
4554
4555 ADD_64(estats->error_bytes_received_hi,
4556 qstats->error_bytes_received_hi,
4557 estats->error_bytes_received_lo,
4558 qstats->error_bytes_received_lo);
4559 ADD_64(estats->etherstatsoverrsizepkts_hi,
4560 qstats->etherstatsoverrsizepkts_hi,
4561 estats->etherstatsoverrsizepkts_lo,
4562 qstats->etherstatsoverrsizepkts_lo);
4563 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4564 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4565 }
4566
4567 ADD_64(fstats->total_bytes_received_hi,
4568 estats->rx_stat_ifhcinbadoctets_hi,
4569 fstats->total_bytes_received_lo,
4570 estats->rx_stat_ifhcinbadoctets_lo);
4571
4572 memcpy(estats, &(fstats->total_bytes_received_hi),
4573 sizeof(struct host_func_stats) - 2*sizeof(u32));
4574
4575 ADD_64(estats->etherstatsoverrsizepkts_hi,
4576 estats->rx_stat_dot3statsframestoolong_hi,
4577 estats->etherstatsoverrsizepkts_lo,
4578 estats->rx_stat_dot3statsframestoolong_lo);
4579 ADD_64(estats->error_bytes_received_hi,
4580 estats->rx_stat_ifhcinbadoctets_hi,
4581 estats->error_bytes_received_lo,
4582 estats->rx_stat_ifhcinbadoctets_lo);
4583
4584 if (bp->port.pmf) {
4585 estats->mac_filter_discard =
4586 le32_to_cpu(tport->mac_filter_discard);
4587 estats->xxoverflow_discard =
4588 le32_to_cpu(tport->xxoverflow_discard);
4589 estats->brb_truncate_discard =
4590 le32_to_cpu(tport->brb_truncate_discard);
4591 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4592 }
4593
4594 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4595
4596 bp->stats_pending = 0;
4597
4598 return 0;
4599}
4600
/* Fold the driver's accumulated 64-bit statistics (bp->eth_stats) into the
 * generic struct net_device_stats exposed to the network core.
 * bnx2x_hilo() merges a hi/lo 32-bit counter pair into a single value
 * (presumably returning the combined 64-bit count truncated to unsigned
 * long — TODO confirm against its definition).  No locking is done here;
 * called from the statistics state machine.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	/* rx/tx packet totals are the sum of the ucast/mcast/bcast counters */
	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC-level discards plus per-queue checksum discards
	 * cached in each fastpath's old_tclient snapshot */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	/* Map the individual HW error counters onto the generic rx error
	 * categories; rx_errors below is their sum. */
	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
4666
4667static void bnx2x_drv_stats_update(struct bnx2x *bp)
4668{
4669 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4670 int i;
4671
4672 estats->driver_xoff = 0;
4673 estats->rx_err_discard_pkt = 0;
4674 estats->rx_skb_alloc_failed = 0;
4675 estats->hw_csum_err = 0;
4676 for_each_queue(bp, i) {
4677 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4678
4679 estats->driver_xoff += qstats->driver_xoff;
4680 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4681 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4682 estats->hw_csum_err += qstats->hw_csum_err;
4683 }
4684}
4685
/* UPDATE action of the statistics state machine (ENABLED state).
 * Pulls the completed HW/storm statistics into the driver structures,
 * refreshes the netdev counters, optionally dumps per-queue debug info,
 * and re-arms the next statistics collection cycle.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* the previous statistics DMAE has not completed yet - try later */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* only the port-management function collects the MAC/NIG HW stats */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* storm stats failing 4 consecutive times (stats_pending 0..3) is
	 * treated as fatal - the FW is not responding */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-queue dump, only when the timer debug level is on */
	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			/* NOTE(review): the "rx calls(%lu %lu)" pair prints
			 * fp->rx_calls and fp->rx_pkt - labels and values
			 * look misaligned; confirm intended output */
			printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
					  " rx pkt(%lu) rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
					  " tx pkt(%lu) tx calls (%lu)"
					  " %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	/* kick off the next HW and storm statistics collection round */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
4749
/* Build the DMAE command(s) that push the final host port/function
 * statistics buffers back out to their shmem addresses (port_stx /
 * func_stx) when statistics collection is being stopped.  Only builds
 * the commands in bp->slowpath->dmae[]; bnx2x_hw_stats_post() actually
 * executes them.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode: copy from host memory (PCI) to device (GRC) */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function-stats command follows, chain to it via GRC
		 * completion; otherwise complete directly to stats_comp */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			/* completion triggers the next loader command */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* final command: write function stats and signal stats_comp */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4813
4814static void bnx2x_stats_stop(struct bnx2x *bp)
4815{
4816 int update = 0;
4817
4818 bnx2x_stats_comp(bp);
4819
4820 if (bp->port.pmf)
4821 update = (bnx2x_hw_stats_update(bp) == 0);
4822
4823 update |= (bnx2x_storm_stats_update(bp) == 0);
4824
4825 if (update) {
4826 bnx2x_net_stats_update(bp);
4827
4828 if (bp->port.pmf)
4829 bnx2x_port_stats_stop(bp);
4830
4831 bnx2x_hw_stats_post(bp);
4832 bnx2x_stats_comp(bp);
4833 }
4834}
4835
/* No-op action for statistics state-machine transitions that need none. */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4839
/* Statistics state machine: indexed by [current state][event], each entry
 * names the action to run and the state to move to.  Consumed only by
 * bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4858
/* Drive the statistics state machine: atomically advance bp->stats_state
 * for the given event, then run the corresponding action.  The spinlock
 * protects only the state transition; the action itself runs unlocked.
 */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	/* no statistics handling once the driver has panicked */
	if (unlikely(bp->panic))
		return;

	/* Protect a state change flow */
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	/* UPDATE events fire on every timer tick - only log them when the
	 * timer debug level is enabled */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
4878
/* PMF only: write the (zeroed/initial) host port statistics buffer out to
 * the shmem port_stx address with a single synchronous DMAE transfer, so
 * the management FW starts from a known base.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* host memory (PCI) -> device (GRC), completion back to stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* fire the command and busy-wait for its completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4916
4917static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4918{
4919 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4920 int port = BP_PORT(bp);
4921 int func;
4922 u32 func_stx;
4923
4924 /* sanity */
4925 if (!bp->port.pmf || !bp->func_stx) {
4926 BNX2X_ERR("BUG!\n");
4927 return;
4928 }
4929
4930 /* save our func_stx */
4931 func_stx = bp->func_stx;
4932
4933 for (vn = VN_0; vn < vn_max; vn++) {
4934 func = 2*vn + port;
4935
4936 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4937 bnx2x_func_stats_init(bp);
4938 bnx2x_hw_stats_post(bp);
4939 bnx2x_stats_comp(bp);
4940 }
4941
4942 /* restore our func_stx */
4943 bp->func_stx = func_stx;
4944}
4945
/* Non-PMF path: read this function's statistics base from its shmem
 * address (func_stx) into the host func_stats_base buffer with a single
 * synchronous DMAE transfer.  Uses bp->stats_dmae directly rather than
 * the slowpath dmae[] array.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* device (GRC) -> host memory (PCI), completion to stats_comp */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* fire the command and busy-wait for its completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4983
/* One-time statistics initialization at device setup: resolve the shmem
 * statistics addresses, snapshot the NIG baseline counters, zero all
 * per-queue and aggregate statistics, and initialize the shmem stats
 * bases (PMF initializes all functions, non-PMF reads its own base).
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no management FW - nowhere to report statistics to */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: snapshot the current NIG counters as the baseline
	 * against which later deltas are computed */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear every per-queue snapshot and counter set */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
5045
5046static void bnx2x_timer(unsigned long data) 2655static void bnx2x_timer(unsigned long data)
5047{ 2656{
5048 struct bnx2x *bp = (struct bnx2x *) data; 2657 struct bnx2x *bp = (struct bnx2x *) data;
@@ -5113,7 +2722,7 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5113 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); 2722 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5114} 2723}
5115 2724
5116static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 2725void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5117 dma_addr_t mapping, int sb_id) 2726 dma_addr_t mapping, int sb_id)
5118{ 2727{
5119 int port = BP_PORT(bp); 2728 int port = BP_PORT(bp);
@@ -5292,7 +2901,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
5292 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 2901 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5293} 2902}
5294 2903
5295static void bnx2x_update_coalesce(struct bnx2x *bp) 2904void bnx2x_update_coalesce(struct bnx2x *bp)
5296{ 2905{
5297 int port = BP_PORT(bp); 2906 int port = BP_PORT(bp);
5298 int i; 2907 int i;
@@ -5322,207 +2931,6 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
5322 } 2931 }
5323} 2932}
5324 2933
/* Release the first 'last' entries of a queue's TPA (aggregation) skb
 * pool.  Entries still in the BNX2X_TPA_START state hold a live DMA
 * mapping that must be unmapped before the skb is freed.
 */
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		/* only STARTed bins are DMA-mapped */
		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
5348
/* Initialize all rx rings: optionally pre-allocate the per-queue TPA skb
 * pools, link the "next page" elements of the SGE/BD/CQ rings, fill the
 * rings with buffers, and publish the initial producers to the chip.
 * Allocation failures degrade gracefully (TPA disabled / shorter ring)
 * rather than failing the whole init.
 */
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				/* NOTE(review): &bp->fp->tpa_pool[i] always
				 * refers to queue 0's pool, not queue j's -
				 * looks like it should be &fp->tpa_pool[i];
				 * confirm against upstream history */
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring: last usable element of each page points at the
		 * next page (wrapping back to the first) */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring: same next-page chaining as the SGE ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring: last element of each page is the next-page entry */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
					 cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		/* queue 0 only: program the USTORM memory workaround address
		 * with the CQ ring's physical address */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
5493
/* Initialize every tx ring: chain the per-page "next bd" elements into a
 * circular ring, set up the doorbell header, and reset all tx indices.
 */
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		/* last BD of each page points at the next page (wrapping) */
		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		/* start with an empty ring */
		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
5525
5526static void bnx2x_init_sp_ring(struct bnx2x *bp) 2934static void bnx2x_init_sp_ring(struct bnx2x *bp)
5527{ 2935{
5528 int func = BP_FUNC(bp); 2936 int func = BP_FUNC(bp);
@@ -5637,7 +3045,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
5637 bp->fp->cl_id + (i % bp->num_queues)); 3045 bp->fp->cl_id + (i % bp->num_queues));
5638} 3046}
5639 3047
5640static void bnx2x_set_client_config(struct bnx2x *bp) 3048void bnx2x_set_client_config(struct bnx2x *bp)
5641{ 3049{
5642 struct tstorm_eth_client_config tstorm_client = {0}; 3050 struct tstorm_eth_client_config tstorm_client = {0};
5643 int port = BP_PORT(bp); 3051 int port = BP_PORT(bp);
@@ -5670,7 +3078,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
5670 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); 3078 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5671} 3079}
5672 3080
5673static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 3081void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5674{ 3082{
5675 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; 3083 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5676 int mode = bp->rx_mode; 3084 int mode = bp->rx_mode;
@@ -5990,7 +3398,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5990 } 3398 }
5991} 3399}
5992 3400
5993static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 3401void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5994{ 3402{
5995 int i; 3403 int i;
5996 3404
@@ -7071,7 +4479,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
7071 return 0; 4479 return 0;
7072} 4480}
7073 4481
7074static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 4482int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7075{ 4483{
7076 int i, rc = 0; 4484 int i, rc = 0;
7077 4485
@@ -7133,7 +4541,7 @@ init_hw_err:
7133 return rc; 4541 return rc;
7134} 4542}
7135 4543
7136static void bnx2x_free_mem(struct bnx2x *bp) 4544void bnx2x_free_mem(struct bnx2x *bp)
7137{ 4545{
7138 4546
7139#define BNX2X_PCI_FREE(x, y, size) \ 4547#define BNX2X_PCI_FREE(x, y, size) \
@@ -7215,7 +4623,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
7215#undef BNX2X_KFREE 4623#undef BNX2X_KFREE
7216} 4624}
7217 4625
7218static int bnx2x_alloc_mem(struct bnx2x *bp) 4626int bnx2x_alloc_mem(struct bnx2x *bp)
7219{ 4627{
7220 4628
7221#define BNX2X_PCI_ALLOC(x, y, size) \ 4629#define BNX2X_PCI_ALLOC(x, y, size) \
@@ -7321,264 +4729,6 @@ alloc_mem_err:
7321#undef BNX2X_ALLOC 4729#undef BNX2X_ALLOC
7322} 4730}
7323 4731
7324static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7325{
7326 int i;
7327
7328 for_each_queue(bp, i) {
7329 struct bnx2x_fastpath *fp = &bp->fp[i];
7330
7331 u16 bd_cons = fp->tx_bd_cons;
7332 u16 sw_prod = fp->tx_pkt_prod;
7333 u16 sw_cons = fp->tx_pkt_cons;
7334
7335 while (sw_cons != sw_prod) {
7336 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7337 sw_cons++;
7338 }
7339 }
7340}
7341
7342static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7343{
7344 int i, j;
7345
7346 for_each_queue(bp, j) {
7347 struct bnx2x_fastpath *fp = &bp->fp[j];
7348
7349 for (i = 0; i < NUM_RX_BD; i++) {
7350 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7351 struct sk_buff *skb = rx_buf->skb;
7352
7353 if (skb == NULL)
7354 continue;
7355
7356 dma_unmap_single(&bp->pdev->dev,
7357 dma_unmap_addr(rx_buf, mapping),
7358 bp->rx_buf_size, DMA_FROM_DEVICE);
7359
7360 rx_buf->skb = NULL;
7361 dev_kfree_skb(skb);
7362 }
7363 if (!fp->disable_tpa)
7364 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7365 ETH_MAX_AGGREGATION_QUEUES_E1 :
7366 ETH_MAX_AGGREGATION_QUEUES_E1H);
7367 }
7368}
7369
7370static void bnx2x_free_skbs(struct bnx2x *bp)
7371{
7372 bnx2x_free_tx_skbs(bp);
7373 bnx2x_free_rx_skbs(bp);
7374}
7375
7376static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7377{
7378 int i, offset = 1;
7379
7380 free_irq(bp->msix_table[0].vector, bp->dev);
7381 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7382 bp->msix_table[0].vector);
7383
7384#ifdef BCM_CNIC
7385 offset++;
7386#endif
7387 for_each_queue(bp, i) {
7388 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7389 "state %x\n", i, bp->msix_table[i + offset].vector,
7390 bnx2x_fp(bp, i, state));
7391
7392 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7393 }
7394}
7395
7396static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7397{
7398 if (bp->flags & USING_MSIX_FLAG) {
7399 if (!disable_only)
7400 bnx2x_free_msix_irqs(bp);
7401 pci_disable_msix(bp->pdev);
7402 bp->flags &= ~USING_MSIX_FLAG;
7403
7404 } else if (bp->flags & USING_MSI_FLAG) {
7405 if (!disable_only)
7406 free_irq(bp->pdev->irq, bp->dev);
7407 pci_disable_msi(bp->pdev);
7408 bp->flags &= ~USING_MSI_FLAG;
7409
7410 } else if (!disable_only)
7411 free_irq(bp->pdev->irq, bp->dev);
7412}
7413
7414static int bnx2x_enable_msix(struct bnx2x *bp)
7415{
7416 int i, rc, offset = 1;
7417 int igu_vec = 0;
7418
7419 bp->msix_table[0].entry = igu_vec;
7420 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7421
7422#ifdef BCM_CNIC
7423 igu_vec = BP_L_ID(bp) + offset;
7424 bp->msix_table[1].entry = igu_vec;
7425 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7426 offset++;
7427#endif
7428 for_each_queue(bp, i) {
7429 igu_vec = BP_L_ID(bp) + offset + i;
7430 bp->msix_table[i + offset].entry = igu_vec;
7431 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7432 "(fastpath #%u)\n", i + offset, igu_vec, i);
7433 }
7434
7435 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7436 BNX2X_NUM_QUEUES(bp) + offset);
7437
7438 /*
7439 * reconfigure number of tx/rx queues according to available
7440 * MSI-X vectors
7441 */
7442 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7443 /* vectors available for FP */
7444 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7445
7446 DP(NETIF_MSG_IFUP,
7447 "Trying to use less MSI-X vectors: %d\n", rc);
7448
7449 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7450
7451 if (rc) {
7452 DP(NETIF_MSG_IFUP,
7453 "MSI-X is not attainable rc %d\n", rc);
7454 return rc;
7455 }
7456
7457 bp->num_queues = min(bp->num_queues, fp_vec);
7458
7459 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7460 bp->num_queues);
7461 } else if (rc) {
7462 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7463 return rc;
7464 }
7465
7466 bp->flags |= USING_MSIX_FLAG;
7467
7468 return 0;
7469}
7470
7471static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7472{
7473 int i, rc, offset = 1;
7474
7475 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7476 bp->dev->name, bp->dev);
7477 if (rc) {
7478 BNX2X_ERR("request sp irq failed\n");
7479 return -EBUSY;
7480 }
7481
7482#ifdef BCM_CNIC
7483 offset++;
7484#endif
7485 for_each_queue(bp, i) {
7486 struct bnx2x_fastpath *fp = &bp->fp[i];
7487 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7488 bp->dev->name, i);
7489
7490 rc = request_irq(bp->msix_table[i + offset].vector,
7491 bnx2x_msix_fp_int, 0, fp->name, fp);
7492 if (rc) {
7493 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7494 bnx2x_free_msix_irqs(bp);
7495 return -EBUSY;
7496 }
7497
7498 fp->state = BNX2X_FP_STATE_IRQ;
7499 }
7500
7501 i = BNX2X_NUM_QUEUES(bp);
7502 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7503 " ... fp[%d] %d\n",
7504 bp->msix_table[0].vector,
7505 0, bp->msix_table[offset].vector,
7506 i - 1, bp->msix_table[offset + i - 1].vector);
7507
7508 return 0;
7509}
7510
7511static int bnx2x_enable_msi(struct bnx2x *bp)
7512{
7513 int rc;
7514
7515 rc = pci_enable_msi(bp->pdev);
7516 if (rc) {
7517 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7518 return -1;
7519 }
7520 bp->flags |= USING_MSI_FLAG;
7521
7522 return 0;
7523}
7524
7525static int bnx2x_req_irq(struct bnx2x *bp)
7526{
7527 unsigned long flags;
7528 int rc;
7529
7530 if (bp->flags & USING_MSI_FLAG)
7531 flags = 0;
7532 else
7533 flags = IRQF_SHARED;
7534
7535 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7536 bp->dev->name, bp->dev);
7537 if (!rc)
7538 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7539
7540 return rc;
7541}
7542
7543static void bnx2x_napi_enable(struct bnx2x *bp)
7544{
7545 int i;
7546
7547 for_each_queue(bp, i)
7548 napi_enable(&bnx2x_fp(bp, i, napi));
7549}
7550
7551static void bnx2x_napi_disable(struct bnx2x *bp)
7552{
7553 int i;
7554
7555 for_each_queue(bp, i)
7556 napi_disable(&bnx2x_fp(bp, i, napi));
7557}
7558
7559static void bnx2x_netif_start(struct bnx2x *bp)
7560{
7561 int intr_sem;
7562
7563 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7564 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7565
7566 if (intr_sem) {
7567 if (netif_running(bp->dev)) {
7568 bnx2x_napi_enable(bp);
7569 bnx2x_int_enable(bp);
7570 if (bp->state == BNX2X_STATE_OPEN)
7571 netif_tx_wake_all_queues(bp->dev);
7572 }
7573 }
7574}
7575
7576static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7577{
7578 bnx2x_int_disable_sync(bp, disable_hw);
7579 bnx2x_napi_disable(bp);
7580 netif_tx_disable(bp->dev);
7581}
7582 4732
7583/* 4733/*
7584 * Init service functions 4734 * Init service functions
@@ -7749,7 +4899,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7749 return -EBUSY; 4899 return -EBUSY;
7750} 4900}
7751 4901
7752static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) 4902void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7753{ 4903{
7754 bp->set_mac_pending++; 4904 bp->set_mac_pending++;
7755 smp_wmb(); 4905 smp_wmb();
@@ -7761,7 +4911,7 @@ static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7761 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 4911 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7762} 4912}
7763 4913
7764static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) 4914void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7765{ 4915{
7766 bp->set_mac_pending++; 4916 bp->set_mac_pending++;
7767 smp_wmb(); 4917 smp_wmb();
@@ -7785,7 +4935,7 @@ static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7785 * 4935 *
7786 * @return 0 if cussess, -ENODEV if ramrod doesn't return. 4936 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7787 */ 4937 */
7788static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 4938int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7789{ 4939{
7790 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); 4940 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7791 4941
@@ -7812,7 +4962,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7812} 4962}
7813#endif 4963#endif
7814 4964
7815static int bnx2x_setup_leading(struct bnx2x *bp) 4965int bnx2x_setup_leading(struct bnx2x *bp)
7816{ 4966{
7817 int rc; 4967 int rc;
7818 4968
@@ -7828,7 +4978,7 @@ static int bnx2x_setup_leading(struct bnx2x *bp)
7828 return rc; 4978 return rc;
7829} 4979}
7830 4980
7831static int bnx2x_setup_multi(struct bnx2x *bp, int index) 4981int bnx2x_setup_multi(struct bnx2x *bp, int index)
7832{ 4982{
7833 struct bnx2x_fastpath *fp = &bp->fp[index]; 4983 struct bnx2x_fastpath *fp = &bp->fp[index];
7834 4984
@@ -7845,9 +4995,8 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7845 &(fp->state), 0); 4995 &(fp->state), 0);
7846} 4996}
7847 4997
7848static int bnx2x_poll(struct napi_struct *napi, int budget);
7849 4998
7850static void bnx2x_set_num_queues_msix(struct bnx2x *bp) 4999void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7851{ 5000{
7852 5001
7853 switch (bp->multi_mode) { 5002 switch (bp->multi_mode) {
@@ -7871,292 +5020,7 @@ static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7871 } 5020 }
7872} 5021}
7873 5022
7874static int bnx2x_set_num_queues(struct bnx2x *bp)
7875{
7876 int rc = 0;
7877
7878 switch (int_mode) {
7879 case INT_MODE_INTx:
7880 case INT_MODE_MSI:
7881 bp->num_queues = 1;
7882 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7883 break;
7884 default:
7885 /* Set number of queues according to bp->multi_mode value */
7886 bnx2x_set_num_queues_msix(bp);
7887
7888 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7889 bp->num_queues);
7890
7891 /* if we can't use MSI-X we only need one fp,
7892 * so try to enable MSI-X with the requested number of fp's
7893 * and fallback to MSI or legacy INTx with one fp
7894 */
7895 rc = bnx2x_enable_msix(bp);
7896 if (rc)
7897 /* failed to enable MSI-X */
7898 bp->num_queues = 1;
7899 break;
7900 }
7901 bp->dev->real_num_tx_queues = bp->num_queues;
7902 return rc;
7903}
7904
7905#ifdef BCM_CNIC
7906static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7907static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7908#endif
7909
7910/* must be called with rtnl_lock */
7911static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7912{
7913 u32 load_code;
7914 int i, rc;
7915
7916#ifdef BNX2X_STOP_ON_ERROR
7917 if (unlikely(bp->panic))
7918 return -EPERM;
7919#endif
7920
7921 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7922
7923 rc = bnx2x_set_num_queues(bp);
7924
7925 if (bnx2x_alloc_mem(bp)) {
7926 bnx2x_free_irq(bp, true);
7927 return -ENOMEM;
7928 }
7929
7930 for_each_queue(bp, i)
7931 bnx2x_fp(bp, i, disable_tpa) =
7932 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7933
7934 for_each_queue(bp, i)
7935 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7936 bnx2x_poll, 128);
7937 5023
7938 bnx2x_napi_enable(bp);
7939
7940 if (bp->flags & USING_MSIX_FLAG) {
7941 rc = bnx2x_req_msix_irqs(bp);
7942 if (rc) {
7943 bnx2x_free_irq(bp, true);
7944 goto load_error1;
7945 }
7946 } else {
7947 /* Fall to INTx if failed to enable MSI-X due to lack of
7948 memory (in bnx2x_set_num_queues()) */
7949 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7950 bnx2x_enable_msi(bp);
7951 bnx2x_ack_int(bp);
7952 rc = bnx2x_req_irq(bp);
7953 if (rc) {
7954 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7955 bnx2x_free_irq(bp, true);
7956 goto load_error1;
7957 }
7958 if (bp->flags & USING_MSI_FLAG) {
7959 bp->dev->irq = bp->pdev->irq;
7960 netdev_info(bp->dev, "using MSI IRQ %d\n",
7961 bp->pdev->irq);
7962 }
7963 }
7964
7965 /* Send LOAD_REQUEST command to MCP
7966 Returns the type of LOAD command:
7967 if it is the first port to be initialized
7968 common blocks should be initialized, otherwise - not
7969 */
7970 if (!BP_NOMCP(bp)) {
7971 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7972 if (!load_code) {
7973 BNX2X_ERR("MCP response failure, aborting\n");
7974 rc = -EBUSY;
7975 goto load_error2;
7976 }
7977 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7978 rc = -EBUSY; /* other port in diagnostic mode */
7979 goto load_error2;
7980 }
7981
7982 } else {
7983 int port = BP_PORT(bp);
7984
7985 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7986 load_count[0], load_count[1], load_count[2]);
7987 load_count[0]++;
7988 load_count[1 + port]++;
7989 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7990 load_count[0], load_count[1], load_count[2]);
7991 if (load_count[0] == 1)
7992 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7993 else if (load_count[1 + port] == 1)
7994 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7995 else
7996 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7997 }
7998
7999 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8000 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8001 bp->port.pmf = 1;
8002 else
8003 bp->port.pmf = 0;
8004 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
8005
8006 /* Initialize HW */
8007 rc = bnx2x_init_hw(bp, load_code);
8008 if (rc) {
8009 BNX2X_ERR("HW init failed, aborting\n");
8010 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8012 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8013 goto load_error2;
8014 }
8015
8016 /* Setup NIC internals and enable interrupts */
8017 bnx2x_nic_init(bp, load_code);
8018
8019 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8020 (bp->common.shmem2_base))
8021 SHMEM2_WR(bp, dcc_support,
8022 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8023 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8024
8025 /* Send LOAD_DONE command to MCP */
8026 if (!BP_NOMCP(bp)) {
8027 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8028 if (!load_code) {
8029 BNX2X_ERR("MCP response failure, aborting\n");
8030 rc = -EBUSY;
8031 goto load_error3;
8032 }
8033 }
8034
8035 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8036
8037 rc = bnx2x_setup_leading(bp);
8038 if (rc) {
8039 BNX2X_ERR("Setup leading failed!\n");
8040#ifndef BNX2X_STOP_ON_ERROR
8041 goto load_error3;
8042#else
8043 bp->panic = 1;
8044 return -EBUSY;
8045#endif
8046 }
8047
8048 if (CHIP_IS_E1H(bp))
8049 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8050 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8051 bp->flags |= MF_FUNC_DIS;
8052 }
8053
8054 if (bp->state == BNX2X_STATE_OPEN) {
8055#ifdef BCM_CNIC
8056 /* Enable Timer scan */
8057 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8058#endif
8059 for_each_nondefault_queue(bp, i) {
8060 rc = bnx2x_setup_multi(bp, i);
8061 if (rc)
8062#ifdef BCM_CNIC
8063 goto load_error4;
8064#else
8065 goto load_error3;
8066#endif
8067 }
8068
8069 if (CHIP_IS_E1(bp))
8070 bnx2x_set_eth_mac_addr_e1(bp, 1);
8071 else
8072 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8073#ifdef BCM_CNIC
8074 /* Set iSCSI L2 MAC */
8075 mutex_lock(&bp->cnic_mutex);
8076 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8077 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8078 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8079 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8080 CNIC_SB_ID(bp));
8081 }
8082 mutex_unlock(&bp->cnic_mutex);
8083#endif
8084 }
8085
8086 if (bp->port.pmf)
8087 bnx2x_initial_phy_init(bp, load_mode);
8088
8089 /* Start fast path */
8090 switch (load_mode) {
8091 case LOAD_NORMAL:
8092 if (bp->state == BNX2X_STATE_OPEN) {
8093 /* Tx queue should be only reenabled */
8094 netif_tx_wake_all_queues(bp->dev);
8095 }
8096 /* Initialize the receive filter. */
8097 bnx2x_set_rx_mode(bp->dev);
8098 break;
8099
8100 case LOAD_OPEN:
8101 netif_tx_start_all_queues(bp->dev);
8102 if (bp->state != BNX2X_STATE_OPEN)
8103 netif_tx_disable(bp->dev);
8104 /* Initialize the receive filter. */
8105 bnx2x_set_rx_mode(bp->dev);
8106 break;
8107
8108 case LOAD_DIAG:
8109 /* Initialize the receive filter. */
8110 bnx2x_set_rx_mode(bp->dev);
8111 bp->state = BNX2X_STATE_DIAG;
8112 break;
8113
8114 default:
8115 break;
8116 }
8117
8118 if (!bp->port.pmf)
8119 bnx2x__link_status_update(bp);
8120
8121 /* start the timer */
8122 mod_timer(&bp->timer, jiffies + bp->current_interval);
8123
8124#ifdef BCM_CNIC
8125 bnx2x_setup_cnic_irq_info(bp);
8126 if (bp->state == BNX2X_STATE_OPEN)
8127 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8128#endif
8129 bnx2x_inc_load_cnt(bp);
8130
8131 return 0;
8132
8133#ifdef BCM_CNIC
8134load_error4:
8135 /* Disable Timer scan */
8136 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8137#endif
8138load_error3:
8139 bnx2x_int_disable_sync(bp, 1);
8140 if (!BP_NOMCP(bp)) {
8141 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8142 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8143 }
8144 bp->port.pmf = 0;
8145 /* Free SKBs, SGEs, TPA pool and driver internals */
8146 bnx2x_free_skbs(bp);
8147 for_each_queue(bp, i)
8148 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8149load_error2:
8150 /* Release IRQs */
8151 bnx2x_free_irq(bp, false);
8152load_error1:
8153 bnx2x_napi_disable(bp);
8154 for_each_queue(bp, i)
8155 netif_napi_del(&bnx2x_fp(bp, i, napi));
8156 bnx2x_free_mem(bp);
8157
8158 return rc;
8159}
8160 5024
8161static int bnx2x_stop_multi(struct bnx2x *bp, int index) 5025static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8162{ 5026{
@@ -8314,7 +5178,7 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8314 } 5178 }
8315} 5179}
8316 5180
8317static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 5181void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8318{ 5182{
8319 int port = BP_PORT(bp); 5183 int port = BP_PORT(bp);
8320 u32 reset_code = 0; 5184 u32 reset_code = 0;
@@ -8462,7 +5326,7 @@ unload_error:
8462 5326
8463} 5327}
8464 5328
8465static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp) 5329void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8466{ 5330{
8467 u32 val; 5331 u32 val;
8468 5332
@@ -8484,71 +5348,6 @@ static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8484 } 5348 }
8485} 5349}
8486 5350
8487/* must be called with rtnl_lock */
8488static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8489{
8490 int i;
8491
8492 if (bp->state == BNX2X_STATE_CLOSED) {
8493 /* Interface has been removed - nothing to recover */
8494 bp->recovery_state = BNX2X_RECOVERY_DONE;
8495 bp->is_leader = 0;
8496 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8497 smp_wmb();
8498
8499 return -EINVAL;
8500 }
8501
8502#ifdef BCM_CNIC
8503 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8504#endif
8505 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8506
8507 /* Set "drop all" */
8508 bp->rx_mode = BNX2X_RX_MODE_NONE;
8509 bnx2x_set_storm_rx_mode(bp);
8510
8511 /* Disable HW interrupts, NAPI and Tx */
8512 bnx2x_netif_stop(bp, 1);
8513 netif_carrier_off(bp->dev);
8514
8515 del_timer_sync(&bp->timer);
8516 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8517 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8518 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8519
8520 /* Release IRQs */
8521 bnx2x_free_irq(bp, false);
8522
8523 /* Cleanup the chip if needed */
8524 if (unload_mode != UNLOAD_RECOVERY)
8525 bnx2x_chip_cleanup(bp, unload_mode);
8526
8527 bp->port.pmf = 0;
8528
8529 /* Free SKBs, SGEs, TPA pool and driver internals */
8530 bnx2x_free_skbs(bp);
8531 for_each_queue(bp, i)
8532 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8533 for_each_queue(bp, i)
8534 netif_napi_del(&bnx2x_fp(bp, i, napi));
8535 bnx2x_free_mem(bp);
8536
8537 bp->state = BNX2X_STATE_CLOSED;
8538
8539 /* The last driver must disable a "close the gate" if there is no
8540 * parity attention or "process kill" pending.
8541 */
8542 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8543 bnx2x_reset_is_done(bp))
8544 bnx2x_disable_close_the_gate(bp);
8545
8546 /* Reset MCP mail box sequence if there is on going recovery */
8547 if (unload_mode == UNLOAD_RECOVERY)
8548 bp->fw_seq = 0;
8549
8550 return 0;
8551}
8552 5351
8553/* Close gates #2, #3 and #4: */ 5352/* Close gates #2, #3 and #4: */
8554static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 5353static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
@@ -8861,8 +5660,6 @@ exit_leader_reset:
8861 return rc; 5660 return rc;
8862} 5661}
8863 5662
8864static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8865
8866/* Assumption: runs under rtnl lock. This together with the fact 5663/* Assumption: runs under rtnl lock. This together with the fact
8867 * that it's called only from bnx2x_reset_task() ensure that it 5664 * that it's called only from bnx2x_reset_task() ensure that it
8868 * will never be called when netif_running(bp->dev) is false. 5665 * will never be called when netif_running(bp->dev) is false.
@@ -8999,8 +5796,6 @@ reset_task_exit:
8999 5796
9000/* end of nic load/unload */ 5797/* end of nic load/unload */
9001 5798
9002/* ethtool_ops */
9003
9004/* 5799/*
9005 * Init service functions 5800 * Init service functions
9006 */ 5801 */
@@ -9949,7 +6744,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9949 multi_mode = ETH_RSS_MODE_DISABLED; 6744 multi_mode = ETH_RSS_MODE_DISABLED;
9950 } 6745 }
9951 bp->multi_mode = multi_mode; 6746 bp->multi_mode = multi_mode;
9952 6747 bp->int_mode = int_mode;
9953 6748
9954 bp->dev->features |= NETIF_F_GRO; 6749 bp->dev->features |= NETIF_F_GRO;
9955 6750
@@ -9961,6 +6756,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9961 bp->flags |= TPA_ENABLE_FLAG; 6756 bp->flags |= TPA_ENABLE_FLAG;
9962 bp->dev->features |= NETIF_F_LRO; 6757 bp->dev->features |= NETIF_F_LRO;
9963 } 6758 }
6759 bp->disable_tpa = disable_tpa;
9964 6760
9965 if (CHIP_IS_E1(bp)) 6761 if (CHIP_IS_E1(bp))
9966 bp->dropless_fc = 0; 6762 bp->dropless_fc = 0;
@@ -9989,2544 +6785,11 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9989 return rc; 6785 return rc;
9990} 6786}
9991 6787
9992/*
9993 * ethtool service functions
9994 */
9995
9996/* All ethtool functions called with rtnl_lock */
9997
9998static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9999{
10000 struct bnx2x *bp = netdev_priv(dev);
10001
10002 cmd->supported = bp->port.supported;
10003 cmd->advertising = bp->port.advertising;
10004
10005 if ((bp->state == BNX2X_STATE_OPEN) &&
10006 !(bp->flags & MF_FUNC_DIS) &&
10007 (bp->link_vars.link_up)) {
10008 cmd->speed = bp->link_vars.line_speed;
10009 cmd->duplex = bp->link_vars.duplex;
10010 if (IS_E1HMF(bp)) {
10011 u16 vn_max_rate;
10012
10013 vn_max_rate =
10014 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10015 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10016 if (vn_max_rate < cmd->speed)
10017 cmd->speed = vn_max_rate;
10018 }
10019 } else {
10020 cmd->speed = -1;
10021 cmd->duplex = -1;
10022 }
10023
10024 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10025 u32 ext_phy_type =
10026 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10027
10028 switch (ext_phy_type) {
10029 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10031 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10032 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10033 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10034 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10036 cmd->port = PORT_FIBRE;
10037 break;
10038
10039 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10040 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10041 cmd->port = PORT_TP;
10042 break;
10043
10044 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10045 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10046 bp->link_params.ext_phy_config);
10047 break;
10048
10049 default:
10050 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10051 bp->link_params.ext_phy_config);
10052 break;
10053 }
10054 } else
10055 cmd->port = PORT_TP;
10056
10057 cmd->phy_address = bp->mdio.prtad;
10058 cmd->transceiver = XCVR_INTERNAL;
10059
10060 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10061 cmd->autoneg = AUTONEG_ENABLE;
10062 else
10063 cmd->autoneg = AUTONEG_DISABLE;
10064
10065 cmd->maxtxpkt = 0;
10066 cmd->maxrxpkt = 0;
10067
10068 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10069 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10070 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10071 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10072 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10073 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10074 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10075
10076 return 0;
10077}
10078
10079static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10080{
10081 struct bnx2x *bp = netdev_priv(dev);
10082 u32 advertising;
10083
10084 if (IS_E1HMF(bp))
10085 return 0;
10086
10087 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10088 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10089 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10090 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10091 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10092 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10093 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10094
10095 if (cmd->autoneg == AUTONEG_ENABLE) {
10096 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10097 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10098 return -EINVAL;
10099 }
10100
10101 /* advertise the requested speed and duplex if supported */
10102 cmd->advertising &= bp->port.supported;
10103
10104 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10105 bp->link_params.req_duplex = DUPLEX_FULL;
10106 bp->port.advertising |= (ADVERTISED_Autoneg |
10107 cmd->advertising);
10108
10109 } else { /* forced speed */
10110 /* advertise the requested speed and duplex if supported */
10111 switch (cmd->speed) {
10112 case SPEED_10:
10113 if (cmd->duplex == DUPLEX_FULL) {
10114 if (!(bp->port.supported &
10115 SUPPORTED_10baseT_Full)) {
10116 DP(NETIF_MSG_LINK,
10117 "10M full not supported\n");
10118 return -EINVAL;
10119 }
10120
10121 advertising = (ADVERTISED_10baseT_Full |
10122 ADVERTISED_TP);
10123 } else {
10124 if (!(bp->port.supported &
10125 SUPPORTED_10baseT_Half)) {
10126 DP(NETIF_MSG_LINK,
10127 "10M half not supported\n");
10128 return -EINVAL;
10129 }
10130
10131 advertising = (ADVERTISED_10baseT_Half |
10132 ADVERTISED_TP);
10133 }
10134 break;
10135
10136 case SPEED_100:
10137 if (cmd->duplex == DUPLEX_FULL) {
10138 if (!(bp->port.supported &
10139 SUPPORTED_100baseT_Full)) {
10140 DP(NETIF_MSG_LINK,
10141 "100M full not supported\n");
10142 return -EINVAL;
10143 }
10144
10145 advertising = (ADVERTISED_100baseT_Full |
10146 ADVERTISED_TP);
10147 } else {
10148 if (!(bp->port.supported &
10149 SUPPORTED_100baseT_Half)) {
10150 DP(NETIF_MSG_LINK,
10151 "100M half not supported\n");
10152 return -EINVAL;
10153 }
10154
10155 advertising = (ADVERTISED_100baseT_Half |
10156 ADVERTISED_TP);
10157 }
10158 break;
10159
10160 case SPEED_1000:
10161 if (cmd->duplex != DUPLEX_FULL) {
10162 DP(NETIF_MSG_LINK, "1G half not supported\n");
10163 return -EINVAL;
10164 }
10165
10166 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10167 DP(NETIF_MSG_LINK, "1G full not supported\n");
10168 return -EINVAL;
10169 }
10170
10171 advertising = (ADVERTISED_1000baseT_Full |
10172 ADVERTISED_TP);
10173 break;
10174
10175 case SPEED_2500:
10176 if (cmd->duplex != DUPLEX_FULL) {
10177 DP(NETIF_MSG_LINK,
10178 "2.5G half not supported\n");
10179 return -EINVAL;
10180 }
10181
10182 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10183 DP(NETIF_MSG_LINK,
10184 "2.5G full not supported\n");
10185 return -EINVAL;
10186 }
10187
10188 advertising = (ADVERTISED_2500baseX_Full |
10189 ADVERTISED_TP);
10190 break;
10191
10192 case SPEED_10000:
10193 if (cmd->duplex != DUPLEX_FULL) {
10194 DP(NETIF_MSG_LINK, "10G half not supported\n");
10195 return -EINVAL;
10196 }
10197
10198 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10199 DP(NETIF_MSG_LINK, "10G full not supported\n");
10200 return -EINVAL;
10201 }
10202
10203 advertising = (ADVERTISED_10000baseT_Full |
10204 ADVERTISED_FIBRE);
10205 break;
10206
10207 default:
10208 DP(NETIF_MSG_LINK, "Unsupported speed\n");
10209 return -EINVAL;
10210 }
10211
10212 bp->link_params.req_line_speed = cmd->speed;
10213 bp->link_params.req_duplex = cmd->duplex;
10214 bp->port.advertising = advertising;
10215 }
10216
10217 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10218 DP_LEVEL " req_duplex %d advertising 0x%x\n",
10219 bp->link_params.req_line_speed, bp->link_params.req_duplex,
10220 bp->port.advertising);
10221
10222 if (netif_running(dev)) {
10223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10224 bnx2x_link_set(bp);
10225 }
10226
10227 return 0;
10228}
10229
10230#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10231#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10232
10233static int bnx2x_get_regs_len(struct net_device *dev)
10234{
10235 struct bnx2x *bp = netdev_priv(dev);
10236 int regdump_len = 0;
10237 int i;
10238
10239 if (CHIP_IS_E1(bp)) {
10240 for (i = 0; i < REGS_COUNT; i++)
10241 if (IS_E1_ONLINE(reg_addrs[i].info))
10242 regdump_len += reg_addrs[i].size;
10243
10244 for (i = 0; i < WREGS_COUNT_E1; i++)
10245 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10246 regdump_len += wreg_addrs_e1[i].size *
10247 (1 + wreg_addrs_e1[i].read_regs_count);
10248
10249 } else { /* E1H */
10250 for (i = 0; i < REGS_COUNT; i++)
10251 if (IS_E1H_ONLINE(reg_addrs[i].info))
10252 regdump_len += reg_addrs[i].size;
10253
10254 for (i = 0; i < WREGS_COUNT_E1H; i++)
10255 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10256 regdump_len += wreg_addrs_e1h[i].size *
10257 (1 + wreg_addrs_e1h[i].read_regs_count);
10258 }
10259 regdump_len *= 4;
10260 regdump_len += sizeof(struct dump_hdr);
10261
10262 return regdump_len;
10263}
10264
10265static void bnx2x_get_regs(struct net_device *dev,
10266 struct ethtool_regs *regs, void *_p)
10267{
10268 u32 *p = _p, i, j;
10269 struct bnx2x *bp = netdev_priv(dev);
10270 struct dump_hdr dump_hdr = {0};
10271
10272 regs->version = 0;
10273 memset(p, 0, regs->len);
10274
10275 if (!netif_running(bp->dev))
10276 return;
10277
10278 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10279 dump_hdr.dump_sign = dump_sign_all;
10280 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10281 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10282 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10283 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10284 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10285
10286 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10287 p += dump_hdr.hdr_size + 1;
10288
10289 if (CHIP_IS_E1(bp)) {
10290 for (i = 0; i < REGS_COUNT; i++)
10291 if (IS_E1_ONLINE(reg_addrs[i].info))
10292 for (j = 0; j < reg_addrs[i].size; j++)
10293 *p++ = REG_RD(bp,
10294 reg_addrs[i].addr + j*4);
10295
10296 } else { /* E1H */
10297 for (i = 0; i < REGS_COUNT; i++)
10298 if (IS_E1H_ONLINE(reg_addrs[i].info))
10299 for (j = 0; j < reg_addrs[i].size; j++)
10300 *p++ = REG_RD(bp,
10301 reg_addrs[i].addr + j*4);
10302 }
10303}
10304
10305#define PHY_FW_VER_LEN 10
10306
/* ethtool_ops::get_drvinfo - report driver, firmware and bus identity.
 *
 * Builds fw_version from the MCP bootcode version and, when this
 * function is the port master (PMF), the external PHY firmware version
 * (reading the PHY requires the PHY lock).
 */
static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	/* only the PMF may query the PHY; others report an empty string */
	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	/* NOTE(review): assumes strlen(bp->fw_ver) < 32 so the strncpy
	 * result stays NUL-terminated before the snprintf append -
	 * TODO confirm against the bp->fw_ver definition */
	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
10338
10339static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10340{
10341 struct bnx2x *bp = netdev_priv(dev);
10342
10343 if (bp->flags & NO_WOL_FLAG) {
10344 wol->supported = 0;
10345 wol->wolopts = 0;
10346 } else {
10347 wol->supported = WAKE_MAGIC;
10348 if (bp->wol)
10349 wol->wolopts = WAKE_MAGIC;
10350 else
10351 wol->wolopts = 0;
10352 }
10353 memset(&wol->sopass, 0, sizeof(wol->sopass));
10354}
10355
10356static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10357{
10358 struct bnx2x *bp = netdev_priv(dev);
10359
10360 if (wol->wolopts & ~WAKE_MAGIC)
10361 return -EINVAL;
10362
10363 if (wol->wolopts & WAKE_MAGIC) {
10364 if (bp->flags & NO_WOL_FLAG)
10365 return -EINVAL;
10366
10367 bp->wol = 1;
10368 } else
10369 bp->wol = 0;
10370
10371 return 0;
10372}
10373
10374static u32 bnx2x_get_msglevel(struct net_device *dev)
10375{
10376 struct bnx2x *bp = netdev_priv(dev);
10377
10378 return bp->msg_enable;
10379}
10380
10381static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10382{
10383 struct bnx2x *bp = netdev_priv(dev);
10384
10385 if (capable(CAP_NET_ADMIN))
10386 bp->msg_enable = level;
10387}
10388
10389static int bnx2x_nway_reset(struct net_device *dev)
10390{
10391 struct bnx2x *bp = netdev_priv(dev);
10392
10393 if (!bp->port.pmf)
10394 return 0;
10395
10396 if (netif_running(dev)) {
10397 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10398 bnx2x_link_set(bp);
10399 }
10400
10401 return 0;
10402}
10403
10404static u32 bnx2x_get_link(struct net_device *dev)
10405{
10406 struct bnx2x *bp = netdev_priv(dev);
10407
10408 if (bp->flags & MF_FUNC_DIS)
10409 return 0;
10410
10411 return bp->link_vars.link_up;
10412}
10413
10414static int bnx2x_get_eeprom_len(struct net_device *dev)
10415{
10416 struct bnx2x *bp = netdev_priv(dev);
10417
10418 return bp->common.flash_size;
10419}
10420
/* Claim the NVRAM software arbitration for this port.
 *
 * Sets the per-port request bit and polls the arbiter until the grant
 * bit for this port appears.  Returns 0 on success, -EBUSY if the
 * grant does not show up within the timeout.
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the grant, ~5us per iteration (first check immediate) */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
10451
/* Release the NVRAM software arbitration held by this port.
 *
 * Clears the per-port request bit and polls until the arbiter drops
 * the grant.  Returns 0 on success, -EBUSY if the grant bit never
 * clears within the timeout.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until the grant bit drops, ~5us per iteration */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
10482
10483static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10484{
10485 u32 val;
10486
10487 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10488
10489 /* enable both bits, even on read */
10490 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10491 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10492 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10493}
10494
10495static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10496{
10497 u32 val;
10498
10499 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10500
10501 /* disable both bits, even after read */
10502 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10503 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10504 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10505}
10506
/* Issue a single NVRAM read command and poll for completion.
 *
 * cmd_flags may carry MCPR_NVM_COMMAND_FIRST/LAST for burst framing.
 * On success the 32-bit word is stored at *ret_val in big-endian form,
 * so that consecutive words reproduce the raw flash byte stream.
 * Returns 0, or -EBUSY if DONE never asserts within the timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
10551
/* Read buf_size bytes of NVRAM starting at offset into ret_buf.
 *
 * offset and buf_size must be dword-aligned, non-zero and inside the
 * flash.  Takes the NVRAM arbitration lock, reads dword by dword
 * (flagging the FIRST and LAST commands of the burst) and releases the
 * lock again.  Data is stored big-endian so ret_buf matches the raw
 * flash byte stream.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s); all but the last dword of the burst */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* final dword carries the LAST flag to close the burst */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10606
10607static int bnx2x_get_eeprom(struct net_device *dev,
10608 struct ethtool_eeprom *eeprom, u8 *eebuf)
10609{
10610 struct bnx2x *bp = netdev_priv(dev);
10611 int rc;
10612
10613 if (!netif_running(dev))
10614 return -EAGAIN;
10615
10616 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10617 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10618 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10619 eeprom->len, eeprom->len);
10620
10621 /* parameters already validated in ethtool_get_eeprom */
10622
10623 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10624
10625 return rc;
10626}
10627
/* Issue a single NVRAM write command for one 32-bit word and poll for
 * completion.
 *
 * val is written in cpu order; cmd_flags may carry FIRST/LAST framing.
 * Note that 'val' is reused afterwards to hold the polled command
 * register.  Returns 0, or -EBUSY if DONE never asserts.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
10667
/* Bit offset of the addressed byte within its 32-bit NVRAM word.
 * The argument is fully parenthesized so that expressions built from
 * operators of lower precedence than '&' (e.g. "a == b") are masked
 * as a whole rather than mis-grouped.
 */
#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))
10669
/* Write a single byte to NVRAM by read-modify-write of the enclosing
 * 32-bit word (the flash is only word-accessible), under the NVRAM
 * arbitration lock.
 *
 * NOTE(review): 'val' is declared __be32 but is masked/ORed with plain
 * cpu-order arithmetic and then assigned the result of be32_to_cpu() -
 * a sparse endianness violation.  The byte lane picked by
 * BYTE_OFFSET() is therefore interpreted differently on little- and
 * big-endian hosts; verify the intended byte is patched on big-endian
 * platforms before relying on this path there.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* single-word burst: read the dword containing the target byte */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* clear the old byte and splice in the new one */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10715
/* Write buf_size bytes from data_buf to NVRAM starting at offset.
 *
 * A one-byte write (the case ethtool generates) is delegated to
 * bnx2x_nvram_write1(); otherwise offset and buf_size must be
 * dword-aligned, non-zero and inside the flash.  Writes proceed dword
 * by dword under the NVRAM lock, re-issuing FIRST/LAST framing at
 * NVRAM_PAGE_SIZE boundaries.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* close the burst on the final dword or at the end of a
		 * flash page; reopen it at the start of a new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10776
/* ethtool_ops::set_eeprom - write NVRAM, with three special magic
 * values used by the PHY firmware upgrade tools:
 *   0x50485900..0x504859FF  PHY commands, PMF only
 *   0x50485950 'PHYP'       prepare PHY for FW upgrade
 *   0x50485952 'PHYR'       re-init link after FW upgrade
 *   0x53985943 "PHYC"       PHY FW upgrade completed
 * Any other magic performs a plain NVRAM write.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		/* NOTE(review): ASCII 'PHYC' is 0x50485943, not
		 * 0x53985943, and this value also falls outside the
		 * 0x504859xx PMF-only range gated above - presumably
		 * kept for compatibility with the upgrade tool; verify
		 * against the tool before changing. */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
10851
10852static int bnx2x_get_coalesce(struct net_device *dev,
10853 struct ethtool_coalesce *coal)
10854{
10855 struct bnx2x *bp = netdev_priv(dev);
10856
10857 memset(coal, 0, sizeof(struct ethtool_coalesce));
10858
10859 coal->rx_coalesce_usecs = bp->rx_ticks;
10860 coal->tx_coalesce_usecs = bp->tx_ticks;
10861
10862 return 0;
10863}
10864
10865static int bnx2x_set_coalesce(struct net_device *dev,
10866 struct ethtool_coalesce *coal)
10867{
10868 struct bnx2x *bp = netdev_priv(dev);
10869
10870 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10871 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10872 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10873
10874 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10875 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10876 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10877
10878 if (netif_running(dev))
10879 bnx2x_update_coalesce(bp);
10880
10881 return 0;
10882}
10883
10884static void bnx2x_get_ringparam(struct net_device *dev,
10885 struct ethtool_ringparam *ering)
10886{
10887 struct bnx2x *bp = netdev_priv(dev);
10888
10889 ering->rx_max_pending = MAX_RX_AVAIL;
10890 ering->rx_mini_max_pending = 0;
10891 ering->rx_jumbo_max_pending = 0;
10892
10893 ering->rx_pending = bp->rx_ring_size;
10894 ering->rx_mini_pending = 0;
10895 ering->rx_jumbo_pending = 0;
10896
10897 ering->tx_max_pending = MAX_TX_AVAIL;
10898 ering->tx_pending = bp->tx_ring_size;
10899}
10900
10901static int bnx2x_set_ringparam(struct net_device *dev,
10902 struct ethtool_ringparam *ering)
10903{
10904 struct bnx2x *bp = netdev_priv(dev);
10905 int rc = 0;
10906
10907 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10908 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10909 return -EAGAIN;
10910 }
10911
10912 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10913 (ering->tx_pending > MAX_TX_AVAIL) ||
10914 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10915 return -EINVAL;
10916
10917 bp->rx_ring_size = ering->rx_pending;
10918 bp->tx_ring_size = ering->tx_pending;
10919
10920 if (netif_running(dev)) {
10921 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10922 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10923 }
10924
10925 return rc;
10926}
10927
10928static void bnx2x_get_pauseparam(struct net_device *dev,
10929 struct ethtool_pauseparam *epause)
10930{
10931 struct bnx2x *bp = netdev_priv(dev);
10932
10933 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10934 BNX2X_FLOW_CTRL_AUTO) &&
10935 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10936
10937 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10938 BNX2X_FLOW_CTRL_RX);
10939 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10940 BNX2X_FLOW_CTRL_TX);
10941
10942 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10943 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10944 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10945}
10946
10947static int bnx2x_set_pauseparam(struct net_device *dev,
10948 struct ethtool_pauseparam *epause)
10949{
10950 struct bnx2x *bp = netdev_priv(dev);
10951
10952 if (IS_E1HMF(bp))
10953 return 0;
10954
10955 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10956 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10957 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10958
10959 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10960
10961 if (epause->rx_pause)
10962 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10963
10964 if (epause->tx_pause)
10965 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10966
10967 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10968 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10969
10970 if (epause->autoneg) {
10971 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10972 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10973 return -EINVAL;
10974 }
10975
10976 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10977 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10978 }
10979
10980 DP(NETIF_MSG_LINK,
10981 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10982
10983 if (netif_running(dev)) {
10984 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10985 bnx2x_link_set(bp);
10986 }
10987
10988 return 0;
10989}
10990
/* ethtool_ops::set_flags - toggle LRO (TPA) and RXHASH.
 *
 * TPA is only allowed when RX checksum offload is on (aggregated
 * packets would otherwise fail TCP checksum, see bnx2x_set_rx_csum)
 * and when the disable_tpa module parameter is clear.  A TPA state
 * change requires a full nic unload/load cycle; RXHASH does not
 * trigger a reload.
 */
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (data & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;

	/* reload the nic only for a TPA change */
	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
11030
11031static u32 bnx2x_get_rx_csum(struct net_device *dev)
11032{
11033 struct bnx2x *bp = netdev_priv(dev);
11034
11035 return bp->rx_csum;
11036}
11037
11038static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11039{
11040 struct bnx2x *bp = netdev_priv(dev);
11041 int rc = 0;
11042
11043 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11044 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11045 return -EAGAIN;
11046 }
11047
11048 bp->rx_csum = data;
11049
11050 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
11051 TPA'ed packets will be discarded due to wrong TCP CSUM */
11052 if (!data) {
11053 u32 flags = ethtool_op_get_flags(dev);
11054
11055 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11056 }
11057
11058 return rc;
11059}
11060
11061static int bnx2x_set_tso(struct net_device *dev, u32 data)
11062{
11063 if (data) {
11064 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11065 dev->features |= NETIF_F_TSO6;
11066 } else {
11067 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11068 dev->features &= ~NETIF_F_TSO6;
11069 }
11070
11071 return 0;
11072}
11073
/* Names reported to ethtool for ETH_SS_TEST; the order must match the
 * result slots filled by the self-test handler.
 */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
11085
/* Self-test: probe that per-port configuration registers are writable.
 *
 * Each reg_tbl entry gives the port-0 register offset (offset0), the
 * stride to this port's copy (offset1, multiplied by the port index)
 * and the mask of bits expected to be read/write.  Every register is
 * written with 0x00000000 and then with 0xffffffff; the read-back must
 * equal the written value under the mask.  The original register value
 * is restored after each probe.  Returns 0 on success, -ENODEV on the
 * first mismatch or when the interface is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
11182
/* Self-test: sweep internal memories and check parity status.
 *
 * Reads every word of each memory in mem_tbl (the reads themselves
 * exercise the parity logic), then checks each block's parity status
 * register against the bits known to be benign for the chip revision
 * (e1_mask / e1h_mask).  Returns 0 on success, -ENODEV on a parity
 * indication or when the interface is down.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
11241
11242static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11243{
11244 int cnt = 1000;
11245
11246 if (link_up)
11247 while (bnx2x_link_test(bp) && cnt--)
11248 msleep(10);
11249}
11250
/* Send one self-addressed frame through the requested loopback path
 * (PHY or MAC) and verify it arrives back intact on the rx ring.
 *
 * Caller must have quiesced the regular datapath and hold the PHY
 * lock (see bnx2x_test_loopback).  Returns 0 when the frame completes
 * on the tx ring and is received with the expected length and payload;
 * a negative errno otherwise.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst = own MAC, src = zero,
	 * payload = incrementing byte pattern, clamped to the MTU */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet: snapshot consumer indices first so
	 * completion can be detected below */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* BDs must be visible to the chip before the doorbell rings */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the frame back */
	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* inspect the rx completion: must be error-free and of the
	 * expected length */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload pattern survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the completion and advance the rx ring indices */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
11385
/* Self-test wrapper: run the PHY and MAC loopbacks with the datapath
 * stopped and the PHY lock held, accumulating per-mode failure flags.
 * Returns 0 on success or a bitmask of BNX2X_*_LOOPBACK_FAILED.
 */
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	/* without management firmware the loopbacks are not exercised */
	if (BP_NOMCP(bp))
		return rc;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	/* quiesce the datapath and take PHY ownership for the duration */
	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
11416
11417#define CRC32_RESIDUAL 0xdebb20e3
11418
11419static int bnx2x_test_nvram(struct bnx2x *bp)
11420{
11421 static const struct {
11422 int offset;
11423 int size;
11424 } nvram_tbl[] = {
11425 { 0, 0x14 }, /* bootstrap */
11426 { 0x14, 0xec }, /* dir */
11427 { 0x100, 0x350 }, /* manuf_info */
11428 { 0x450, 0xf0 }, /* feature_info */
11429 { 0x640, 0x64 }, /* upgrade_key_info */
11430 { 0x6a4, 0x64 },
11431 { 0x708, 0x70 }, /* manuf_key_info */
11432 { 0x778, 0x70 },
11433 { 0, 0 }
11434 };
11435 __be32 buf[0x350 / 4];
11436 u8 *data = (u8 *)buf;
11437 int i, rc;
11438 u32 magic, crc;
11439
11440 if (BP_NOMCP(bp))
11441 return 0;
11442
11443 rc = bnx2x_nvram_read(bp, 0, data, 4);
11444 if (rc) {
11445 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11446 goto test_nvram_exit;
11447 }
11448
11449 magic = be32_to_cpu(buf[0]);
11450 if (magic != 0x669955aa) {
11451 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11452 rc = -ENODEV;
11453 goto test_nvram_exit;
11454 }
11455
11456 for (i = 0; nvram_tbl[i].size; i++) {
11457
11458 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11459 nvram_tbl[i].size);
11460 if (rc) {
11461 DP(NETIF_MSG_PROBE,
11462 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11463 goto test_nvram_exit;
11464 }
11465
11466 crc = ether_crc_le(nvram_tbl[i].size, data);
11467 if (crc != CRC32_RESIDUAL) {
11468 DP(NETIF_MSG_PROBE,
11469 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11470 rc = -ENODEV;
11471 goto test_nvram_exit;
11472 }
11473 }
11474
11475test_nvram_exit:
11476 return rc;
11477}
11478
/* Interrupt self-test: post a harmless zero-length SET_MAC ramrod on the
 * slow path and wait for its completion interrupt to clear
 * bp->set_mac_pending.
 *
 * Returns 0 when the completion arrives, -ENODEV if the device is down,
 * the post fails, or no completion is seen within 10 x 10ms polls.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* length = 0: exercises the ramrod/interrupt path without actually
	 * changing any MAC filter entries
	 */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* Raise the pending flag before posting so the completion handler
	 * (which decrements it) cannot race ahead of us; wmb orders the
	 * flag update before the ramrod becomes visible.
	 */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* Poll up to ~100ms for the completion to clear the flag */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
11514
/* ethtool self-test entry point.
 *
 * Offline tests (registers, memory, loopback) require reloading the NIC
 * in diagnostic mode and are not available in E1H multi-function mode;
 * online tests (NVRAM, interrupt, link) run whenever the device is up.
 * Each failing test sets its slot in buf[] and raises ETH_TEST_FL_FAILED.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Don't touch the chip while parity-error recovery is in progress */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		/* Reload the NIC in diagnostic mode for the offline tests */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		/* Back to normal operation */
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* Only the port management function (PMF) runs the link test */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
11590
/* Per-queue ethtool statistics descriptors: offset is an index (in u32
 * units, via Q_STATS_OFFSET32) into the per-queue stats struct, size is
 * the counter width in bytes (4 or 8), and string is a printf template
 * where %d receives the queue number.
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%d]: tx_bcast_packets" }
};
11620
/* Global ethtool statistics descriptors: offset indexes (in u32 units,
 * via STATS_OFFSET32) into bp->eth_stats, size is the counter width in
 * bytes (4 or 8), and flags mark whether the counter is per-port,
 * per-function, or valid for both views.
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
11718
/* A statistic is port-only when its flags carry STATS_FLAGS_PORT alone */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
/* Per-function statistics remain visible in E1H multi-function mode */
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* MF stats mode: E1H multi-function and stats debugging not requested */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11724
11725static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11726{
11727 struct bnx2x *bp = netdev_priv(dev);
11728 int i, num_stats;
11729
11730 switch (stringset) {
11731 case ETH_SS_STATS:
11732 if (is_multi(bp)) {
11733 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11734 if (!IS_E1HMF_MODE_STAT(bp))
11735 num_stats += BNX2X_NUM_STATS;
11736 } else {
11737 if (IS_E1HMF_MODE_STAT(bp)) {
11738 num_stats = 0;
11739 for (i = 0; i < BNX2X_NUM_STATS; i++)
11740 if (IS_FUNC_STAT(i))
11741 num_stats++;
11742 } else
11743 num_stats = BNX2X_NUM_STATS;
11744 }
11745 return num_stats;
11746
11747 case ETH_SS_TEST:
11748 return BNX2X_NUM_TESTS;
11749
11750 default:
11751 return -EINVAL;
11752 }
11753}
11754
11755static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11756{
11757 struct bnx2x *bp = netdev_priv(dev);
11758 int i, j, k;
11759
11760 switch (stringset) {
11761 case ETH_SS_STATS:
11762 if (is_multi(bp)) {
11763 k = 0;
11764 for_each_queue(bp, i) {
11765 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11766 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11767 bnx2x_q_stats_arr[j].string, i);
11768 k += BNX2X_NUM_Q_STATS;
11769 }
11770 if (IS_E1HMF_MODE_STAT(bp))
11771 break;
11772 for (j = 0; j < BNX2X_NUM_STATS; j++)
11773 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11774 bnx2x_stats_arr[j].string);
11775 } else {
11776 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11777 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11778 continue;
11779 strcpy(buf + j*ETH_GSTRING_LEN,
11780 bnx2x_stats_arr[i].string);
11781 j++;
11782 }
11783 }
11784 break;
11785
11786 case ETH_SS_TEST:
11787 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11788 break;
11789 }
11790}
11791
11792static void bnx2x_get_ethtool_stats(struct net_device *dev,
11793 struct ethtool_stats *stats, u64 *buf)
11794{
11795 struct bnx2x *bp = netdev_priv(dev);
11796 u32 *hw_stats, *offset;
11797 int i, j, k;
11798
11799 if (is_multi(bp)) {
11800 k = 0;
11801 for_each_queue(bp, i) {
11802 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11803 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11804 if (bnx2x_q_stats_arr[j].size == 0) {
11805 /* skip this counter */
11806 buf[k + j] = 0;
11807 continue;
11808 }
11809 offset = (hw_stats +
11810 bnx2x_q_stats_arr[j].offset);
11811 if (bnx2x_q_stats_arr[j].size == 4) {
11812 /* 4-byte counter */
11813 buf[k + j] = (u64) *offset;
11814 continue;
11815 }
11816 /* 8-byte counter */
11817 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11818 }
11819 k += BNX2X_NUM_Q_STATS;
11820 }
11821 if (IS_E1HMF_MODE_STAT(bp))
11822 return;
11823 hw_stats = (u32 *)&bp->eth_stats;
11824 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11825 if (bnx2x_stats_arr[j].size == 0) {
11826 /* skip this counter */
11827 buf[k + j] = 0;
11828 continue;
11829 }
11830 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11831 if (bnx2x_stats_arr[j].size == 4) {
11832 /* 4-byte counter */
11833 buf[k + j] = (u64) *offset;
11834 continue;
11835 }
11836 /* 8-byte counter */
11837 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11838 }
11839 } else {
11840 hw_stats = (u32 *)&bp->eth_stats;
11841 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11842 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11843 continue;
11844 if (bnx2x_stats_arr[i].size == 0) {
11845 /* skip this counter */
11846 buf[j] = 0;
11847 j++;
11848 continue;
11849 }
11850 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11851 if (bnx2x_stats_arr[i].size == 4) {
11852 /* 4-byte counter */
11853 buf[j] = (u64) *offset;
11854 j++;
11855 continue;
11856 }
11857 /* 8-byte counter */
11858 buf[j] = HILO_U64(*offset, *(offset + 1));
11859 j++;
11860 }
11861 }
11862}
11863
11864static int bnx2x_phys_id(struct net_device *dev, u32 data)
11865{
11866 struct bnx2x *bp = netdev_priv(dev);
11867 int i;
11868
11869 if (!netif_running(dev))
11870 return 0;
11871
11872 if (!bp->port.pmf)
11873 return 0;
11874
11875 if (data == 0)
11876 data = 2;
11877
11878 for (i = 0; i < (data * 2); i++) {
11879 if ((i % 2) == 0)
11880 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11881 SPEED_1000);
11882 else
11883 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11884
11885 msleep_interruptible(500);
11886 if (signal_pending(current))
11887 break;
11888 }
11889
11890 if (bp->link_vars.link_up)
11891 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11892 bp->link_vars.line_speed);
11893
11894 return 0;
11895}
11896
/* ethtool operations supported by the bnx2x driver */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	/* generic ethtool helpers serve TX csum / SG / flags queries */
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
11934
/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

/* Move the device between PCI power states via the PM control register.
 *
 * Supports D0 and D3hot only; returns 0 on success (including the
 * deliberate no-ops below) and -EINVAL for any other requested state.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* Clear the power-state field and acknowledge any
		 * pending PME (the status bit is write-one-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* 3 encodes D3hot in the state field */

		/* Arm PME so Wake-on-LAN can wake us from D3hot */
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
11986
/* Return nonzero when the RCQ consumer index published in the status
 * block differs from our locally-consumed index, i.e. there are new
 * receive completions to process.
 */
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	/* Bump past the page-boundary index so it compares against the
	 * local index, which NEXT_RCQ_IDX advances the same way --
	 * NOTE(review): grounded in NEXT_RCQ_IDX usage elsewhere; confirm.
	 */
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
11998
11999/*
12000 * net_device service functions
12001 */
12002
/* NAPI poll handler for one fastpath: drain TX completions and RX
 * packets until the budget is spent or no work remains, then complete
 * NAPI and re-enable the status-block interrupts.
 *
 * Returns the number of RX packets processed (<= budget).
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		/* TX completions don't count against the NAPI budget */
		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			/* Re-check under the barrier before completing */
			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
12061
12062
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * On entry *tx_bd is the start BD covering the whole linear part;
 * on return it is rewritten to cover only the first hlen bytes, a new
 * data BD holds the remainder (sharing the same DMA mapping), *tx_bd
 * points at the new data BD, and the advanced bd_prod is returned.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD: shrink it to the header bytes and record
	 * the updated BD count
	 */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* Data starts hlen bytes into the same DMA mapping */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
12112
12113static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12114{
12115 if (fix > 0)
12116 csum = (u16) ~csum_fold(csum_sub(csum,
12117 csum_partial(t_header - fix, fix, 0)));
12118
12119 else if (fix < 0)
12120 csum = (u16) ~csum_fold(csum_add(csum,
12121 csum_partial(t_header, -fix, 0)));
12122
12123 return swab16(csum);
12124}
12125
12126static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12127{
12128 u32 rc;
12129
12130 if (skb->ip_summed != CHECKSUM_PARTIAL)
12131 rc = XMIT_PLAIN;
12132
12133 else {
12134 if (skb->protocol == htons(ETH_P_IPV6)) {
12135 rc = XMIT_CSUM_V6;
12136 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12137 rc |= XMIT_CSUM_TCP;
12138
12139 } else {
12140 rc = XMIT_CSUM_V4;
12141 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12142 rc |= XMIT_CSUM_TCP;
12143 }
12144 }
12145
12146 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12147 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12148
12149 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12150 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12151
12152 return rc;
12153}
12154
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special: the window
			 * starts at the linear data, then spans the first
			 * wnd_size - 1 fragments.
			 */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				/* Any window holding less than one MSS would
				 * violate the firmware restriction.
				 */
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows (sliding one fragment at a time) */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
12235
12236/* called with netif_tx_lock
12237 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12238 * netif_wake_queue()
12239 */
12240static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12241{
12242 struct bnx2x *bp = netdev_priv(dev);
12243 struct bnx2x_fastpath *fp;
12244 struct netdev_queue *txq;
12245 struct sw_tx_bd *tx_buf;
12246 struct eth_tx_start_bd *tx_start_bd;
12247 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12248 struct eth_tx_parse_bd *pbd = NULL;
12249 u16 pkt_prod, bd_prod;
12250 int nbd, fp_index;
12251 dma_addr_t mapping;
12252 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12253 int i;
12254 u8 hlen = 0;
12255 __le16 pkt_size = 0;
12256 struct ethhdr *eth;
12257 u8 mac_type = UNICAST_ADDRESS;
12258
12259#ifdef BNX2X_STOP_ON_ERROR
12260 if (unlikely(bp->panic))
12261 return NETDEV_TX_BUSY;
12262#endif
12263
12264 fp_index = skb_get_queue_mapping(skb);
12265 txq = netdev_get_tx_queue(dev, fp_index);
12266
12267 fp = &bp->fp[fp_index];
12268
12269 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12270 fp->eth_q_stats.driver_xoff++;
12271 netif_tx_stop_queue(txq);
12272 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12273 return NETDEV_TX_BUSY;
12274 }
12275
12276 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12277 " gso type %x xmit_type %x\n",
12278 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12279 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12280
12281 eth = (struct ethhdr *)skb->data;
12282
12283 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12284 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12285 if (is_broadcast_ether_addr(eth->h_dest))
12286 mac_type = BROADCAST_ADDRESS;
12287 else
12288 mac_type = MULTICAST_ADDRESS;
12289 }
12290
12291#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12292 /* First, check if we need to linearize the skb (due to FW
12293 restrictions). No need to check fragmentation if page size > 8K
12294 (there will be no violation to FW restrictions) */
12295 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12296 /* Statistics of linearization */
12297 bp->lin_cnt++;
12298 if (skb_linearize(skb) != 0) {
12299 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12300 "silently dropping this SKB\n");
12301 dev_kfree_skb_any(skb);
12302 return NETDEV_TX_OK;
12303 }
12304 }
12305#endif
12306
12307 /*
12308 Please read carefully. First we use one BD which we mark as start,
12309 then we have a parsing info BD (used for TSO or xsum),
12310 and only then we have the rest of the TSO BDs.
12311 (don't forget to mark the last one as last,
12312 and to unmap only AFTER you write to the BD ...)
12313 And above all, all pdb sizes are in words - NOT DWORDS!
12314 */
12315
12316 pkt_prod = fp->tx_pkt_prod++;
12317 bd_prod = TX_BD(fp->tx_bd_prod);
12318
12319 /* get a tx_buf and first BD */
12320 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12321 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12322
12323 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12324 tx_start_bd->general_data = (mac_type <<
12325 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12326 /* header nbd */
12327 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12328
12329 /* remember the first BD of the packet */
12330 tx_buf->first_bd = fp->tx_bd_prod;
12331 tx_buf->skb = skb;
12332 tx_buf->flags = 0;
12333
12334 DP(NETIF_MSG_TX_QUEUED,
12335 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12336 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12337
12338#ifdef BCM_VLAN
12339 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12340 (bp->flags & HW_VLAN_TX_FLAG)) {
12341 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12342 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12343 } else
12344#endif
12345 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12346
12347 /* turn on parsing and get a BD */
12348 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12349 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12350
12351 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12352
12353 if (xmit_type & XMIT_CSUM) {
12354 hlen = (skb_network_header(skb) - skb->data) / 2;
12355
12356 /* for now NS flag is not used in Linux */
12357 pbd->global_data =
12358 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12359 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12360
12361 pbd->ip_hlen = (skb_transport_header(skb) -
12362 skb_network_header(skb)) / 2;
12363
12364 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12365
12366 pbd->total_hlen = cpu_to_le16(hlen);
12367 hlen = hlen*2;
12368
12369 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12370
12371 if (xmit_type & XMIT_CSUM_V4)
12372 tx_start_bd->bd_flags.as_bitfield |=
12373 ETH_TX_BD_FLAGS_IP_CSUM;
12374 else
12375 tx_start_bd->bd_flags.as_bitfield |=
12376 ETH_TX_BD_FLAGS_IPV6;
12377
12378 if (xmit_type & XMIT_CSUM_TCP) {
12379 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12380
12381 } else {
12382 s8 fix = SKB_CS_OFF(skb); /* signed! */
12383
12384 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12385
12386 DP(NETIF_MSG_TX_QUEUED,
12387 "hlen %d fix %d csum before fix %x\n",
12388 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12389
12390 /* HW bug: fixup the CSUM */
12391 pbd->tcp_pseudo_csum =
12392 bnx2x_csum_fix(skb_transport_header(skb),
12393 SKB_CS(skb), fix);
12394
12395 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12396 pbd->tcp_pseudo_csum);
12397 }
12398 }
12399
12400 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12401 skb_headlen(skb), DMA_TO_DEVICE);
12402
12403 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12404 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12405 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12406 tx_start_bd->nbd = cpu_to_le16(nbd);
12407 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12408 pkt_size = tx_start_bd->nbytes;
12409
12410 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12411 " nbytes %d flags %x vlan %x\n",
12412 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12413 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12414 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12415
12416 if (xmit_type & XMIT_GSO) {
12417
12418 DP(NETIF_MSG_TX_QUEUED,
12419 "TSO packet len %d hlen %d total len %d tso size %d\n",
12420 skb->len, hlen, skb_headlen(skb),
12421 skb_shinfo(skb)->gso_size);
12422
12423 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12424
12425 if (unlikely(skb_headlen(skb) > hlen))
12426 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12427 hlen, bd_prod, ++nbd);
12428
12429 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12430 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12431 pbd->tcp_flags = pbd_tcp_flags(skb);
12432
12433 if (xmit_type & XMIT_GSO_V4) {
12434 pbd->ip_id = swab16(ip_hdr(skb)->id);
12435 pbd->tcp_pseudo_csum =
12436 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12437 ip_hdr(skb)->daddr,
12438 0, IPPROTO_TCP, 0));
12439
12440 } else
12441 pbd->tcp_pseudo_csum =
12442 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12443 &ipv6_hdr(skb)->daddr,
12444 0, IPPROTO_TCP, 0));
12445
12446 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12447 }
12448 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12449
12450 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12451 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12452
12453 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12454 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12455 if (total_pkt_bd == NULL)
12456 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12457
12458 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12459 frag->page_offset,
12460 frag->size, DMA_TO_DEVICE);
12461
12462 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12463 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12464 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12465 le16_add_cpu(&pkt_size, frag->size);
12466
12467 DP(NETIF_MSG_TX_QUEUED,
12468 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12469 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12470 le16_to_cpu(tx_data_bd->nbytes));
12471 }
12472
12473 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12474
12475 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12476
12477 /* now send a tx doorbell, counting the next BD
12478 * if the packet contains or ends with it
12479 */
12480 if (TX_BD_POFF(bd_prod) < nbd)
12481 nbd++;
12482
12483 if (total_pkt_bd != NULL)
12484 total_pkt_bd->total_pkt_bytes = pkt_size;
12485
12486 if (pbd)
12487 DP(NETIF_MSG_TX_QUEUED,
12488 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12489 " tcp_flags %x xsum %x seq %u hlen %u\n",
12490 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12491 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12492 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12493
12494 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12495
12496 /*
12497 * Make sure that the BD data is updated before updating the producer
12498 * since FW might read the BD right after the producer is updated.
12499 * This is only applicable for weak-ordered memory model archs such
12500 * as IA-64. The following barrier is also mandatory since FW will
12501 * assumes packets must have BDs.
12502 */
12503 wmb();
12504
12505 fp->tx_db.data.prod += nbd;
12506 barrier();
12507 DOORBELL(bp, fp->index, fp->tx_db.raw);
12508
12509 mmiowb();
12510
12511 fp->tx_bd_prod += nbd;
12512
12513 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12514 netif_tx_stop_queue(txq);
12515
12516 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12517 * ordering of set_bit() in netif_tx_stop_queue() and read of
12518 * fp->bd_tx_cons */
12519 smp_mb();
12520
12521 fp->eth_q_stats.driver_xoff++;
12522 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12523 netif_tx_wake_queue(txq);
12524 }
12525 fp->tx_pkt++;
12526
12527 return NETDEV_TX_OK;
12528}
12529
12530/* called with rtnl_lock */ 6793/* called with rtnl_lock */
12531static int bnx2x_open(struct net_device *dev) 6794static int bnx2x_open(struct net_device *dev)
12532{ 6795{
@@ -12586,7 +6849,7 @@ static int bnx2x_close(struct net_device *dev)
12586} 6849}
12587 6850
12588/* called with netif_tx_lock from dev_mcast.c */ 6851/* called with netif_tx_lock from dev_mcast.c */
12589static void bnx2x_set_rx_mode(struct net_device *dev) 6852void bnx2x_set_rx_mode(struct net_device *dev)
12590{ 6853{
12591 struct bnx2x *bp = netdev_priv(dev); 6854 struct bnx2x *bp = netdev_priv(dev);
12592 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 6855 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
@@ -12706,25 +6969,6 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
12706 bnx2x_set_storm_rx_mode(bp); 6969 bnx2x_set_storm_rx_mode(bp);
12707} 6970}
12708 6971
12709/* called with rtnl_lock */
12710static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12711{
12712 struct sockaddr *addr = p;
12713 struct bnx2x *bp = netdev_priv(dev);
12714
12715 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12716 return -EINVAL;
12717
12718 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12719 if (netif_running(dev)) {
12720 if (CHIP_IS_E1(bp))
12721 bnx2x_set_eth_mac_addr_e1(bp, 1);
12722 else
12723 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12724 }
12725
12726 return 0;
12727}
12728 6972
12729/* called with rtnl_lock */ 6973/* called with rtnl_lock */
12730static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 6974static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
@@ -12800,71 +7044,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12800 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 7044 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12801} 7045}
12802 7046
12803/* called with rtnl_lock */
12804static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12805{
12806 struct bnx2x *bp = netdev_priv(dev);
12807 int rc = 0;
12808
12809 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12810 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12811 return -EAGAIN;
12812 }
12813
12814 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12815 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12816 return -EINVAL;
12817
12818 /* This does not race with packet allocation
12819 * because the actual alloc size is
12820 * only updated as part of load
12821 */
12822 dev->mtu = new_mtu;
12823
12824 if (netif_running(dev)) {
12825 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12826 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12827 }
12828
12829 return rc;
12830}
12831
12832static void bnx2x_tx_timeout(struct net_device *dev)
12833{
12834 struct bnx2x *bp = netdev_priv(dev);
12835
12836#ifdef BNX2X_STOP_ON_ERROR
12837 if (!bp->panic)
12838 bnx2x_panic();
12839#endif
12840 /* This allows the netif to be shutdown gracefully before resetting */
12841 schedule_delayed_work(&bp->reset_task, 0);
12842}
12843
12844#ifdef BCM_VLAN
12845/* called with rtnl_lock */
12846static void bnx2x_vlan_rx_register(struct net_device *dev,
12847 struct vlan_group *vlgrp)
12848{
12849 struct bnx2x *bp = netdev_priv(dev);
12850
12851 bp->vlgrp = vlgrp;
12852
12853 /* Set flags according to the required capabilities */
12854 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12855
12856 if (dev->features & NETIF_F_HW_VLAN_TX)
12857 bp->flags |= HW_VLAN_TX_FLAG;
12858
12859 if (dev->features & NETIF_F_HW_VLAN_RX)
12860 bp->flags |= HW_VLAN_RX_FLAG;
12861
12862 if (netif_running(dev))
12863 bnx2x_set_client_config(bp);
12864}
12865
12866#endif
12867
12868#ifdef CONFIG_NET_POLL_CONTROLLER 7047#ifdef CONFIG_NET_POLL_CONTROLLER
12869static void poll_bnx2x(struct net_device *dev) 7048static void poll_bnx2x(struct net_device *dev)
12870{ 7049{
@@ -13013,7 +7192,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
13013 dev->watchdog_timeo = TX_TIMEOUT; 7192 dev->watchdog_timeo = TX_TIMEOUT;
13014 7193
13015 dev->netdev_ops = &bnx2x_netdev_ops; 7194 dev->netdev_ops = &bnx2x_netdev_ops;
13016 dev->ethtool_ops = &bnx2x_ethtool_ops; 7195 bnx2x_set_ethtool_ops(dev);
13017 dev->features |= NETIF_F_SG; 7196 dev->features |= NETIF_F_SG;
13018 dev->features |= NETIF_F_HW_CSUM; 7197 dev->features |= NETIF_F_HW_CSUM;
13019 if (bp->flags & USING_DAC_FLAG) 7198 if (bp->flags & USING_DAC_FLAG)
@@ -13366,73 +7545,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13366 pci_set_drvdata(pdev, NULL); 7545 pci_set_drvdata(pdev, NULL);
13367} 7546}
13368 7547
13369static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13370{
13371 struct net_device *dev = pci_get_drvdata(pdev);
13372 struct bnx2x *bp;
13373
13374 if (!dev) {
13375 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13376 return -ENODEV;
13377 }
13378 bp = netdev_priv(dev);
13379
13380 rtnl_lock();
13381
13382 pci_save_state(pdev);
13383
13384 if (!netif_running(dev)) {
13385 rtnl_unlock();
13386 return 0;
13387 }
13388
13389 netif_device_detach(dev);
13390
13391 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13392
13393 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13394
13395 rtnl_unlock();
13396
13397 return 0;
13398}
13399
13400static int bnx2x_resume(struct pci_dev *pdev)
13401{
13402 struct net_device *dev = pci_get_drvdata(pdev);
13403 struct bnx2x *bp;
13404 int rc;
13405
13406 if (!dev) {
13407 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13408 return -ENODEV;
13409 }
13410 bp = netdev_priv(dev);
13411
13412 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13413 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13414 return -EAGAIN;
13415 }
13416
13417 rtnl_lock();
13418
13419 pci_restore_state(pdev);
13420
13421 if (!netif_running(dev)) {
13422 rtnl_unlock();
13423 return 0;
13424 }
13425
13426 bnx2x_set_power_state(bp, PCI_D0);
13427 netif_device_attach(dev);
13428
13429 rc = bnx2x_nic_load(bp, LOAD_OPEN);
13430
13431 rtnl_unlock();
13432
13433 return rc;
13434}
13435
13436static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 7548static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13437{ 7549{
13438 int i; 7550 int i;
@@ -13754,7 +7866,7 @@ static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13754/* 7866/*
13755 * for commands that have no data 7867 * for commands that have no data
13756 */ 7868 */
13757static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) 7869int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13758{ 7870{
13759 struct cnic_ctl_info ctl = {0}; 7871 struct cnic_ctl_info ctl = {0};
13760 7872
@@ -13822,7 +7934,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13822 return rc; 7934 return rc;
13823} 7935}
13824 7936
13825static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) 7937void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13826{ 7938{
13827 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 7939 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13828 7940
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index a1f3bf0cd630..a1f3bf0cd630 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
new file mode 100644
index 000000000000..c74724461020
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1411 @@
1/* bnx2x_stats.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17 #include "bnx2x_cmn.h"
18 #include "bnx2x_stats.h"
19
20/* Statistics */
21
22/****************************************************************************
23* Macros
24****************************************************************************/
25
26/* sum[hi:lo] += add[hi:lo] */
27#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
28	do { \
29		s_lo += a_lo; \
30		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
31	} while (0)
32
33/* difference = minuend - subtrahend */
34#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
35	do { \
36		if (m_lo < s_lo) { \
37			/* underflow */ \
38			d_hi = m_hi - s_hi; \
39			if (d_hi > 0) { \
40				/* we can 'loan' 1 */ \
41				d_hi--; \
42				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
43			} else { \
44				/* m_hi <= s_hi */ \
45				d_hi = 0; \
46				d_lo = 0; \
47			} \
48		} else { \
49			/* m_lo >= s_lo */ \
50			if (m_hi < s_hi) { \
51				d_hi = 0; \
52				d_lo = 0; \
53			} else { \
54				/* m_hi >= s_hi */ \
55				d_hi = m_hi - s_hi; \
56				d_lo = m_lo - s_lo; \
57			} \
58		} \
59	} while (0)
60
/* Fold a new 64-bit MAC HW counter 's' into the driver-maintained stat 't':
 * mac_stx[0] keeps the last raw HW snapshot, mac_stx[1] accumulates the
 * running total by adding the delta since the previous snapshot.
 */
61#define UPDATE_STAT64(s, t) \
62	do { \
63		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
64			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
65		pstats->mac_stx[0].t##_hi = new->s##_hi; \
66		pstats->mac_stx[0].t##_lo = new->s##_lo; \
67		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
68		       pstats->mac_stx[1].t##_lo, diff.lo); \
69	} while (0)
70
/* Same delta-accumulate idea for NIG counters: add (new - old) of 's'
 * into the eth-stats field 't'.
 */
71#define UPDATE_STAT64_NIG(s, t) \
72	do { \
73		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
74			diff.lo, new->s##_lo, old->s##_lo); \
75		ADD_64(estats->t##_hi, diff.hi, \
76		       estats->t##_lo, diff.lo); \
77	} while (0)
78
79/* sum[hi:lo] += add */
80#define ADD_EXTEND_64(s_hi, s_lo, a) \
81	do { \
82		s_lo += a; \
83		s_hi += (s_lo < a) ? 1 : 0; \
84	} while (0)
85
/* Extend a 32-bit HW counter 's' into the 64-bit accumulator mac_stx[1].s */
86#define UPDATE_EXTEND_STAT(s) \
87	do { \
88		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
89			      pstats->mac_stx[1].s##_lo, \
90			      new->s); \
91	} while (0)
92
/* TSTORM per-client counter: accumulate the delta of le32 counter 's'
 * into 64-bit queue stat 't' and remember the new raw value.
 */
93#define UPDATE_EXTEND_TSTAT(s, t) \
94	do { \
95		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
96		old_tclient->s = tclient->s; \
97		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
98	} while (0)
99
/* USTORM variant of UPDATE_EXTEND_TSTAT */
100#define UPDATE_EXTEND_USTAT(s, t) \
101	do { \
102		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
103		old_uclient->s = uclient->s; \
104		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
105	} while (0)
106
/* XSTORM variant of UPDATE_EXTEND_TSTAT */
107#define UPDATE_EXTEND_XSTAT(s, t) \
108	do { \
109		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
110		old_xclient->s = xclient->s; \
111		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
112	} while (0)
113
114/* minuend -= subtrahend */
115#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
116	do { \
117		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
118	} while (0)
119
120/* minuend[hi:lo] -= subtrahend */
121#define SUB_EXTEND_64(m_hi, m_lo, s) \
122	do { \
123		SUB_64(m_hi, 0, m_lo, s); \
124	} while (0)
125
/* Subtract the delta of USTORM le32 counter 's' from 64-bit queue stat 't' */
126#define SUB_EXTEND_USTAT(s, t) \
127	do { \
128		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
129		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
130	} while (0)
131
132/*
133 * General service functions
134 */
135
/* Read a 64-bit statistic stored as two consecutive u32s (hi word first).
 * On 64-bit builds the full value is returned via HILO_U64; on 32-bit
 * only the low word fits in 'long', so the high word is dropped.
 */
136static inline long bnx2x_hilo(u32 *hiref)
137{
138	u32 lo = *(hiref + 1);
139#if (BITS_PER_LONG == 64)
140	u32 hi = *hiref;
141
142	return HILO_U64(hi, lo);
143#else
144	return lo;
145#endif
146}
147
148/*
149 * Init service functions
150 */
151
152
/* Post a STAT_QUERY ramrod asking the storm FW to collect statistics for
 * every active client.  A new query is only issued once the previous one
 * has completed (stats_pending cleared elsewhere), so at most one stats
 * ramrod is in flight at a time.
 */
153static void bnx2x_storm_stats_post(struct bnx2x *bp)
154{
155	if (!bp->stats_pending) {
156		struct eth_query_ramrod_data ramrod_data = {0};
157		int i, rc;
158
159		spin_lock_bh(&bp->stats_lock);
160
161		ramrod_data.drv_counter = bp->stats_counter++;
162		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		/* request stats for the client id of every rx/tx queue */
163		for_each_queue(bp, i)
164			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
165
166		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
167				   ((u32 *)&ramrod_data)[1],
168				   ((u32 *)&ramrod_data)[0], 0);
169		if (rc == 0) {
170			/* stats ramrod has its own slot on the spq,
			 * so give the credit back */
171			bp->spq_left++;
172			bp->stats_pending = 1;
173		}
174
175		spin_unlock_bh(&bp->stats_lock);
176	}
177}
178
/* Kick off the HW statistics DMAE transfers prepared earlier.
 * If a chain of DMAE commands was built (executer_idx != 0), program the
 * "loader" command that copies the chain from host memory into the DMAE
 * command memory and chains the commands via their GO registers; otherwise
 * fall back to posting the single function-stats command directly.
 * stats_comp is pre-set to DMAE_COMP_VAL so bnx2x_stats_comp() will wait
 * until the last command in the chain clears/rewrites it.
 */
179static void bnx2x_hw_stats_post(struct bnx2x *bp)
180{
181	struct dmae_command *dmae = &bp->stats_dmae;
182	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
183
184	*stats_comp = DMAE_COMP_VAL;
	/* emulation/FPGA chips have no real DMAE - nothing to post */
185	if (CHIP_REV_IS_SLOW(bp))
186		return;
187
188	/* loader */
189	if (bp->executer_idx) {
190		int loader_idx = PMF_DMAE_C(bp);
191
192		memset(dmae, 0, sizeof(struct dmae_command));
193
194		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
195				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
196				DMAE_CMD_DST_RESET |
197#ifdef __BIG_ENDIAN
198				DMAE_CMD_ENDIANITY_B_DW_SWAP |
199#else
200				DMAE_CMD_ENDIANITY_DW_SWAP |
201#endif
202				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
203					       DMAE_CMD_PORT_0) |
204				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* copy the prepared command array into DMAE command memory,
		 * right after the loader's own slot */
205		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
206		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
207		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
208				     sizeof(struct dmae_command) *
209				     (loader_idx + 1)) >> 2;
210		dmae->dst_addr_hi = 0;
211		dmae->len = sizeof(struct dmae_command) >> 2;
212		if (CHIP_IS_E1(bp))
213			dmae->len--;
		/* completion fires the GO register of the next slot,
		 * starting the copied chain */
214		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
215		dmae->comp_addr_hi = 0;
216		dmae->comp_val = 1;
217
218		*stats_comp = 0;
219		bnx2x_post_dmae(bp, dmae, loader_idx);
220
221	} else if (bp->func_stx) {
222		*stats_comp = 0;
223		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
224	}
225}
226
/* Poll the stats DMAE completion word for up to ~10ms (10 x msleep(1)).
 * Logs an error on timeout but always returns 1 - callers treat stats
 * completion as best-effort and do not check for failure.
 */
227static int bnx2x_stats_comp(struct bnx2x *bp)
228{
229	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
230	int cnt = 10;
231
232	might_sleep();
233	while (*stats_comp != DMAE_COMP_VAL) {
234		if (!cnt) {
235			BNX2X_ERR("timeout waiting for stats finished\n");
236			break;
237		}
238		cnt--;
239		msleep(1);
240	}
241	return 1;
242}
243
244/*
245 * Statistics service functions
246 */
247
/* On becoming the PMF (port management function) in multi-function mode,
 * read the current port statistics image from the shmem port_stx area into
 * host memory so accumulation continues from the previous PMF's totals.
 * Builds a two-command DMAE chain (the area is larger than one DMAE read),
 * posts it and synchronously waits for completion.
 */
248static void bnx2x_stats_pmf_update(struct bnx2x *bp)
249{
250	struct dmae_command *dmae;
251	u32 opcode;
252	int loader_idx = PMF_DMAE_C(bp);
253	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
254
255	/* sanity */
256	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
257		BNX2X_ERR("BUG!\n");
258		return;
259	}
260
261	bp->executer_idx = 0;
262
263	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
264		  DMAE_CMD_C_ENABLE |
265		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
266#ifdef __BIG_ENDIAN
267		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
268#else
269		  DMAE_CMD_ENDIANITY_DW_SWAP |
270#endif
271		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
272		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
273
	/* first chunk: up to the max length of a single DMAE read */
274	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
275	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
276	dmae->src_addr_lo = bp->port.port_stx >> 2;
277	dmae->src_addr_hi = 0;
278	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
279	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
280	dmae->len = DMAE_LEN32_RD_MAX;
281	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
282	dmae->comp_addr_hi = 0;
283	dmae->comp_val = 1;
284
	/* second chunk: the remainder; its completion writes stats_comp */
285	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
286	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
287	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
288	dmae->src_addr_hi = 0;
289	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
290				   DMAE_LEN32_RD_MAX * 4);
291	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
292				   DMAE_LEN32_RD_MAX * 4);
293	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
294	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
295	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
296	dmae->comp_val = DMAE_COMP_VAL;
297
298	*stats_comp = 0;
299	bnx2x_hw_stats_post(bp);
300	bnx2x_stats_comp(bp);
301}
302
/* Build (but do not start) the per-port statistics DMAE chain for the PMF:
 *  - write host port/function stats images back to shmem (for the MCP),
 *  - read the active MAC's counters (BMAC or EMAC, depending on the
 *    current link mac_type) into host memory,
 *  - read the NIG discard and egress counters.
 * All commands except the last complete into GO registers so they chain;
 * the final command completes into stats_comp.  bnx2x_hw_stats_post()
 * later loads and fires this chain.
 */
303static void bnx2x_port_stats_init(struct bnx2x *bp)
304{
305	struct dmae_command *dmae;
306	int port = BP_PORT(bp);
307	int vn = BP_E1HVN(bp);
308	u32 opcode;
309	int loader_idx = PMF_DMAE_C(bp);
310	u32 mac_addr;
311	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
312
313	/* sanity */
314	if (!bp->link_vars.link_up || !bp->port.pmf) {
315		BNX2X_ERR("BUG!\n");
316		return;
317	}
318
319	bp->executer_idx = 0;
320
321	/* MCP */
322	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
323		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
324		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
325#ifdef __BIG_ENDIAN
326		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
327#else
328		  DMAE_CMD_ENDIANITY_DW_SWAP |
329#endif
330		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
331		  (vn << DMAE_CMD_E1HVN_SHIFT));
332
333	if (bp->port.port_stx) {
334
		/* host port stats image -> shmem port_stx */
335		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
336		dmae->opcode = opcode;
337		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
338		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
339		dmae->dst_addr_lo = bp->port.port_stx >> 2;
340		dmae->dst_addr_hi = 0;
341		dmae->len = sizeof(struct host_port_stats) >> 2;
342		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
343		dmae->comp_addr_hi = 0;
344		dmae->comp_val = 1;
345	}
346
347	if (bp->func_stx) {
348
		/* host function stats image -> shmem func_stx */
349		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
350		dmae->opcode = opcode;
351		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
352		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
353		dmae->dst_addr_lo = bp->func_stx >> 2;
354		dmae->dst_addr_hi = 0;
355		dmae->len = sizeof(struct host_func_stats) >> 2;
356		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
357		dmae->comp_addr_hi = 0;
358		dmae->comp_val = 1;
359	}
360
361	/* MAC */
362	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
363		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
364		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
365#ifdef __BIG_ENDIAN
366		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
367#else
368		  DMAE_CMD_ENDIANITY_DW_SWAP |
369#endif
370		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
371		  (vn << DMAE_CMD_E1HVN_SHIFT));
372
373	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
374
375		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
376				   NIG_REG_INGRESS_BMAC0_MEM);
377
378		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
379		   BIGMAC_REGISTER_TX_STAT_GTBYT */
380		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
381		dmae->opcode = opcode;
382		dmae->src_addr_lo = (mac_addr +
383				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
384		dmae->src_addr_hi = 0;
385		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
386		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
387		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
388			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
389		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
390		dmae->comp_addr_hi = 0;
391		dmae->comp_val = 1;
392
393		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
394		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
395		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
396		dmae->opcode = opcode;
397		dmae->src_addr_lo = (mac_addr +
398				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
399		dmae->src_addr_hi = 0;
400		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
401				offsetof(struct bmac_stats, rx_stat_gr64_lo));
402		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
403				offsetof(struct bmac_stats, rx_stat_gr64_lo));
404		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
405			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
406		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
407		dmae->comp_addr_hi = 0;
408		dmae->comp_val = 1;
409
410	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
411
412		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
413
414		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
415		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
416		dmae->opcode = opcode;
417		dmae->src_addr_lo = (mac_addr +
418				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
419		dmae->src_addr_hi = 0;
420		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
421		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
422		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
423		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
424		dmae->comp_addr_hi = 0;
425		dmae->comp_val = 1;
426
427		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
428		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
429		dmae->opcode = opcode;
430		dmae->src_addr_lo = (mac_addr +
431				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
432		dmae->src_addr_hi = 0;
433		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
434		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
435		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
436		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
437		dmae->len = 1;
438		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
439		dmae->comp_addr_hi = 0;
440		dmae->comp_val = 1;
441
442		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
443		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
444		dmae->opcode = opcode;
445		dmae->src_addr_lo = (mac_addr +
446				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
447		dmae->src_addr_hi = 0;
448		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
449			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
450		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
451			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
452		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
453		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
454		dmae->comp_addr_hi = 0;
455		dmae->comp_val = 1;
456	}
457
458	/* NIG */
	/* BRB discard counters (all but the last 4 dwords of nig_stats) */
459	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
460	dmae->opcode = opcode;
461	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
462				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
463	dmae->src_addr_hi = 0;
464	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
465	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
466	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
467	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
468	dmae->comp_addr_hi = 0;
469	dmae->comp_val = 1;
470
471	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
472	dmae->opcode = opcode;
473	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
474				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
475	dmae->src_addr_hi = 0;
476	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
477			offsetof(struct nig_stats, egress_mac_pkt0_lo));
478	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
479			offsetof(struct nig_stats, egress_mac_pkt0_lo));
480	dmae->len = (2*sizeof(u32)) >> 2;
481	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
482	dmae->comp_addr_hi = 0;
483	dmae->comp_val = 1;
484
	/* last command in the chain: completes into stats_comp (PCI) */
485	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
486	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
487			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
488			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
489#ifdef __BIG_ENDIAN
490			DMAE_CMD_ENDIANITY_B_DW_SWAP |
491#else
492			DMAE_CMD_ENDIANITY_DW_SWAP |
493#endif
494			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
495			(vn << DMAE_CMD_E1HVN_SHIFT));
496	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
497				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
498	dmae->src_addr_hi = 0;
499	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
500			offsetof(struct nig_stats, egress_mac_pkt1_lo));
501	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
502			offsetof(struct nig_stats, egress_mac_pkt1_lo));
503	dmae->len = (2*sizeof(u32)) >> 2;
504	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
505	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
506	dmae->comp_val = DMAE_COMP_VAL;
507
508	*stats_comp = 0;
509}
510
/* Prepare the single DMAE command that writes the host function-stats
 * image back to the shmem func_stx area (used by non-PMF functions).
 * The command is stored in bp->stats_dmae; bnx2x_hw_stats_post() will
 * post it (executer_idx stays 0, so no loader chain is used).
 */
511static void bnx2x_func_stats_init(struct bnx2x *bp)
512{
513	struct dmae_command *dmae = &bp->stats_dmae;
514	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
515
516	/* sanity */
517	if (!bp->func_stx) {
518		BNX2X_ERR("BUG!\n");
519		return;
520	}
521
522	bp->executer_idx = 0;
523	memset(dmae, 0, sizeof(struct dmae_command));
524
525	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
526			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
527			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
528#ifdef __BIG_ENDIAN
529			DMAE_CMD_ENDIANITY_B_DW_SWAP |
530#else
531			DMAE_CMD_ENDIANITY_DW_SWAP |
532#endif
533			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
534			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
535	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
536	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
537	dmae->dst_addr_lo = bp->func_stx >> 2;
538	dmae->dst_addr_hi = 0;
539	dmae->len = sizeof(struct host_func_stats) >> 2;
540	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
541	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
542	dmae->comp_val = DMAE_COMP_VAL;
543
544	*stats_comp = 0;
545}
546
/* Start a statistics cycle: prepare the appropriate DMAE program
 * (full port chain for the PMF, single func-stats command otherwise),
 * post it, and issue the storm FW stats query.
 */
547static void bnx2x_stats_start(struct bnx2x *bp)
548{
549	if (bp->port.pmf)
550		bnx2x_port_stats_init(bp);
551
552	else if (bp->func_stx)
553		bnx2x_func_stats_init(bp);
554
555	bnx2x_hw_stats_post(bp);
556	bnx2x_storm_stats_post(bp);
557}
558
/* PMF handover: wait for any in-flight stats DMAE, pull the previous
 * PMF's port stats from shmem, then start a normal stats cycle.
 */
559static void bnx2x_stats_pmf_start(struct bnx2x *bp)
560{
561	bnx2x_stats_comp(bp);
562	bnx2x_stats_pmf_update(bp);
563	bnx2x_stats_start(bp);
564}
565
/* Restart the stats cycle: wait for any in-flight DMAE completion,
 * then kick off a fresh cycle.
 */
566static void bnx2x_stats_restart(struct bnx2x *bp)
567{
568	bnx2x_stats_comp(bp);
569	bnx2x_stats_start(bp);
570}
571
/* Fold the freshly DMAE'd BMAC counter snapshot into the accumulated
 * port statistics (mac_stx[0] = last snapshot, mac_stx[1] = totals via
 * UPDATE_STAT64), then mirror the pause-frame totals into eth_stats.
 */
572static void bnx2x_bmac_stats_update(struct bnx2x *bp)
573{
574	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
575	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
576	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* 64-bit scratch for the per-counter deltas used by UPDATE_STAT64 */
577	struct {
578		u32 lo;
579		u32 hi;
580	} diff;
581
582	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
583	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
584	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
585	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
586	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
587	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
588	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
589	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
590	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
591	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
592	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
593	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
594	UPDATE_STAT64(tx_stat_gt127,
595				tx_stat_etherstatspkts65octetsto127octets);
596	UPDATE_STAT64(tx_stat_gt255,
597				tx_stat_etherstatspkts128octetsto255octets);
598	UPDATE_STAT64(tx_stat_gt511,
599				tx_stat_etherstatspkts256octetsto511octets);
600	UPDATE_STAT64(tx_stat_gt1023,
601				tx_stat_etherstatspkts512octetsto1023octets);
602	UPDATE_STAT64(tx_stat_gt1518,
603				tx_stat_etherstatspkts1024octetsto1522octets);
604	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
605	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
606	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
607	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
608	UPDATE_STAT64(tx_stat_gterr,
609				tx_stat_dot3statsinternalmactransmiterrors);
610	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
611
	/* expose accumulated pause counters through the generic eth stats */
612	estats->pause_frames_received_hi =
613				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
614	estats->pause_frames_received_lo =
615				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
616
617	estats->pause_frames_sent_hi =
618				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
619	estats->pause_frames_sent_lo =
620				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
621}
622
/* Fold the EMAC (1G MAC) hardware counters, DMAE'd into 'new', into the
 * 64-bit accumulators of the host_port_stats block.
 *
 * Each UPDATE_EXTEND_STAT(name) extends the 32-bit hardware counter
 * new->name into the hi/lo u32 pair pstats->mac_stx[1].name_{hi,lo};
 * 'new' and 'pstats' are implicit operands of the macro.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* The EMAC counts XON and XOFF pause frames separately; combine
	 * both into the single 64-bit pause_frames_received counter.
	 * (mac_stx[1] is presumably the "current" of the two mac_stx
	 * snapshots — NOTE(review): confirm index semantics.)
	 */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* Same for transmitted pause frames: XON sent + XOFF sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
679
/* Update the hardware (MAC + NIG) port statistics after a DMAE stats
 * transfer has completed.
 *
 * Dispatches to the BMAC or EMAC updater depending on the active MAC,
 * then folds in the NIG counters (BRB discards/truncates and the two
 * egress packet-size counters) and publishes the result into
 * bp->eth_stats.
 *
 * Returns 0 on success, -1 if no MAC is active (should not happen once
 * stats were updated by DMAE).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch hi/lo pair — presumably referenced by name inside the
	 * UPDATE_STAT64_NIG() macro below (TODO confirm against macro
	 * definition) */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* NIG counters are 32-bit and free-running; extend the delta
	 * since the previous snapshot into the 64-bit accumulators */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember this snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* Bulk-copy the whole current MAC stat block into eth_stats.
	 * This relies on estats' fields from rx_stat_ifhcinbadoctets_hi
	 * onward having exactly the layout of struct mac_stx. */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* mark the port-stats block consistent for the management FW */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
732
/* Aggregate the per-client statistics written by the firmware "storm"
 * processors (tstorm/ustorm/xstorm) into per-queue, per-function and
 * device-wide counters.
 *
 * Returns 0 on success, or a negative value if any storm has not yet
 * published stats for the most recent stats ramrod (the caller retries
 * on the next tick).
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* Re-start function totals from the saved base (skipping the two
	 * trailing start/end marker u32s), then rebuild the aggregated
	 * error/discard counters from scratch below. */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid?  A storm's per-client counter
		 * must match the counter of the last stats ramrod;
		 * otherwise this round's stats are not complete yet. */
		if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, cur_stats_counter + 1);
			return -1;
		}
		if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, cur_stats_counter + 1);
			return -2;
		}
		if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, cur_stats_counter + 1);
			return -4;
		}

		/* RX bytes = bcast + mcast + ucast bytes ... */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* ... minus the bytes dropped for lack of buffers */
		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		/* snapshot "valid" bytes before folding error bytes back
		 * into the total */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		/* extend 32-bit storm packet counters into 64-bit qstats */
		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* no-buff drops were counted as received above; subtract
		 * them from the received totals and account them as
		 * no_buff_discard instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* TX bytes = ucast + mcast + bcast bytes sent */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* accumulate this queue's counters into the function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the device-wide error/discard counters */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* include bytes the MAC received in bad frames */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals into eth_stats; relies on the two
	 * structs sharing the same leading layout (minus the trailing
	 * start/end marker u32s) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* per-port discard counters are only valid on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the function-stats block consistent for the management FW */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
967
/* Translate the driver's 64-bit hi/lo statistics into the standard
 * struct net_device_stats exposed through the net core.
 *
 * bnx2x_hilo() collapses a hi/lo u32 pair (passed by address of the
 * _hi member) into a single scalar.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC-level discards + per-queue checksum discards */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* rx_errors is the sum of all the specific RX error classes */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
1033
1034static void bnx2x_drv_stats_update(struct bnx2x *bp)
1035{
1036 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1037 int i;
1038
1039 estats->driver_xoff = 0;
1040 estats->rx_err_discard_pkt = 0;
1041 estats->rx_skb_alloc_failed = 0;
1042 estats->hw_csum_err = 0;
1043 for_each_queue(bp, i) {
1044 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
1045
1046 estats->driver_xoff += qstats->driver_xoff;
1047 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
1048 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
1049 estats->hw_csum_err += qstats->hw_csum_err;
1050 }
1051}
1052
/* Periodic statistics update (STATS_EVENT_UPDATE action in ENABLED
 * state): consume the completed DMAE transfer, refresh all counter
 * views, optionally dump per-queue debug info, and kick off the next
 * HW/storm stats collection round.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer not finished yet - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* only the PMF owns the port-wide HW (MAC/NIG) statistics */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* tolerate up to 3 consecutive storm-stats misses before
	 * declaring the firmware stuck */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-queue dump, enabled via ethtool msglvl timer bit */
	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
					  "  tx pkt(%lu) tx calls (%lu)"
					  "  %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	/* start the next collection cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
1116
/* Build the DMAE command(s) that flush the final port and function
 * statistics back to the shared-memory areas (port_stx / func_stx)
 * when statistics collection is being stopped.
 *
 * If both a port and a function stats area exist, the two DMAE
 * commands are chained: the first completes to GRC (triggering the
 * loader), and only the last one completes to PCI and sets
 * DMAE_COMP_VAL in stats_comp.  The commands are only prepared here;
 * bnx2x_hw_stats_post() actually issues them.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode bits: host memory -> GRC, with per-arch
	 * endianness swap and port/vn routing */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* chain to the function-stats command if there is one,
		 * otherwise this is the last command and completes to PCI */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			/* completion kicks the DMAE loader for the next cmd */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
1180
1181static void bnx2x_stats_stop(struct bnx2x *bp)
1182{
1183 int update = 0;
1184
1185 bnx2x_stats_comp(bp);
1186
1187 if (bp->port.pmf)
1188 update = (bnx2x_hw_stats_update(bp) == 0);
1189
1190 update |= (bnx2x_storm_stats_update(bp) == 0);
1191
1192 if (update) {
1193 bnx2x_net_stats_update(bp);
1194
1195 if (bp->port.pmf)
1196 bnx2x_port_stats_stop(bp);
1197
1198 bnx2x_hw_stats_post(bp);
1199 bnx2x_stats_comp(bp);
1200 }
1201}
1202
/* No-op action for state/event combinations that require no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
1206
/* Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the state to transition to.
 * Rows: STATS_STATE_DISABLED, STATS_STATE_ENABLED.
 * Columns: PMF, LINK_UP, UPDATE, STOP (enum bnx2x_stats_event order).
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
1225
/* Feed an event into the statistics state machine.
 *
 * The state transition is performed atomically under stats_lock, but
 * the action itself runs outside the lock.  NOTE(review): this means a
 * concurrent event could start its action before this one finishes --
 * presumably callers serialize events sufficiently; confirm.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	/* no stats processing once the chip is considered dead */
	if (unlikely(bp->panic))
		return;

	/* Protect a state change flow */
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	/* log all transitions except the (frequent) periodic UPDATE,
	 * unless the timer debug level is enabled */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
1245
/* Write the initial (zeroed) host port-stats block out to the shared
 * memory port_stx area via a single synchronous DMAE transfer.
 * Called only on the PMF with a valid port_stx address.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single host->GRC command, completion to PCI (stats_comp) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* issue and wait for completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
1283
/* Initialize the function-stats shared-memory areas of every vnic on
 * this port (the PMF does it on behalf of all of them).  bp->func_stx
 * is temporarily repointed at each function's area so the common init
 * helpers can be reused, then restored.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* function number = 2*vn + port (E1H function layout) */
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
1312
/* Read the current function statistics from shared memory into the
 * local func_stats_base area via a synchronous DMAE transfer (GRC ->
 * host direction; the reverse of the init/stop paths).  Used by
 * non-PMF functions to pick up the base their totals start from.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* issue and wait for completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
1350
/* One-time statistics initialization, called during device bring-up:
 * discovers the shared-memory stats areas, snapshots the NIG base
 * counters, zeroes all software counters, and seeds the shared-memory
 * areas (PMF) or reads the current base from them (non-PMF).
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no management FW - no shared-memory stats areas */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: capture the current NIG counter values so later
	 * updates can compute deltas against them */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear all per-queue accumulators */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* PMF seeds the shared-memory areas with zeroed blocks */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF reads the base its totals will start from */
		bnx2x_func_stats_base_update(bp);
}
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
new file mode 100644
index 000000000000..38a4e908f4fb
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -0,0 +1,239 @@
1/* bnx2x_stats.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 */
13
14#ifndef BNX2X_STATS_H
15#define BNX2X_STATS_H
16
17#include <linux/types.h>
18
/* Per-RX/TX-queue statistics, kept in each bnx2x_fastpath.
 * All 64-bit counters are stored as hi/lo u32 pairs (collapsed with
 * bnx2x_hilo() when reported).
 */
struct bnx2x_eth_q_stats {
	u32 total_bytes_received_hi;
	u32 total_bytes_received_lo;
	u32 total_bytes_transmitted_hi;
	u32 total_bytes_transmitted_lo;
	u32 total_unicast_packets_received_hi;
	u32 total_unicast_packets_received_lo;
	u32 total_multicast_packets_received_hi;
	u32 total_multicast_packets_received_lo;
	u32 total_broadcast_packets_received_hi;
	u32 total_broadcast_packets_received_lo;
	u32 total_unicast_packets_transmitted_hi;
	u32 total_unicast_packets_transmitted_lo;
	u32 total_multicast_packets_transmitted_hi;
	u32 total_multicast_packets_transmitted_lo;
	u32 total_broadcast_packets_transmitted_hi;
	u32 total_broadcast_packets_transmitted_lo;
	u32 valid_bytes_received_hi;
	u32 valid_bytes_received_lo;

	/* error/discard counters derived from firmware storm stats */
	u32 error_bytes_received_hi;
	u32 error_bytes_received_lo;
	u32 etherstatsoverrsizepkts_hi;
	u32 etherstatsoverrsizepkts_lo;
	u32 no_buff_discard_hi;
	u32 no_buff_discard_lo;

	/* driver-maintained (software) counters */
	u32 driver_xoff;
	u32 rx_err_discard_pkt;
	u32 rx_skb_alloc_failed;
	u32 hw_csum_err;
};

/* number of per-queue stats exposed via ethtool -S; keep in sync with
 * the stats table in the ethtool code */
#define BNX2X_NUM_Q_STATS		13
/* u32-word offset of a field within struct bnx2x_eth_q_stats */
#define Q_STATS_OFFSET32(stat_name) \
			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
55
/* Snapshot of the NIG (network interface glue) block statistics
 * registers, DMAE'd from the chip.  A copy of the previous snapshot is
 * kept in bp->port.old_nig_stats so deltas can be computed.
 */
struct nig_stats {
	u32 brb_discard;
	u32 brb_packet;
	u32 brb_truncate;
	u32 flow_ctrl_discard;
	u32 flow_ctrl_octets;
	u32 flow_ctrl_packet;
	u32 mng_discard;
	u32 mng_octet_inp;
	u32 mng_octet_out;
	u32 mng_packet_inp;
	u32 mng_packet_out;
	u32 pbf_octets;
	u32 pbf_packet;
	u32 safc_inp;
	u32 egress_mac_pkt0_lo;
	u32 egress_mac_pkt0_hi;
	u32 egress_mac_pkt1_lo;
	u32 egress_mac_pkt1_hi;
};
76
77
/* Events fed into the statistics state machine (bnx2x_stats_handle);
 * the order must match the columns of bnx2x_stats_stm[][] */
enum bnx2x_stats_event {
	STATS_EVENT_PMF = 0,
	STATS_EVENT_LINK_UP,
	STATS_EVENT_UPDATE,
	STATS_EVENT_STOP,
	STATS_EVENT_MAX
};
85
/* States of the statistics state machine; the order must match the
 * rows of bnx2x_stats_stm[][] */
enum bnx2x_stats_state {
	STATS_STATE_DISABLED = 0,
	STATS_STATE_ENABLED,
	STATS_STATE_MAX
};
91
/* Device-wide statistics (bp->eth_stats), aggregated from per-queue
 * firmware stats and the MAC/NIG hardware counters.
 *
 * LAYOUT IS ABI within the driver: the leading fields (through
 * no_buff_discard_lo) are bulk-copied from struct host_func_stats, and
 * the fields from rx_stat_ifhcinbadoctets_hi through
 * tx_stat_bmac_ufl_lo are bulk-copied from struct mac_stx (see the
 * memcpy()s in the stats update code).  Do not reorder or insert
 * fields within those ranges.
 */
struct bnx2x_eth_stats {
	u32 total_bytes_received_hi;
	u32 total_bytes_received_lo;
	u32 total_bytes_transmitted_hi;
	u32 total_bytes_transmitted_lo;
	u32 total_unicast_packets_received_hi;
	u32 total_unicast_packets_received_lo;
	u32 total_multicast_packets_received_hi;
	u32 total_multicast_packets_received_lo;
	u32 total_broadcast_packets_received_hi;
	u32 total_broadcast_packets_received_lo;
	u32 total_unicast_packets_transmitted_hi;
	u32 total_unicast_packets_transmitted_lo;
	u32 total_multicast_packets_transmitted_hi;
	u32 total_multicast_packets_transmitted_lo;
	u32 total_broadcast_packets_transmitted_hi;
	u32 total_broadcast_packets_transmitted_lo;
	u32 valid_bytes_received_hi;
	u32 valid_bytes_received_lo;

	u32 error_bytes_received_hi;
	u32 error_bytes_received_lo;
	u32 etherstatsoverrsizepkts_hi;
	u32 etherstatsoverrsizepkts_lo;
	u32 no_buff_discard_hi;
	u32 no_buff_discard_lo;

	/* start of the mac_stx-mirrored range (MAC hardware counters) */
	u32 rx_stat_ifhcinbadoctets_hi;
	u32 rx_stat_ifhcinbadoctets_lo;
	u32 tx_stat_ifhcoutbadoctets_hi;
	u32 tx_stat_ifhcoutbadoctets_lo;
	u32 rx_stat_dot3statsfcserrors_hi;
	u32 rx_stat_dot3statsfcserrors_lo;
	u32 rx_stat_dot3statsalignmenterrors_hi;
	u32 rx_stat_dot3statsalignmenterrors_lo;
	u32 rx_stat_dot3statscarriersenseerrors_hi;
	u32 rx_stat_dot3statscarriersenseerrors_lo;
	u32 rx_stat_falsecarriererrors_hi;
	u32 rx_stat_falsecarriererrors_lo;
	u32 rx_stat_etherstatsundersizepkts_hi;
	u32 rx_stat_etherstatsundersizepkts_lo;
	u32 rx_stat_dot3statsframestoolong_hi;
	u32 rx_stat_dot3statsframestoolong_lo;
	u32 rx_stat_etherstatsfragments_hi;
	u32 rx_stat_etherstatsfragments_lo;
	u32 rx_stat_etherstatsjabbers_hi;
	u32 rx_stat_etherstatsjabbers_lo;
	u32 rx_stat_maccontrolframesreceived_hi;
	u32 rx_stat_maccontrolframesreceived_lo;
	u32 rx_stat_bmac_xpf_hi;
	u32 rx_stat_bmac_xpf_lo;
	u32 rx_stat_bmac_xcf_hi;
	u32 rx_stat_bmac_xcf_lo;
	u32 rx_stat_xoffstateentered_hi;
	u32 rx_stat_xoffstateentered_lo;
	u32 rx_stat_xonpauseframesreceived_hi;
	u32 rx_stat_xonpauseframesreceived_lo;
	u32 rx_stat_xoffpauseframesreceived_hi;
	u32 rx_stat_xoffpauseframesreceived_lo;
	u32 tx_stat_outxonsent_hi;
	u32 tx_stat_outxonsent_lo;
	u32 tx_stat_outxoffsent_hi;
	u32 tx_stat_outxoffsent_lo;
	u32 tx_stat_flowcontroldone_hi;
	u32 tx_stat_flowcontroldone_lo;
	u32 tx_stat_etherstatscollisions_hi;
	u32 tx_stat_etherstatscollisions_lo;
	u32 tx_stat_dot3statssinglecollisionframes_hi;
	u32 tx_stat_dot3statssinglecollisionframes_lo;
	u32 tx_stat_dot3statsmultiplecollisionframes_hi;
	u32 tx_stat_dot3statsmultiplecollisionframes_lo;
	u32 tx_stat_dot3statsdeferredtransmissions_hi;
	u32 tx_stat_dot3statsdeferredtransmissions_lo;
	u32 tx_stat_dot3statsexcessivecollisions_hi;
	u32 tx_stat_dot3statsexcessivecollisions_lo;
	u32 tx_stat_dot3statslatecollisions_hi;
	u32 tx_stat_dot3statslatecollisions_lo;
	u32 tx_stat_etherstatspkts64octets_hi;
	u32 tx_stat_etherstatspkts64octets_lo;
	u32 tx_stat_etherstatspkts65octetsto127octets_hi;
	u32 tx_stat_etherstatspkts65octetsto127octets_lo;
	u32 tx_stat_etherstatspkts128octetsto255octets_hi;
	u32 tx_stat_etherstatspkts128octetsto255octets_lo;
	u32 tx_stat_etherstatspkts256octetsto511octets_hi;
	u32 tx_stat_etherstatspkts256octetsto511octets_lo;
	u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
	u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
	u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
	u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
	u32 tx_stat_etherstatspktsover1522octets_hi;
	u32 tx_stat_etherstatspktsover1522octets_lo;
	u32 tx_stat_bmac_2047_hi;
	u32 tx_stat_bmac_2047_lo;
	u32 tx_stat_bmac_4095_hi;
	u32 tx_stat_bmac_4095_lo;
	u32 tx_stat_bmac_9216_hi;
	u32 tx_stat_bmac_9216_lo;
	u32 tx_stat_bmac_16383_hi;
	u32 tx_stat_bmac_16383_lo;
	u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
	u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
	u32 tx_stat_bmac_ufl_hi;
	u32 tx_stat_bmac_ufl_lo;
	/* end of the mac_stx-mirrored range */

	u32 pause_frames_received_hi;
	u32 pause_frames_received_lo;
	u32 pause_frames_sent_hi;
	u32 pause_frames_sent_lo;

	/* NIG egress packet-size counters */
	u32 etherstatspkts1024octetsto1522octets_hi;
	u32 etherstatspkts1024octetsto1522octets_lo;
	u32 etherstatspktsover1522octets_hi;
	u32 etherstatspktsover1522octets_lo;

	u32 brb_drop_hi;
	u32 brb_drop_lo;
	u32 brb_truncate_hi;
	u32 brb_truncate_lo;

	/* per-port discard counters (valid on the PMF only) */
	u32 mac_filter_discard;
	u32 xxoverflow_discard;
	u32 brb_truncate_discard;
	u32 mac_discard;

	/* driver-maintained (software) counters */
	u32 driver_xoff;
	u32 rx_err_discard_pkt;
	u32 rx_skb_alloc_failed;
	u32 hw_csum_err;

	u32 nig_timer_max;
};

/* number of device-wide stats exposed via ethtool -S; keep in sync
 * with the stats table in the ethtool code */
#define BNX2X_NUM_STATS			43
/* u32-word offset of a field within struct bnx2x_eth_stats */
#define STATS_OFFSET32(stat_name) \
			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
227
/* Forward declaration */
struct bnx2x;

/* One-time statistics initialization, called at device bring-up */
void bnx2x_stats_init(struct bnx2x *bp);

/* DMAE loader "go" registers, defined in the main driver file */
extern const u32 dmae_reg_go_c[];
/* Post a slow-path ramrod; defined in the main driver file */
extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common);
237
238
239#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 8d7dfd2f1e90..c746b331771d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -233,34 +233,27 @@ static void tlb_deinitialize(struct bonding *bond)
233 _unlock_tx_hashtbl(bond); 233 _unlock_tx_hashtbl(bond);
234} 234}
235 235
236static long long compute_gap(struct slave *slave)
237{
238 return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
239 (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
240}
241
236/* Caller must hold bond lock for read */ 242/* Caller must hold bond lock for read */
237static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) 243static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
238{ 244{
239 struct slave *slave, *least_loaded; 245 struct slave *slave, *least_loaded;
240 s64 max_gap; 246 long long max_gap;
241 int i, found = 0; 247 int i;
242
243 /* Find the first enabled slave */
244 bond_for_each_slave(bond, slave, i) {
245 if (SLAVE_IS_OK(slave)) {
246 found = 1;
247 break;
248 }
249 }
250
251 if (!found) {
252 return NULL;
253 }
254 248
255 least_loaded = slave; 249 least_loaded = NULL;
256 max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */ 250 max_gap = LLONG_MIN;
257 (s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
258 251
259 /* Find the slave with the largest gap */ 252 /* Find the slave with the largest gap */
260 bond_for_each_slave_from(bond, slave, i, least_loaded) { 253 bond_for_each_slave(bond, slave, i) {
261 if (SLAVE_IS_OK(slave)) { 254 if (SLAVE_IS_OK(slave)) {
262 s64 gap = (s64)(slave->speed << 20) - 255 long long gap = compute_gap(slave);
263 (s64)(SLAVE_TLB_INFO(slave).load << 3); 256
264 if (max_gap < gap) { 257 if (max_gap < gap) {
265 least_loaded = slave; 258 least_loaded = slave;
266 max_gap = gap; 259 max_gap = gap;
@@ -689,7 +682,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
689 client_info->ntt = 0; 682 client_info->ntt = 0;
690 } 683 }
691 684
692 if (!list_empty(&bond->vlan_list)) { 685 if (bond->vlgrp) {
693 if (!vlan_get_tag(skb, &client_info->vlan_id)) 686 if (!vlan_get_tag(skb, &client_info->vlan_id))
694 client_info->tag = 1; 687 client_info->tag = 1;
695 } 688 }
@@ -911,7 +904,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
911 skb->priority = TC_PRIO_CONTROL; 904 skb->priority = TC_PRIO_CONTROL;
912 skb->dev = slave->dev; 905 skb->dev = slave->dev;
913 906
914 if (!list_empty(&bond->vlan_list)) { 907 if (bond->vlgrp) {
915 struct vlan_entry *vlan; 908 struct vlan_entry *vlan;
916 909
917 vlan = bond_next_vlan(bond, 910 vlan = bond_next_vlan(bond,
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 969ffed86b9f..121b073a6c3f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -178,6 +178,8 @@ static int bond_inet6addr_event(struct notifier_block *this,
178 } 178 }
179 179
180 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 180 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
181 if (!bond->vlgrp)
182 continue;
181 vlan_dev = vlan_group_get_device(bond->vlgrp, 183 vlan_dev = vlan_group_get_device(bond->vlgrp,
182 vlan->vlan_id); 184 vlan->vlan_id);
183 if (vlan_dev == event_dev) { 185 if (vlan_dev == event_dev) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c3d98dde2f86..2cc4cfc31892 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
90#define BOND_LINK_ARP_INTERV 0 90#define BOND_LINK_ARP_INTERV 0
91 91
92static int max_bonds = BOND_DEFAULT_MAX_BONDS; 92static int max_bonds = BOND_DEFAULT_MAX_BONDS;
93static int tx_queues = BOND_DEFAULT_TX_QUEUES;
93static int num_grat_arp = 1; 94static int num_grat_arp = 1;
94static int num_unsol_na = 1; 95static int num_unsol_na = 1;
95static int miimon = BOND_LINK_MON_INTERV; 96static int miimon = BOND_LINK_MON_INTERV;
@@ -106,10 +107,13 @@ static int arp_interval = BOND_LINK_ARP_INTERV;
106static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; 107static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
107static char *arp_validate; 108static char *arp_validate;
108static char *fail_over_mac; 109static char *fail_over_mac;
110static int all_slaves_active = 0;
109static struct bond_params bonding_defaults; 111static struct bond_params bonding_defaults;
110 112
111module_param(max_bonds, int, 0); 113module_param(max_bonds, int, 0);
112MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 114MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
115module_param(tx_queues, int, 0);
116MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
113module_param(num_grat_arp, int, 0644); 117module_param(num_grat_arp, int, 0644);
114MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); 118MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
115module_param(num_unsol_na, int, 0644); 119module_param(num_unsol_na, int, 0644);
@@ -155,6 +159,10 @@ module_param(arp_validate, charp, 0);
155MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); 159MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
156module_param(fail_over_mac, charp, 0); 160module_param(fail_over_mac, charp, 0);
157MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); 161MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow");
162module_param(all_slaves_active, int, 0);
163MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
164 "by setting active flag for all slaves. "
165 "0 for never (default), 1 for always.");
158 166
159/*----------------------------- Global variables ----------------------------*/ 167/*----------------------------- Global variables ----------------------------*/
160 168
@@ -168,7 +176,9 @@ static int arp_ip_count;
168static int bond_mode = BOND_MODE_ROUNDROBIN; 176static int bond_mode = BOND_MODE_ROUNDROBIN;
169static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; 177static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
170static int lacp_fast; 178static int lacp_fast;
179#ifdef CONFIG_NET_POLL_CONTROLLER
171static int disable_netpoll = 1; 180static int disable_netpoll = 1;
181#endif
172 182
173const struct bond_parm_tbl bond_lacp_tbl[] = { 183const struct bond_parm_tbl bond_lacp_tbl[] = {
174{ "slow", AD_LACP_SLOW}, 184{ "slow", AD_LACP_SLOW},
@@ -414,6 +424,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
414{ 424{
415 unsigned short uninitialized_var(vlan_id); 425 unsigned short uninitialized_var(vlan_id);
416 426
427 /* Test vlan_list not vlgrp to catch and handle 802.1p tags */
417 if (!list_empty(&bond->vlan_list) && 428 if (!list_empty(&bond->vlan_list) &&
418 !(slave_dev->features & NETIF_F_HW_VLAN_TX) && 429 !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
419 vlan_get_tag(skb, &vlan_id) == 0) { 430 vlan_get_tag(skb, &vlan_id) == 0) {
@@ -477,7 +488,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
477 struct slave *slave; 488 struct slave *slave;
478 int i; 489 int i;
479 490
491 write_lock(&bond->lock);
480 bond->vlgrp = grp; 492 bond->vlgrp = grp;
493 write_unlock(&bond->lock);
481 494
482 bond_for_each_slave(bond, slave, i) { 495 bond_for_each_slave(bond, slave, i) {
483 struct net_device *slave_dev = slave->dev; 496 struct net_device *slave_dev = slave->dev;
@@ -557,10 +570,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
557 struct vlan_entry *vlan; 570 struct vlan_entry *vlan;
558 const struct net_device_ops *slave_ops = slave_dev->netdev_ops; 571 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
559 572
560 write_lock_bh(&bond->lock); 573 if (!bond->vlgrp)
561 574 return;
562 if (list_empty(&bond->vlan_list))
563 goto out;
564 575
565 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && 576 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
566 slave_ops->ndo_vlan_rx_register) 577 slave_ops->ndo_vlan_rx_register)
@@ -568,13 +579,10 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
568 579
569 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || 580 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
570 !(slave_ops->ndo_vlan_rx_add_vid)) 581 !(slave_ops->ndo_vlan_rx_add_vid))
571 goto out; 582 return;
572 583
573 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) 584 list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
574 slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id); 585 slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
575
576out:
577 write_unlock_bh(&bond->lock);
578} 586}
579 587
580static void bond_del_vlans_from_slave(struct bonding *bond, 588static void bond_del_vlans_from_slave(struct bonding *bond,
@@ -584,16 +592,16 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
584 struct vlan_entry *vlan; 592 struct vlan_entry *vlan;
585 struct net_device *vlan_dev; 593 struct net_device *vlan_dev;
586 594
587 write_lock_bh(&bond->lock); 595 if (!bond->vlgrp)
588 596 return;
589 if (list_empty(&bond->vlan_list))
590 goto out;
591 597
592 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || 598 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
593 !(slave_ops->ndo_vlan_rx_kill_vid)) 599 !(slave_ops->ndo_vlan_rx_kill_vid))
594 goto unreg; 600 goto unreg;
595 601
596 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 602 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
603 if (!vlan->vlan_id)
604 continue;
597 /* Save and then restore vlan_dev in the grp array, 605 /* Save and then restore vlan_dev in the grp array,
598 * since the slave's driver might clear it. 606 * since the slave's driver might clear it.
599 */ 607 */
@@ -606,9 +614,6 @@ unreg:
606 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && 614 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
607 slave_ops->ndo_vlan_rx_register) 615 slave_ops->ndo_vlan_rx_register)
608 slave_ops->ndo_vlan_rx_register(slave_dev, NULL); 616 slave_ops->ndo_vlan_rx_register(slave_dev, NULL);
609
610out:
611 write_unlock_bh(&bond->lock);
612} 617}
613 618
614/*------------------------------- Link status -------------------------------*/ 619/*------------------------------- Link status -------------------------------*/
@@ -1433,7 +1438,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1433 /* no need to lock since we're protected by rtnl_lock */ 1438 /* no need to lock since we're protected by rtnl_lock */
1434 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { 1439 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1435 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1440 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1436 if (!list_empty(&bond->vlan_list)) { 1441 if (bond->vlgrp) {
1437 pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n", 1442 pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
1438 bond_dev->name, slave_dev->name, bond_dev->name); 1443 bond_dev->name, slave_dev->name, bond_dev->name);
1439 return -EPERM; 1444 return -EPERM;
@@ -1522,16 +1527,32 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1522 } 1527 }
1523 } 1528 }
1524 1529
1530 /* If this is the first slave, then we need to set the master's hardware
1531 * address to be the same as the slave's. */
1532 if (bond->slave_cnt == 0)
1533 memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
1534 slave_dev->addr_len);
1535
1536
1525 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); 1537 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
1526 if (!new_slave) { 1538 if (!new_slave) {
1527 res = -ENOMEM; 1539 res = -ENOMEM;
1528 goto err_undo_flags; 1540 goto err_undo_flags;
1529 } 1541 }
1530 1542
1531 /* save slave's original flags before calling 1543 /*
1532 * netdev_set_master and dev_open 1544 * Set the new_slave's queue_id to be zero. Queue ID mapping
1545 * is set via sysfs or module option if desired.
1533 */ 1546 */
1534 new_slave->original_flags = slave_dev->flags; 1547 new_slave->queue_id = 0;
1548
1549 /* Save slave's original mtu and then set it to match the bond */
1550 new_slave->original_mtu = slave_dev->mtu;
1551 res = dev_set_mtu(slave_dev, bond->dev->mtu);
1552 if (res) {
1553 pr_debug("Error %d calling dev_set_mtu\n", res);
1554 goto err_free;
1555 }
1535 1556
1536 /* 1557 /*
1537 * Save slave's original ("permanent") mac address for modes 1558 * Save slave's original ("permanent") mac address for modes
@@ -1550,7 +1571,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1550 res = dev_set_mac_address(slave_dev, &addr); 1571 res = dev_set_mac_address(slave_dev, &addr);
1551 if (res) { 1572 if (res) {
1552 pr_debug("Error %d calling set_mac_address\n", res); 1573 pr_debug("Error %d calling set_mac_address\n", res);
1553 goto err_free; 1574 goto err_restore_mtu;
1554 } 1575 }
1555 } 1576 }
1556 1577
@@ -1793,6 +1814,9 @@ err_restore_mac:
1793 dev_set_mac_address(slave_dev, &addr); 1814 dev_set_mac_address(slave_dev, &addr);
1794 } 1815 }
1795 1816
1817err_restore_mtu:
1818 dev_set_mtu(slave_dev, new_slave->original_mtu);
1819
1796err_free: 1820err_free:
1797 kfree(new_slave); 1821 kfree(new_slave);
1798 1822
@@ -1913,7 +1937,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1913 */ 1937 */
1914 memset(bond_dev->dev_addr, 0, bond_dev->addr_len); 1938 memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
1915 1939
1916 if (list_empty(&bond->vlan_list)) { 1940 if (!bond->vlgrp) {
1917 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1941 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
1918 } else { 1942 } else {
1919 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", 1943 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
@@ -1980,6 +2004,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1980 dev_set_mac_address(slave_dev, &addr); 2004 dev_set_mac_address(slave_dev, &addr);
1981 } 2005 }
1982 2006
2007 dev_set_mtu(slave_dev, slave->original_mtu);
2008
1983 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | 2009 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
1984 IFF_SLAVE_INACTIVE | IFF_BONDING | 2010 IFF_SLAVE_INACTIVE | IFF_BONDING |
1985 IFF_SLAVE_NEEDARP); 2011 IFF_SLAVE_NEEDARP);
@@ -2103,9 +2129,9 @@ static int bond_release_all(struct net_device *bond_dev)
2103 */ 2129 */
2104 memset(bond_dev->dev_addr, 0, bond_dev->addr_len); 2130 memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
2105 2131
2106 if (list_empty(&bond->vlan_list)) 2132 if (!bond->vlgrp) {
2107 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 2133 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2108 else { 2134 } else {
2109 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", 2135 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
2110 bond_dev->name, bond_dev->name); 2136 bond_dev->name, bond_dev->name);
2111 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", 2137 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2538,7 +2564,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2538 if (!targets[i]) 2564 if (!targets[i])
2539 break; 2565 break;
2540 pr_debug("basa: target %x\n", targets[i]); 2566 pr_debug("basa: target %x\n", targets[i]);
2541 if (list_empty(&bond->vlan_list)) { 2567 if (!bond->vlgrp) {
2542 pr_debug("basa: empty vlan: arp_send\n"); 2568 pr_debug("basa: empty vlan: arp_send\n");
2543 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2569 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2544 bond->master_ip, 0); 2570 bond->master_ip, 0);
@@ -2566,7 +2592,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2566 /* 2592 /*
2567 * This target is not on a VLAN 2593 * This target is not on a VLAN
2568 */ 2594 */
2569 if (rt->u.dst.dev == bond->dev) { 2595 if (rt->dst.dev == bond->dev) {
2570 ip_rt_put(rt); 2596 ip_rt_put(rt);
2571 pr_debug("basa: rtdev == bond->dev: arp_send\n"); 2597 pr_debug("basa: rtdev == bond->dev: arp_send\n");
2572 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2598 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
@@ -2577,7 +2603,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2577 vlan_id = 0; 2603 vlan_id = 0;
2578 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2604 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2579 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2605 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2580 if (vlan_dev == rt->u.dst.dev) { 2606 if (vlan_dev == rt->dst.dev) {
2581 vlan_id = vlan->vlan_id; 2607 vlan_id = vlan->vlan_id;
2582 pr_debug("basa: vlan match on %s %d\n", 2608 pr_debug("basa: vlan match on %s %d\n",
2583 vlan_dev->name, vlan_id); 2609 vlan_dev->name, vlan_id);
@@ -2595,7 +2621,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2595 if (net_ratelimit()) { 2621 if (net_ratelimit()) {
2596 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2622 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2597 bond->dev->name, &fl.fl4_dst, 2623 bond->dev->name, &fl.fl4_dst,
2598 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL"); 2624 rt->dst.dev ? rt->dst.dev->name : "NULL");
2599 } 2625 }
2600 ip_rt_put(rt); 2626 ip_rt_put(rt);
2601 } 2627 }
@@ -2627,6 +2653,9 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
2627 bond->master_ip, 0); 2653 bond->master_ip, 0);
2628 } 2654 }
2629 2655
2656 if (!bond->vlgrp)
2657 return;
2658
2630 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2659 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2631 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2660 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2632 if (vlan->vlan_ip) { 2661 if (vlan->vlan_ip) {
@@ -3276,6 +3305,7 @@ static void bond_info_show_slave(struct seq_file *seq,
3276 else 3305 else
3277 seq_puts(seq, "Aggregator ID: N/A\n"); 3306 seq_puts(seq, "Aggregator ID: N/A\n");
3278 } 3307 }
3308 seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
3279} 3309}
3280 3310
3281static int bond_info_seq_show(struct seq_file *seq, void *v) 3311static int bond_info_seq_show(struct seq_file *seq, void *v)
@@ -3558,6 +3588,8 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3558 } 3588 }
3559 3589
3560 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 3590 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
3591 if (!bond->vlgrp)
3592 continue;
3561 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 3593 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
3562 if (vlan_dev == event_dev) { 3594 if (vlan_dev == event_dev) {
3563 switch (event) { 3595 switch (event) {
@@ -3785,50 +3817,49 @@ static int bond_close(struct net_device *bond_dev)
3785 return 0; 3817 return 0;
3786} 3818}
3787 3819
3788static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) 3820static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3821 struct rtnl_link_stats64 *stats)
3789{ 3822{
3790 struct bonding *bond = netdev_priv(bond_dev); 3823 struct bonding *bond = netdev_priv(bond_dev);
3791 struct net_device_stats *stats = &bond_dev->stats; 3824 struct rtnl_link_stats64 temp;
3792 struct net_device_stats local_stats;
3793 struct slave *slave; 3825 struct slave *slave;
3794 int i; 3826 int i;
3795 3827
3796 memset(&local_stats, 0, sizeof(struct net_device_stats)); 3828 memset(stats, 0, sizeof(*stats));
3797 3829
3798 read_lock_bh(&bond->lock); 3830 read_lock_bh(&bond->lock);
3799 3831
3800 bond_for_each_slave(bond, slave, i) { 3832 bond_for_each_slave(bond, slave, i) {
3801 const struct net_device_stats *sstats = dev_get_stats(slave->dev); 3833 const struct rtnl_link_stats64 *sstats =
3834 dev_get_stats(slave->dev, &temp);
3802 3835
3803 local_stats.rx_packets += sstats->rx_packets; 3836 stats->rx_packets += sstats->rx_packets;
3804 local_stats.rx_bytes += sstats->rx_bytes; 3837 stats->rx_bytes += sstats->rx_bytes;
3805 local_stats.rx_errors += sstats->rx_errors; 3838 stats->rx_errors += sstats->rx_errors;
3806 local_stats.rx_dropped += sstats->rx_dropped; 3839 stats->rx_dropped += sstats->rx_dropped;
3807 3840
3808 local_stats.tx_packets += sstats->tx_packets; 3841 stats->tx_packets += sstats->tx_packets;
3809 local_stats.tx_bytes += sstats->tx_bytes; 3842 stats->tx_bytes += sstats->tx_bytes;
3810 local_stats.tx_errors += sstats->tx_errors; 3843 stats->tx_errors += sstats->tx_errors;
3811 local_stats.tx_dropped += sstats->tx_dropped; 3844 stats->tx_dropped += sstats->tx_dropped;
3812 3845
3813 local_stats.multicast += sstats->multicast; 3846 stats->multicast += sstats->multicast;
3814 local_stats.collisions += sstats->collisions; 3847 stats->collisions += sstats->collisions;
3815 3848
3816 local_stats.rx_length_errors += sstats->rx_length_errors; 3849 stats->rx_length_errors += sstats->rx_length_errors;
3817 local_stats.rx_over_errors += sstats->rx_over_errors; 3850 stats->rx_over_errors += sstats->rx_over_errors;
3818 local_stats.rx_crc_errors += sstats->rx_crc_errors; 3851 stats->rx_crc_errors += sstats->rx_crc_errors;
3819 local_stats.rx_frame_errors += sstats->rx_frame_errors; 3852 stats->rx_frame_errors += sstats->rx_frame_errors;
3820 local_stats.rx_fifo_errors += sstats->rx_fifo_errors; 3853 stats->rx_fifo_errors += sstats->rx_fifo_errors;
3821 local_stats.rx_missed_errors += sstats->rx_missed_errors; 3854 stats->rx_missed_errors += sstats->rx_missed_errors;
3822 3855
3823 local_stats.tx_aborted_errors += sstats->tx_aborted_errors; 3856 stats->tx_aborted_errors += sstats->tx_aborted_errors;
3824 local_stats.tx_carrier_errors += sstats->tx_carrier_errors; 3857 stats->tx_carrier_errors += sstats->tx_carrier_errors;
3825 local_stats.tx_fifo_errors += sstats->tx_fifo_errors; 3858 stats->tx_fifo_errors += sstats->tx_fifo_errors;
3826 local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors; 3859 stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
3827 local_stats.tx_window_errors += sstats->tx_window_errors; 3860 stats->tx_window_errors += sstats->tx_window_errors;
3828 } 3861 }
3829 3862
3830 memcpy(stats, &local_stats, sizeof(struct net_device_stats));
3831
3832 read_unlock_bh(&bond->lock); 3863 read_unlock_bh(&bond->lock);
3833 3864
3834 return stats; 3865 return stats;
@@ -4412,9 +4443,59 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
4412 } 4443 }
4413} 4444}
4414 4445
4446/*
4447 * Lookup the slave that corresponds to a qid
4448 */
4449static inline int bond_slave_override(struct bonding *bond,
4450 struct sk_buff *skb)
4451{
4452 int i, res = 1;
4453 struct slave *slave = NULL;
4454 struct slave *check_slave;
4455
4456 read_lock(&bond->lock);
4457
4458 if (!BOND_IS_OK(bond) || !skb->queue_mapping)
4459 goto out;
4460
4461 /* Find out if any slaves have the same mapping as this skb. */
4462 bond_for_each_slave(bond, check_slave, i) {
4463 if (check_slave->queue_id == skb->queue_mapping) {
4464 slave = check_slave;
4465 break;
4466 }
4467 }
4468
4469 /* If the slave isn't UP, use default transmit policy. */
4470 if (slave && slave->queue_id && IS_UP(slave->dev) &&
4471 (slave->link == BOND_LINK_UP)) {
4472 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4473 }
4474
4475out:
4476 read_unlock(&bond->lock);
4477 return res;
4478}
4479
4480static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4481{
4482 /*
4483 * This helper function exists to help dev_pick_tx get the correct
4484 * destination queue. Using a helper function skips the a call to
4485 * skb_tx_hash and will put the skbs in the queue we expect on their
4486 * way down to the bonding driver.
4487 */
4488 return skb->queue_mapping;
4489}
4490
4415static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4491static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4416{ 4492{
4417 const struct bonding *bond = netdev_priv(dev); 4493 struct bonding *bond = netdev_priv(dev);
4494
4495 if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
4496 if (!bond_slave_override(bond, skb))
4497 return NETDEV_TX_OK;
4498 }
4418 4499
4419 switch (bond->params.mode) { 4500 switch (bond->params.mode) {
4420 case BOND_MODE_ROUNDROBIN: 4501 case BOND_MODE_ROUNDROBIN:
@@ -4499,7 +4580,8 @@ static const struct net_device_ops bond_netdev_ops = {
4499 .ndo_open = bond_open, 4580 .ndo_open = bond_open,
4500 .ndo_stop = bond_close, 4581 .ndo_stop = bond_close,
4501 .ndo_start_xmit = bond_start_xmit, 4582 .ndo_start_xmit = bond_start_xmit,
4502 .ndo_get_stats = bond_get_stats, 4583 .ndo_select_queue = bond_select_queue,
4584 .ndo_get_stats64 = bond_get_stats,
4503 .ndo_do_ioctl = bond_do_ioctl, 4585 .ndo_do_ioctl = bond_do_ioctl,
4504 .ndo_set_multicast_list = bond_set_multicast_list, 4586 .ndo_set_multicast_list = bond_set_multicast_list,
4505 .ndo_change_mtu = bond_change_mtu, 4587 .ndo_change_mtu = bond_change_mtu,
@@ -4604,6 +4686,7 @@ static void bond_work_cancel_all(struct bonding *bond)
4604static void bond_uninit(struct net_device *bond_dev) 4686static void bond_uninit(struct net_device *bond_dev)
4605{ 4687{
4606 struct bonding *bond = netdev_priv(bond_dev); 4688 struct bonding *bond = netdev_priv(bond_dev);
4689 struct vlan_entry *vlan, *tmp;
4607 4690
4608 bond_netpoll_cleanup(bond_dev); 4691 bond_netpoll_cleanup(bond_dev);
4609 4692
@@ -4617,6 +4700,11 @@ static void bond_uninit(struct net_device *bond_dev)
4617 bond_remove_proc_entry(bond); 4700 bond_remove_proc_entry(bond);
4618 4701
4619 __hw_addr_flush(&bond->mc_list); 4702 __hw_addr_flush(&bond->mc_list);
4703
4704 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
4705 list_del(&vlan->vlan_list);
4706 kfree(vlan);
4707 }
4620} 4708}
4621 4709
4622/*------------------------- Module initialization ---------------------------*/ 4710/*------------------------- Module initialization ---------------------------*/
@@ -4767,6 +4855,20 @@ static int bond_check_params(struct bond_params *params)
4767 } 4855 }
4768 } 4856 }
4769 4857
4858 if (tx_queues < 1 || tx_queues > 255) {
4859 pr_warning("Warning: tx_queues (%d) should be between "
4860 "1 and 255, resetting to %d\n",
4861 tx_queues, BOND_DEFAULT_TX_QUEUES);
4862 tx_queues = BOND_DEFAULT_TX_QUEUES;
4863 }
4864
4865 if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4866 pr_warning("Warning: all_slaves_active module parameter (%d), "
4867 "not of valid value (0/1), so it was set to "
4868 "0\n", all_slaves_active);
4869 all_slaves_active = 0;
4870 }
4871
4770 /* reset values for TLB/ALB */ 4872 /* reset values for TLB/ALB */
4771 if ((bond_mode == BOND_MODE_TLB) || 4873 if ((bond_mode == BOND_MODE_TLB) ||
4772 (bond_mode == BOND_MODE_ALB)) { 4874 (bond_mode == BOND_MODE_ALB)) {
@@ -4937,6 +5039,8 @@ static int bond_check_params(struct bond_params *params)
4937 params->primary[0] = 0; 5039 params->primary[0] = 0;
4938 params->primary_reselect = primary_reselect_value; 5040 params->primary_reselect = primary_reselect_value;
4939 params->fail_over_mac = fail_over_mac_value; 5041 params->fail_over_mac = fail_over_mac_value;
5042 params->tx_queues = tx_queues;
5043 params->all_slaves_active = all_slaves_active;
4940 5044
4941 if (primary) { 5045 if (primary) {
4942 strncpy(params->primary, primary, IFNAMSIZ); 5046 strncpy(params->primary, primary, IFNAMSIZ);
@@ -5023,8 +5127,8 @@ int bond_create(struct net *net, const char *name)
5023 5127
5024 rtnl_lock(); 5128 rtnl_lock();
5025 5129
5026 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", 5130 bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "",
5027 bond_setup); 5131 bond_setup, tx_queues);
5028 if (!bond_dev) { 5132 if (!bond_dev) {
5029 pr_err("%s: eek! can't alloc netdev!\n", name); 5133 pr_err("%s: eek! can't alloc netdev!\n", name);
5030 rtnl_unlock(); 5134 rtnl_unlock();
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b8bec086daa1..c311aed9bd02 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -211,7 +211,8 @@ static ssize_t bonding_show_slaves(struct device *d,
211/* 211/*
212 * Set the slaves in the current bond. The bond interface must be 212 * Set the slaves in the current bond. The bond interface must be
213 * up for this to succeed. 213 * up for this to succeed.
214 * This function is largely the same flow as bonding_update_bonds(). 214 * This is supposed to be only thin wrapper for bond_enslave and bond_release.
215 * All hard work should be done there.
215 */ 216 */
216static ssize_t bonding_store_slaves(struct device *d, 217static ssize_t bonding_store_slaves(struct device *d,
217 struct device_attribute *attr, 218 struct device_attribute *attr,
@@ -219,10 +220,8 @@ static ssize_t bonding_store_slaves(struct device *d,
219{ 220{
220 char command[IFNAMSIZ + 1] = { 0, }; 221 char command[IFNAMSIZ + 1] = { 0, };
221 char *ifname; 222 char *ifname;
222 int i, res, found, ret = count; 223 int res, ret = count;
223 u32 original_mtu; 224 struct net_device *dev;
224 struct slave *slave;
225 struct net_device *dev = NULL;
226 struct bonding *bond = to_bond(d); 225 struct bonding *bond = to_bond(d);
227 226
228 /* Quick sanity check -- is the bond interface up? */ 227 /* Quick sanity check -- is the bond interface up? */
@@ -231,8 +230,6 @@ static ssize_t bonding_store_slaves(struct device *d,
231 bond->dev->name); 230 bond->dev->name);
232 } 231 }
233 232
234 /* Note: We can't hold bond->lock here, as bond_create grabs it. */
235
236 if (!rtnl_trylock()) 233 if (!rtnl_trylock())
237 return restart_syscall(); 234 return restart_syscall();
238 235
@@ -242,91 +239,33 @@ static ssize_t bonding_store_slaves(struct device *d,
242 !dev_valid_name(ifname)) 239 !dev_valid_name(ifname))
243 goto err_no_cmd; 240 goto err_no_cmd;
244 241
245 if (command[0] == '+') { 242 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
246 243 if (!dev) {
247 /* Got a slave name in ifname. Is it already in the list? */ 244 pr_info("%s: Interface %s does not exist!\n",
248 found = 0; 245 bond->dev->name, ifname);
249 246 ret = -ENODEV;
250 dev = __dev_get_by_name(dev_net(bond->dev), ifname); 247 goto out;
251 if (!dev) { 248 }
252 pr_info("%s: Interface %s does not exist!\n",
253 bond->dev->name, ifname);
254 ret = -ENODEV;
255 goto out;
256 }
257
258 if (dev->flags & IFF_UP) {
259 pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
260 bond->dev->name, dev->name);
261 ret = -EPERM;
262 goto out;
263 }
264
265 read_lock(&bond->lock);
266 bond_for_each_slave(bond, slave, i)
267 if (slave->dev == dev) {
268 pr_err("%s: Interface %s is already enslaved!\n",
269 bond->dev->name, ifname);
270 ret = -EPERM;
271 read_unlock(&bond->lock);
272 goto out;
273 }
274 read_unlock(&bond->lock);
275
276 pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
277
278 /* If this is the first slave, then we need to set
279 the master's hardware address to be the same as the
280 slave's. */
281 if (is_zero_ether_addr(bond->dev->dev_addr))
282 memcpy(bond->dev->dev_addr, dev->dev_addr,
283 dev->addr_len);
284
285 /* Set the slave's MTU to match the bond */
286 original_mtu = dev->mtu;
287 res = dev_set_mtu(dev, bond->dev->mtu);
288 if (res) {
289 ret = res;
290 goto out;
291 }
292 249
250 switch (command[0]) {
251 case '+':
252 pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
293 res = bond_enslave(bond->dev, dev); 253 res = bond_enslave(bond->dev, dev);
294 bond_for_each_slave(bond, slave, i) 254 break;
295 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
296 slave->original_mtu = original_mtu;
297 if (res)
298 ret = res;
299 255
300 goto out; 256 case '-':
301 } 257 pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
258 res = bond_release(bond->dev, dev);
259 break;
302 260
303 if (command[0] == '-') { 261 default:
304 dev = NULL; 262 goto err_no_cmd;
305 original_mtu = 0;
306 bond_for_each_slave(bond, slave, i)
307 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
308 dev = slave->dev;
309 original_mtu = slave->original_mtu;
310 break;
311 }
312 if (dev) {
313 pr_info("%s: Removing slave %s\n",
314 bond->dev->name, dev->name);
315 res = bond_release(bond->dev, dev);
316 if (res) {
317 ret = res;
318 goto out;
319 }
320 /* set the slave MTU to the default */
321 dev_set_mtu(dev, original_mtu);
322 } else {
323 pr_err("unable to remove non-existent slave %s for bond %s.\n",
324 ifname, bond->dev->name);
325 ret = -ENODEV;
326 }
327 goto out;
328 } 263 }
329 264
265 if (res)
266 ret = res;
267 goto out;
268
330err_no_cmd: 269err_no_cmd:
331 pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n", 270 pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
332 bond->dev->name); 271 bond->dev->name);
@@ -374,19 +313,26 @@ static ssize_t bonding_store_mode(struct device *d,
374 bond->dev->name, (int)strlen(buf) - 1, buf); 313 bond->dev->name, (int)strlen(buf) - 1, buf);
375 ret = -EINVAL; 314 ret = -EINVAL;
376 goto out; 315 goto out;
377 } else { 316 }
378 if (bond->params.mode == BOND_MODE_8023AD) 317 if ((new_value == BOND_MODE_ALB ||
379 bond_unset_master_3ad_flags(bond); 318 new_value == BOND_MODE_TLB) &&
319 bond->params.arp_interval) {
320 pr_err("%s: %s mode is incompatible with arp monitoring.\n",
321 bond->dev->name, bond_mode_tbl[new_value].modename);
322 ret = -EINVAL;
323 goto out;
324 }
325 if (bond->params.mode == BOND_MODE_8023AD)
326 bond_unset_master_3ad_flags(bond);
380 327
381 if (bond->params.mode == BOND_MODE_ALB) 328 if (bond->params.mode == BOND_MODE_ALB)
382 bond_unset_master_alb_flags(bond); 329 bond_unset_master_alb_flags(bond);
383 330
384 bond->params.mode = new_value; 331 bond->params.mode = new_value;
385 bond_set_mode_ops(bond, bond->params.mode); 332 bond_set_mode_ops(bond, bond->params.mode);
386 pr_info("%s: setting mode to %s (%d).\n", 333 pr_info("%s: setting mode to %s (%d).\n",
387 bond->dev->name, bond_mode_tbl[new_value].modename, 334 bond->dev->name, bond_mode_tbl[new_value].modename,
388 new_value); 335 new_value);
389 }
390out: 336out:
391 return ret; 337 return ret;
392} 338}
@@ -571,7 +517,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
571 ret = -EINVAL; 517 ret = -EINVAL;
572 goto out; 518 goto out;
573 } 519 }
574 520 if (bond->params.mode == BOND_MODE_ALB ||
521 bond->params.mode == BOND_MODE_TLB) {
522 pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
523 bond->dev->name, bond->dev->name);
524 ret = -EINVAL;
525 goto out;
526 }
575 pr_info("%s: Setting ARP monitoring interval to %d.\n", 527 pr_info("%s: Setting ARP monitoring interval to %d.\n",
576 bond->dev->name, new_value); 528 bond->dev->name, new_value);
577 bond->params.arp_interval = new_value; 529 bond->params.arp_interval = new_value;
@@ -1472,7 +1424,173 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
1472} 1424}
1473static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); 1425static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
1474 1426
1427/*
1428 * Show the queue_ids of the slaves in the current bond.
1429 */
1430static ssize_t bonding_show_queue_id(struct device *d,
1431 struct device_attribute *attr,
1432 char *buf)
1433{
1434 struct slave *slave;
1435 int i, res = 0;
1436 struct bonding *bond = to_bond(d);
1437
1438 if (!rtnl_trylock())
1439 return restart_syscall();
1440
1441 read_lock(&bond->lock);
1442 bond_for_each_slave(bond, slave, i) {
1443 if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
1444 /* not enough space for another interface_name:queue_id pair */
1445 if ((PAGE_SIZE - res) > 10)
1446 res = PAGE_SIZE - 10;
1447 res += sprintf(buf + res, "++more++ ");
1448 break;
1449 }
1450 res += sprintf(buf + res, "%s:%d ",
1451 slave->dev->name, slave->queue_id);
1452 }
1453 read_unlock(&bond->lock);
1454 if (res)
1455 buf[res-1] = '\n'; /* eat the leftover space */
1456 rtnl_unlock();
1457 return res;
1458}
1459
1460/*
1461 * Set the queue_ids of the slaves in the current bond. The bond
1462 * interface must be enslaved for this to work.
1463 */
1464static ssize_t bonding_store_queue_id(struct device *d,
1465 struct device_attribute *attr,
1466 const char *buffer, size_t count)
1467{
1468 struct slave *slave, *update_slave;
1469 struct bonding *bond = to_bond(d);
1470 u16 qid;
1471 int i, ret = count;
1472 char *delim;
1473 struct net_device *sdev = NULL;
1475 1474
1475 if (!rtnl_trylock())
1476 return restart_syscall();
1477
1478 /* delim will point to queue id if successful */
1479 delim = strchr(buffer, ':');
1480 if (!delim)
1481 goto err_no_cmd;
1482
1483 /*
1484 * Terminate string that points to device name and bump it
1485 * up one, so we can read the queue id there.
1486 */
1487 *delim = '\0';
1488 if (sscanf(++delim, "%hd\n", &qid) != 1)
1489 goto err_no_cmd;
1490
1491 /* Check buffer length, valid ifname and queue id */
1492 if (strlen(buffer) > IFNAMSIZ ||
1493 !dev_valid_name(buffer) ||
1494 qid > bond->params.tx_queues)
1495 goto err_no_cmd;
1496
1497 /* Get the pointer to that interface if it exists */
1498 sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
1499 if (!sdev)
1500 goto err_no_cmd;
1501
1502 read_lock(&bond->lock);
1503
1504 /* Search for thes slave and check for duplicate qids */
1505 update_slave = NULL;
1506 bond_for_each_slave(bond, slave, i) {
1507 if (sdev == slave->dev)
1508 /*
1509 * We don't need to check the matching
1510 * slave for dups, since we're overwriting it
1511 */
1512 update_slave = slave;
1513 else if (qid && qid == slave->queue_id) {
1514 goto err_no_cmd_unlock;
1515 }
1516 }
1517
1518 if (!update_slave)
1519 goto err_no_cmd_unlock;
1520
1521 /* Actually set the qids for the slave */
1522 update_slave->queue_id = qid;
1523
1524 read_unlock(&bond->lock);
1525out:
1526 rtnl_unlock();
1527 return ret;
1528
1529err_no_cmd_unlock:
1530 read_unlock(&bond->lock);
1531err_no_cmd:
1532 pr_info("invalid input for queue_id set for %s.\n",
1533 bond->dev->name);
1534 ret = -EPERM;
1535 goto out;
1536}
1537
1538static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
1539 bonding_store_queue_id);
1540
1541
1542/*
1543 * Show and set the all_slaves_active flag.
1544 */
1545static ssize_t bonding_show_slaves_active(struct device *d,
1546 struct device_attribute *attr,
1547 char *buf)
1548{
1549 struct bonding *bond = to_bond(d);
1550
1551 return sprintf(buf, "%d\n", bond->params.all_slaves_active);
1552}
1553
1554static ssize_t bonding_store_slaves_active(struct device *d,
1555 struct device_attribute *attr,
1556 const char *buf, size_t count)
1557{
1558 int i, new_value, ret = count;
1559 struct bonding *bond = to_bond(d);
1560 struct slave *slave;
1561
1562 if (sscanf(buf, "%d", &new_value) != 1) {
1563 pr_err("%s: no all_slaves_active value specified.\n",
1564 bond->dev->name);
1565 ret = -EINVAL;
1566 goto out;
1567 }
1568
1569 if (new_value == bond->params.all_slaves_active)
1570 goto out;
1571
1572 if ((new_value == 0) || (new_value == 1)) {
1573 bond->params.all_slaves_active = new_value;
1574 } else {
1575 pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
1576 bond->dev->name, new_value);
1577 ret = -EINVAL;
1578 goto out;
1579 }
1580
1581 bond_for_each_slave(bond, slave, i) {
1582 if (slave->state == BOND_STATE_BACKUP) {
1583 if (new_value)
1584 slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
1585 else
1586 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
1587 }
1588 }
1589out:
1590 return count;
1591}
1592static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1593 bonding_show_slaves_active, bonding_store_slaves_active);
1476 1594
1477static struct attribute *per_bond_attrs[] = { 1595static struct attribute *per_bond_attrs[] = {
1478 &dev_attr_slaves.attr, 1596 &dev_attr_slaves.attr,
@@ -1499,6 +1617,8 @@ static struct attribute *per_bond_attrs[] = {
1499 &dev_attr_ad_actor_key.attr, 1617 &dev_attr_ad_actor_key.attr,
1500 &dev_attr_ad_partner_key.attr, 1618 &dev_attr_ad_partner_key.attr,
1501 &dev_attr_ad_partner_mac.attr, 1619 &dev_attr_ad_partner_mac.attr,
1620 &dev_attr_queue_id.attr,
1621 &dev_attr_all_slaves_active.attr,
1502 NULL, 1622 NULL,
1503}; 1623};
1504 1624
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2aa336720591..c6fdd851579a 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,8 +23,8 @@
23#include "bond_3ad.h" 23#include "bond_3ad.h"
24#include "bond_alb.h" 24#include "bond_alb.h"
25 25
26#define DRV_VERSION "3.6.0" 26#define DRV_VERSION "3.7.0"
27#define DRV_RELDATE "September 26, 2009" 27#define DRV_RELDATE "June 2, 2010"
28#define DRV_NAME "bonding" 28#define DRV_NAME "bonding"
29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
30 30
@@ -60,6 +60,9 @@
60 ((mode) == BOND_MODE_TLB) || \ 60 ((mode) == BOND_MODE_TLB) || \
61 ((mode) == BOND_MODE_ALB)) 61 ((mode) == BOND_MODE_ALB))
62 62
63#define TX_QUEUE_OVERRIDE(mode) \
64 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
65 ((mode) == BOND_MODE_ROUNDROBIN))
63/* 66/*
64 * Less bad way to call ioctl from within the kernel; this needs to be 67 * Less bad way to call ioctl from within the kernel; this needs to be
65 * done some other way to get the call out of interrupt context. 68 * done some other way to get the call out of interrupt context.
@@ -131,6 +134,8 @@ struct bond_params {
131 char primary[IFNAMSIZ]; 134 char primary[IFNAMSIZ];
132 int primary_reselect; 135 int primary_reselect;
133 __be32 arp_targets[BOND_MAX_ARP_TARGETS]; 136 __be32 arp_targets[BOND_MAX_ARP_TARGETS];
137 int tx_queues;
138 int all_slaves_active;
134}; 139};
135 140
136struct bond_parm_tbl { 141struct bond_parm_tbl {
@@ -159,12 +164,12 @@ struct slave {
159 s8 link; /* one of BOND_LINK_XXXX */ 164 s8 link; /* one of BOND_LINK_XXXX */
160 s8 new_link; 165 s8 new_link;
161 s8 state; /* one of BOND_STATE_XXXX */ 166 s8 state; /* one of BOND_STATE_XXXX */
162 u32 original_flags;
163 u32 original_mtu; 167 u32 original_mtu;
164 u32 link_failure_count; 168 u32 link_failure_count;
165 u8 perm_hwaddr[ETH_ALEN]; 169 u8 perm_hwaddr[ETH_ALEN];
166 u16 speed; 170 u16 speed;
167 u8 duplex; 171 u8 duplex;
172 u16 queue_id;
168 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 173 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
169 struct tlb_slave_info tlb_info; 174 struct tlb_slave_info tlb_info;
170}; 175};
@@ -291,7 +296,8 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave)
291 struct bonding *bond = netdev_priv(slave->dev->master); 296 struct bonding *bond = netdev_priv(slave->dev->master);
292 if (!bond_is_lb(bond)) 297 if (!bond_is_lb(bond))
293 slave->state = BOND_STATE_BACKUP; 298 slave->state = BOND_STATE_BACKUP;
294 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; 299 if (!bond->params.all_slaves_active)
300 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
295 if (slave_do_arp_validate(bond, slave)) 301 if (slave_do_arp_validate(bond, slave))
296 slave->dev->priv_flags |= IFF_SLAVE_NEEDARP; 302 slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
297} 303}
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 0b28e0107697..631a6242b011 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -2,16 +2,32 @@
2# CAIF physical drivers 2# CAIF physical drivers
3# 3#
4 4
5if CAIF
6
7comment "CAIF transport drivers" 5comment "CAIF transport drivers"
8 6
9config CAIF_TTY 7config CAIF_TTY
10 tristate "CAIF TTY transport driver" 8 tristate "CAIF TTY transport driver"
9 depends on CAIF
11 default n 10 default n
12 ---help--- 11 ---help---
13 The CAIF TTY transport driver is a Line Discipline (ldisc) 12 The CAIF TTY transport driver is a Line Discipline (ldisc)
14 identified as N_CAIF. When this ldisc is opened from user space 13 identified as N_CAIF. When this ldisc is opened from user space
15 it will redirect the TTY's traffic into the CAIF stack. 14 it will redirect the TTY's traffic into the CAIF stack.
16 15
17endif # CAIF 16config CAIF_SPI_SLAVE
17 tristate "CAIF SPI transport driver for slave interface"
18 depends on CAIF
19 default n
20 ---help---
21 The CAIF Link layer SPI Protocol driver for Slave SPI interface.
22 This driver implements a platform driver to accommodate for a
23 platform specific SPI device. A sample CAIF SPI Platform device is
24 provided in Documentation/networking/caif/spi_porting.txt
25
26config CAIF_SPI_SYNC
27 bool "Next command and length in start of frame"
28 depends on CAIF_SPI_SLAVE
29 default n
30 ---help---
31 Putting the next command and length in the start of the frame can
32 help to synchronize to the next transfer in case of over or under-runs.
33 This option also needs to be enabled on the modem.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 52b6d1f826f8..3a11d619452b 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,12 +1,10 @@
1ifeq ($(CONFIG_CAIF_DEBUG),1) 1ifeq ($(CONFIG_CAIF_DEBUG),y)
2CAIF_DBG_FLAGS := -DDEBUG 2EXTRA_CFLAGS += -DDEBUG
3endif 3endif
4 4
5KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
6
7ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
8clean-dirs:= .tmp_versions
9clean-files:= Module.symvers modules.order *.cmd *~ \
10
11# Serial interface 5# Serial interface
12obj-$(CONFIG_CAIF_TTY) += caif_serial.o 6obj-$(CONFIG_CAIF_TTY) += caif_serial.o
7
8# SPI slave physical interfaces module
9cfspi_slave-objs := caif_spi.o caif_spi_slave.o
10obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 09257ca8f563..3df0c0f8b8bf 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -174,6 +174,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
174 struct ser_device *ser; 174 struct ser_device *ser;
175 int ret; 175 int ret;
176 u8 *p; 176 u8 *p;
177
177 ser = tty->disc_data; 178 ser = tty->disc_data;
178 179
179 /* 180 /*
@@ -221,6 +222,7 @@ static int handle_tx(struct ser_device *ser)
221 struct tty_struct *tty; 222 struct tty_struct *tty;
222 struct sk_buff *skb; 223 struct sk_buff *skb;
223 int tty_wr, len, room; 224 int tty_wr, len, room;
225
224 tty = ser->tty; 226 tty = ser->tty;
225 ser->tx_started = true; 227 ser->tx_started = true;
226 228
@@ -281,6 +283,7 @@ error:
281static int caif_xmit(struct sk_buff *skb, struct net_device *dev) 283static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
282{ 284{
283 struct ser_device *ser; 285 struct ser_device *ser;
286
284 BUG_ON(dev == NULL); 287 BUG_ON(dev == NULL);
285 ser = netdev_priv(dev); 288 ser = netdev_priv(dev);
286 289
@@ -299,6 +302,7 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
299static void ldisc_tx_wakeup(struct tty_struct *tty) 302static void ldisc_tx_wakeup(struct tty_struct *tty)
300{ 303{
301 struct ser_device *ser; 304 struct ser_device *ser;
305
302 ser = tty->disc_data; 306 ser = tty->disc_data;
303 BUG_ON(ser == NULL); 307 BUG_ON(ser == NULL);
304 BUG_ON(ser->tty != tty); 308 BUG_ON(ser->tty != tty);
@@ -348,6 +352,7 @@ static void ldisc_close(struct tty_struct *tty)
348 struct ser_device *ser = tty->disc_data; 352 struct ser_device *ser = tty->disc_data;
349 /* Remove may be called inside or outside of rtnl_lock */ 353 /* Remove may be called inside or outside of rtnl_lock */
350 int islocked = rtnl_is_locked(); 354 int islocked = rtnl_is_locked();
355
351 if (!islocked) 356 if (!islocked)
352 rtnl_lock(); 357 rtnl_lock();
353 /* device is freed automagically by net-sysfs */ 358 /* device is freed automagically by net-sysfs */
@@ -374,6 +379,7 @@ static struct tty_ldisc_ops caif_ldisc = {
374static int register_ldisc(void) 379static int register_ldisc(void)
375{ 380{
376 int result; 381 int result;
382
377 result = tty_register_ldisc(N_CAIF, &caif_ldisc); 383 result = tty_register_ldisc(N_CAIF, &caif_ldisc);
378 if (result < 0) { 384 if (result < 0) {
379 pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, 385 pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
@@ -391,12 +397,12 @@ static const struct net_device_ops netdev_ops = {
391static void caifdev_setup(struct net_device *dev) 397static void caifdev_setup(struct net_device *dev)
392{ 398{
393 struct ser_device *serdev = netdev_priv(dev); 399 struct ser_device *serdev = netdev_priv(dev);
400
394 dev->features = 0; 401 dev->features = 0;
395 dev->netdev_ops = &netdev_ops; 402 dev->netdev_ops = &netdev_ops;
396 dev->type = ARPHRD_CAIF; 403 dev->type = ARPHRD_CAIF;
397 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 404 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
398 dev->mtu = CAIF_MAX_MTU; 405 dev->mtu = CAIF_MAX_MTU;
399 dev->hard_header_len = CAIF_NEEDED_HEADROOM;
400 dev->tx_queue_len = 0; 406 dev->tx_queue_len = 0;
401 dev->destructor = free_netdev; 407 dev->destructor = free_netdev;
402 skb_queue_head_init(&serdev->head); 408 skb_queue_head_init(&serdev->head);
@@ -410,8 +416,6 @@ static void caifdev_setup(struct net_device *dev)
410 416
411static int caif_net_open(struct net_device *dev) 417static int caif_net_open(struct net_device *dev)
412{ 418{
413 struct ser_device *ser;
414 ser = netdev_priv(dev);
415 netif_wake_queue(dev); 419 netif_wake_queue(dev);
416 return 0; 420 return 0;
417} 421}
@@ -425,6 +429,7 @@ static int caif_net_close(struct net_device *dev)
425static int __init caif_ser_init(void) 429static int __init caif_ser_init(void)
426{ 430{
427 int ret; 431 int ret;
432
428 ret = register_ldisc(); 433 ret = register_ldisc();
429 debugfsdir = debugfs_create_dir("caif_serial", NULL); 434 debugfsdir = debugfs_create_dir("caif_serial", NULL);
430 return ret; 435 return ret;
@@ -435,6 +440,7 @@ static void __exit caif_ser_exit(void)
435 struct ser_device *ser = NULL; 440 struct ser_device *ser = NULL;
436 struct list_head *node; 441 struct list_head *node;
437 struct list_head *_tmp; 442 struct list_head *_tmp;
443
438 list_for_each_safe(node, _tmp, &ser_list) { 444 list_for_each_safe(node, _tmp, &ser_list) {
439 ser = list_entry(node, struct ser_device, node); 445 ser = list_entry(node, struct ser_device, node);
440 dev_close(ser->dev); 446 dev_close(ser->dev);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
new file mode 100644
index 000000000000..f5058ff2b210
--- /dev/null
+++ b/drivers/net/caif/caif_spi.c
@@ -0,0 +1,850 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2.
6 */
7
8#include <linux/version.h>
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/platform_device.h>
13#include <linux/string.h>
14#include <linux/workqueue.h>
15#include <linux/completion.h>
16#include <linux/list.h>
17#include <linux/interrupt.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/sched.h>
21#include <linux/debugfs.h>
22#include <linux/if_arp.h>
23#include <net/caif/caif_layer.h>
24#include <net/caif/caif_spi.h>
25
26#ifndef CONFIG_CAIF_SPI_SYNC
27#define FLAVOR "Flavour: Vanilla.\n"
28#else
29#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
30#endif /* CONFIG_CAIF_SPI_SYNC */
31
32MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
34MODULE_DESCRIPTION("CAIF SPI driver");
35
36static int spi_loop;
37module_param(spi_loop, bool, S_IRUGO);
38MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
39
40/* SPI frame alignment. */
41module_param(spi_frm_align, int, S_IRUGO);
42MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
43
44/* SPI padding options. */
45module_param(spi_up_head_align, int, S_IRUGO);
46MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
47
48module_param(spi_up_tail_align, int, S_IRUGO);
49MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
50
51module_param(spi_down_head_align, int, S_IRUGO);
52MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
53
54module_param(spi_down_tail_align, int, S_IRUGO);
55MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
56
57#ifdef CONFIG_ARM
58#define BYTE_HEX_FMT "%02X"
59#else
60#define BYTE_HEX_FMT "%02hhX"
61#endif
62
63#define SPI_MAX_PAYLOAD_SIZE 4096
64/*
65 * Threshold values for the SPI packet queue. Flowcontrol will be asserted
66 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
67 * deasserted before the number of packets drops below LOW_WATER_MARK.
68 */
69#define LOW_WATER_MARK 100
70#define HIGH_WATER_MARK (LOW_WATER_MARK*5)
71
72#ifdef CONFIG_UML
73
74/*
75 * We sometimes use UML for debugging, but it cannot handle
76 * dma_alloc_coherent so we have to wrap it.
77 */
78static inline void *dma_alloc(dma_addr_t *daddr)
79{
80 return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
81}
82
83static inline void dma_free(void *cpu_addr, dma_addr_t handle)
84{
85 kfree(cpu_addr);
86}
87
88#else
89
90static inline void *dma_alloc(dma_addr_t *daddr)
91{
92 return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
93 GFP_KERNEL);
94}
95
96static inline void dma_free(void *cpu_addr, dma_addr_t handle)
97{
98 dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
99}
100#endif /* CONFIG_UML */
101
102#ifdef CONFIG_DEBUG_FS
103
104#define DEBUGFS_BUF_SIZE 4096
105
106static struct dentry *dbgfs_root;
107
108static inline void driver_debugfs_create(void)
109{
110 dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
111}
112
113static inline void driver_debugfs_remove(void)
114{
115 debugfs_remove(dbgfs_root);
116}
117
118static inline void dev_debugfs_rem(struct cfspi *cfspi)
119{
120 debugfs_remove(cfspi->dbgfs_frame);
121 debugfs_remove(cfspi->dbgfs_state);
122 debugfs_remove(cfspi->dbgfs_dir);
123}
124
/* debugfs open: stash the cfspi instance for the read handlers. */
static int dbgfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
130
131static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
132 size_t count, loff_t *ppos)
133{
134 char *buf;
135 int len = 0;
136 ssize_t size;
137 struct cfspi *cfspi = file->private_data;
138
139 buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
140 if (!buf)
141 return 0;
142
143 /* Print out debug information. */
144 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
145 "CAIF SPI debug information:\n");
146
147 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
148
149 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
150 "STATE: %d\n", cfspi->dbg_state);
151 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
152 "Previous CMD: 0x%x\n", cfspi->pcmd);
153 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
154 "Current CMD: 0x%x\n", cfspi->cmd);
155 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
156 "Previous TX len: %d\n", cfspi->tx_ppck_len);
157 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
158 "Previous RX len: %d\n", cfspi->rx_ppck_len);
159 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
160 "Current TX len: %d\n", cfspi->tx_cpck_len);
161 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
162 "Current RX len: %d\n", cfspi->rx_cpck_len);
163 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
164 "Next TX len: %d\n", cfspi->tx_npck_len);
165 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
166 "Next RX len: %d\n", cfspi->rx_npck_len);
167
168 if (len > DEBUGFS_BUF_SIZE)
169 len = DEBUGFS_BUF_SIZE;
170
171 size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
172 kfree(buf);
173
174 return size;
175}
176
177static ssize_t print_frame(char *buf, size_t size, char *frm,
178 size_t count, size_t cut)
179{
180 int len = 0;
181 int i;
182 for (i = 0; i < count; i++) {
183 len += snprintf((buf + len), (size - len),
184 "[0x" BYTE_HEX_FMT "]",
185 frm[i]);
186 if ((i == cut) && (count > (cut * 2))) {
187 /* Fast forward. */
188 i = count - cut;
189 len += snprintf((buf + len), (size - len),
190 "--- %u bytes skipped ---\n",
191 (int)(count - (cut * 2)));
192 }
193
194 if ((!(i % 10)) && i) {
195 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
196 "\n");
197 }
198 }
199 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
200 return len;
201}
202
203static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
204 size_t count, loff_t *ppos)
205{
206 char *buf;
207 int len = 0;
208 ssize_t size;
209 struct cfspi *cfspi;
210
211 cfspi = file->private_data;
212 buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
213 if (!buf)
214 return 0;
215
216 /* Print out debug information. */
217 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
218 "Current frame:\n");
219
220 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
221 "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
222
223 len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
224 cfspi->xfer.va_tx,
225 (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
226
227 len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
228 "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
229
230 len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
231 cfspi->xfer.va_rx,
232 (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);
233
234 size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
235 kfree(buf);
236
237 return size;
238}
239
240static const struct file_operations dbgfs_state_fops = {
241 .open = dbgfs_open,
242 .read = dbgfs_state,
243 .owner = THIS_MODULE
244};
245
246static const struct file_operations dbgfs_frame_fops = {
247 .open = dbgfs_open,
248 .read = dbgfs_frame,
249 .owner = THIS_MODULE
250};
251
252static inline void dev_debugfs_add(struct cfspi *cfspi)
253{
254 cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
255 cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO,
256 cfspi->dbgfs_dir, cfspi,
257 &dbgfs_state_fops);
258 cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
259 cfspi->dbgfs_dir, cfspi,
260 &dbgfs_frame_fops);
261}
262
263inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
264{
265 cfspi->dbg_state = state;
266};
267#else
268
269static inline void driver_debugfs_create(void)
270{
271}
272
273static inline void driver_debugfs_remove(void)
274{
275}
276
277static inline void dev_debugfs_add(struct cfspi *cfspi)
278{
279}
280
281static inline void dev_debugfs_rem(struct cfspi *cfspi)
282{
283}
284
285inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
286{
287}
288#endif /* CONFIG_DEBUG_FS */
289
290static LIST_HEAD(cfspi_list);
291static spinlock_t cfspi_list_lock;
292
293/* SPI uplink head alignment. */
294static ssize_t show_up_head_align(struct device_driver *driver, char *buf)
295{
296 return sprintf(buf, "%d\n", spi_up_head_align);
297}
298
299static DRIVER_ATTR(up_head_align, S_IRUSR, show_up_head_align, NULL);
300
301/* SPI uplink tail alignment. */
302static ssize_t show_up_tail_align(struct device_driver *driver, char *buf)
303{
304 return sprintf(buf, "%d\n", spi_up_tail_align);
305}
306
307static DRIVER_ATTR(up_tail_align, S_IRUSR, show_up_tail_align, NULL);
308
309/* SPI downlink head alignment. */
310static ssize_t show_down_head_align(struct device_driver *driver, char *buf)
311{
312 return sprintf(buf, "%d\n", spi_down_head_align);
313}
314
315static DRIVER_ATTR(down_head_align, S_IRUSR, show_down_head_align, NULL);
316
317/* SPI downlink tail alignment. */
318static ssize_t show_down_tail_align(struct device_driver *driver, char *buf)
319{
320 return sprintf(buf, "%d\n", spi_down_tail_align);
321}
322
323static DRIVER_ATTR(down_tail_align, S_IRUSR, show_down_tail_align, NULL);
324
325/* SPI frame alignment. */
326static ssize_t show_frame_align(struct device_driver *driver, char *buf)
327{
328 return sprintf(buf, "%d\n", spi_frm_align);
329}
330
331static DRIVER_ATTR(frame_align, S_IRUSR, show_frame_align, NULL);
332
/*
 * Fill the SPI transmit buffer from the committed queue (cfspi->chead,
 * populated by cfspi_xmitlen()), adding SPI head/tail padding so each
 * CAIF payload is aligned per the spi_up_*_align module parameters.
 *
 * @cfspi: device instance whose committed queue is drained.
 * @buf:   destination transfer buffer.
 * @len:   target byte count; the loop stops once at least this many
 *         bytes are produced or the queue runs empty.
 *
 * Returns the number of bytes actually written to @buf.
 */
int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
{
	u8 *dst = buf;
	caif_assert(buf);

	do {
		struct sk_buff *skb;
		struct caif_payload_info *info;
		int spad = 0;	/* head padding, incl. pad-length byte */
		int epad;	/* tail padding */

		skb = skb_dequeue(&cfspi->chead);
		if (!skb)
			break;

		/*
		 * Calculate length of frame including SPI padding.
		 * The payload position is found in the control buffer.
		 */
		info = (struct caif_payload_info *)&skb->cb;

		/*
		 * Compute head offset i.e. number of bytes to add to
		 * get the start of the payload aligned.  The first pad
		 * byte records the remaining pad length (spad - 1).
		 */
		if (spi_up_head_align) {
			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
			*dst = (u8)(spad - 1);
			dst += spad;
		}

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, dst, skb->len);
		dst += skb->len;
		cfspi->ndev->stats.tx_packets++;
		cfspi->ndev->stats.tx_bytes += skb->len;

		/*
		 * Compute tail offset i.e. number of bytes to add to
		 * get the complete CAIF frame aligned.
		 */
		epad = (skb->len + spad) & spi_up_tail_align;
		dst += epad;

		dev_kfree_skb(skb);

	} while ((dst - buf) < len);

	return dst - buf;
}
383
/*
 * Compute the length of the next SPI transfer by committing queued
 * packets (qhead -> chead) until the frame would exceed
 * CAIF_MAX_SPI_FRAME bytes or CAIF_MAX_SPI_PKTS packets are committed.
 * Also re-enables CAIF flow when the queue drops below the low water
 * mark after a previous flow-off.
 *
 * Returns the total frame length in bytes including per-packet SPI
 * head/tail padding.
 */
int cfspi_xmitlen(struct cfspi *cfspi)
{
	struct sk_buff *skb = NULL;
	int frm_len = 0;
	int pkts = 0;

	/*
	 * Decommit previously committed frames.
	 * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
	 */
	while (skb_peek(&cfspi->chead)) {
		skb = skb_dequeue_tail(&cfspi->chead);
		skb_queue_head(&cfspi->qhead, skb);
	}

	do {
		struct caif_payload_info *info = NULL;
		int spad = 0;	/* head padding, incl. pad-length byte */
		int epad = 0;	/* tail padding */

		skb = skb_dequeue(&cfspi->qhead);
		if (!skb)
			break;

		/*
		 * Calculate length of frame including SPI padding.
		 * The payload position is found in the control buffer.
		 */
		info = (struct caif_payload_info *)&skb->cb;

		/*
		 * Compute head offset i.e. number of bytes to add to
		 * get the start of the payload aligned.
		 */
		if (spi_up_head_align)
			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);

		/*
		 * Compute tail offset i.e. number of bytes to add to
		 * get the complete CAIF frame aligned.
		 */
		epad = (skb->len + spad) & spi_up_tail_align;

		/* Commit the packet if it still fits in the frame. */
		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
			skb_queue_tail(&cfspi->chead, skb);
			pkts++;
			frm_len += skb->len + spad + epad;
		} else {
			/* Put back packet. */
			skb_queue_head(&cfspi->qhead, skb);
		}
	} while (pkts <= CAIF_MAX_SPI_PKTS);

	/*
	 * Send flow on if previously sent flow off
	 * and now go below the low water mark
	 */
	if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
		cfspi->cfdev.flowctrl) {
		cfspi->flow_off_sent = 0;
		cfspi->cfdev.flowctrl(cfspi->ndev, 1);
	}

	return frm_len;
}
449
/*
 * Slave-select callback from the platform SPI interface.  Records the
 * SS state in cfspi->state (flagging a transfer on assert) and wakes
 * the xfer wait queue.  May run in interrupt context, in which case
 * the state bits are set without taking the spinlock.
 */
static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
{
	struct cfspi *cfspi = (struct cfspi *)ifc->priv;

	if (!in_interrupt())
		spin_lock(&cfspi->lock);
	if (assert) {
		set_bit(SPI_SS_ON, &cfspi->state);
		set_bit(SPI_XFER, &cfspi->state);
	} else {
		set_bit(SPI_SS_OFF, &cfspi->state);
	}
	if (!in_interrupt())
		spin_unlock(&cfspi->lock);

	/* Wake up the xfer thread. */
	wake_up_interruptible(&cfspi->wait);
}
468
/*
 * Transfer-done callback from the platform SPI interface: signal the
 * completion so the waiting xfer thread can proceed.
 */
static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
{
	struct cfspi *cfspi = (struct cfspi *)ifc->priv;

	complete(&cfspi->comp);
}
476
/*
 * ndo_start_xmit handler: queue the skb for the xfer thread, flag a
 * transfer, and assert CAIF flow-off once the queue length exceeds the
 * high water mark.
 */
static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfspi *cfspi = NULL;
	unsigned long flags;
	if (!dev)
		return -EINVAL;

	cfspi = netdev_priv(dev);

	skb_queue_tail(&cfspi->qhead, skb);

	spin_lock_irqsave(&cfspi->lock, flags);
	if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
		/* Wake up xfer thread. */
		wake_up_interruptible(&cfspi->wait);
	}
	spin_unlock_irqrestore(&cfspi->lock, flags);

	/* Send flow off if the number of queued packets is above the
	 * high water mark (the check is on qlen, not bytes). */
	if (!cfspi->flow_off_sent &&
		cfspi->qhead.qlen > cfspi->qd_high_mark &&
		cfspi->cfdev.flowctrl) {
		cfspi->flow_off_sent = 1;
		cfspi->cfdev.flowctrl(cfspi->ndev, 0);
	}

	return 0;
}
505
/*
 * Unpack a received SPI buffer into individual CAIF frames and push
 * them up the stack (or loop them back to the TX path in spi_loop
 * mode).
 *
 * @cfspi: device instance.
 * @buf:   raw receive buffer.
 * @len:   minimum number of bytes to consume from @buf.
 *
 * Returns the number of bytes consumed.
 */
int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
{
	u8 *src = buf;

	caif_assert(buf != NULL);

	do {
		int res;
		struct sk_buff *skb = NULL;
		int spad = 0;	/* head padding, incl. pad-length byte */
		int epad = 0;	/* tail padding */
		u8 *dst = NULL;
		int pkt_len = 0;

		/*
		 * Compute head offset i.e. number of bytes added to
		 * get the start of the payload aligned.  The first
		 * byte carries the remaining pad length.
		 */
		if (spi_down_head_align) {
			spad = 1 + *src;
			src += spad;
		}

		/* Read length of CAIF frame (little endian). */
		pkt_len = *src;
		pkt_len |= ((*(src+1)) << 8) & 0xFF00;
		pkt_len += 2;	/* Add FCS fields. */

		/* Get a suitable caif packet and copy in data. */

		skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
		/* NOTE(review): allocation failure is only asserted,
		 * not handled - skb_put() below would dereference NULL
		 * if netdev_alloc_skb() fails.  Confirm intended. */
		caif_assert(skb != NULL);

		dst = skb_put(skb, pkt_len);
		memcpy(dst, src, pkt_len);
		src += pkt_len;

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfspi->ndev;

		/*
		 * Push received packet up the stack.
		 */
		if (!spi_loop)
			res = netif_rx_ni(skb);
		else
			res = cfspi_xmit(skb, cfspi->ndev);

		if (!res) {
			cfspi->ndev->stats.rx_packets++;
			cfspi->ndev->stats.rx_bytes += pkt_len;
		} else
			cfspi->ndev->stats.rx_dropped++;

		/*
		 * Compute tail offset i.e. number of bytes added to
		 * get the complete CAIF frame aligned.
		 */
		epad = (pkt_len + spad) & spi_down_tail_align;
		src += epad;
	} while ((src - buf) < len);

	return src - buf;
}
571
/* net_device open: enable transmission by starting the TX queue. */
static int cfspi_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}
577
/* net_device stop: halt transmission by stopping the TX queue. */
static int cfspi_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
/* net_device callbacks for the CAIF SPI interface. */
static const struct net_device_ops cfspi_ops = {
	.ndo_open = cfspi_open,
	.ndo_stop = cfspi_close,
	.ndo_start_xmit = cfspi_xmit
};
588
/*
 * alloc_netdev() setup callback: initialise the net_device and the
 * embedded struct cfspi for a point-to-point CAIF SPI link (no ARP,
 * no framing/STX/FCS handled by the CAIF core).
 */
static void cfspi_setup(struct net_device *dev)
{
	struct cfspi *cfspi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfspi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
	dev->tx_queue_len = 0;
	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
	/* free_netdev() runs automatically when the device is released */
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfspi->qhead);
	skb_queue_head_init(&cfspi->chead);
	cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfspi->cfdev.use_frag = false;
	cfspi->cfdev.use_stx = false;
	cfspi->cfdev.use_fcs = false;
	cfspi->ndev = dev;
}
607
608int cfspi_spi_probe(struct platform_device *pdev)
609{
610 struct cfspi *cfspi = NULL;
611 struct net_device *ndev;
612 struct cfspi_dev *dev;
613 int res;
614 dev = (struct cfspi_dev *)pdev->dev.platform_data;
615
616 ndev = alloc_netdev(sizeof(struct cfspi),
617 "cfspi%d", cfspi_setup);
618 if (!dev)
619 return -ENODEV;
620
621 cfspi = netdev_priv(ndev);
622 netif_stop_queue(ndev);
623 cfspi->ndev = ndev;
624 cfspi->pdev = pdev;
625
626 /* Set flow info */
627 cfspi->flow_off_sent = 0;
628 cfspi->qd_low_mark = LOW_WATER_MARK;
629 cfspi->qd_high_mark = HIGH_WATER_MARK;
630
631 /* Assign the SPI device. */
632 cfspi->dev = dev;
633 /* Assign the device ifc to this SPI interface. */
634 dev->ifc = &cfspi->ifc;
635
636 /* Allocate DMA buffers. */
637 cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
638 if (!cfspi->xfer.va_tx) {
639 printk(KERN_WARNING
640 "CFSPI: failed to allocate dma TX buffer.\n");
641 res = -ENODEV;
642 goto err_dma_alloc_tx;
643 }
644
645 cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
646
647 if (!cfspi->xfer.va_rx) {
648 printk(KERN_WARNING
649 "CFSPI: failed to allocate dma TX buffer.\n");
650 res = -ENODEV;
651 goto err_dma_alloc_rx;
652 }
653
654 /* Initialize the work queue. */
655 INIT_WORK(&cfspi->work, cfspi_xfer);
656
657 /* Initialize spin locks. */
658 spin_lock_init(&cfspi->lock);
659
660 /* Initialize flow control state. */
661 cfspi->flow_stop = false;
662
663 /* Initialize wait queue. */
664 init_waitqueue_head(&cfspi->wait);
665
666 /* Create work thread. */
667 cfspi->wq = create_singlethread_workqueue(dev->name);
668 if (!cfspi->wq) {
669 printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
670 res = -ENODEV;
671 goto err_create_wq;
672 }
673
674 /* Initialize work queue. */
675 init_completion(&cfspi->comp);
676
677 /* Create debugfs entries. */
678 dev_debugfs_add(cfspi);
679
680 /* Set up the ifc. */
681 cfspi->ifc.ss_cb = cfspi_ss_cb;
682 cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
683 cfspi->ifc.priv = cfspi;
684
685 /* Add CAIF SPI device to list. */
686 spin_lock(&cfspi_list_lock);
687 list_add_tail(&cfspi->list, &cfspi_list);
688 spin_unlock(&cfspi_list_lock);
689
690 /* Schedule the work queue. */
691 queue_work(cfspi->wq, &cfspi->work);
692
693 /* Register network device. */
694 res = register_netdev(ndev);
695 if (res) {
696 printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
697 goto err_net_reg;
698 }
699 return res;
700
701 err_net_reg:
702 dev_debugfs_rem(cfspi);
703 set_bit(SPI_TERMINATE, &cfspi->state);
704 wake_up_interruptible(&cfspi->wait);
705 destroy_workqueue(cfspi->wq);
706 err_create_wq:
707 dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
708 err_dma_alloc_rx:
709 dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
710 err_dma_alloc_tx:
711 free_netdev(ndev);
712
713 return res;
714}
715
716int cfspi_spi_remove(struct platform_device *pdev)
717{
718 struct list_head *list_node;
719 struct list_head *n;
720 struct cfspi *cfspi = NULL;
721 struct cfspi_dev *dev;
722
723 dev = (struct cfspi_dev *)pdev->dev.platform_data;
724 spin_lock(&cfspi_list_lock);
725 list_for_each_safe(list_node, n, &cfspi_list) {
726 cfspi = list_entry(list_node, struct cfspi, list);
727 /* Find the corresponding device. */
728 if (cfspi->dev == dev) {
729 /* Remove from list. */
730 list_del(list_node);
731 /* Free DMA buffers. */
732 dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
733 dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
734 set_bit(SPI_TERMINATE, &cfspi->state);
735 wake_up_interruptible(&cfspi->wait);
736 destroy_workqueue(cfspi->wq);
737 /* Destroy debugfs directory and files. */
738 dev_debugfs_rem(cfspi);
739 unregister_netdev(cfspi->ndev);
740 spin_unlock(&cfspi_list_lock);
741 return 0;
742 }
743 }
744 spin_unlock(&cfspi_list_lock);
745 return -ENODEV;
746}
747
/*
 * Module exit: unregister every remaining CAIF SPI platform device,
 * remove the sysfs driver attributes, unregister the platform driver
 * and tear down the debugfs root.
 *
 * NOTE(review): the list is walked without cfspi_list_lock here --
 * presumably safe at module exit because no new devices can appear,
 * and platform_device_unregister() ends up in cfspi_spi_remove(),
 * which performs the locked unlink.  Confirm.
 */
static void __exit cfspi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfspi *cfspi = NULL;

	list_for_each_safe(list_node, n, &cfspi_list) {
		cfspi = list_entry(list_node, struct cfspi, list);
		platform_device_unregister(cfspi->pdev);
	}

	/* Destroy sysfs files. */
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_up_head_align);
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_up_tail_align);
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_down_head_align);
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_down_tail_align);
	driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
	/* Unregister platform driver. */
	platform_driver_unregister(&cfspi_spi_driver);
	/* Destroy debugfs root directory. */
	driver_debugfs_remove();
}
774
/*
 * Module init: register the platform driver and expose the SPI framing
 * alignment parameters as sysfs driver attributes.  On failure, the
 * attributes created so far are removed in reverse order via the goto
 * ladder before the error code is returned.
 */
static int __init cfspi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfspi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfspi_spi_driver);
	if (result) {
		printk(KERN_ERR "Could not register platform SPI driver.\n");
		goto err_dev_register;
	}

	/* Create sysfs files. */
	result =
	    driver_create_file(&cfspi_spi_driver.driver,
			       &driver_attr_up_head_align);
	if (result) {
		printk(KERN_ERR "Sysfs creation failed 1.\n");
		goto err_create_up_head_align;
	}

	result =
	    driver_create_file(&cfspi_spi_driver.driver,
			       &driver_attr_up_tail_align);
	if (result) {
		printk(KERN_ERR "Sysfs creation failed 2.\n");
		goto err_create_up_tail_align;
	}

	result =
	    driver_create_file(&cfspi_spi_driver.driver,
			       &driver_attr_down_head_align);
	if (result) {
		printk(KERN_ERR "Sysfs creation failed 3.\n");
		goto err_create_down_head_align;
	}

	result =
	    driver_create_file(&cfspi_spi_driver.driver,
			       &driver_attr_down_tail_align);
	if (result) {
		printk(KERN_ERR "Sysfs creation failed 4.\n");
		goto err_create_down_tail_align;
	}

	result =
	    driver_create_file(&cfspi_spi_driver.driver,
			       &driver_attr_frame_align);
	if (result) {
		printk(KERN_ERR "Sysfs creation failed 5.\n");
		goto err_create_frame_align;
	}
	driver_debugfs_create();
	return result;

	/* Unwind in reverse creation order. */
 err_create_frame_align:
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_down_tail_align);
 err_create_down_tail_align:
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_down_head_align);
 err_create_down_head_align:
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_up_tail_align);
 err_create_up_tail_align:
	driver_remove_file(&cfspi_spi_driver.driver,
			   &driver_attr_up_head_align);
 err_create_up_head_align:
 err_dev_register:
	return result;
}
848
849module_init(cfspi_init_module);
850module_exit(cfspi_exit_module);
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
new file mode 100644
index 000000000000..077ccf840edf
--- /dev/null
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -0,0 +1,252 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2.
6 */
7#include <linux/version.h>
8#include <linux/init.h>
9#include <linux/module.h>
10#include <linux/device.h>
11#include <linux/platform_device.h>
12#include <linux/string.h>
13#include <linux/semaphore.h>
14#include <linux/workqueue.h>
15#include <linux/completion.h>
16#include <linux/list.h>
17#include <linux/interrupt.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/sched.h>
21#include <linux/debugfs.h>
22#include <net/caif/caif_spi.h>
23
#ifndef CONFIG_CAIF_SPI_SYNC
/* Async mode: payload starts right after the SPI command word. */
#define SPI_DATA_POS SPI_CMD_SZ
/* Offset of the next SPI command word inside the RX DMA buffer:
 * in async mode it follows the current committed packet. */
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
	return cfspi->rx_cpck_len;
}
#else
/* Sync mode: payload starts at the beginning of the buffer. */
#define SPI_DATA_POS 0
/* Sync mode: the SPI command word is always at offset 0. */
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
	return 0;
}
#endif
37
/*
 * SPI framing alignment parameters (bytes).  Mirrored by the
 * *_align driver attributes registered in caif_spi.c and consumed by
 * the framing code (cfspi_rxfrm()/cfspi_xfer()).
 */
int spi_frm_align = 2;
int spi_up_head_align = 1;
int spi_up_tail_align;		/* implicitly 0 */
int spi_down_head_align = 3;
int spi_down_tail_align = 1;
43
#ifdef CONFIG_DEBUG_FS
/* Snapshot the current command and transfer lengths so debugfs can
 * show the previous burst alongside the current one. */
static inline void debugfs_store_prev(struct cfspi *cfspi)
{
	/* Store previous command for debugging reasons.*/
	cfspi->pcmd = cfspi->cmd;
	/* Store previous transfer. */
	cfspi->tx_ppck_len = cfspi->tx_cpck_len;
	cfspi->rx_ppck_len = cfspi->rx_cpck_len;
}
#else
/* No-op when debugfs is disabled. */
static inline void debugfs_store_prev(struct cfspi *cfspi)
{
}
#endif
58
/*
 * Main SPI transfer loop, run as a work item on the device's dedicated
 * workqueue.  Repeats until SPI_TERMINATE is set:
 *
 *   1. sleep until the master asserts SS or a frame is queued for TX;
 *   2. copy the committed TX frame (if any) into the DMA buffer after
 *      the SPI indication header, and announce the NEXT frame's length;
 *   3. start the DMA transfer, toggle the xfer signal with the minimum
 *      transition time, and wait for completion;
 *   4. deliver any received CAIF packet and parse the master's next
 *      command/length from the RX buffer;
 *   5. clear SPI_XFER when the master sent EOT and nothing is pending.
 *
 * The exact statement order (signal toggling, udelay() spacing, state
 * bit handling) implements the CAIF SPI handshake and must not be
 * rearranged.
 */
void cfspi_xfer(struct work_struct *work)
{
	struct cfspi *cfspi;
	u8 *ptr = NULL;
	unsigned long flags;
	int ret;
	cfspi = container_of(work, struct cfspi, work);

	/* Initialize state. */
	cfspi->cmd = SPI_CMD_EOT;

	for (;;) {

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);

		/* Wait for master talk or transmit event. */
		wait_event_interruptible(cfspi->wait,
				 test_bit(SPI_XFER, &cfspi->state) ||
				 test_bit(SPI_TERMINATE, &cfspi->state));

		if (test_bit(SPI_TERMINATE, &cfspi->state))
			return;

#if CFSPI_DBG_PREFILL
		/* Prefill buffers for easier debugging. */
		memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
		memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
#endif	/* CFSPI_DBG_PREFILL */

		cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);

		/* Check whether we have a committed frame. */
		if (cfspi->tx_cpck_len) {
			int len;

			cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);

			/* Copy committed SPI frames after the SPI indication. */
			ptr = (u8 *) cfspi->xfer.va_tx;
			ptr += SPI_IND_SZ;
			len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
			WARN_ON(len != cfspi->tx_cpck_len);
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);

		/* Get length of next frame to commit. */
		cfspi->tx_npck_len = cfspi_xmitlen(cfspi);

		WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);

		/*
		 * Add indication and length at the beginning of the frame,
		 * using little endian.
		 */
		ptr = (u8 *) cfspi->xfer.va_tx;
		*ptr++ = SPI_CMD_IND;
		*ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;
		*ptr++ = cfspi->tx_npck_len & 0x00FF;
		*ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;

		/* Calculate length of DMAs. */
		cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
		cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;

		/* Add SPI TX frame alignment padding, if necessary. */
		if (cfspi->tx_cpck_len &&
			(cfspi->xfer.tx_dma_len % spi_frm_align)) {

			cfspi->xfer.tx_dma_len += spi_frm_align -
			    (cfspi->xfer.tx_dma_len % spi_frm_align);
		}

		/* Add SPI RX frame alignment padding, if necessary. */
		if (cfspi->rx_cpck_len &&
			(cfspi->xfer.rx_dma_len % spi_frm_align)) {

			cfspi->xfer.rx_dma_len += spi_frm_align -
			    (cfspi->xfer.rx_dma_len % spi_frm_align);
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);

		/* Start transfer. */
		ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
		WARN_ON(ret);

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);

		/*
		 * TODO: We might be able to make an assumption if this is the
		 * first loop. Make sure that minimum toggle time is respected.
		 */
		udelay(MIN_TRANSITION_TIME_USEC);

		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);

		/* Signal that we are ready to receive data. */
		cfspi->dev->sig_xfer(true, cfspi->dev);

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);

		/* Wait for transfer completion. */
		wait_for_completion(&cfspi->comp);

		cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);

		if (cfspi->cmd == SPI_CMD_EOT) {
			/*
			 * Clear the master talk bit. A xfer is always at
			 * least two bursts.
			 */
			clear_bit(SPI_SS_ON, &cfspi->state);
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);

		/* Make sure that the minimum toggle time is respected. */
		if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
					cfspi->dev->clk_mhz) <
			MIN_TRANSITION_TIME_USEC) {

			udelay(MIN_TRANSITION_TIME_USEC -
				SPI_XFER_TIME_USEC
				(cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);

		/* De-assert transfer signal. */
		cfspi->dev->sig_xfer(false, cfspi->dev);

		/* Check whether we received a CAIF packet. */
		if (cfspi->rx_cpck_len) {
			int len;

			cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);

			/* Parse SPI frame. */
			ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));

			len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
			WARN_ON(len != cfspi->rx_cpck_len);
		}

		/* Check the next SPI command and length (little endian). */
		ptr = (u8 *) cfspi->xfer.va_rx;

		ptr += forward_to_spi_cmd(cfspi);

		cfspi->cmd = *ptr++;
		cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
		cfspi->rx_npck_len = *ptr++;
		cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;

		WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
		WARN_ON(cfspi->cmd > SPI_CMD_EOT);

		debugfs_store_prev(cfspi);

		/* Check whether the master issued an EOT command. */
		if (cfspi->cmd == SPI_CMD_EOT) {
			/* Reset state. */
			cfspi->tx_cpck_len = 0;
			cfspi->rx_cpck_len = 0;
		} else {
			/* Update state. */
			cfspi->tx_cpck_len = cfspi->tx_npck_len;
			cfspi->rx_cpck_len = cfspi->rx_npck_len;
		}

		/*
		 * Check whether we need to clear the xfer bit.
		 * Spin lock needed for packet insertion.
		 * Test and clear of different bits
		 * are not supported.
		 */
		spin_lock_irqsave(&cfspi->lock, flags);
		if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
			&& !test_bit(SPI_SS_ON, &cfspi->state))
			clear_bit(SPI_XFER, &cfspi->state);

		spin_unlock_irqrestore(&cfspi->lock, flags);
	}
}
244
/* Platform driver glue; probe/remove are implemented in caif_spi.c. */
struct platform_driver cfspi_spi_driver = {
	.probe = cfspi_spi_probe,
	.remove = cfspi_spi_remove,
	.driver = {
		   .name = "cfspi_sspi",
		   .owner = THIS_MODULE,
		   },
};
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2c5227c02fa0..9d9e45394433 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -73,6 +73,15 @@ config CAN_JANZ_ICAN3
73 This driver can also be built as a module. If so, the module will be 73 This driver can also be built as a module. If so, the module will be
74 called janz-ican3.ko. 74 called janz-ican3.ko.
75 75
76config HAVE_CAN_FLEXCAN
77 bool
78
79config CAN_FLEXCAN
80 tristate "Support for Freescale FLEXCAN based chips"
81 depends on CAN_DEV && HAVE_CAN_FLEXCAN
82 ---help---
	  Say Y here if you want support for Freescale FlexCAN.
84
76source "drivers/net/can/mscan/Kconfig" 85source "drivers/net/can/mscan/Kconfig"
77 86
78source "drivers/net/can/sja1000/Kconfig" 87source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 9047cd066fea..00575373bbd0 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -16,5 +16,6 @@ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
17obj-$(CONFIG_CAN_BFIN) += bfin_can.o 17obj-$(CONFIG_CAN_BFIN) += bfin_can.o
18obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o 18obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
19obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
19 20
20ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 21ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
new file mode 100644
index 000000000000..ef443a090ba7
--- /dev/null
+++ b/drivers/net/can/flexcan.c
@@ -0,0 +1,1030 @@
1/*
2 * flexcan.c - FLEXCAN CAN controller driver
3 *
4 * Copyright (c) 2005-2006 Varma Electronics Oy
5 * Copyright (c) 2009 Sascha Hauer, Pengutronix
6 * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix
7 *
8 * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
9 *
10 * LICENCE:
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation version 2.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 */
21
22#include <linux/netdevice.h>
23#include <linux/can.h>
24#include <linux/can/dev.h>
25#include <linux/can/error.h>
26#include <linux/can/platform/flexcan.h>
27#include <linux/clk.h>
28#include <linux/delay.h>
29#include <linux/if_arp.h>
30#include <linux/if_ether.h>
31#include <linux/interrupt.h>
32#include <linux/io.h>
33#include <linux/kernel.h>
34#include <linux/list.h>
35#include <linux/module.h>
36#include <linux/platform_device.h>
37
38#include <mach/clock.h>
39
40#define DRV_NAME "flexcan"
41
42/* 8 for RX fifo and 2 error handling */
43#define FLEXCAN_NAPI_WEIGHT (8 + 2)
44
45/* FLEXCAN module configuration register (CANMCR) bits */
46#define FLEXCAN_MCR_MDIS BIT(31)
47#define FLEXCAN_MCR_FRZ BIT(30)
48#define FLEXCAN_MCR_FEN BIT(29)
49#define FLEXCAN_MCR_HALT BIT(28)
50#define FLEXCAN_MCR_NOT_RDY BIT(27)
51#define FLEXCAN_MCR_WAK_MSK BIT(26)
52#define FLEXCAN_MCR_SOFTRST BIT(25)
53#define FLEXCAN_MCR_FRZ_ACK BIT(24)
54#define FLEXCAN_MCR_SUPV BIT(23)
55#define FLEXCAN_MCR_SLF_WAK BIT(22)
56#define FLEXCAN_MCR_WRN_EN BIT(21)
57#define FLEXCAN_MCR_LPM_ACK BIT(20)
58#define FLEXCAN_MCR_WAK_SRC BIT(19)
59#define FLEXCAN_MCR_DOZE BIT(18)
60#define FLEXCAN_MCR_SRX_DIS BIT(17)
61#define FLEXCAN_MCR_BCC BIT(16)
62#define FLEXCAN_MCR_LPRIO_EN BIT(13)
63#define FLEXCAN_MCR_AEN BIT(12)
64#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf)
65#define FLEXCAN_MCR_IDAM_A (0 << 8)
66#define FLEXCAN_MCR_IDAM_B (1 << 8)
67#define FLEXCAN_MCR_IDAM_C (2 << 8)
68#define FLEXCAN_MCR_IDAM_D (3 << 8)
69
70/* FLEXCAN control register (CANCTRL) bits */
71#define FLEXCAN_CTRL_PRESDIV(x) (((x) & 0xff) << 24)
72#define FLEXCAN_CTRL_RJW(x) (((x) & 0x03) << 22)
73#define FLEXCAN_CTRL_PSEG1(x) (((x) & 0x07) << 19)
74#define FLEXCAN_CTRL_PSEG2(x) (((x) & 0x07) << 16)
75#define FLEXCAN_CTRL_BOFF_MSK BIT(15)
76#define FLEXCAN_CTRL_ERR_MSK BIT(14)
77#define FLEXCAN_CTRL_CLK_SRC BIT(13)
78#define FLEXCAN_CTRL_LPB BIT(12)
79#define FLEXCAN_CTRL_TWRN_MSK BIT(11)
80#define FLEXCAN_CTRL_RWRN_MSK BIT(10)
81#define FLEXCAN_CTRL_SMP BIT(7)
82#define FLEXCAN_CTRL_BOFF_REC BIT(6)
83#define FLEXCAN_CTRL_TSYN BIT(5)
84#define FLEXCAN_CTRL_LBUF BIT(4)
85#define FLEXCAN_CTRL_LOM BIT(3)
86#define FLEXCAN_CTRL_PROPSEG(x) ((x) & 0x07)
87#define FLEXCAN_CTRL_ERR_BUS (FLEXCAN_CTRL_ERR_MSK)
88#define FLEXCAN_CTRL_ERR_STATE \
89 (FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \
90 FLEXCAN_CTRL_BOFF_MSK)
91#define FLEXCAN_CTRL_ERR_ALL \
92 (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
93
94/* FLEXCAN error and status register (ESR) bits */
95#define FLEXCAN_ESR_TWRN_INT BIT(17)
96#define FLEXCAN_ESR_RWRN_INT BIT(16)
97#define FLEXCAN_ESR_BIT1_ERR BIT(15)
98#define FLEXCAN_ESR_BIT0_ERR BIT(14)
99#define FLEXCAN_ESR_ACK_ERR BIT(13)
100#define FLEXCAN_ESR_CRC_ERR BIT(12)
101#define FLEXCAN_ESR_FRM_ERR BIT(11)
102#define FLEXCAN_ESR_STF_ERR BIT(10)
103#define FLEXCAN_ESR_TX_WRN BIT(9)
104#define FLEXCAN_ESR_RX_WRN BIT(8)
105#define FLEXCAN_ESR_IDLE BIT(7)
106#define FLEXCAN_ESR_TXRX BIT(6)
107#define FLEXCAN_EST_FLT_CONF_SHIFT (4)
108#define FLEXCAN_ESR_FLT_CONF_MASK (0x3 << FLEXCAN_EST_FLT_CONF_SHIFT)
109#define FLEXCAN_ESR_FLT_CONF_ACTIVE (0x0 << FLEXCAN_EST_FLT_CONF_SHIFT)
110#define FLEXCAN_ESR_FLT_CONF_PASSIVE (0x1 << FLEXCAN_EST_FLT_CONF_SHIFT)
111#define FLEXCAN_ESR_BOFF_INT BIT(2)
112#define FLEXCAN_ESR_ERR_INT BIT(1)
113#define FLEXCAN_ESR_WAK_INT BIT(0)
114#define FLEXCAN_ESR_ERR_BUS \
115 (FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \
116 FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \
117 FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR)
118#define FLEXCAN_ESR_ERR_STATE \
119 (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
120#define FLEXCAN_ESR_ERR_ALL \
121 (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
122
123/* FLEXCAN interrupt flag register (IFLAG) bits */
124#define FLEXCAN_TX_BUF_ID 8
125#define FLEXCAN_IFLAG_BUF(x) BIT(x)
126#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
127#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
128#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
129#define FLEXCAN_IFLAG_DEFAULT \
130 (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \
131 FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
132
133/* FLEXCAN message buffers */
134#define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24)
135#define FLEXCAN_MB_CNT_SRR BIT(22)
136#define FLEXCAN_MB_CNT_IDE BIT(21)
137#define FLEXCAN_MB_CNT_RTR BIT(20)
138#define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16)
139#define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff)
140
141#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
142
143/* Structure of the message buffer */
/* Structure of the message buffer */
struct flexcan_mb {
	u32 can_ctrl;	/* code / IDE / RTR / length / timestamp word */
	u32 can_id;	/* identifier word (std ID in bits 28..18) */
	u32 data[2];	/* payload; TX/RX paths access it as two be32 words */
};
149
/* Structure of the hardware registers, mapped at priv->base. */
struct flexcan_regs {
	u32 mcr;		/* 0x00 - module configuration */
	u32 ctrl;		/* 0x04 - control (bit timing, irq masks) */
	u32 timer;		/* 0x08 - free running timer */
	u32 _reserved1;		/* 0x0c */
	u32 rxgmask;		/* 0x10 - RX global mask */
	u32 rx14mask;		/* 0x14 */
	u32 rx15mask;		/* 0x18 */
	u32 ecr;		/* 0x1c - error counters (tx/rx) */
	u32 esr;		/* 0x20 - error and status */
	u32 imask2;		/* 0x24 */
	u32 imask1;		/* 0x28 - interrupt masks for MB 0..31 */
	u32 iflag2;		/* 0x2c */
	u32 iflag1;		/* 0x30 - interrupt flags for MB 0..31 */
	u32 _reserved2[19];
	struct flexcan_mb cantxfg[64];	/* message buffers */
};
168
/* Driver private data, reached via netdev_priv(). */
struct flexcan_priv {
	struct can_priv can;	/* NOTE(review): presumably must stay first
				 * for the can-dev framework -- confirm */
	struct net_device *dev;
	struct napi_struct napi;

	void __iomem *base;	/* mapped flexcan_regs */
	u32 reg_esr;		/* ESR bits saved by the irq handler for NAPI */
	u32 reg_ctrl_default;	/* CTRL value restored when re-enabling irqs */

	struct clk *clk;
	struct flexcan_platform_data *pdata;
};
181
/* Bit-timing limits of the FlexCAN core (CTRL register field widths). */
static struct can_bittiming_const flexcan_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
193
/*
 * Switch transceiver on or off via the board-specific platform
 * callback, if one was provided.
 */
static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on)
{
	if (priv->pdata && priv->pdata->transceiver_switch)
		priv->pdata->transceiver_switch(on);
}
202
/*
 * True if bus-error reporting is enabled and the (saved) ESR value
 * carries any bus-error bit, i.e. flexcan_poll_bus_err() should run.
 */
static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
					      u32 reg_esr)
{
	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
		(reg_esr & FLEXCAN_ESR_ERR_BUS);
}
209
/* Take the controller out of low-power disable mode (clear MCR[MDIS]). */
static inline void flexcan_chip_enable(struct flexcan_priv *priv)
{
	struct flexcan_regs __iomem *regs = priv->base;
	u32 reg;

	reg = readl(&regs->mcr);
	reg &= ~FLEXCAN_MCR_MDIS;
	writel(reg, &regs->mcr);

	/* give the module time to leave disable mode -- TODO confirm
	 * the 10us figure against the reference manual */
	udelay(10);
}
221
/* Put the controller into low-power disable mode (set MCR[MDIS]). */
static inline void flexcan_chip_disable(struct flexcan_priv *priv)
{
	struct flexcan_regs __iomem *regs = priv->base;
	u32 reg;

	reg = readl(&regs->mcr);
	reg |= FLEXCAN_MCR_MDIS;
	writel(reg, &regs->mcr);
}
231
232static int flexcan_get_berr_counter(const struct net_device *dev,
233 struct can_berr_counter *bec)
234{
235 const struct flexcan_priv *priv = netdev_priv(dev);
236 struct flexcan_regs __iomem *regs = priv->base;
237 u32 reg = readl(&regs->ecr);
238
239 bec->txerr = (reg >> 0) & 0xff;
240 bec->rxerr = (reg >> 8) & 0xff;
241
242 return 0;
243}
244
245static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
246{
247 const struct flexcan_priv *priv = netdev_priv(dev);
248 struct net_device_stats *stats = &dev->stats;
249 struct flexcan_regs __iomem *regs = priv->base;
250 struct can_frame *cf = (struct can_frame *)skb->data;
251 u32 can_id;
252 u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16);
253
254 if (can_dropped_invalid_skb(dev, skb))
255 return NETDEV_TX_OK;
256
257 netif_stop_queue(dev);
258
259 if (cf->can_id & CAN_EFF_FLAG) {
260 can_id = cf->can_id & CAN_EFF_MASK;
261 ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
262 } else {
263 can_id = (cf->can_id & CAN_SFF_MASK) << 18;
264 }
265
266 if (cf->can_id & CAN_RTR_FLAG)
267 ctrl |= FLEXCAN_MB_CNT_RTR;
268
269 if (cf->can_dlc > 0) {
270 u32 data = be32_to_cpup((__be32 *)&cf->data[0]);
271 writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]);
272 }
273 if (cf->can_dlc > 3) {
274 u32 data = be32_to_cpup((__be32 *)&cf->data[4]);
275 writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]);
276 }
277
278 writel(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
279 writel(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
280
281 kfree_skb(skb);
282
283 /* tx_packets is incremented in flexcan_irq */
284 stats->tx_bytes += cf->can_dlc;
285
286 return NETDEV_TX_OK;
287}
288
/*
 * Translate the ESR bus-error bits into a CAN error frame (@cf) and
 * update the error statistics.  Bit/ACK errors count as TX errors,
 * CRC/form/stuff errors as RX errors.
 */
static void do_bus_err(struct net_device *dev,
		       struct can_frame *cf, u32 reg_esr)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	int rx_errors = 0, tx_errors = 0;

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
		dev_dbg(dev->dev.parent, "BIT1_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		tx_errors = 1;
	}
	if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
		dev_dbg(dev->dev.parent, "BIT0_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		tx_errors = 1;
	}
	if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
		dev_dbg(dev->dev.parent, "ACK_ERR irq\n");
		cf->can_id |= CAN_ERR_ACK;
		cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
		tx_errors = 1;
	}
	if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
		dev_dbg(dev->dev.parent, "CRC_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_BIT;
		cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
		rx_errors = 1;
	}
	if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
		dev_dbg(dev->dev.parent, "FRM_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		rx_errors = 1;
	}
	if (reg_esr & FLEXCAN_ESR_STF_ERR) {
		dev_dbg(dev->dev.parent, "STF_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		rx_errors = 1;
	}

	priv->can.can_stats.bus_error++;
	if (rx_errors)
		dev->stats.rx_errors++;
	if (tx_errors)
		dev->stats.tx_errors++;
}
336
337static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
338{
339 struct sk_buff *skb;
340 struct can_frame *cf;
341
342 skb = alloc_can_err_skb(dev, &cf);
343 if (unlikely(!skb))
344 return 0;
345
346 do_bus_err(dev, cf, reg_esr);
347 netif_receive_skb(skb);
348
349 dev->stats.rx_packets++;
350 dev->stats.rx_bytes += cf->can_dlc;
351
352 return 1;
353}
354
/*
 * Fill @cf with a state-change error frame for the transition from
 * priv->can.state to @new_state and bump the matching statistics.
 *
 * The first switch deliberately falls through from ERROR_ACTIVE to
 * ERROR_WARNING so that a jump across several states reports both the
 * warning and the passive transition.
 */
static void do_state(struct net_device *dev,
		     struct can_frame *cf, enum can_state new_state)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	struct can_berr_counter bec;

	flexcan_get_berr_counter(dev, &bec);

	switch (priv->can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * from: ERROR_ACTIVE
		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
		 * =>  : there was a warning int
		 */
		if (new_state >= CAN_STATE_ERROR_WARNING &&
		    new_state <= CAN_STATE_BUS_OFF) {
			dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
			priv->can.can_stats.error_warning++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		}
	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
		/*
		 * from: ERROR_ACTIVE, ERROR_WARNING
		 * to  : ERROR_PASSIVE, BUS_OFF
		 * =>  : error passive int
		 */
		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
		    new_state <= CAN_STATE_BUS_OFF) {
			dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
			priv->can.can_stats.error_passive++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
		break;
	case CAN_STATE_BUS_OFF:
		dev_err(dev->dev.parent,
			"BUG! hardware recovered automatically from BUS_OFF\n");
		break;
	default:
		break;
	}

	/* process state changes depending on the new state */
	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		dev_dbg(dev->dev.parent, "Error Active\n");
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
		break;
	case CAN_STATE_BUS_OFF:
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}
}
420
421static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
422{
423 struct flexcan_priv *priv = netdev_priv(dev);
424 struct sk_buff *skb;
425 struct can_frame *cf;
426 enum can_state new_state;
427 int flt;
428
429 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
430 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
431 if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN |
432 FLEXCAN_ESR_RX_WRN))))
433 new_state = CAN_STATE_ERROR_ACTIVE;
434 else
435 new_state = CAN_STATE_ERROR_WARNING;
436 } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE))
437 new_state = CAN_STATE_ERROR_PASSIVE;
438 else
439 new_state = CAN_STATE_BUS_OFF;
440
441 /* state hasn't changed */
442 if (likely(new_state == priv->can.state))
443 return 0;
444
445 skb = alloc_can_err_skb(dev, &cf);
446 if (unlikely(!skb))
447 return 0;
448
449 do_state(dev, cf, new_state);
450 priv->can.state = new_state;
451 netif_receive_skb(skb);
452
453 dev->stats.rx_packets++;
454 dev->stats.rx_bytes += cf->can_dlc;
455
456 return 1;
457}
458
/*
 * Read one frame from message buffer 0 (the head of the RX FIFO) into
 * @cf and release the FIFO entry.
 */
static void flexcan_read_fifo(const struct net_device *dev,
			      struct can_frame *cf)
{
	const struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->base;
	struct flexcan_mb __iomem *mb = &regs->cantxfg[0];
	u32 reg_ctrl, reg_id;

	reg_ctrl = readl(&mb->can_ctrl);
	reg_id = readl(&mb->can_id);
	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;

	if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);

	/* payload words are big-endian on the bus */
	*(__be32 *)(cf->data + 0) = cpu_to_be32(readl(&mb->data[0]));
	*(__be32 *)(cf->data + 4) = cpu_to_be32(readl(&mb->data[1]));

	/* mark as read */
	writel(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
	/* NOTE(review): presumably this timer read unlocks the message
	 * buffer -- confirm against the FlexCAN reference manual */
	readl(&regs->timer);
}
485
486static int flexcan_read_frame(struct net_device *dev)
487{
488 struct net_device_stats *stats = &dev->stats;
489 struct can_frame *cf;
490 struct sk_buff *skb;
491
492 skb = alloc_can_skb(dev, &cf);
493 if (unlikely(!skb)) {
494 stats->rx_dropped++;
495 return 0;
496 }
497
498 flexcan_read_fifo(dev, cf);
499 netif_receive_skb(skb);
500
501 stats->rx_packets++;
502 stats->rx_bytes += cf->can_dlc;
503
504 return 1;
505}
506
/*
 * NAPI poll handler: process state changes, drain the RX FIFO and
 * report bus errors, up to @quota frames. If the budget was not
 * exhausted, complete NAPI and unmask the interrupts that
 * flexcan_irq() disabled.
 */
static int flexcan_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->base;
	u32 reg_iflag1, reg_esr;
	int work_done = 0;

	/*
	 * The error bits are cleared on read,
	 * use saved value from irq handler.
	 */
	reg_esr = readl(&regs->esr) | priv->reg_esr;

	/* handle state changes */
	work_done += flexcan_poll_state(dev, reg_esr);

	/* handle RX-FIFO */
	reg_iflag1 = readl(&regs->iflag1);
	while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
	       work_done < quota) {
		work_done += flexcan_read_frame(dev);
		reg_iflag1 = readl(&regs->iflag1);
	}

	/* report bus errors */
	if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota)
		work_done += flexcan_poll_bus_err(dev, reg_esr);

	if (work_done < quota) {
		napi_complete(napi);
		/* enable IRQs */
		writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
		writel(priv->reg_ctrl_default, &regs->ctrl);
	}

	return work_done;
}
545
/*
 * Interrupt handler (top half).
 *
 * RX, state-change and bus-error processing is deferred to NAPI: the
 * corresponding interrupt sources are masked here and re-enabled at the
 * end of flexcan_poll(). FIFO overflow and TX completion are cheap and
 * handled inline.
 */
static irqreturn_t flexcan_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_device_stats *stats = &dev->stats;
	struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->base;
	u32 reg_iflag1, reg_esr;

	reg_iflag1 = readl(&regs->iflag1);
	reg_esr = readl(&regs->esr);
	writel(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */

	/*
	 * schedule NAPI in case of:
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting is activated
	 */
	if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) ||
	    (reg_esr & FLEXCAN_ESR_ERR_STATE) ||
	    flexcan_has_and_handle_berr(priv, reg_esr)) {
		/*
		 * The error bits are cleared on read,
		 * save them for later use.
		 */
		priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
		/* mask RX-FIFO and error interrupts until NAPI has run */
		writel(FLEXCAN_IFLAG_DEFAULT & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE,
		       &regs->imask1);
		writel(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
		       &regs->ctrl);
		napi_schedule(&priv->napi);
	}

	/* FIFO overflow */
	if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
		writel(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
		dev->stats.rx_over_errors++;
		dev->stats.rx_errors++;
	}

	/* transmission complete interrupt */
	if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
		/* tx_bytes is incremented in flexcan_start_xmit */
		stats->tx_packets++;
		writel((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
		netif_wake_queue(dev);
	}

	return IRQ_HANDLED;
}
596
597static void flexcan_set_bittiming(struct net_device *dev)
598{
599 const struct flexcan_priv *priv = netdev_priv(dev);
600 const struct can_bittiming *bt = &priv->can.bittiming;
601 struct flexcan_regs __iomem *regs = priv->base;
602 u32 reg;
603
604 reg = readl(&regs->ctrl);
605 reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
606 FLEXCAN_CTRL_RJW(0x3) |
607 FLEXCAN_CTRL_PSEG1(0x7) |
608 FLEXCAN_CTRL_PSEG2(0x7) |
609 FLEXCAN_CTRL_PROPSEG(0x7) |
610 FLEXCAN_CTRL_LPB |
611 FLEXCAN_CTRL_SMP |
612 FLEXCAN_CTRL_LOM);
613
614 reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
615 FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
616 FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) |
617 FLEXCAN_CTRL_RJW(bt->sjw - 1) |
618 FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);
619
620 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
621 reg |= FLEXCAN_CTRL_LPB;
622 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
623 reg |= FLEXCAN_CTRL_LOM;
624 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
625 reg |= FLEXCAN_CTRL_SMP;
626
627 dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg);
628 writel(reg, &regs->ctrl);
629
630 /* print chip status */
631 dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
632 readl(&regs->mcr), readl(&regs->ctrl));
633}
634
/*
 * flexcan_chip_start
 *
 * Bring the controller from reset to active: soft reset, bit timing,
 * MCR/CTRL configuration, mailbox and acceptance filter init, then
 * leave halt mode to synchronize with the bus.
 *
 * This function is entered with clocks enabled.
 * Returns 0 on success or a negative errno.
 */
static int flexcan_chip_start(struct net_device *dev)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->base;
	unsigned int i;
	int err;
	u32 reg_mcr, reg_ctrl;

	/* enable module */
	flexcan_chip_enable(priv);

	/* soft reset */
	writel(FLEXCAN_MCR_SOFTRST, &regs->mcr);
	udelay(10);

	/* the SOFTRST bit self-clears when the reset has completed */
	reg_mcr = readl(&regs->mcr);
	if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
		dev_err(dev->dev.parent,
			"Failed to softreset can module (mcr=0x%08x)\n",
			reg_mcr);
		err = -ENODEV;
		goto out;
	}

	flexcan_set_bittiming(dev);

	/*
	 * MCR
	 *
	 * enable freeze
	 * enable fifo
	 * halt now
	 * only supervisor access
	 * enable warning int
	 * choose format C
	 *
	 */
	reg_mcr = readl(&regs->mcr);
	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
		FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
		FLEXCAN_MCR_IDAM_C;
	dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr);
	writel(reg_mcr, &regs->mcr);

	/*
	 * CTRL
	 *
	 * disable timer sync feature
	 *
	 * disable auto busoff recovery
	 * transmit lowest buffer first
	 *
	 * enable tx and rx warning interrupt
	 * enable bus off interrupt
	 * (== FLEXCAN_CTRL_ERR_STATE)
	 *
	 * _note_: we enable the "error interrupt"
	 * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
	 * warning or bus passive interrupts.
	 */
	reg_ctrl = readl(&regs->ctrl);
	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
		FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;

	/* save for later use (flexcan_irq/flexcan_poll re-program ctrl) */
	priv->reg_ctrl_default = reg_ctrl;
	dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
	writel(reg_ctrl, &regs->ctrl);

	/* clear all mailboxes and hand them to the RX queue */
	for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
		writel(0, &regs->cantxfg[i].can_ctrl);
		writel(0, &regs->cantxfg[i].can_id);
		writel(0, &regs->cantxfg[i].data[0]);
		writel(0, &regs->cantxfg[i].data[1]);

		/* put MB into rx queue */
		writel(FLEXCAN_MB_CNT_CODE(0x4), &regs->cantxfg[i].can_ctrl);
	}

	/* acceptance mask/acceptance code (accept everything) */
	writel(0x0, &regs->rxgmask);
	writel(0x0, &regs->rx14mask);
	writel(0x0, &regs->rx15mask);

	flexcan_transceiver_switch(priv, 1);

	/* synchronize with the can bus */
	reg_mcr = readl(&regs->mcr);
	reg_mcr &= ~FLEXCAN_MCR_HALT;
	writel(reg_mcr, &regs->mcr);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* enable FIFO interrupts */
	writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);

	/* print chip status */
	dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n",
		__func__, readl(&regs->mcr), readl(&regs->ctrl));

	return 0;

 out:
	flexcan_chip_disable(priv);
	return err;
}
748
749/*
750 * flexcan_chip_stop
751 *
752 * this functions is entered with clocks enabled
753 *
754 */
755static void flexcan_chip_stop(struct net_device *dev)
756{
757 struct flexcan_priv *priv = netdev_priv(dev);
758 struct flexcan_regs __iomem *regs = priv->base;
759 u32 reg;
760
761 /* Disable all interrupts */
762 writel(0, &regs->imask1);
763
764 /* Disable + halt module */
765 reg = readl(&regs->mcr);
766 reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
767 writel(reg, &regs->mcr);
768
769 flexcan_transceiver_switch(priv, 0);
770 priv->can.state = CAN_STATE_STOPPED;
771
772 return;
773}
774
775static int flexcan_open(struct net_device *dev)
776{
777 struct flexcan_priv *priv = netdev_priv(dev);
778 int err;
779
780 clk_enable(priv->clk);
781
782 err = open_candev(dev);
783 if (err)
784 goto out;
785
786 err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
787 if (err)
788 goto out_close;
789
790 /* start chip and queuing */
791 err = flexcan_chip_start(dev);
792 if (err)
793 goto out_close;
794 napi_enable(&priv->napi);
795 netif_start_queue(dev);
796
797 return 0;
798
799 out_close:
800 close_candev(dev);
801 out:
802 clk_disable(priv->clk);
803
804 return err;
805}
806
/*
 * ndo_stop: reverse of flexcan_open(). Queue and NAPI are stopped
 * before the chip so no new work arrives while tearing down; the IRQ
 * is freed only after the chip has been stopped.
 */
static int flexcan_close(struct net_device *dev)
{
	struct flexcan_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	flexcan_chip_stop(dev);

	free_irq(dev->irq, dev);
	clk_disable(priv->clk);

	close_candev(dev);

	return 0;
}
822
823static int flexcan_set_mode(struct net_device *dev, enum can_mode mode)
824{
825 int err;
826
827 switch (mode) {
828 case CAN_MODE_START:
829 err = flexcan_chip_start(dev);
830 if (err)
831 return err;
832
833 netif_wake_queue(dev);
834 break;
835
836 default:
837 return -EOPNOTSUPP;
838 }
839
840 return 0;
841}
842
/* net_device callbacks; everything else uses the kernel defaults */
static const struct net_device_ops flexcan_netdev_ops = {
	.ndo_open = flexcan_open,
	.ndo_stop = flexcan_close,
	.ndo_start_xmit = flexcan_start_xmit,
};
848
849static int __devinit register_flexcandev(struct net_device *dev)
850{
851 struct flexcan_priv *priv = netdev_priv(dev);
852 struct flexcan_regs __iomem *regs = priv->base;
853 u32 reg, err;
854
855 clk_enable(priv->clk);
856
857 /* select "bus clock", chip must be disabled */
858 flexcan_chip_disable(priv);
859 reg = readl(&regs->ctrl);
860 reg |= FLEXCAN_CTRL_CLK_SRC;
861 writel(reg, &regs->ctrl);
862
863 flexcan_chip_enable(priv);
864
865 /* set freeze, halt and activate FIFO, restrict register access */
866 reg = readl(&regs->mcr);
867 reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
868 FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
869 writel(reg, &regs->mcr);
870
871 /*
872 * Currently we only support newer versions of this core
873 * featuring a RX FIFO. Older cores found on some Coldfire
874 * derivates are not yet supported.
875 */
876 reg = readl(&regs->mcr);
877 if (!(reg & FLEXCAN_MCR_FEN)) {
878 dev_err(dev->dev.parent,
879 "Could not enable RX FIFO, unsupported core\n");
880 err = -ENODEV;
881 goto out;
882 }
883
884 err = register_candev(dev);
885
886 out:
887 /* disable core and turn off clocks */
888 flexcan_chip_disable(priv);
889 clk_disable(priv->clk);
890
891 return err;
892}
893
/* counterpart to register_flexcandev() */
static void __devexit unregister_flexcandev(struct net_device *dev)
{
	unregister_candev(dev);
}
898
/*
 * Platform probe: acquire clock, memory region and IRQ, map the
 * register window, allocate the candev with its private data and
 * register it with the CAN core.
 */
static int __devinit flexcan_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct flexcan_priv *priv;
	struct resource *mem;
	struct clk *clk;
	void __iomem *base;
	resource_size_t mem_size;
	int err, irq;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "no clock defined\n");
		err = PTR_ERR(clk);
		goto failed_clock;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq <= 0) {
		err = -ENODEV;
		goto failed_get;
	}

	mem_size = resource_size(mem);
	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
		err = -EBUSY;
		goto failed_req;
	}

	base = ioremap(mem->start, mem_size);
	if (!base) {
		err = -ENOMEM;
		goto failed_map;
	}

	/* 2nd arg 0: no software echo skbs, echo is done in hardware */
	dev = alloc_candev(sizeof(struct flexcan_priv), 0);
	if (!dev) {
		err = -ENOMEM;
		goto failed_alloc;
	}

	dev->netdev_ops = &flexcan_netdev_ops;
	dev->irq = irq;
	dev->flags |= IFF_ECHO; /* we support local echo in hardware */

	priv = netdev_priv(dev);
	priv->can.clock.freq = clk_get_rate(clk);
	priv->can.bittiming_const = &flexcan_bittiming_const;
	priv->can.do_set_mode = flexcan_set_mode;
	priv->can.do_get_berr_counter = flexcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES |
		CAN_CTRLMODE_BERR_REPORTING;
	priv->base = base;
	priv->dev = dev;
	priv->clk = clk;
	priv->pdata = pdev->dev.platform_data;

	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);

	dev_set_drvdata(&pdev->dev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_flexcandev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering netdev failed\n");
		goto failed_register;
	}

	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
		 priv->base, dev->irq);

	return 0;

 failed_register:
	free_candev(dev);
 failed_alloc:
	iounmap(base);
 failed_map:
	release_mem_region(mem->start, mem_size);
 failed_req:
	clk_put(clk);
 failed_get:
 failed_clock:
	return err;
}
986
987static int __devexit flexcan_remove(struct platform_device *pdev)
988{
989 struct net_device *dev = platform_get_drvdata(pdev);
990 struct flexcan_priv *priv = netdev_priv(dev);
991 struct resource *mem;
992
993 unregister_flexcandev(dev);
994 platform_set_drvdata(pdev, NULL);
995 free_candev(dev);
996 iounmap(priv->base);
997
998 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
999 release_mem_region(mem->start, resource_size(mem));
1000
1001 clk_put(priv->clk);
1002
1003 return 0;
1004}
1005
/* platform driver glue */
static struct platform_driver flexcan_driver = {
	.driver.name = DRV_NAME,
	.probe = flexcan_probe,
	.remove = __devexit_p(flexcan_remove),
};
1011
/* module load: announce the driver and register it */
static int __init flexcan_init(void)
{
	pr_info("%s netdevice driver\n", DRV_NAME);
	return platform_driver_register(&flexcan_driver);
}
1017
/* module unload: unregister the driver */
static void __exit flexcan_exit(void)
{
	platform_driver_unregister(&flexcan_driver);
	pr_info("%s: driver removed\n", DRV_NAME);
}
1023
1024module_init(flexcan_init);
1025module_exit(flexcan_exit);
1026
1027MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
1028 "Marc Kleine-Budde <kernel@pengutronix.de>");
1029MODULE_LICENSE("GPL v2");
1030MODULE_DESCRIPTION("CAN port driver for flexcan based chip");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 4ff966473bc9..b43e9f5d3268 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -227,7 +227,7 @@ struct mscan_regs {
227 u16 time; /* + 0x7c 0x3e */ 227 u16 time; /* + 0x7c 0x3e */
228 } tx; 228 } tx;
229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */ 229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */
230} __attribute__ ((packed)); 230} __packed;
231 231
232#undef _MSCAN_RESERVED_ 232#undef _MSCAN_RESERVED_
233#define MSCAN_REGION sizeof(struct mscan) 233#define MSCAN_REGION sizeof(struct mscan)
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 97ff6febad63..04525495b15b 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -7,4 +7,10 @@ config CAN_EMS_USB
7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface 7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
8 from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). 8 from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
9 9
10config CAN_ESD_USB2
11 tristate "ESD USB/2 CAN/USB interface"
12 ---help---
13 This driver supports the CAN-USB/2 interface
14 from esd electronic system design gmbh (http://www.esd.eu).
15
10endmenu 16endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 0afd51d4c7a5..fce3cf11719f 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,5 +3,6 @@
3# 3#
4 4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
6obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
6 7
7ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 1fc0871d2ef7..e75f1a876972 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -197,7 +197,7 @@ struct cpc_can_err_counter {
197}; 197};
198 198
199/* Main message type used between library and application */ 199/* Main message type used between library and application */
200struct __attribute__ ((packed)) ems_cpc_msg { 200struct __packed ems_cpc_msg {
201 u8 type; /* type of message */ 201 u8 type; /* type of message */
202 u8 length; /* length of data within union 'msg' */ 202 u8 length; /* length of data within union 'msg' */
203 u8 msgid; /* confirmation handle */ 203 u8 msgid; /* confirmation handle */
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
new file mode 100644
index 000000000000..05a52754f486
--- /dev/null
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -0,0 +1,1132 @@
1/*
2 * CAN driver for esd CAN-USB/2
3 *
4 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published
8 * by the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19#include <linux/init.h>
20#include <linux/signal.h>
21#include <linux/slab.h>
22#include <linux/module.h>
23#include <linux/netdevice.h>
24#include <linux/usb.h>
25
26#include <linux/can.h>
27#include <linux/can/dev.h>
28#include <linux/can/error.h>
29
30MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
31MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces");
32MODULE_LICENSE("GPL v2");
33
34/* Define these values to match your devices */
35#define USB_ESDGMBH_VENDOR_ID 0x0ab4
36#define USB_CANUSB2_PRODUCT_ID 0x0010
37
38#define ESD_USB2_CAN_CLOCK 60000000
39#define ESD_USB2_MAX_NETS 2
40
41/* USB2 commands */
42#define CMD_VERSION 1 /* also used for VERSION_REPLY */
43#define CMD_CAN_RX 2 /* device to host only */
44#define CMD_CAN_TX 3 /* also used for TX_DONE */
45#define CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */
46#define CMD_TS 5 /* also used for TS_REPLY */
47#define CMD_IDADD 6 /* also used for IDADD_REPLY */
48
49/* esd CAN message flags - dlc field */
50#define ESD_RTR 0x10
51
52/* esd CAN message flags - id field */
53#define ESD_EXTID 0x20000000
54#define ESD_EVENT 0x40000000
55#define ESD_IDMASK 0x1fffffff
56
57/* esd CAN event ids used by this driver */
58#define ESD_EV_CAN_ERROR_EXT 2
59
60/* baudrate message flags */
61#define ESD_USB2_UBR 0x80000000
62#define ESD_USB2_LOM 0x40000000
63#define ESD_USB2_NO_BAUDRATE 0x7fffffff
64#define ESD_USB2_TSEG1_MIN 1
65#define ESD_USB2_TSEG1_MAX 16
66#define ESD_USB2_TSEG1_SHIFT 16
67#define ESD_USB2_TSEG2_MIN 1
68#define ESD_USB2_TSEG2_MAX 8
69#define ESD_USB2_TSEG2_SHIFT 20
70#define ESD_USB2_SJW_MAX 4
71#define ESD_USB2_SJW_SHIFT 14
72#define ESD_USB2_BRP_MIN 1
73#define ESD_USB2_BRP_MAX 1024
74#define ESD_USB2_BRP_INC 1
75#define ESD_USB2_3_SAMPLES 0x00800000
76
77/* esd IDADD message */
78#define ESD_ID_ENABLE 0x80
79#define ESD_MAX_ID_SEGMENT 64
80
81/* SJA1000 ECC register (emulated by usb2 firmware) */
82#define SJA1000_ECC_SEG 0x1F
83#define SJA1000_ECC_DIR 0x20
84#define SJA1000_ECC_ERR 0x06
85#define SJA1000_ECC_BIT 0x00
86#define SJA1000_ECC_FORM 0x40
87#define SJA1000_ECC_STUFF 0x80
88#define SJA1000_ECC_MASK 0xc0
89
90/* esd bus state event codes */
91#define ESD_BUSSTATE_MASK 0xc0
92#define ESD_BUSSTATE_WARN 0x40
93#define ESD_BUSSTATE_ERRPASSIVE 0x80
94#define ESD_BUSSTATE_BUSOFF 0xc0
95
96#define RX_BUFFER_SIZE 1024
97#define MAX_RX_URBS 4
98#define MAX_TX_URBS 16 /* must be power of 2 */
99
/*
 * Device message layouts. Every message starts with the same 4 byte
 * header; 'len' is always the total message length counted in 32 bit
 * words (including the header).
 */
struct header_msg {
	u8 len; /* len is always the total message length in 32bit words */
	u8 cmd;
	u8 rsvd[2];
};

/* CMD_VERSION request: announces the driver version to the device */
struct version_msg {
	u8 len;
	u8 cmd;
	u8 rsvd;
	u8 flags;
	__le32 drv_version;
};

/* CMD_VERSION reply: device name, version and number of CAN nets */
struct version_reply_msg {
	u8 len;
	u8 cmd;
	u8 nets;
	u8 features;
	__le32 version;
	u8 name[16];
	__le32 rsvd;
	__le32 ts;
};

/* CMD_CAN_RX: received CAN frame or event (device to host only) */
struct rx_msg {
	u8 len;
	u8 cmd;
	u8 net;
	u8 dlc;
	__le32 ts;
	__le32 id; /* upper 3 bits contain flags */
	u8 data[8];
};

/* CMD_CAN_TX: CAN frame to transmit; hnd is echoed in tx_done_msg */
struct tx_msg {
	u8 len;
	u8 cmd;
	u8 net;
	u8 dlc;
	__le32 hnd;
	__le32 id; /* upper 3 bits contain flags */
	u8 data[8];
};

/* CMD_CAN_TX completion notification */
struct tx_done_msg {
	u8 len;
	u8 cmd;
	u8 net;
	u8 status;
	__le32 hnd;
	__le32 ts;
};

/* CMD_IDADD: RX identifier filter bitmasks */
struct id_filter_msg {
	u8 len;
	u8 cmd;
	u8 net;
	u8 option;
	__le32 mask[ESD_MAX_ID_SEGMENT + 1];
};

/* CMD_SETBAUD: bit timing configuration (see baudrate flags above) */
struct set_baudrate_msg {
	u8 len;
	u8 cmd;
	u8 net;
	u8 rsvd;
	__le32 baud;
};

/* Union of all message types exchanged with the device */
struct __attribute__ ((packed)) esd_usb2_msg {
	union {
		struct header_msg hdr;
		struct version_msg version;
		struct version_reply_msg version_reply;
		struct rx_msg rx;
		struct tx_msg tx;
		struct tx_done_msg txdone;
		struct set_baudrate_msg setbaud;
		struct id_filter_msg filter;
	} msg;
};
183
/* USB devices handled by this driver */
static struct usb_device_id esd_usb2_table[] = {
	{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
	{}
};
MODULE_DEVICE_TABLE(usb, esd_usb2_table);
struct esd_usb2_net_priv;

/* per-URB context used to track in-flight TX jobs */
struct esd_tx_urb_context {
	struct esd_usb2_net_priv *priv;
	u32 echo_index;
	int dlc;
};

/* state for one physical CAN-USB/2 device */
struct esd_usb2 {
	struct usb_device *udev;
	struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];

	/* anchor for in-flight RX URBs */
	struct usb_anchor rx_submitted;

	int net_count;
	u32 version;
	int rxinitdone;
};

/* state for one CAN net (one netdevice) of a device */
struct esd_usb2_net_priv {
	struct can_priv can; /* must be the first member */

	atomic_t active_tx_jobs;
	struct usb_anchor tx_submitted;
	struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];

	int open_time;
	struct esd_usb2 *usb2;
	struct net_device *netdev;
	int index;
	u8 old_state;
	struct can_berr_counter bec;
};
223
/*
 * Handle an ESD_EV_CAN_ERROR_EXT event: either a controller state
 * change or a bus error, reported upstream as a CAN error frame.
 */
static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
			      struct esd_usb2_msg *msg)
{
	struct net_device_stats *stats = &priv->netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK;

	if (id == ESD_EV_CAN_ERROR_EXT) {
		/* event payload: state, SJA1000-style ECC, error counters */
		u8 state = msg->msg.rx.data[0];
		u8 ecc = msg->msg.rx.data[1];
		u8 txerr = msg->msg.rx.data[2];
		u8 rxerr = msg->msg.rx.data[3];

		skb = alloc_can_err_skb(priv->netdev, &cf);
		if (skb == NULL) {
			stats->rx_dropped++;
			return;
		}

		if (state != priv->old_state) {
			/* controller state transition */
			priv->old_state = state;

			switch (state & ESD_BUSSTATE_MASK) {
			case ESD_BUSSTATE_BUSOFF:
				priv->can.state = CAN_STATE_BUS_OFF;
				cf->can_id |= CAN_ERR_BUSOFF;
				can_bus_off(priv->netdev);
				break;
			case ESD_BUSSTATE_WARN:
				priv->can.state = CAN_STATE_ERROR_WARNING;
				priv->can.can_stats.error_warning++;
				break;
			case ESD_BUSSTATE_ERRPASSIVE:
				priv->can.state = CAN_STATE_ERROR_PASSIVE;
				priv->can.can_stats.error_passive++;
				break;
			default:
				priv->can.state = CAN_STATE_ERROR_ACTIVE;
				break;
			}
		} else {
			/* bus error with unchanged controller state */
			priv->can.can_stats.bus_error++;
			stats->rx_errors++;

			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

			/* decode the emulated SJA1000 ECC register */
			switch (ecc & SJA1000_ECC_MASK) {
			case SJA1000_ECC_BIT:
				cf->data[2] |= CAN_ERR_PROT_BIT;
				break;
			case SJA1000_ECC_FORM:
				cf->data[2] |= CAN_ERR_PROT_FORM;
				break;
			case SJA1000_ECC_STUFF:
				cf->data[2] |= CAN_ERR_PROT_STUFF;
				break;
			default:
				cf->data[2] |= CAN_ERR_PROT_UNSPEC;
				cf->data[3] = ecc & SJA1000_ECC_SEG;
				break;
			}

			/* Error occurred during transmission? */
			if (!(ecc & SJA1000_ECC_DIR))
				cf->data[2] |= CAN_ERR_PROT_TX;

			if (priv->can.state == CAN_STATE_ERROR_WARNING ||
			    priv->can.state == CAN_STATE_ERROR_PASSIVE) {
				cf->data[1] = (txerr > rxerr) ?
					CAN_ERR_CRTL_TX_PASSIVE :
					CAN_ERR_CRTL_RX_PASSIVE;
			}
			cf->data[6] = txerr;
			cf->data[7] = rxerr;
		}

		/*
		 * NOTE(review): cf is dereferenced for the byte count
		 * after netif_rx() has taken the skb — consider caching
		 * can_dlc before handing the skb off.
		 */
		netif_rx(skb);

		priv->bec.txerr = txerr;
		priv->bec.rxerr = rxerr;

		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
	}
}
310
311static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
312 struct esd_usb2_msg *msg)
313{
314 struct net_device_stats *stats = &priv->netdev->stats;
315 struct can_frame *cf;
316 struct sk_buff *skb;
317 int i;
318 u32 id;
319
320 if (!netif_device_present(priv->netdev))
321 return;
322
323 id = le32_to_cpu(msg->msg.rx.id);
324
325 if (id & ESD_EVENT) {
326 esd_usb2_rx_event(priv, msg);
327 } else {
328 skb = alloc_can_skb(priv->netdev, &cf);
329 if (skb == NULL) {
330 stats->rx_dropped++;
331 return;
332 }
333
334 cf->can_id = id & ESD_IDMASK;
335 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
336
337 if (id & ESD_EXTID)
338 cf->can_id |= CAN_EFF_FLAG;
339
340 if (msg->msg.rx.dlc & ESD_RTR) {
341 cf->can_id |= CAN_RTR_FLAG;
342 } else {
343 for (i = 0; i < cf->can_dlc; i++)
344 cf->data[i] = msg->msg.rx.data[i];
345 }
346
347 netif_rx(skb);
348
349 stats->rx_packets++;
350 stats->rx_bytes += cf->can_dlc;
351 }
352
353 return;
354}
355
/*
 * Handle a TX-done notification: loop back (or free) the matching echo
 * skb, release the URB context and restart the TX queue.
 */
static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
				 struct esd_usb2_msg *msg)
{
	struct net_device_stats *stats = &priv->netdev->stats;
	struct net_device *netdev = priv->netdev;
	struct esd_tx_urb_context *context;

	if (!netif_device_present(netdev))
		return;

	/* low bits of the echoed handle index the TX context array */
	context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)];

	if (!msg->msg.txdone.status) {
		/* success: count the frame and loop back the echo skb */
		stats->tx_packets++;
		stats->tx_bytes += context->dlc;
		can_get_echo_skb(netdev, context->echo_index);
	} else {
		stats->tx_errors++;
		can_free_echo_skb(netdev, context->echo_index);
	}

	/* Release context */
	context->echo_index = MAX_TX_URBS;
	atomic_dec(&priv->active_tx_jobs);

	netif_wake_queue(netdev);
}
383
/*
 * Completion callback for bulk IN (RX) URBs: walk the messages packed
 * into the transfer buffer, dispatch RX and TX-done messages, then
 * resubmit the URB. On -ENODEV the netdevices are detached instead.
 */
static void esd_usb2_read_bulk_callback(struct urb *urb)
{
	struct esd_usb2 *dev = urb->context;
	int retval;
	int pos = 0;
	int i;

	switch (urb->status) {
	case 0: /* success */
		break;

	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was killed/device gone: do not resubmit */
		return;

	default:
		dev_info(dev->udev->dev.parent,
			 "Rx URB aborted (%d)\n", urb->status);
		goto resubmit_urb;
	}

	while (pos < urb->actual_length) {
		struct esd_usb2_msg *msg;

		msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);

		switch (msg->msg.hdr.cmd) {
		case CMD_CAN_RX:
			esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
			break;

		case CMD_CAN_TX:
			esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
					     msg);
			break;
		}

		/* hdr.len is in 32 bit words */
		pos += msg->msg.hdr.len << 2;

		/*
		 * NOTE(review): a device reporting hdr.len == 0 would
		 * loop here forever — confirm whether a sanity check is
		 * needed for malformed input.
		 */
		if (pos > urb->actual_length) {
			dev_err(dev->udev->dev.parent, "format error\n");
			break;
		}
	}

resubmit_urb:
	usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
			  urb->transfer_buffer, RX_BUFFER_SIZE,
			  esd_usb2_read_bulk_callback, dev);

	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval == -ENODEV) {
		/* device is gone: detach all netdevices */
		for (i = 0; i < dev->net_count; i++) {
			if (dev->nets[i])
				netif_device_detach(dev->nets[i]->netdev);
		}
	} else if (retval) {
		dev_err(dev->udev->dev.parent,
			"failed resubmitting read bulk urb: %d\n", retval);
	}

	return;
}
447
/*
 * Completion callback for bulk OUT (TX) URBs.
 * (Original comment said "bulk IN"; this is the write path.)
 * Frees the coherent transfer buffer and refreshes trans_start.
 */
static void esd_usb2_write_bulk_callback(struct urb *urb)
{
	struct esd_tx_urb_context *context = urb->context;
	struct esd_usb2_net_priv *priv;
	struct esd_usb2 *dev;
	struct net_device *netdev;
	size_t size = sizeof(struct esd_usb2_msg);

	WARN_ON(!context);

	priv = context->priv;
	netdev = priv->netdev;
	dev = priv->usb2;

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, size,
			  urb->transfer_buffer, urb->transfer_dma);

	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
			 urb->status);

	netdev->trans_start = jiffies;
}
478
479static ssize_t show_firmware(struct device *d,
480 struct device_attribute *attr, char *buf)
481{
482 struct usb_interface *intf = to_usb_interface(d);
483 struct esd_usb2 *dev = usb_get_intfdata(intf);
484
485 return sprintf(buf, "%d.%d.%d\n",
486 (dev->version >> 12) & 0xf,
487 (dev->version >> 8) & 0xf,
488 dev->version & 0xff);
489}
490static DEVICE_ATTR(firmware, S_IRUGO, show_firmware, NULL);
491
492static ssize_t show_hardware(struct device *d,
493 struct device_attribute *attr, char *buf)
494{
495 struct usb_interface *intf = to_usb_interface(d);
496 struct esd_usb2 *dev = usb_get_intfdata(intf);
497
498 return sprintf(buf, "%d.%d.%d\n",
499 (dev->version >> 28) & 0xf,
500 (dev->version >> 24) & 0xf,
501 (dev->version >> 16) & 0xff);
502}
503static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL);
504
505static ssize_t show_nets(struct device *d,
506 struct device_attribute *attr, char *buf)
507{
508 struct usb_interface *intf = to_usb_interface(d);
509 struct esd_usb2 *dev = usb_get_intfdata(intf);
510
511 return sprintf(buf, "%d", dev->net_count);
512}
513static DEVICE_ATTR(nets, S_IRUGO, show_nets, NULL);
514
/*
 * Synchronously send a command message to the device (bulk OUT,
 * endpoint 2, 1 second timeout). Returns 0 or a negative errno.
 */
static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
{
	int actual_length;	/* required by the API, value unused */

	return usb_bulk_msg(dev->udev,
			    usb_sndbulkpipe(dev->udev, 2),
			    msg,
			    msg->msg.hdr.len << 2, /* len is in 32 bit words */
			    &actual_length,
			    1000);
}
526
/*
 * Synchronously read one reply message from the device (bulk IN,
 * endpoint 1, 1 second timeout). Returns 0 or a negative errno.
 */
static int esd_usb2_wait_msg(struct esd_usb2 *dev,
			     struct esd_usb2_msg *msg)
{
	int actual_length;	/* required by the API, value unused */

	return usb_bulk_msg(dev->udev,
			    usb_rcvbulkpipe(dev->udev, 1),
			    msg,
			    sizeof(*msg),
			    &actual_length,
			    1000);
}
539
/*
 * Allocate and submit the pool of RX URBs (once per device; guarded by
 * rxinitdone). Succeeds as long as at least one URB was submitted.
 */
static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
{
	int i, err = 0;

	if (dev->rxinitdone)
		return 0;

	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf = NULL;

		/* create a URB, and a buffer for it */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			dev_warn(dev->udev->dev.parent,
				 "No memory left for URBs\n");
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			dev_warn(dev->udev->dev.parent,
				 "No memory left for USB buffer\n");
			err = -ENOMEM;
			goto freeurb;
		}

		usb_fill_bulk_urb(urb, dev->udev,
				  usb_rcvbulkpipe(dev->udev, 1),
				  buf, RX_BUFFER_SIZE,
				  esd_usb2_read_bulk_callback, dev);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &dev->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			/* undo: unanchor and free the coherent buffer */
			usb_unanchor_urb(urb);
			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
					  urb->transfer_dma);
		}

freeurb:
		/* Drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
		if (err)
			break;
	}

	/* Did we submit any URBs? */
	if (i == 0) {
		dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n");
		return err;
	}

	/* Warn if we couldn't submit all the URBs */
	if (i < MAX_RX_URBS) {
		dev_warn(dev->udev->dev.parent,
			 "rx performance may be slow\n");
	}

	dev->rxinitdone = 1;
	return 0;
}
605
/*
 * Start interface: program the RX identifier filter to accept all IDs,
 * make sure the RX URBs are running, and mark the net error-active.
 */
static int esd_usb2_start(struct esd_usb2_net_priv *priv)
{
	struct esd_usb2 *dev = priv->usb2;
	struct net_device *netdev = priv->netdev;
	struct esd_usb2_msg msg;
	int err, i;

	/*
	 * Enable all IDs
	 * The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
	 * Each bit represents one 11 bit CAN identifier. A set bit
	 * enables reception of the corresponding CAN identifier. A cleared
	 * bit disabled this identifier. An additional bitmask value
	 * following the CAN 2.0A bits is used to enable reception of
	 * extended CAN frames. Only the LSB of this final mask is checked
	 * for the complete 29 bit ID range. The IDADD message also allows
	 * filter configuration for an ID subset. In this case you can add
	 * the number of the starting bitmask (0..64) to the filter.option
	 * field followed by only some bitmasks.
	 */
	msg.msg.hdr.cmd = CMD_IDADD;
	msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
	msg.msg.filter.net = priv->index;
	msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
	for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
		msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff);
	/* enable 29bit extended IDs */
	msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);

	err = esd_usb2_send_msg(dev, &msg);
	if (err)
		goto failed;

	err = esd_usb2_setup_rx_urbs(dev);
	if (err)
		goto failed;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

failed:
	if (err == -ENODEV)
		netif_device_detach(netdev);

	dev_err(netdev->dev.parent, "couldn't start device: %d\n", err);

	return err;
}
658
659static void unlink_all_urbs(struct esd_usb2 *dev)
660{
661 struct esd_usb2_net_priv *priv;
662 int i;
663
664 usb_kill_anchored_urbs(&dev->rx_submitted);
665 for (i = 0; i < dev->net_count; i++) {
666 priv = dev->nets[i];
667 if (priv) {
668 usb_kill_anchored_urbs(&priv->tx_submitted);
669 atomic_set(&priv->active_tx_jobs, 0);
670
671 for (i = 0; i < MAX_TX_URBS; i++)
672 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
673 }
674 }
675}
676
677static int esd_usb2_open(struct net_device *netdev)
678{
679 struct esd_usb2_net_priv *priv = netdev_priv(netdev);
680 int err;
681
682 /* common open */
683 err = open_candev(netdev);
684 if (err)
685 return err;
686
687 /* finally start device */
688 err = esd_usb2_start(priv);
689 if (err) {
690 dev_warn(netdev->dev.parent,
691 "couldn't start device: %d\n", err);
692 close_candev(netdev);
693 return err;
694 }
695
696 priv->open_time = jiffies;
697
698 netif_start_queue(netdev);
699
700 return 0;
701}
702
/*
 * Transmit one CAN frame: marshal it into a CMD_CAN_TX message in a
 * freshly allocated DMA-coherent buffer, reserve a tx context so the
 * TX-done reply can be matched back, echo the skb and submit a bulk
 * URB to endpoint 2.
 */
static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
	struct esd_usb2 *dev = priv->usb2;
	struct esd_tx_urb_context *context = NULL;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct esd_usb2_msg *msg;
	struct urb *urb;
	u8 *buf;
	int i, err;
	int ret = NETDEV_TX_OK;
	size_t size = sizeof(struct esd_usb2_msg);

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		dev_err(netdev->dev.parent, "No memory left for URBs\n");
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		goto nourbmem;
	}

	buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!buf) {
		dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		goto nobufmem;
	}

	msg = (struct esd_usb2_msg *)buf;

	/* hdr.len is in 32 bit words; 3 covers header + net/dlc + id */
	msg->msg.hdr.len = 3; /* minimal length */
	msg->msg.hdr.cmd = CMD_CAN_TX;
	msg->msg.tx.net = priv->index;
	msg->msg.tx.dlc = cf->can_dlc;
	msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);

	if (cf->can_id & CAN_RTR_FLAG)
		msg->msg.tx.dlc |= ESD_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		msg->msg.tx.id |= cpu_to_le32(ESD_EXTID);

	for (i = 0; i < cf->can_dlc; i++)
		msg->msg.tx.data[i] = cf->data[i];

	/* round the payload up to whole 32 bit words */
	msg->msg.hdr.len += (cf->can_dlc + 3) >> 2;

	/* find a free tx context (echo_index == MAX_TX_URBS means free) */
	for (i = 0; i < MAX_TX_URBS; i++) {
		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &priv->tx_contexts[i];
			break;
		}
	}

	/*
	 * This may never happen.
	 */
	if (!context) {
		dev_warn(netdev->dev.parent, "couldn't find free context\n");
		ret = NETDEV_TX_BUSY;
		goto releasebuf;
	}

	context->priv = priv;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	/* hnd must not be 0 - MSB is stripped in txdone handling */
	msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */

	usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
			  msg->msg.hdr.len << 2,
			  esd_usb2_write_bulk_callback, context);

	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_anchor_urb(urb, &priv->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&priv->active_tx_jobs);

	/* Slow down tx path */
	if (atomic_read(&priv->active_tx_jobs) >= MAX_TX_URBS)
		netif_stop_queue(netdev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		/* unwind: drop echo skb, job count and anchor again */
		can_free_echo_skb(netdev, context->echo_index);

		atomic_dec(&priv->active_tx_jobs);
		usb_unanchor_urb(urb);

		stats->tx_dropped++;

		if (err == -ENODEV)
			netif_device_detach(netdev);
		else
			dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);

		goto releasebuf;
	}

	netdev->trans_start = jiffies;

	/*
	 * Release our reference to this URB, the USB core will eventually free
	 * it entirely.
	 */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

releasebuf:
	usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);

nobufmem:
	usb_free_urb(urb);

nourbmem:
	return ret;
}
833
/*
 * Take the interface down: clear all ID filter masks so the device
 * stops forwarding frames, put the CAN controller into reset mode
 * (baud rate "none"), and run the common CAN close sequence.
 */
static int esd_usb2_close(struct net_device *netdev)
{
	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
	struct esd_usb2_msg msg;
	int i;

	/* Disable all IDs (see esd_usb2_start()) */
	msg.msg.hdr.cmd = CMD_IDADD;
	msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
	msg.msg.filter.net = priv->index;
	msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
	/* '<=' on purpose: also clear the extra 29 bit extended-ID mask */
	for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
		msg.msg.filter.mask[i] = 0;
	if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
		dev_err(netdev->dev.parent, "sending idadd message failed\n");

	/* set CAN controller to reset mode */
	msg.msg.hdr.len = 2;
	msg.msg.hdr.cmd = CMD_SETBAUD;
	msg.msg.setbaud.net = priv->index;
	msg.msg.setbaud.rsvd = 0;
	msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
	if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
		dev_err(netdev->dev.parent, "sending setbaud message failed\n");

	priv->can.state = CAN_STATE_STOPPED;

	netif_stop_queue(netdev);

	close_candev(netdev);

	priv->open_time = 0;

	return 0;
}
869
/* net_device callbacks implemented by this driver */
static const struct net_device_ops esd_usb2_netdev_ops = {
	.ndo_open = esd_usb2_open,
	.ndo_stop = esd_usb2_close,
	.ndo_start_xmit = esd_usb2_start_xmit,
};
875
876static struct can_bittiming_const esd_usb2_bittiming_const = {
877 .name = "esd_usb2",
878 .tseg1_min = ESD_USB2_TSEG1_MIN,
879 .tseg1_max = ESD_USB2_TSEG1_MAX,
880 .tseg2_min = ESD_USB2_TSEG2_MIN,
881 .tseg2_max = ESD_USB2_TSEG2_MAX,
882 .sjw_max = ESD_USB2_SJW_MAX,
883 .brp_min = ESD_USB2_BRP_MIN,
884 .brp_max = ESD_USB2_BRP_MAX,
885 .brp_inc = ESD_USB2_BRP_INC,
886};
887
/*
 * Pack the computed CAN bit-timing parameters into the device's BTR
 * register layout and send them with a CMD_SETBAUD message.  All
 * hardware fields are stored as (value - 1), hence the subtractions;
 * the masks rely on the *_MAX limits being powers of two.
 */
static int esd_usb2_set_bittiming(struct net_device *netdev)
{
	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct esd_usb2_msg msg;
	u32 canbtr;

	canbtr = ESD_USB2_UBR;
	canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
	canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
		<< ESD_USB2_SJW_SHIFT;
	/* tseg1 = prop_seg + phase_seg1 as far as the hardware is concerned */
	canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
		   & (ESD_USB2_TSEG1_MAX - 1))
		<< ESD_USB2_TSEG1_SHIFT;
	canbtr |= ((bt->phase_seg2 - 1) & (ESD_USB2_TSEG2_MAX - 1))
		<< ESD_USB2_TSEG2_SHIFT;
	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		canbtr |= ESD_USB2_3_SAMPLES;

	msg.msg.hdr.len = 2;
	msg.msg.hdr.cmd = CMD_SETBAUD;
	msg.msg.setbaud.net = priv->index;
	msg.msg.setbaud.rsvd = 0;
	msg.msg.setbaud.baud = cpu_to_le32(canbtr);

	dev_info(netdev->dev.parent, "setting BTR=%#x\n", canbtr);

	return esd_usb2_send_msg(priv->usb2, &msg);
}
917
918static int esd_usb2_get_berr_counter(const struct net_device *netdev,
919 struct can_berr_counter *bec)
920{
921 struct esd_usb2_net_priv *priv = netdev_priv(netdev);
922
923 bec->txerr = priv->bec.txerr;
924 bec->rxerr = priv->bec.rxerr;
925
926 return 0;
927}
928
929static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
930{
931 struct esd_usb2_net_priv *priv = netdev_priv(netdev);
932
933 if (!priv->open_time)
934 return -EINVAL;
935
936 switch (mode) {
937 case CAN_MODE_START:
938 netif_wake_queue(netdev);
939 break;
940
941 default:
942 return -EOPNOTSUPP;
943 }
944
945 return 0;
946}
947
948static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
949{
950 struct esd_usb2 *dev = usb_get_intfdata(intf);
951 struct net_device *netdev;
952 struct esd_usb2_net_priv *priv;
953 int err = 0;
954 int i;
955
956 netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
957 if (!netdev) {
958 dev_err(&intf->dev, "couldn't alloc candev\n");
959 err = -ENOMEM;
960 goto done;
961 }
962
963 priv = netdev_priv(netdev);
964
965 init_usb_anchor(&priv->tx_submitted);
966 atomic_set(&priv->active_tx_jobs, 0);
967
968 for (i = 0; i < MAX_TX_URBS; i++)
969 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
970
971 priv->usb2 = dev;
972 priv->netdev = netdev;
973 priv->index = index;
974
975 priv->can.state = CAN_STATE_STOPPED;
976 priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
977 priv->can.bittiming_const = &esd_usb2_bittiming_const;
978 priv->can.do_set_bittiming = esd_usb2_set_bittiming;
979 priv->can.do_set_mode = esd_usb2_set_mode;
980 priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
981 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
982
983 netdev->flags |= IFF_ECHO; /* we support local echo */
984
985 netdev->netdev_ops = &esd_usb2_netdev_ops;
986
987 SET_NETDEV_DEV(netdev, &intf->dev);
988
989 err = register_candev(netdev);
990 if (err) {
991 dev_err(&intf->dev,
992 "couldn't register CAN device: %d\n", err);
993 free_candev(netdev);
994 err = -ENOMEM;
995 goto done;
996 }
997
998 dev->nets[index] = priv;
999 dev_info(netdev->dev.parent, "device %s registered\n", netdev->name);
1000
1001done:
1002 return err;
1003}
1004
/*
 * probe function for new USB2 devices
 *
 * check version information and number of available
 * CAN interfaces
 */
static int esd_usb2_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct esd_usb2 *dev;
	struct esd_usb2_msg msg;
	int i, err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		err = -ENOMEM;
		goto done;
	}

	dev->udev = interface_to_usbdev(intf);

	init_usb_anchor(&dev->rx_submitted);

	usb_set_intfdata(intf, dev);

	/* query number of CAN interfaces (nets) */
	msg.msg.hdr.cmd = CMD_VERSION;
	msg.msg.hdr.len = 2;
	msg.msg.version.rsvd = 0;
	msg.msg.version.flags = 0;
	msg.msg.version.drv_version = 0;

	err = esd_usb2_send_msg(dev, &msg);
	if (err < 0) {
		dev_err(&intf->dev, "sending version message failed\n");
		goto free_dev;
	}

	/* the device answers the version query on the bulk-in endpoint */
	err = esd_usb2_wait_msg(dev, &msg);
	if (err < 0) {
		dev_err(&intf->dev, "no version message answer\n");
		goto free_dev;
	}

	dev->net_count = (int)msg.msg.version_reply.nets;
	dev->version = le32_to_cpu(msg.msg.version_reply.version);

	/* sysfs attributes are informational only; failure is non-fatal */
	if (device_create_file(&intf->dev, &dev_attr_firmware))
		dev_err(&intf->dev,
			"Couldn't create device file for firmware\n");

	if (device_create_file(&intf->dev, &dev_attr_hardware))
		dev_err(&intf->dev,
			"Couldn't create device file for hardware\n");

	if (device_create_file(&intf->dev, &dev_attr_nets))
		dev_err(&intf->dev,
			"Couldn't create device file for nets\n");

	/* do per device probing */
	for (i = 0; i < dev->net_count; i++)
		esd_usb2_probe_one_net(intf, i);

	return 0;

free_dev:
	/*
	 * NOTE(review): intfdata still points at 'dev' after it is freed
	 * here — confirm nothing reads it after a failed probe, or clear
	 * it with usb_set_intfdata(intf, NULL) first.
	 */
	kfree(dev);
done:
	return err;
}
1075
1076/*
1077 * called by the usb core when the device is removed from the system
1078 */
1079static void esd_usb2_disconnect(struct usb_interface *intf)
1080{
1081 struct esd_usb2 *dev = usb_get_intfdata(intf);
1082 struct net_device *netdev;
1083 int i;
1084
1085 device_remove_file(&intf->dev, &dev_attr_firmware);
1086 device_remove_file(&intf->dev, &dev_attr_hardware);
1087 device_remove_file(&intf->dev, &dev_attr_nets);
1088
1089 usb_set_intfdata(intf, NULL);
1090
1091 if (dev) {
1092 for (i = 0; i < dev->net_count; i++) {
1093 if (dev->nets[i]) {
1094 netdev = dev->nets[i]->netdev;
1095 unregister_netdev(netdev);
1096 free_candev(netdev);
1097 }
1098 }
1099 unlink_all_urbs(dev);
1100 }
1101}
1102
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver esd_usb2_driver = {
	.name = "esd_usb2",
	.probe = esd_usb2_probe,
	.disconnect = esd_usb2_disconnect,
	.id_table = esd_usb2_table,
};
1110
1111static int __init esd_usb2_init(void)
1112{
1113 int err;
1114
1115 /* register this driver with the USB subsystem */
1116 err = usb_register(&esd_usb2_driver);
1117
1118 if (err) {
1119 err("usb_register failed. Error number %d\n", err);
1120 return err;
1121 }
1122
1123 return 0;
1124}
1125module_init(esd_usb2_init);
1126
/* Module exit: undo esd_usb2_init(). */
static void __exit esd_usb2_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&esd_usb2_driver);
}
module_exit(esd_usb2_exit);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 04a03f7003a0..28c88eeec757 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -107,12 +107,7 @@
107#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) 107#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
108#define CAS_NCPUS num_online_cpus() 108#define CAS_NCPUS num_online_cpus()
109 109
110#ifdef CONFIG_CASSINI_NAPI
111#define USE_NAPI
112#define cas_skb_release(x) netif_receive_skb(x)
113#else
114#define cas_skb_release(x) netif_rx(x) 110#define cas_skb_release(x) netif_rx(x)
115#endif
116 111
117/* select which firmware to use */ 112/* select which firmware to use */
118#define USE_HP_WORKAROUND 113#define USE_HP_WORKAROUND
@@ -3063,9 +3058,6 @@ static void cas_init_mac(struct cas *cp)
3063{ 3058{
3064 unsigned char *e = &cp->dev->dev_addr[0]; 3059 unsigned char *e = &cp->dev->dev_addr[0];
3065 int i; 3060 int i;
3066#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
3067 u32 rxcfg;
3068#endif
3069 cas_mac_reset(cp); 3061 cas_mac_reset(cp);
3070 3062
3071 /* setup core arbitration weight register */ 3063 /* setup core arbitration weight register */
@@ -3133,23 +3125,8 @@ static void cas_init_mac(struct cas *cp)
3133 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); 3125 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3134 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); 3126 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3135 3127
3136#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
3137 cp->mac_rx_cfg = cas_setup_multicast(cp); 3128 cp->mac_rx_cfg = cas_setup_multicast(cp);
3138#else 3129
3139 /* WTZ: Do what Adrian did in cas_set_multicast. Doing
3140 * a writel does not seem to be necessary because Cassini
3141 * seems to preserve the configuration when we do the reset.
3142 * If the chip is in trouble, though, it is not clear if we
3143 * can really count on this behavior. cas_set_multicast uses
3144 * spin_lock_irqsave, but we are called only in cas_init_hw and
3145 * cas_init_hw is protected by cas_lock_all, which calls
3146 * spin_lock_irq (so it doesn't need to save the flags, and
3147 * we should be OK for the writel, as that is the only
3148 * difference).
3149 */
3150 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
3151 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
3152#endif
3153 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3130 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3154 cas_clear_mac_err(cp); 3131 cas_clear_mac_err(cp);
3155 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3132 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index fd17a002b453..dbc47878d83b 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -2844,10 +2844,6 @@ struct cas {
2844 atomic_t reset_task_pending_all; 2844 atomic_t reset_task_pending_all;
2845#endif 2845#endif
2846 2846
2847#ifdef CONFIG_CASSINI_QGE_DEBUG
2848 atomic_t interrupt_seen; /* 1 if any interrupts are getting through */
2849#endif
2850
2851 /* Link-down problem workaround */ 2847 /* Link-down problem workaround */
2852#define LINK_TRANSITION_UNKNOWN 0 2848#define LINK_TRANSITION_UNKNOWN 0
2853#define LINK_TRANSITION_ON_FAILURE 1 2849#define LINK_TRANSITION_ON_FAILURE 1
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 036b2dfb1d40..092f31a126e6 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -286,7 +286,6 @@ struct board_info {
286 unsigned int clock_mc3; 286 unsigned int clock_mc3;
287 unsigned int clock_mc4; 287 unsigned int clock_mc4;
288 unsigned int espi_nports; 288 unsigned int espi_nports;
289 unsigned int clock_cspi;
290 unsigned int clock_elmer0; 289 unsigned int clock_elmer0;
291 unsigned char mdio_mdien; 290 unsigned char mdio_mdien;
292 unsigned char mdio_mdiinv; 291 unsigned char mdio_mdiinv;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 53bde15fc94d..599d178df62d 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -185,9 +185,6 @@ static int t1_pci_intr_handler(adapter_t *adapter)
185 return 0; 185 return 0;
186} 186}
187 187
188#ifdef CONFIG_CHELSIO_T1_COUGAR
189#include "cspi.h"
190#endif
191#ifdef CONFIG_CHELSIO_T1_1G 188#ifdef CONFIG_CHELSIO_T1_1G
192#include "fpga_defs.h" 189#include "fpga_defs.h"
193 190
@@ -280,7 +277,7 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
280 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); 277 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
281} 278}
282 279
283#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) 280#if defined(CONFIG_CHELSIO_T1_1G)
284/* 281/*
285 * Elmer MI1 MDIO read/write operations. 282 * Elmer MI1 MDIO read/write operations.
286 */ 283 */
@@ -317,7 +314,7 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
317 return 0; 314 return 0;
318} 315}
319 316
320#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) 317#if defined(CONFIG_CHELSIO_T1_1G)
321static const struct mdio_ops mi1_mdio_ops = { 318static const struct mdio_ops mi1_mdio_ops = {
322 .init = mi1_mdio_init, 319 .init = mi1_mdio_init,
323 .read = mi1_mdio_read, 320 .read = mi1_mdio_read,
@@ -752,31 +749,6 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
752 mod_detect ? "removed" : "inserted"); 749 mod_detect ? "removed" : "inserted");
753 } 750 }
754 break; 751 break;
755#ifdef CONFIG_CHELSIO_T1_COUGAR
756 case CHBT_BOARD_COUGAR:
757 if (adapter->params.nports == 1) {
758 if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */
759 struct cmac *mac = adapter->port[0].mac;
760 mac->ops->interrupt_handler(mac);
761 }
762 if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
763 }
764 } else {
765 int i, port_bit;
766
767 for_each_port(adapter, i) {
768 port_bit = i ? i + 1 : 0;
769 if (!(cause & (1 << port_bit)))
770 continue;
771
772 phy = adapter->port[i].phy;
773 phy_cause = phy->ops->interrupt_handler(phy);
774 if (phy_cause & cphy_cause_link_change)
775 t1_link_changed(adapter, i);
776 }
777 }
778 break;
779#endif
780 } 752 }
781 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); 753 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
782 return 0; 754 return 0;
@@ -955,7 +927,6 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
955 case CHBT_BOARD_N110: 927 case CHBT_BOARD_N110:
956 case CHBT_BOARD_N210: 928 case CHBT_BOARD_N210:
957 case CHBT_BOARD_CHT210: 929 case CHBT_BOARD_CHT210:
958 case CHBT_BOARD_COUGAR:
959 t1_tpi_par(adapter, 0xf); 930 t1_tpi_par(adapter, 0xf);
960 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); 931 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
961 break; 932 break;
@@ -1004,10 +975,6 @@ int t1_init_hw_modules(adapter_t *adapter)
1004 adapter->regs + A_MC5_CONFIG); 975 adapter->regs + A_MC5_CONFIG);
1005 } 976 }
1006 977
1007#ifdef CONFIG_CHELSIO_T1_COUGAR
1008 if (adapter->cspi && t1_cspi_init(adapter->cspi))
1009 goto out_err;
1010#endif
1011 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, 978 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
1012 bi->espi_nports)) 979 bi->espi_nports))
1013 goto out_err; 980 goto out_err;
@@ -1061,10 +1028,6 @@ void t1_free_sw_modules(adapter_t *adapter)
1061 t1_tp_destroy(adapter->tp); 1028 t1_tp_destroy(adapter->tp);
1062 if (adapter->espi) 1029 if (adapter->espi)
1063 t1_espi_destroy(adapter->espi); 1030 t1_espi_destroy(adapter->espi);
1064#ifdef CONFIG_CHELSIO_T1_COUGAR
1065 if (adapter->cspi)
1066 t1_cspi_destroy(adapter->cspi);
1067#endif
1068} 1031}
1069 1032
1070static void __devinit init_link_config(struct link_config *lc, 1033static void __devinit init_link_config(struct link_config *lc,
@@ -1084,14 +1047,6 @@ static void __devinit init_link_config(struct link_config *lc,
1084 } 1047 }
1085} 1048}
1086 1049
1087#ifdef CONFIG_CHELSIO_T1_COUGAR
1088 if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
1089 pr_err("%s: CSPI initialization failed\n",
1090 adapter->name);
1091 goto error;
1092 }
1093#endif
1094
1095/* 1050/*
1096 * Allocate and initialize the data structures that hold the SW state of 1051 * Allocate and initialize the data structures that hold the SW state of
1097 * the Terminator HW modules. 1052 * the Terminator HW modules.
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 80471269977a..09610323a948 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -40,9 +40,9 @@
40 40
41#include "cnic_if.h" 41#include "cnic_if.h"
42#include "bnx2.h" 42#include "bnx2.h"
43#include "bnx2x_reg.h" 43#include "bnx2x/bnx2x_reg.h"
44#include "bnx2x_fw_defs.h" 44#include "bnx2x/bnx2x_fw_defs.h"
45#include "bnx2x_hsi.h" 45#include "bnx2x/bnx2x_hsi.h"
46#include "../scsi/bnx2i/57xx_iscsi_constants.h" 46#include "../scsi/bnx2i/57xx_iscsi_constants.h"
47#include "../scsi/bnx2i/57xx_iscsi_hsi.h" 47#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
48#include "cnic.h" 48#include "cnic.h"
@@ -257,7 +257,7 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
257{ 257{
258 u32 i; 258 u32 i;
259 259
260 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { 260 for (i = 0; i < cp->max_cid_space; i++) {
261 if (cp->ctx_tbl[i].cid == cid) { 261 if (cp->ctx_tbl[i].cid == cid) {
262 *l5_cid = i; 262 *l5_cid = i;
263 return 0; 263 return 0;
@@ -804,7 +804,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
804 cnic_free_dma(dev, &cp->conn_buf_info); 804 cnic_free_dma(dev, &cp->conn_buf_info);
805 cnic_free_dma(dev, &cp->kwq_info); 805 cnic_free_dma(dev, &cp->kwq_info);
806 cnic_free_dma(dev, &cp->kwq_16_data_info); 806 cnic_free_dma(dev, &cp->kwq_16_data_info);
807 cnic_free_dma(dev, &cp->kcq_info); 807 cnic_free_dma(dev, &cp->kcq1.dma);
808 kfree(cp->iscsi_tbl); 808 kfree(cp->iscsi_tbl);
809 cp->iscsi_tbl = NULL; 809 cp->iscsi_tbl = NULL;
810 kfree(cp->ctx_tbl); 810 kfree(cp->ctx_tbl);
@@ -863,6 +863,37 @@ static int cnic_alloc_context(struct cnic_dev *dev)
863 return 0; 863 return 0;
864} 864}
865 865
866static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
867{
868 int err, i, is_bnx2 = 0;
869 struct kcqe **kcq;
870
871 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
872 is_bnx2 = 1;
873
874 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
875 if (err)
876 return err;
877
878 kcq = (struct kcqe **) info->dma.pg_arr;
879 info->kcq = kcq;
880
881 if (is_bnx2)
882 return 0;
883
884 for (i = 0; i < KCQ_PAGE_CNT; i++) {
885 struct bnx2x_bd_chain_next *next =
886 (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
887 int j = i + 1;
888
889 if (j >= KCQ_PAGE_CNT)
890 j = 0;
891 next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
892 next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
893 }
894 return 0;
895}
896
866static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages) 897static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
867{ 898{
868 struct cnic_local *cp = dev->cnic_priv; 899 struct cnic_local *cp = dev->cnic_priv;
@@ -954,10 +985,9 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
954 goto error; 985 goto error;
955 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; 986 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
956 987
957 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1); 988 ret = cnic_alloc_kcq(dev, &cp->kcq1);
958 if (ret) 989 if (ret)
959 goto error; 990 goto error;
960 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
961 991
962 ret = cnic_alloc_context(dev); 992 ret = cnic_alloc_context(dev);
963 if (ret) 993 if (ret)
@@ -981,17 +1011,10 @@ error:
981static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) 1011static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
982{ 1012{
983 struct cnic_local *cp = dev->cnic_priv; 1013 struct cnic_local *cp = dev->cnic_priv;
984 struct cnic_eth_dev *ethdev = cp->ethdev;
985 int ctx_blk_size = cp->ethdev->ctx_blk_size; 1014 int ctx_blk_size = cp->ethdev->ctx_blk_size;
986 int total_mem, blks, i, cid_space; 1015 int total_mem, blks, i;
987
988 if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
989 return -EINVAL;
990
991 cid_space = MAX_ISCSI_TBL_SZ +
992 (BNX2X_ISCSI_START_CID - ethdev->starting_cid);
993 1016
994 total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space; 1017 total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
995 blks = total_mem / ctx_blk_size; 1018 blks = total_mem / ctx_blk_size;
996 if (total_mem % ctx_blk_size) 1019 if (total_mem % ctx_blk_size)
997 blks++; 1020 blks++;
@@ -1035,16 +1058,27 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1035static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) 1058static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1036{ 1059{
1037 struct cnic_local *cp = dev->cnic_priv; 1060 struct cnic_local *cp = dev->cnic_priv;
1061 struct cnic_eth_dev *ethdev = cp->ethdev;
1062 u32 start_cid = ethdev->starting_cid;
1038 int i, j, n, ret, pages; 1063 int i, j, n, ret, pages;
1039 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; 1064 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1040 1065
1066 cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1067 cp->iscsi_start_cid = start_cid;
1068 if (start_cid < BNX2X_ISCSI_START_CID) {
1069 u32 delta = BNX2X_ISCSI_START_CID - start_cid;
1070
1071 cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
1072 cp->max_cid_space += delta;
1073 }
1074
1041 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, 1075 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
1042 GFP_KERNEL); 1076 GFP_KERNEL);
1043 if (!cp->iscsi_tbl) 1077 if (!cp->iscsi_tbl)
1044 goto error; 1078 goto error;
1045 1079
1046 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * 1080 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
1047 MAX_CNIC_L5_CONTEXT, GFP_KERNEL); 1081 cp->max_cid_space, GFP_KERNEL);
1048 if (!cp->ctx_tbl) 1082 if (!cp->ctx_tbl)
1049 goto error; 1083 goto error;
1050 1084
@@ -1053,7 +1087,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1053 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; 1087 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1054 } 1088 }
1055 1089
1056 pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) / 1090 pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1057 PAGE_SIZE; 1091 PAGE_SIZE;
1058 1092
1059 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); 1093 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
@@ -1061,7 +1095,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1061 return -ENOMEM; 1095 return -ENOMEM;
1062 1096
1063 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; 1097 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1064 for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) { 1098 for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1065 long off = CNIC_KWQ16_DATA_SIZE * (i % n); 1099 long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1066 1100
1067 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; 1101 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
@@ -1072,22 +1106,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1072 j++; 1106 j++;
1073 } 1107 }
1074 1108
1075 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0); 1109 ret = cnic_alloc_kcq(dev, &cp->kcq1);
1076 if (ret) 1110 if (ret)
1077 goto error; 1111 goto error;
1078 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
1079
1080 for (i = 0; i < KCQ_PAGE_CNT; i++) {
1081 struct bnx2x_bd_chain_next *next =
1082 (struct bnx2x_bd_chain_next *)
1083 &cp->kcq[i][MAX_KCQE_CNT];
1084 int j = i + 1;
1085
1086 if (j >= KCQ_PAGE_CNT)
1087 j = 0;
1088 next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
1089 next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
1090 }
1091 1112
1092 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * 1113 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
1093 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; 1114 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
@@ -2120,18 +2141,20 @@ static u16 cnic_bnx2x_hw_idx(u16 idx)
2120 return idx; 2141 return idx;
2121} 2142}
2122 2143
2123static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) 2144static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2124{ 2145{
2125 struct cnic_local *cp = dev->cnic_priv; 2146 struct cnic_local *cp = dev->cnic_priv;
2126 u16 i, ri, last; 2147 u16 i, ri, hw_prod, last;
2127 struct kcqe *kcqe; 2148 struct kcqe *kcqe;
2128 int kcqe_cnt = 0, last_cnt = 0; 2149 int kcqe_cnt = 0, last_cnt = 0;
2129 2150
2130 i = ri = last = *sw_prod; 2151 i = ri = last = info->sw_prod_idx;
2131 ri &= MAX_KCQ_IDX; 2152 ri &= MAX_KCQ_IDX;
2153 hw_prod = *info->hw_prod_idx_ptr;
2154 hw_prod = cp->hw_idx(hw_prod);
2132 2155
2133 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2156 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2134 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2157 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2135 cp->completed_kcq[kcqe_cnt++] = kcqe; 2158 cp->completed_kcq[kcqe_cnt++] = kcqe;
2136 i = cp->next_idx(i); 2159 i = cp->next_idx(i);
2137 ri = i & MAX_KCQ_IDX; 2160 ri = i & MAX_KCQ_IDX;
@@ -2141,7 +2164,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
2141 } 2164 }
2142 } 2165 }
2143 2166
2144 *sw_prod = last; 2167 info->sw_prod_idx = last;
2145 return last_cnt; 2168 return last_cnt;
2146} 2169}
2147 2170
@@ -2184,6 +2207,9 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp)
2184 u16 tx_cons = *cp->tx_cons_ptr; 2207 u16 tx_cons = *cp->tx_cons_ptr;
2185 int comp = 0; 2208 int comp = 0;
2186 2209
2210 if (!test_bit(CNIC_F_CNIC_UP, &cp->dev->flags))
2211 return;
2212
2187 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2213 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2188 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2214 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2189 comp = cnic_l2_completion(cp); 2215 comp = cnic_l2_completion(cp);
@@ -2197,103 +2223,79 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp)
2197 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2223 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2198} 2224}
2199 2225
2200static int cnic_service_bnx2(void *data, void *status_blk) 2226static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2201{ 2227{
2202 struct cnic_dev *dev = data;
2203 struct status_block *sblk = status_blk;
2204 struct cnic_local *cp = dev->cnic_priv; 2228 struct cnic_local *cp = dev->cnic_priv;
2205 u32 status_idx = sblk->status_idx; 2229 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2206 u16 hw_prod, sw_prod;
2207 int kcqe_cnt; 2230 int kcqe_cnt;
2208 2231
2209 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2210 return status_idx;
2211
2212 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2232 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2213 2233
2214 hw_prod = sblk->status_completion_producer_index; 2234 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2215 sw_prod = cp->kcq_prod_idx;
2216 while (sw_prod != hw_prod) {
2217 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2218 if (kcqe_cnt == 0)
2219 goto done;
2220 2235
2221 service_kcqes(dev, kcqe_cnt); 2236 service_kcqes(dev, kcqe_cnt);
2222 2237
2223 /* Tell compiler that status_blk fields can change. */ 2238 /* Tell compiler that status_blk fields can change. */
2224 barrier(); 2239 barrier();
2225 if (status_idx != sblk->status_idx) { 2240 if (status_idx != *cp->kcq1.status_idx_ptr) {
2226 status_idx = sblk->status_idx; 2241 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2227 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2242 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2228 hw_prod = sblk->status_completion_producer_index;
2229 } else 2243 } else
2230 break; 2244 break;
2231 } 2245 }
2232 2246
2233done: 2247 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2234 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
2235
2236 cp->kcq_prod_idx = sw_prod;
2237 2248
2238 cnic_chk_pkt_rings(cp); 2249 cnic_chk_pkt_rings(cp);
2250
2239 return status_idx; 2251 return status_idx;
2240} 2252}
2241 2253
2242static void cnic_service_bnx2_msix(unsigned long data) 2254static int cnic_service_bnx2(void *data, void *status_blk)
2243{ 2255{
2244 struct cnic_dev *dev = (struct cnic_dev *) data; 2256 struct cnic_dev *dev = data;
2245 struct cnic_local *cp = dev->cnic_priv; 2257 struct cnic_local *cp = dev->cnic_priv;
2246 struct status_block_msix *status_blk = cp->status_blk.bnx2; 2258 u32 status_idx = *cp->kcq1.status_idx_ptr;
2247 u32 status_idx = status_blk->status_idx;
2248 u16 hw_prod, sw_prod;
2249 int kcqe_cnt;
2250 2259
2251 cp->kwq_con_idx = status_blk->status_cmd_consumer_index; 2260 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2252 2261 return status_idx;
2253 hw_prod = status_blk->status_completion_producer_index;
2254 sw_prod = cp->kcq_prod_idx;
2255 while (sw_prod != hw_prod) {
2256 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2257 if (kcqe_cnt == 0)
2258 goto done;
2259
2260 service_kcqes(dev, kcqe_cnt);
2261 2262
2262 /* Tell compiler that status_blk fields can change. */ 2263 return cnic_service_bnx2_queues(dev);
2263 barrier(); 2264}
2264 if (status_idx != status_blk->status_idx) {
2265 status_idx = status_blk->status_idx;
2266 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
2267 hw_prod = status_blk->status_completion_producer_index;
2268 } else
2269 break;
2270 }
2271 2265
2272done: 2266static void cnic_service_bnx2_msix(unsigned long data)
2273 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); 2267{
2274 cp->kcq_prod_idx = sw_prod; 2268 struct cnic_dev *dev = (struct cnic_dev *) data;
2269 struct cnic_local *cp = dev->cnic_priv;
2275 2270
2276 cnic_chk_pkt_rings(cp); 2271 cp->last_status_idx = cnic_service_bnx2_queues(dev);
2277 2272
2278 cp->last_status_idx = status_idx;
2279 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2273 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2280 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2274 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2281} 2275}
2282 2276
2277static void cnic_doirq(struct cnic_dev *dev)
2278{
2279 struct cnic_local *cp = dev->cnic_priv;
2280 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2281
2282 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2283 prefetch(cp->status_blk.gen);
2284 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2285
2286 tasklet_schedule(&cp->cnic_irq_task);
2287 }
2288}
2289
2283static irqreturn_t cnic_irq(int irq, void *dev_instance) 2290static irqreturn_t cnic_irq(int irq, void *dev_instance)
2284{ 2291{
2285 struct cnic_dev *dev = dev_instance; 2292 struct cnic_dev *dev = dev_instance;
2286 struct cnic_local *cp = dev->cnic_priv; 2293 struct cnic_local *cp = dev->cnic_priv;
2287 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2288 2294
2289 if (cp->ack_int) 2295 if (cp->ack_int)
2290 cp->ack_int(dev); 2296 cp->ack_int(dev);
2291 2297
2292 prefetch(cp->status_blk.gen); 2298 cnic_doirq(dev);
2293 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2294
2295 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2296 tasklet_schedule(&cp->cnic_irq_task);
2297 2299
2298 return IRQ_HANDLED; 2300 return IRQ_HANDLED;
2299} 2301}
@@ -2324,60 +2326,50 @@ static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2324 IGU_INT_DISABLE, 0); 2326 IGU_INT_DISABLE, 0);
2325} 2327}
2326 2328
2327static void cnic_service_bnx2x_bh(unsigned long data) 2329static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2328{ 2330{
2329 struct cnic_dev *dev = (struct cnic_dev *) data; 2331 u32 last_status = *info->status_idx_ptr;
2330 struct cnic_local *cp = dev->cnic_priv;
2331 u16 hw_prod, sw_prod;
2332 struct cstorm_status_block_c *sblk =
2333 &cp->status_blk.bnx2x->c_status_block;
2334 u32 status_idx = sblk->status_block_index;
2335 int kcqe_cnt; 2332 int kcqe_cnt;
2336 2333
2337 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2334 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2338 return;
2339
2340 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
2341 hw_prod = cp->hw_idx(hw_prod);
2342 sw_prod = cp->kcq_prod_idx;
2343 while (sw_prod != hw_prod) {
2344 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2345 if (kcqe_cnt == 0)
2346 goto done;
2347 2335
2348 service_kcqes(dev, kcqe_cnt); 2336 service_kcqes(dev, kcqe_cnt);
2349 2337
2350 /* Tell compiler that sblk fields can change. */ 2338 /* Tell compiler that sblk fields can change. */
2351 barrier(); 2339 barrier();
2352 if (status_idx == sblk->status_block_index) 2340 if (last_status == *info->status_idx_ptr)
2353 break; 2341 break;
2354 2342
2355 status_idx = sblk->status_block_index; 2343 last_status = *info->status_idx_ptr;
2356 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
2357 hw_prod = cp->hw_idx(hw_prod);
2358 } 2344 }
2345 return last_status;
2346}
2359 2347
2360done: 2348static void cnic_service_bnx2x_bh(unsigned long data)
2361 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX); 2349{
2350 struct cnic_dev *dev = (struct cnic_dev *) data;
2351 struct cnic_local *cp = dev->cnic_priv;
2352 u32 status_idx;
2353
2354 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2355 return;
2356
2357 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2358
2359 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2362 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 2360 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
2363 status_idx, IGU_INT_ENABLE, 1); 2361 status_idx, IGU_INT_ENABLE, 1);
2364
2365 cp->kcq_prod_idx = sw_prod;
2366} 2362}
2367 2363
2368static int cnic_service_bnx2x(void *data, void *status_blk) 2364static int cnic_service_bnx2x(void *data, void *status_blk)
2369{ 2365{
2370 struct cnic_dev *dev = data; 2366 struct cnic_dev *dev = data;
2371 struct cnic_local *cp = dev->cnic_priv; 2367 struct cnic_local *cp = dev->cnic_priv;
2372 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2373 2368
2374 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2369 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2375 prefetch(cp->status_blk.bnx2x); 2370 cnic_doirq(dev);
2376 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2377 2371
2378 tasklet_schedule(&cp->cnic_irq_task); 2372 cnic_chk_pkt_rings(cp);
2379 cnic_chk_pkt_rings(cp);
2380 }
2381 2373
2382 return 0; 2374 return 0;
2383} 2375}
@@ -2824,7 +2816,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
2824 2816
2825 err = ip_route_output_key(&init_net, &rt, &fl); 2817 err = ip_route_output_key(&init_net, &rt, &fl);
2826 if (!err) 2818 if (!err)
2827 *dst = &rt->u.dst; 2819 *dst = &rt->dst;
2828 return err; 2820 return err;
2829#else 2821#else
2830 return -ENETUNREACH; 2822 return -ENETUNREACH;
@@ -2996,7 +2988,7 @@ err_out:
2996static int cnic_cm_abort(struct cnic_sock *csk) 2988static int cnic_cm_abort(struct cnic_sock *csk)
2997{ 2989{
2998 struct cnic_local *cp = csk->dev->cnic_priv; 2990 struct cnic_local *cp = csk->dev->cnic_priv;
2999 u32 opcode; 2991 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3000 2992
3001 if (!cnic_in_use(csk)) 2993 if (!cnic_in_use(csk))
3002 return -EINVAL; 2994 return -EINVAL;
@@ -3008,12 +3000,9 @@ static int cnic_cm_abort(struct cnic_sock *csk)
3008 * connect was not successful. 3000 * connect was not successful.
3009 */ 3001 */
3010 3002
3011 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3012 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3013 opcode = csk->state;
3014 else
3015 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3016 cp->close_conn(csk, opcode); 3003 cp->close_conn(csk, opcode);
3004 if (csk->state != opcode)
3005 return -EALREADY;
3017 3006
3018 return 0; 3007 return 0;
3019} 3008}
@@ -3026,6 +3015,8 @@ static int cnic_cm_close(struct cnic_sock *csk)
3026 if (cnic_close_prep(csk)) { 3015 if (cnic_close_prep(csk)) {
3027 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3016 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3028 return cnic_cm_close_req(csk); 3017 return cnic_cm_close_req(csk);
3018 } else {
3019 return -EALREADY;
3029 } 3020 }
3030 return 0; 3021 return 0;
3031} 3022}
@@ -3141,12 +3132,6 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3141 break; 3132 break;
3142 3133
3143 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3134 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3144 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
3145 cnic_cm_upcall(cp, csk, opcode);
3146 break;
3147 } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
3148 csk->state = opcode;
3149 /* fall through */
3150 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3135 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3151 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3136 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3152 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3137 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
@@ -3202,19 +3187,22 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3202 3187
3203static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 3188static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3204{ 3189{
3205 if ((opcode == csk->state) || 3190 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3206 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED && 3191 /* Unsolicited RESET_COMP or RESET_RECEIVED */
3207 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) { 3192 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3208 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) 3193 csk->state = opcode;
3209 return 1;
3210 } 3194 }
3211 /* 57710+ only workaround to handle unsolicited RESET_COMP 3195
3212 * which will be treated like a RESET RCVD notification 3196 /* 1. If event opcode matches the expected event in csk->state
3213 * which triggers the clean up procedure 3197 * 2. If the expected event is CLOSE_COMP, we accept any event
3198 * 3. If the expected event is 0, meaning the connection was never
3199 * never established, we accept the opcode from cm_abort.
3214 */ 3200 */
3215 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) { 3201 if (opcode == csk->state || csk->state == 0 ||
3202 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
3216 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 3203 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3217 csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 3204 if (csk->state == 0)
3205 csk->state = opcode;
3218 return 1; 3206 return 1;
3219 } 3207 }
3220 } 3208 }
@@ -3226,8 +3214,14 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3226 struct cnic_dev *dev = csk->dev; 3214 struct cnic_dev *dev = csk->dev;
3227 struct cnic_local *cp = dev->cnic_priv; 3215 struct cnic_local *cp = dev->cnic_priv;
3228 3216
3217 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3218 cnic_cm_upcall(cp, csk, opcode);
3219 return;
3220 }
3221
3229 clear_bit(SK_F_CONNECT_START, &csk->flags); 3222 clear_bit(SK_F_CONNECT_START, &csk->flags);
3230 cnic_close_conn(csk); 3223 cnic_close_conn(csk);
3224 csk->state = opcode;
3231 cnic_cm_upcall(cp, csk, opcode); 3225 cnic_cm_upcall(cp, csk, opcode);
3232} 3226}
3233 3227
@@ -3257,8 +3251,12 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3257 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3251 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3258 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3252 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3259 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3253 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3260 if (cnic_ready_to_close(csk, opcode)) 3254 if (cnic_ready_to_close(csk, opcode)) {
3261 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 3255 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3256 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3257 else
3258 close_complete = 1;
3259 }
3262 break; 3260 break;
3263 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3261 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3264 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3262 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
@@ -3694,7 +3692,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3694 struct cnic_local *cp = dev->cnic_priv; 3692 struct cnic_local *cp = dev->cnic_priv;
3695 struct cnic_eth_dev *ethdev = cp->ethdev; 3693 struct cnic_eth_dev *ethdev = cp->ethdev;
3696 struct status_block *sblk = cp->status_blk.gen; 3694 struct status_block *sblk = cp->status_blk.gen;
3697 u32 val; 3695 u32 val, kcq_cid_addr, kwq_cid_addr;
3698 int err; 3696 int err;
3699 3697
3700 cnic_set_bnx2_mac(dev); 3698 cnic_set_bnx2_mac(dev);
@@ -3719,7 +3717,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3719 cnic_init_context(dev, KWQ_CID); 3717 cnic_init_context(dev, KWQ_CID);
3720 cnic_init_context(dev, KCQ_CID); 3718 cnic_init_context(dev, KCQ_CID);
3721 3719
3722 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID); 3720 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
3723 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; 3721 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
3724 3722
3725 cp->max_kwq_idx = MAX_KWQ_IDX; 3723 cp->max_kwq_idx = MAX_KWQ_IDX;
@@ -3735,50 +3733,59 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3735 /* Initialize the kernel work queue context. */ 3733 /* Initialize the kernel work queue context. */
3736 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3734 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3737 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3735 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3738 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val); 3736 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
3739 3737
3740 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 3738 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
3741 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3739 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3742 3740
3743 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 3741 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
3744 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3742 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3745 3743
3746 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 3744 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
3747 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3745 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3748 3746
3749 val = (u32) cp->kwq_info.pgtbl_map; 3747 val = (u32) cp->kwq_info.pgtbl_map;
3750 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3748 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3749
3750 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
3751 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
3751 3752
3752 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID); 3753 cp->kcq1.sw_prod_idx = 0;
3753 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; 3754 cp->kcq1.hw_prod_idx_ptr =
3755 (u16 *) &sblk->status_completion_producer_index;
3754 3756
3755 cp->kcq_prod_idx = 0; 3757 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
3756 3758
3757 /* Initialize the kernel complete queue context. */ 3759 /* Initialize the kernel complete queue context. */
3758 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3760 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3759 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3761 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3760 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val); 3762 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
3761 3763
3762 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 3764 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
3763 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3765 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3764 3766
3765 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 3767 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
3766 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3768 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3767 3769
3768 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32); 3770 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
3769 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3771 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3770 3772
3771 val = (u32) cp->kcq_info.pgtbl_map; 3773 val = (u32) cp->kcq1.dma.pgtbl_map;
3772 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3774 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3773 3775
3774 cp->int_num = 0; 3776 cp->int_num = 0;
3775 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3777 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3778 struct status_block_msix *msblk = cp->status_blk.bnx2;
3776 u32 sb_id = cp->status_blk_num; 3779 u32 sb_id = cp->status_blk_num;
3777 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 3780 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
3778 3781
3782 cp->kcq1.hw_prod_idx_ptr =
3783 (u16 *) &msblk->status_completion_producer_index;
3784 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
3785 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
3779 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 3786 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
3780 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3787 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
3781 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3788 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
3782 } 3789 }
3783 3790
3784 /* Enable Commnad Scheduler notification when we write to the 3791 /* Enable Commnad Scheduler notification when we write to the
@@ -4123,33 +4130,39 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4123 u8 sb_id = cp->status_blk_num; 4130 u8 sb_id = cp->status_blk_num;
4124 4131
4125 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 4132 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4126 BNX2X_ISCSI_START_CID); 4133 cp->iscsi_start_cid);
4127 4134
4128 if (ret) 4135 if (ret)
4129 return -ENOMEM; 4136 return -ENOMEM;
4130 4137
4131 cp->kcq_io_addr = BAR_CSTRORM_INTMEM + 4138 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4132 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); 4139 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
4133 cp->kcq_prod_idx = 0; 4140 cp->kcq1.sw_prod_idx = 0;
4141
4142 cp->kcq1.hw_prod_idx_ptr =
4143 &cp->status_blk.bnx2x->c_status_block.index_values[
4144 HC_INDEX_C_ISCSI_EQ_CONS];
4145 cp->kcq1.status_idx_ptr =
4146 &cp->status_blk.bnx2x->c_status_block.status_block_index;
4134 4147
4135 cnic_get_bnx2x_iscsi_info(dev); 4148 cnic_get_bnx2x_iscsi_info(dev);
4136 4149
4137 /* Only 1 EQ */ 4150 /* Only 1 EQ */
4138 CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX); 4151 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4139 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4152 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4140 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); 4153 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
4141 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4154 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4142 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), 4155 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
4143 cp->kcq_info.pg_map_arr[1] & 0xffffffff); 4156 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4144 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4157 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4145 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, 4158 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
4146 (u64) cp->kcq_info.pg_map_arr[1] >> 32); 4159 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4147 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4160 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4148 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), 4161 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
4149 cp->kcq_info.pg_map_arr[0] & 0xffffffff); 4162 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4150 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4163 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4151 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, 4164 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
4152 (u64) cp->kcq_info.pg_map_arr[0] >> 32); 4165 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4153 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4166 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4154 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); 4167 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
4155 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4168 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
@@ -4377,7 +4390,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
4377 0); 4390 0);
4378 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4391 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4379 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0); 4392 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
4380 CNIC_WR16(dev, cp->kcq_io_addr, 0); 4393 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
4381 cnic_free_resc(dev); 4394 cnic_free_resc(dev);
4382} 4395}
4383 4396
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 08b1235d987d..275c36114d85 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -169,6 +169,16 @@ struct cnic_context {
169 } proto; 169 } proto;
170}; 170};
171 171
172struct kcq_info {
173 struct cnic_dma dma;
174 struct kcqe **kcq;
175
176 u16 *hw_prod_idx_ptr;
177 u16 sw_prod_idx;
178 u16 *status_idx_ptr;
179 u32 io_addr;
180};
181
172struct cnic_local { 182struct cnic_local {
173 183
174 spinlock_t cnic_ulp_lock; 184 spinlock_t cnic_ulp_lock;
@@ -202,9 +212,6 @@ struct cnic_local {
202 u16 rx_cons; 212 u16 rx_cons;
203 u16 tx_cons; 213 u16 tx_cons;
204 214
205 u32 kwq_cid_addr;
206 u32 kcq_cid_addr;
207
208 struct cnic_dma kwq_info; 215 struct cnic_dma kwq_info;
209 struct kwqe **kwq; 216 struct kwqe **kwq;
210 217
@@ -218,11 +225,7 @@ struct cnic_local {
218 u16 *kwq_con_idx_ptr; 225 u16 *kwq_con_idx_ptr;
219 u16 kwq_con_idx; 226 u16 kwq_con_idx;
220 227
221 struct cnic_dma kcq_info; 228 struct kcq_info kcq1;
222 struct kcqe **kcq;
223
224 u16 kcq_prod_idx;
225 u32 kcq_io_addr;
226 229
227 union { 230 union {
228 void *gen; 231 void *gen;
@@ -248,8 +251,10 @@ struct cnic_local {
248 struct cnic_iscsi *iscsi_tbl; 251 struct cnic_iscsi *iscsi_tbl;
249 struct cnic_context *ctx_tbl; 252 struct cnic_context *ctx_tbl;
250 struct cnic_id_tbl cid_tbl; 253 struct cnic_id_tbl cid_tbl;
251 int max_iscsi_conn;
252 atomic_t iscsi_conn; 254 atomic_t iscsi_conn;
255 u32 iscsi_start_cid;
256
257 u32 max_cid_space;
253 258
254 /* per connection parameters */ 259 /* per connection parameters */
255 int num_iscsi_tasks; 260 int num_iscsi_tasks;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 0c55177db046..344c842d55ab 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.1.2" 15#define CNIC_MODULE_VERSION "2.1.3"
16#define CNIC_MODULE_RELDATE "May 26, 2010" 16#define CNIC_MODULE_RELDATE "June 24, 2010"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 23786ee34bed..e1f6156b3710 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -846,11 +846,8 @@ static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
846 return -EINVAL; 846 return -EINVAL;
847 if (!priv->phy) 847 if (!priv->phy)
848 return -EINVAL; 848 return -EINVAL;
849 if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
850 (cmd == SIOCSMIIREG))
851 return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);
852 849
853 return -EOPNOTSUPP; 850 return phy_mii_ioctl(priv->phy, ifr, cmd);
854} 851}
855 852
856static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 853static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -964,7 +961,7 @@ static int cpmac_open(struct net_device *dev)
964 struct sk_buff *skb; 961 struct sk_buff *skb;
965 962
966 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); 963 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
967 if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) { 964 if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
968 if (netif_msg_drv(priv)) 965 if (netif_msg_drv(priv))
969 printk(KERN_ERR "%s: failed to request registers\n", 966 printk(KERN_ERR "%s: failed to request registers\n",
970 dev->name); 967 dev->name);
@@ -972,7 +969,7 @@ static int cpmac_open(struct net_device *dev)
972 goto fail_reserve; 969 goto fail_reserve;
973 } 970 }
974 971
975 priv->regs = ioremap(mem->start, mem->end - mem->start); 972 priv->regs = ioremap(mem->start, resource_size(mem));
976 if (!priv->regs) { 973 if (!priv->regs) {
977 if (netif_msg_drv(priv)) 974 if (netif_msg_drv(priv))
978 printk(KERN_ERR "%s: failed to remap registers\n", 975 printk(KERN_ERR "%s: failed to remap registers\n",
@@ -1049,7 +1046,7 @@ fail_alloc:
1049 iounmap(priv->regs); 1046 iounmap(priv->regs);
1050 1047
1051fail_remap: 1048fail_remap:
1052 release_mem_region(mem->start, mem->end - mem->start); 1049 release_mem_region(mem->start, resource_size(mem));
1053 1050
1054fail_reserve: 1051fail_reserve:
1055 return res; 1052 return res;
@@ -1077,7 +1074,7 @@ static int cpmac_stop(struct net_device *dev)
1077 free_irq(dev->irq, dev); 1074 free_irq(dev->irq, dev);
1078 iounmap(priv->regs); 1075 iounmap(priv->regs);
1079 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); 1076 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
1080 release_mem_region(mem->start, mem->end - mem->start); 1077 release_mem_region(mem->start, resource_size(mem));
1081 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; 1078 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
1082 for (i = 0; i < priv->ring_size; i++) { 1079 for (i = 0; i < priv->ring_size; i++) {
1083 if (priv->rx_head[i].skb) { 1080 if (priv->rx_head[i].skb) {
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 7e00027b9f8e..81475cc80e1c 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1702,11 +1702,7 @@ e100_set_network_leds(int active)
1702 1702
1703 if (!current_speed) { 1703 if (!current_speed) {
1704 /* Make LED red, link is down */ 1704 /* Make LED red, link is down */
1705#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION)
1706 CRIS_LED_NETWORK_SET(CRIS_LED_RED);
1707#else
1708 CRIS_LED_NETWORK_SET(CRIS_LED_OFF); 1705 CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
1709#endif
1710 } else if (light_leds) { 1706 } else if (light_leds) {
1711 if (current_speed == 10) { 1707 if (current_speed == 10) {
1712 CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE); 1708 CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE);
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 2ccb9f12805b..d325e01a53e0 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -170,22 +170,12 @@ static char version[] __initdata =
170/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps 170/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
171 them to system IRQ numbers. This mapping is card specific and is set to 171 them to system IRQ numbers. This mapping is card specific and is set to
172 the configuration of the Cirrus Eval board for this chip. */ 172 the configuration of the Cirrus Eval board for this chip. */
173#if defined(CONFIG_SH_HICOSH4) 173#if defined(CONFIG_MACH_IXDP2351)
174static unsigned int netcard_portlist[] __used __initdata =
175 { 0x0300, 0};
176static unsigned int cs8900_irq_map[] = {1,0,0,0};
177#elif defined(CONFIG_MACH_IXDP2351)
178static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0}; 174static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
179static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; 175static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
180#elif defined(CONFIG_ARCH_IXDP2X01) 176#elif defined(CONFIG_ARCH_IXDP2X01)
181static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; 177static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
182static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 178static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
183#elif defined(CONFIG_ARCH_PNX010X)
184#include <mach/gpio.h>
185#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */
186#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
187static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0};
188static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0};
189#elif defined(CONFIG_MACH_MX31ADS) 179#elif defined(CONFIG_MACH_MX31ADS)
190#include <mach/board-mx31ads.h> 180#include <mach/board-mx31ads.h>
191static unsigned int netcard_portlist[] __used __initdata = { 181static unsigned int netcard_portlist[] __used __initdata = {
@@ -218,7 +208,6 @@ static unsigned int net_debug = DEBUGGING;
218 208
219/* Information that need to be kept for each board. */ 209/* Information that need to be kept for each board. */
220struct net_local { 210struct net_local {
221 struct net_device_stats stats;
222 int chip_type; /* one of: CS8900, CS8920, CS8920M */ 211 int chip_type; /* one of: CS8900, CS8920, CS8920M */
223 char chip_revision; /* revision letter of the chip ('A'...) */ 212 char chip_revision; /* revision letter of the chip ('A'...) */
224 int send_cmd; /* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */ 213 int send_cmd; /* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
@@ -257,7 +246,7 @@ static void reset_chip(struct net_device *dev);
257static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer); 246static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
258static int get_eeprom_cksum(int off, int len, int *buffer); 247static int get_eeprom_cksum(int off, int len, int *buffer);
259static int set_mac_address(struct net_device *dev, void *addr); 248static int set_mac_address(struct net_device *dev, void *addr);
260static void count_rx_errors(int status, struct net_local *lp); 249static void count_rx_errors(int status, struct net_device *dev);
261#ifdef CONFIG_NET_POLL_CONTROLLER 250#ifdef CONFIG_NET_POLL_CONTROLLER
262static void net_poll_controller(struct net_device *dev); 251static void net_poll_controller(struct net_device *dev);
263#endif 252#endif
@@ -372,18 +361,6 @@ writeword(unsigned long base_addr, int portno, u16 value)
372{ 361{
373 __raw_writel(value, base_addr + (portno << 1)); 362 __raw_writel(value, base_addr + (portno << 1));
374} 363}
375#elif defined(CONFIG_ARCH_PNX010X)
376static u16
377readword(unsigned long base_addr, int portno)
378{
379 return inw(base_addr + (portno << 1));
380}
381
382static void
383writeword(unsigned long base_addr, int portno, u16 value)
384{
385 outw(value, base_addr + (portno << 1));
386}
387#else 364#else
388static u16 365static u16
389readword(unsigned long base_addr, int portno) 366readword(unsigned long base_addr, int portno)
@@ -546,30 +523,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
546#endif 523#endif
547 } 524 }
548 525
549#ifdef CONFIG_ARCH_PNX010X
550 initialize_ebi();
551
552 /* Map GPIO registers for the pins connected to the CS8900a. */
553 if (map_cirrus_gpio() < 0)
554 return -ENODEV;
555
556 reset_cirrus();
557
558 /* Map event-router registers. */
559 if (map_event_router() < 0)
560 return -ENODEV;
561
562 enable_cirrus_irq();
563
564 unmap_cirrus_gpio();
565 unmap_event_router();
566
567 dev->base_addr = ioaddr;
568
569 for (i = 0 ; i < 3 ; i++)
570 readreg(dev, 0);
571#endif
572
573 /* Grab the region so we can find another board if autoIRQ fails. */ 526 /* Grab the region so we can find another board if autoIRQ fails. */
574 /* WTF is going on here? */ 527 /* WTF is going on here? */
575 if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) { 528 if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
@@ -579,12 +532,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
579 goto out1; 532 goto out1;
580 } 533 }
581 534
582#ifdef CONFIG_SH_HICOSH4
583 /* truly reset the chip */
584 writeword(ioaddr, ADD_PORT, 0x0114);
585 writeword(ioaddr, DATA_PORT, 0x0040);
586#endif
587
588 /* if they give us an odd I/O address, then do ONE write to 535 /* if they give us an odd I/O address, then do ONE write to
589 the address port, to get it back to address zero, where we 536 the address port, to get it back to address zero, where we
590 expect to find the EISA signature word. An IO with a base of 0x3 537 expect to find the EISA signature word. An IO with a base of 0x3
@@ -650,37 +597,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
650 the driver will always do *something* instead of complain that 597 the driver will always do *something* instead of complain that
651 adapter_cnf is 0. */ 598 adapter_cnf is 0. */
652 599
653#ifdef CONFIG_SH_HICOSH4
654 if (1) {
655 /* For the HiCO.SH4 board, things are different: we don't
656 have EEPROM, but there is some data in flash, so we go
657 get it there directly (MAC). */
658 __u16 *confd;
659 short cnt;
660 if (((* (volatile __u32 *) 0xa0013ff0) & 0x00ffffff)
661 == 0x006c3000) {
662 confd = (__u16*) 0xa0013fc0;
663 } else {
664 confd = (__u16*) 0xa001ffc0;
665 }
666 cnt = (*confd++ & 0x00ff) >> 1;
667 while (--cnt > 0) {
668 __u16 j = *confd++;
669
670 switch (j & 0x0fff) {
671 case PP_IA:
672 for (i = 0; i < ETH_ALEN/2; i++) {
673 dev->dev_addr[i*2] = confd[i] & 0xFF;
674 dev->dev_addr[i*2+1] = confd[i] >> 8;
675 }
676 break;
677 }
678 j = (j >> 12) + 1;
679 confd += j;
680 cnt -= j;
681 }
682 } else
683#endif
684 600
685 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) == 601 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
686 (EEPROM_OK|EEPROM_PRESENT)) { 602 (EEPROM_OK|EEPROM_PRESENT)) {
@@ -735,11 +651,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
735 printk("\n"); 651 printk("\n");
736 652
737 /* First check to see if an EEPROM is attached. */ 653 /* First check to see if an EEPROM is attached. */
738#ifdef CONFIG_SH_HICOSH4 /* no EEPROM on HiCO, don't hazzle with it here */ 654
739 if (1) {
740 printk(KERN_NOTICE "cs89x0: No EEPROM on HiCO.SH4\n");
741 } else
742#endif
743 if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0) 655 if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
744 printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n"); 656 printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
745 else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) { 657 else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
@@ -983,7 +895,7 @@ dma_rx(struct net_device *dev)
983 dev->name, (unsigned long)bp, status, length); 895 dev->name, (unsigned long)bp, status, length);
984 } 896 }
985 if ((status & RX_OK) == 0) { 897 if ((status & RX_OK) == 0) {
986 count_rx_errors(status, lp); 898 count_rx_errors(status, dev);
987 goto skip_this_frame; 899 goto skip_this_frame;
988 } 900 }
989 901
@@ -992,7 +904,7 @@ dma_rx(struct net_device *dev)
992 if (skb == NULL) { 904 if (skb == NULL) {
993 if (net_debug) /* I don't think we want to do this to a stressed system */ 905 if (net_debug) /* I don't think we want to do this to a stressed system */
994 printk("%s: Memory squeeze, dropping packet.\n", dev->name); 906 printk("%s: Memory squeeze, dropping packet.\n", dev->name);
995 lp->stats.rx_dropped++; 907 dev->stats.rx_dropped++;
996 908
997 /* AKPM: advance bp to the next frame */ 909 /* AKPM: advance bp to the next frame */
998skip_this_frame: 910skip_this_frame:
@@ -1022,8 +934,8 @@ skip_this_frame:
1022 } 934 }
1023 skb->protocol=eth_type_trans(skb,dev); 935 skb->protocol=eth_type_trans(skb,dev);
1024 netif_rx(skb); 936 netif_rx(skb);
1025 lp->stats.rx_packets++; 937 dev->stats.rx_packets++;
1026 lp->stats.rx_bytes += length; 938 dev->stats.rx_bytes += length;
1027} 939}
1028 940
1029#endif /* ALLOW_DMA */ 941#endif /* ALLOW_DMA */
@@ -1276,7 +1188,6 @@ net_open(struct net_device *dev)
1276 int i; 1188 int i;
1277 int ret; 1189 int ret;
1278 1190
1279#if !defined(CONFIG_SH_HICOSH4) && !defined(CONFIG_ARCH_PNX010X) /* uses irq#1, so this won't work */
1280 if (dev->irq < 2) { 1191 if (dev->irq < 2) {
1281 /* Allow interrupts to be generated by the chip */ 1192 /* Allow interrupts to be generated by the chip */
1282/* Cirrus' release had this: */ 1193/* Cirrus' release had this: */
@@ -1305,7 +1216,6 @@ net_open(struct net_device *dev)
1305 } 1216 }
1306 } 1217 }
1307 else 1218 else
1308#endif
1309 { 1219 {
1310#ifndef CONFIG_CS89x0_NONISA_IRQ 1220#ifndef CONFIG_CS89x0_NONISA_IRQ
1311 if (((1 << dev->irq) & lp->irq_map) == 0) { 1221 if (((1 << dev->irq) & lp->irq_map) == 0) {
@@ -1391,9 +1301,6 @@ net_open(struct net_device *dev)
1391 case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break; 1301 case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
1392 default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2); 1302 default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
1393 } 1303 }
1394#ifdef CONFIG_ARCH_PNX010X
1395 result = A_CNF_10B_T;
1396#endif
1397 if (!result) { 1304 if (!result) {
1398 printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); 1305 printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
1399release_dma: 1306release_dma:
@@ -1552,7 +1459,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
1552 /* Write the contents of the packet */ 1459 /* Write the contents of the packet */
1553 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1); 1460 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
1554 spin_unlock_irqrestore(&lp->lock, flags); 1461 spin_unlock_irqrestore(&lp->lock, flags);
1555 lp->stats.tx_bytes += skb->len; 1462 dev->stats.tx_bytes += skb->len;
1556 dev_kfree_skb (skb); 1463 dev_kfree_skb (skb);
1557 1464
1558 /* 1465 /*
@@ -1598,18 +1505,23 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
1598 net_rx(dev); 1505 net_rx(dev);
1599 break; 1506 break;
1600 case ISQ_TRANSMITTER_EVENT: 1507 case ISQ_TRANSMITTER_EVENT:
1601 lp->stats.tx_packets++; 1508 dev->stats.tx_packets++;
1602 netif_wake_queue(dev); /* Inform upper layers. */ 1509 netif_wake_queue(dev); /* Inform upper layers. */
1603 if ((status & ( TX_OK | 1510 if ((status & ( TX_OK |
1604 TX_LOST_CRS | 1511 TX_LOST_CRS |
1605 TX_SQE_ERROR | 1512 TX_SQE_ERROR |
1606 TX_LATE_COL | 1513 TX_LATE_COL |
1607 TX_16_COL)) != TX_OK) { 1514 TX_16_COL)) != TX_OK) {
1608 if ((status & TX_OK) == 0) lp->stats.tx_errors++; 1515 if ((status & TX_OK) == 0)
1609 if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++; 1516 dev->stats.tx_errors++;
1610 if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++; 1517 if (status & TX_LOST_CRS)
1611 if (status & TX_LATE_COL) lp->stats.tx_window_errors++; 1518 dev->stats.tx_carrier_errors++;
1612 if (status & TX_16_COL) lp->stats.tx_aborted_errors++; 1519 if (status & TX_SQE_ERROR)
1520 dev->stats.tx_heartbeat_errors++;
1521 if (status & TX_LATE_COL)
1522 dev->stats.tx_window_errors++;
1523 if (status & TX_16_COL)
1524 dev->stats.tx_aborted_errors++;
1613 } 1525 }
1614 break; 1526 break;
1615 case ISQ_BUFFER_EVENT: 1527 case ISQ_BUFFER_EVENT:
@@ -1651,10 +1563,10 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
1651#endif 1563#endif
1652 break; 1564 break;
1653 case ISQ_RX_MISS_EVENT: 1565 case ISQ_RX_MISS_EVENT:
1654 lp->stats.rx_missed_errors += (status >>6); 1566 dev->stats.rx_missed_errors += (status >> 6);
1655 break; 1567 break;
1656 case ISQ_TX_COL_EVENT: 1568 case ISQ_TX_COL_EVENT:
1657 lp->stats.collisions += (status >>6); 1569 dev->stats.collisions += (status >> 6);
1658 break; 1570 break;
1659 } 1571 }
1660 } 1572 }
@@ -1662,22 +1574,24 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
1662} 1574}
1663 1575
1664static void 1576static void
1665count_rx_errors(int status, struct net_local *lp) 1577count_rx_errors(int status, struct net_device *dev)
1666{ 1578{
1667 lp->stats.rx_errors++; 1579 dev->stats.rx_errors++;
1668 if (status & RX_RUNT) lp->stats.rx_length_errors++; 1580 if (status & RX_RUNT)
1669 if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++; 1581 dev->stats.rx_length_errors++;
1670 if (status & RX_CRC_ERROR) if (!(status & (RX_EXTRA_DATA|RX_RUNT))) 1582 if (status & RX_EXTRA_DATA)
1583 dev->stats.rx_length_errors++;
1584 if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT)))
1671 /* per str 172 */ 1585 /* per str 172 */
1672 lp->stats.rx_crc_errors++; 1586 dev->stats.rx_crc_errors++;
1673 if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++; 1587 if (status & RX_DRIBBLE)
1588 dev->stats.rx_frame_errors++;
1674} 1589}
1675 1590
1676/* We have a good packet(s), get it/them out of the buffers. */ 1591/* We have a good packet(s), get it/them out of the buffers. */
1677static void 1592static void
1678net_rx(struct net_device *dev) 1593net_rx(struct net_device *dev)
1679{ 1594{
1680 struct net_local *lp = netdev_priv(dev);
1681 struct sk_buff *skb; 1595 struct sk_buff *skb;
1682 int status, length; 1596 int status, length;
1683 1597
@@ -1686,7 +1600,7 @@ net_rx(struct net_device *dev)
1686 length = readword(ioaddr, RX_FRAME_PORT); 1600 length = readword(ioaddr, RX_FRAME_PORT);
1687 1601
1688 if ((status & RX_OK) == 0) { 1602 if ((status & RX_OK) == 0) {
1689 count_rx_errors(status, lp); 1603 count_rx_errors(status, dev);
1690 return; 1604 return;
1691 } 1605 }
1692 1606
@@ -1696,7 +1610,7 @@ net_rx(struct net_device *dev)
1696#if 0 /* Again, this seems a cruel thing to do */ 1610#if 0 /* Again, this seems a cruel thing to do */
1697 printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); 1611 printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
1698#endif 1612#endif
1699 lp->stats.rx_dropped++; 1613 dev->stats.rx_dropped++;
1700 return; 1614 return;
1701 } 1615 }
1702 skb_reserve(skb, 2); /* longword align L3 header */ 1616 skb_reserve(skb, 2); /* longword align L3 header */
@@ -1713,8 +1627,8 @@ net_rx(struct net_device *dev)
1713 1627
1714 skb->protocol=eth_type_trans(skb,dev); 1628 skb->protocol=eth_type_trans(skb,dev);
1715 netif_rx(skb); 1629 netif_rx(skb);
1716 lp->stats.rx_packets++; 1630 dev->stats.rx_packets++;
1717 lp->stats.rx_bytes += length; 1631 dev->stats.rx_bytes += length;
1718} 1632}
1719 1633
1720#if ALLOW_DMA 1634#if ALLOW_DMA
@@ -1765,11 +1679,11 @@ net_get_stats(struct net_device *dev)
1765 1679
1766 spin_lock_irqsave(&lp->lock, flags); 1680 spin_lock_irqsave(&lp->lock, flags);
1767 /* Update the statistics from the device registers. */ 1681 /* Update the statistics from the device registers. */
1768 lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); 1682 dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
1769 lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6); 1683 dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
1770 spin_unlock_irqrestore(&lp->lock, flags); 1684 spin_unlock_irqrestore(&lp->lock, flags);
1771 1685
1772 return &lp->stats; 1686 return &dev->stats;
1773} 1687}
1774 1688
1775static void set_multicast_list(struct net_device *dev) 1689static void set_multicast_list(struct net_device *dev)
diff --git a/drivers/net/cs89x0.h b/drivers/net/cs89x0.h
index 204ed37fa9d5..91423b70bb45 100644
--- a/drivers/net/cs89x0.h
+++ b/drivers/net/cs89x0.h
@@ -437,11 +437,7 @@
437#define IRQ_MAP_EEPROM_DATA 0x0046 /* Offset into eeprom for the IRQ map */ 437#define IRQ_MAP_EEPROM_DATA 0x0046 /* Offset into eeprom for the IRQ map */
438#define IRQ_MAP_LEN 0x0004 /* No of bytes to read for the IRQ map */ 438#define IRQ_MAP_LEN 0x0004 /* No of bytes to read for the IRQ map */
439#define PNP_IRQ_FRMT 0x0022 /* PNP small item IRQ format */ 439#define PNP_IRQ_FRMT 0x0022 /* PNP small item IRQ format */
440#ifdef CONFIG_SH_HICOSH4
441#define CS8900_IRQ_MAP 0x0002 /* HiCO-SH4 board has its IRQ on #1 */
442#else
443#define CS8900_IRQ_MAP 0x1c20 /* This IRQ map is fixed */ 440#define CS8900_IRQ_MAP 0x1c20 /* This IRQ map is fixed */
444#endif
445 441
446#define CS8920_NO_INTS 0x0F /* Max CS8920 interrupt select # */ 442#define CS8920_NO_INTS 0x0F /* Max CS8920 interrupt select # */
447 443
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index e3f1b8566495..066fd5b09fda 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2311,15 +2311,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2311 if (copy_from_user(&t, useraddr, sizeof(t))) 2311 if (copy_from_user(&t, useraddr, sizeof(t)))
2312 return -EFAULT; 2312 return -EFAULT;
2313 /* Check t.len sanity ? */ 2313 /* Check t.len sanity ? */
2314 fw_data = kmalloc(t.len, GFP_KERNEL); 2314 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2315 if (!fw_data) 2315 if (IS_ERR(fw_data))
2316 return -ENOMEM; 2316 return PTR_ERR(fw_data);
2317
2318 if (copy_from_user
2319 (fw_data, useraddr + sizeof(t), t.len)) {
2320 kfree(fw_data);
2321 return -EFAULT;
2322 }
2323 2317
2324 ret = t3_load_fw(adapter, fw_data, t.len); 2318 ret = t3_load_fw(adapter, fw_data, t.len);
2325 kfree(fw_data); 2319 kfree(fw_data);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 5962b911b5bd..8ff96c6f6de5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -203,15 +203,11 @@ static inline void refill_rspq(struct adapter *adapter,
203 */ 203 */
204static inline int need_skb_unmap(void) 204static inline int need_skb_unmap(void)
205{ 205{
206 /* 206#ifdef CONFIG_NEED_DMA_MAP_STATE
207 * This structure is used to tell if the platform needs buffer 207 return 1;
208 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything. 208#else
209 */ 209 return 0;
210 struct dummy { 210#endif
211 DEFINE_DMA_UNMAP_ADDR(addr);
212 };
213
214 return sizeof(struct dummy) != 0;
215} 211}
216 212
217/** 213/**
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 95a8ba0759f1..427c451be1a7 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -679,14 +679,6 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
679 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 679 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
680} 680}
681 681
682/*
683 * Convert a character holding a hex digit to a number.
684 */
685static unsigned int hex2int(unsigned char c)
686{
687 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
688}
689
690/** 682/**
691 * get_vpd_params - read VPD parameters from VPD EEPROM 683 * get_vpd_params - read VPD parameters from VPD EEPROM
692 * @adapter: adapter to read 684 * @adapter: adapter to read
@@ -727,15 +719,15 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
727 p->port_type[0] = uses_xaui(adapter) ? 1 : 2; 719 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
728 p->port_type[1] = uses_xaui(adapter) ? 6 : 2; 720 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
729 } else { 721 } else {
730 p->port_type[0] = hex2int(vpd.port0_data[0]); 722 p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
731 p->port_type[1] = hex2int(vpd.port1_data[0]); 723 p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
732 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); 724 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
733 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); 725 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
734 } 726 }
735 727
736 for (i = 0; i < 6; i++) 728 for (i = 0; i < 6; i++)
737 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 + 729 p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
738 hex2int(vpd.na_data[2 * i + 1]); 730 hex_to_bin(vpd.na_data[2 * i + 1]);
739 return 0; 731 return 0;
740} 732}
741 733
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 9d0bd9dd9ab1..8bda06e366c8 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,10 +35,10 @@
35#define DRV_DESC "Chelsio T3 Network Driver" 35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3" 36#define DRV_NAME "cxgb3"
37/* Driver version */ 37/* Driver version */
38#define DRV_VERSION "1.1.3-ko" 38#define DRV_VERSION "1.1.4-ko"
39 39
40/* Firmware version */ 40/* Firmware version */
41#define FW_VERSION_MAJOR 7 41#define FW_VERSION_MAJOR 7
42#define FW_VERSION_MINOR 4 42#define FW_VERSION_MINOR 10
43#define FW_VERSION_MICRO 0 43#define FW_VERSION_MICRO 0
44#endif /* __CHELSIO_VERSION_H */ 44#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index dd1770e075e6..6e562c0dad7d 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -219,6 +219,10 @@ struct adapter_params {
219 struct vpd_params vpd; 219 struct vpd_params vpd;
220 struct pci_params pci; 220 struct pci_params pci;
221 221
222 unsigned int sf_size; /* serial flash size in bytes */
223 unsigned int sf_nsec; /* # of flash sectors */
224 unsigned int sf_fw_start; /* start of FW image in flash */
225
222 unsigned int fw_vers; 226 unsigned int fw_vers;
223 unsigned int tp_vers; 227 unsigned int tp_vers;
224 u8 api_vers[7]; 228 u8 api_vers[7];
@@ -290,7 +294,9 @@ struct port_info {
290 u8 rx_offload; /* CSO, etc */ 294 u8 rx_offload; /* CSO, etc */
291 u8 nqsets; /* # of qsets */ 295 u8 nqsets; /* # of qsets */
292 u8 first_qset; /* index of first qset */ 296 u8 first_qset; /* index of first qset */
297 u8 rss_mode;
293 struct link_config link_cfg; 298 struct link_config link_cfg;
299 u16 *rss;
294}; 300};
295 301
296/* port_info.rx_offload flags */ 302/* port_info.rx_offload flags */
@@ -305,7 +311,6 @@ enum { /* adapter flags */
305 FULL_INIT_DONE = (1 << 0), 311 FULL_INIT_DONE = (1 << 0),
306 USING_MSI = (1 << 1), 312 USING_MSI = (1 << 1),
307 USING_MSIX = (1 << 2), 313 USING_MSIX = (1 << 2),
308 QUEUES_BOUND = (1 << 3),
309 FW_OK = (1 << 4), 314 FW_OK = (1 << 4),
310}; 315};
311 316
@@ -477,7 +482,8 @@ struct adapter {
477 struct pci_dev *pdev; 482 struct pci_dev *pdev;
478 struct device *pdev_dev; 483 struct device *pdev_dev;
479 unsigned long registered_device_map; 484 unsigned long registered_device_map;
480 unsigned long flags; 485 unsigned int fn;
486 unsigned int flags;
481 487
482 const char *name; 488 const char *name;
483 int msg_enable; 489 int msg_enable;
@@ -646,6 +652,7 @@ void t4_intr_disable(struct adapter *adapter);
646void t4_intr_clear(struct adapter *adapter); 652void t4_intr_clear(struct adapter *adapter);
647int t4_slow_intr_handler(struct adapter *adapter); 653int t4_slow_intr_handler(struct adapter *adapter);
648 654
655int t4_wait_dev_ready(struct adapter *adap);
649int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, 656int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
650 struct link_config *lc); 657 struct link_config *lc);
651int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); 658int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 58045b00cf40..c327527fbbc8 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -67,7 +67,7 @@
67#include "t4fw_api.h" 67#include "t4fw_api.h"
68#include "l2t.h" 68#include "l2t.h"
69 69
70#define DRV_VERSION "1.0.0-ko" 70#define DRV_VERSION "1.3.0-ko"
71#define DRV_DESC "Chelsio T4 Network Driver" 71#define DRV_DESC "Chelsio T4 Network Driver"
72 72
73/* 73/*
@@ -77,6 +77,76 @@
77 */ 77 */
78#define MAX_SGE_TIMERVAL 200U 78#define MAX_SGE_TIMERVAL 200U
79 79
80#ifdef CONFIG_PCI_IOV
81/*
82 * Virtual Function provisioning constants. We need two extra Ingress Queues
83 * with Interrupt capability to serve as the VF's Firmware Event Queue and
84 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
85 * Lists associated with them). For each Ethernet/Control Egress Queue and
86 * for each Free List, we need an Egress Context.
87 */
88enum {
89 VFRES_NPORTS = 1, /* # of "ports" per VF */
90 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
91
92 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
93 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
94 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
95 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
96 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
97 VFRES_TC = 0, /* PCI-E traffic class */
98 VFRES_NEXACTF = 16, /* # of exact MPS filters */
99
100 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
101 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
102};
103
104/*
105 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
106 * static and likely not to be useful in the long run. We really need to
107 * implement some form of persistent configuration which the firmware
108 * controls.
109 */
110static unsigned int pfvfres_pmask(struct adapter *adapter,
111 unsigned int pf, unsigned int vf)
112{
113 unsigned int portn, portvec;
114
115 /*
116 * Give PF's access to all of the ports.
117 */
118 if (vf == 0)
119 return FW_PFVF_CMD_PMASK_MASK;
120
121 /*
122 * For VFs, we'll assign them access to the ports based purely on the
123 * PF. We assign active ports in order, wrapping around if there are
124 * fewer active ports than PFs: e.g. active port[pf % nports].
125 * Unfortunately the adapter's port_info structs haven't been
126 * initialized yet so we have to compute this.
127 */
128 if (adapter->params.nports == 0)
129 return 0;
130
131 portn = pf % adapter->params.nports;
132 portvec = adapter->params.portvec;
133 for (;;) {
134 /*
135 * Isolate the lowest set bit in the port vector. If we're at
136 * the port number that we want, return that as the pmask.
137 * otherwise mask that bit out of the port vector and
138 * decrement our port number ...
139 */
140 unsigned int pmask = portvec ^ (portvec & (portvec-1));
141 if (portn == 0)
142 return pmask;
143 portn--;
144 portvec &= ~pmask;
145 }
146 /*NOTREACHED*/
147}
148#endif
149
80enum { 150enum {
81 MEMWIN0_APERTURE = 65536, 151 MEMWIN0_APERTURE = 65536,
82 MEMWIN0_BASE = 0x30000, 152 MEMWIN0_BASE = 0x30000,
@@ -101,10 +171,20 @@ enum {
101 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 171 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
102 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 172 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
103 173
104#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 } 174#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
105 175
106static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { 176static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
107 CH_DEVICE(0xa000), /* PE10K */ 177 CH_DEVICE(0xa000, 0), /* PE10K */
178 CH_DEVICE(0x4001, 0),
179 CH_DEVICE(0x4002, 0),
180 CH_DEVICE(0x4003, 0),
181 CH_DEVICE(0x4004, 0),
182 CH_DEVICE(0x4005, 0),
183 CH_DEVICE(0x4006, 0),
184 CH_DEVICE(0x4007, 0),
185 CH_DEVICE(0x4008, 0),
186 CH_DEVICE(0x4009, 0),
187 CH_DEVICE(0x400a, 0),
108 { 0, } 188 { 0, }
109}; 189};
110 190
@@ -216,7 +296,7 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
216void t4_os_portmod_changed(const struct adapter *adap, int port_id) 296void t4_os_portmod_changed(const struct adapter *adap, int port_id)
217{ 297{
218 static const char *mod_str[] = { 298 static const char *mod_str[] = {
219 NULL, "LR", "SR", "ER", "passive DA", "active DA" 299 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
220 }; 300 };
221 301
222 const struct net_device *dev = adap->port[port_id]; 302 const struct net_device *dev = adap->port[port_id];
@@ -224,7 +304,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
224 304
225 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 305 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
226 netdev_info(dev, "port module unplugged\n"); 306 netdev_info(dev, "port module unplugged\n");
227 else 307 else if (pi->mod_type < ARRAY_SIZE(mod_str))
228 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); 308 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
229} 309}
230 310
@@ -244,12 +324,13 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
244 int uc_cnt = netdev_uc_count(dev); 324 int uc_cnt = netdev_uc_count(dev);
245 int mc_cnt = netdev_mc_count(dev); 325 int mc_cnt = netdev_mc_count(dev);
246 const struct port_info *pi = netdev_priv(dev); 326 const struct port_info *pi = netdev_priv(dev);
327 unsigned int mb = pi->adapter->fn;
247 328
248 /* first do the secondary unicast addresses */ 329 /* first do the secondary unicast addresses */
249 netdev_for_each_uc_addr(ha, dev) { 330 netdev_for_each_uc_addr(ha, dev) {
250 addr[naddr++] = ha->addr; 331 addr[naddr++] = ha->addr;
251 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { 332 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
252 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, 333 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
253 naddr, addr, filt_idx, &uhash, sleep); 334 naddr, addr, filt_idx, &uhash, sleep);
254 if (ret < 0) 335 if (ret < 0)
255 return ret; 336 return ret;
@@ -263,7 +344,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
263 netdev_for_each_mc_addr(ha, dev) { 344 netdev_for_each_mc_addr(ha, dev) {
264 addr[naddr++] = ha->addr; 345 addr[naddr++] = ha->addr;
265 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { 346 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, 347 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
267 naddr, addr, filt_idx, &mhash, sleep); 348 naddr, addr, filt_idx, &mhash, sleep);
268 if (ret < 0) 349 if (ret < 0)
269 return ret; 350 return ret;
@@ -273,7 +354,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
273 } 354 }
274 } 355 }
275 356
276 return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0, 357 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
277 uhash | mhash, sleep); 358 uhash | mhash, sleep);
278} 359}
279 360
@@ -288,7 +369,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
288 369
289 ret = set_addr_filters(dev, sleep_ok); 370 ret = set_addr_filters(dev, sleep_ok);
290 if (ret == 0) 371 if (ret == 0)
291 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu, 372 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
292 (dev->flags & IFF_PROMISC) ? 1 : 0, 373 (dev->flags & IFF_PROMISC) ? 1 : 0,
293 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, 374 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
294 sleep_ok); 375 sleep_ok);
@@ -305,15 +386,16 @@ static int link_start(struct net_device *dev)
305{ 386{
306 int ret; 387 int ret;
307 struct port_info *pi = netdev_priv(dev); 388 struct port_info *pi = netdev_priv(dev);
389 unsigned int mb = pi->adapter->fn;
308 390
309 /* 391 /*
310 * We do not set address filters and promiscuity here, the stack does 392 * We do not set address filters and promiscuity here, the stack does
311 * that step explicitly. 393 * that step explicitly.
312 */ 394 */
313 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1, 395 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
314 pi->vlan_grp != NULL, true); 396 pi->vlan_grp != NULL, true);
315 if (ret == 0) { 397 if (ret == 0) {
316 ret = t4_change_mac(pi->adapter, 0, pi->viid, 398 ret = t4_change_mac(pi->adapter, mb, pi->viid,
317 pi->xact_addr_filt, dev->dev_addr, true, 399 pi->xact_addr_filt, dev->dev_addr, true,
318 true); 400 true);
319 if (ret >= 0) { 401 if (ret >= 0) {
@@ -322,9 +404,10 @@ static int link_start(struct net_device *dev)
322 } 404 }
323 } 405 }
324 if (ret == 0) 406 if (ret == 0)
325 ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg); 407 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
408 &pi->link_cfg);
326 if (ret == 0) 409 if (ret == 0)
327 ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true); 410 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
328 return ret; 411 return ret;
329} 412}
330 413
@@ -527,30 +610,47 @@ static void free_msix_queue_irqs(struct adapter *adap)
527} 610}
528 611
529/** 612/**
613 * write_rss - write the RSS table for a given port
614 * @pi: the port
615 * @queues: array of queue indices for RSS
616 *
617 * Sets up the portion of the HW RSS table for the port's VI to distribute
618 * packets to the Rx queues in @queues.
619 */
620static int write_rss(const struct port_info *pi, const u16 *queues)
621{
622 u16 *rss;
623 int i, err;
624 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
625
626 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
627 if (!rss)
628 return -ENOMEM;
629
630 /* map the queue indices to queue ids */
631 for (i = 0; i < pi->rss_size; i++, queues++)
632 rss[i] = q[*queues].rspq.abs_id;
633
634 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
635 pi->rss_size, rss, pi->rss_size);
636 kfree(rss);
637 return err;
638}
639
640/**
530 * setup_rss - configure RSS 641 * setup_rss - configure RSS
531 * @adap: the adapter 642 * @adap: the adapter
532 * 643 *
533 * Sets up RSS to distribute packets to multiple receive queues. We 644 * Sets up RSS for each port.
534 * configure the RSS CPU lookup table to distribute to the number of HW
535 * receive queues, and the response queue lookup table to narrow that
536 * down to the response queues actually configured for each port.
537 * We always configure the RSS mapping for all ports since the mapping
538 * table has plenty of entries.
539 */ 645 */
540static int setup_rss(struct adapter *adap) 646static int setup_rss(struct adapter *adap)
541{ 647{
542 int i, j, err; 648 int i, err;
543 u16 rss[MAX_ETH_QSETS];
544 649
545 for_each_port(adap, i) { 650 for_each_port(adap, i) {
546 const struct port_info *pi = adap2pinfo(adap, i); 651 const struct port_info *pi = adap2pinfo(adap, i);
547 const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
548
549 for (j = 0; j < pi->nqsets; j++)
550 rss[j] = q[j].rspq.abs_id;
551 652
552 err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size, 653 err = write_rss(pi, pi->rss);
553 rss, pi->nqsets);
554 if (err) 654 if (err)
555 return err; 655 return err;
556 } 656 }
@@ -963,10 +1063,11 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
963 * Return a version number to identify the type of adapter. The scheme is: 1063 * Return a version number to identify the type of adapter. The scheme is:
964 * - bits 0..9: chip version 1064 * - bits 0..9: chip version
965 * - bits 10..15: chip revision 1065 * - bits 10..15: chip revision
1066 * - bits 16..23: register dump version
966 */ 1067 */
967static inline unsigned int mk_adap_vers(const struct adapter *ap) 1068static inline unsigned int mk_adap_vers(const struct adapter *ap)
968{ 1069{
969 return 4 | (ap->params.rev << 10); 1070 return 4 | (ap->params.rev << 10) | (1 << 16);
970} 1071}
971 1072
972static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, 1073static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -1041,7 +1142,9 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1041 0xdfc0, 0xdfe0, 1142 0xdfc0, 0xdfe0,
1042 0xe000, 0xea7c, 1143 0xe000, 0xea7c,
1043 0xf000, 0x11190, 1144 0xf000, 0x11190,
1044 0x19040, 0x19124, 1145 0x19040, 0x1906c,
1146 0x19078, 0x19080,
1147 0x1908c, 0x19124,
1045 0x19150, 0x191b0, 1148 0x19150, 0x191b0,
1046 0x191d0, 0x191e8, 1149 0x191d0, 0x191e8,
1047 0x19238, 0x1924c, 1150 0x19238, 0x1924c,
@@ -1054,49 +1157,49 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1054 0x1a190, 0x1a1c4, 1157 0x1a190, 0x1a1c4,
1055 0x1a1fc, 0x1a1fc, 1158 0x1a1fc, 0x1a1fc,
1056 0x1e040, 0x1e04c, 1159 0x1e040, 0x1e04c,
1057 0x1e240, 0x1e28c, 1160 0x1e284, 0x1e28c,
1058 0x1e2c0, 0x1e2c0, 1161 0x1e2c0, 0x1e2c0,
1059 0x1e2e0, 0x1e2e0, 1162 0x1e2e0, 0x1e2e0,
1060 0x1e300, 0x1e384, 1163 0x1e300, 0x1e384,
1061 0x1e3c0, 0x1e3c8, 1164 0x1e3c0, 0x1e3c8,
1062 0x1e440, 0x1e44c, 1165 0x1e440, 0x1e44c,
1063 0x1e640, 0x1e68c, 1166 0x1e684, 0x1e68c,
1064 0x1e6c0, 0x1e6c0, 1167 0x1e6c0, 0x1e6c0,
1065 0x1e6e0, 0x1e6e0, 1168 0x1e6e0, 0x1e6e0,
1066 0x1e700, 0x1e784, 1169 0x1e700, 0x1e784,
1067 0x1e7c0, 0x1e7c8, 1170 0x1e7c0, 0x1e7c8,
1068 0x1e840, 0x1e84c, 1171 0x1e840, 0x1e84c,
1069 0x1ea40, 0x1ea8c, 1172 0x1ea84, 0x1ea8c,
1070 0x1eac0, 0x1eac0, 1173 0x1eac0, 0x1eac0,
1071 0x1eae0, 0x1eae0, 1174 0x1eae0, 0x1eae0,
1072 0x1eb00, 0x1eb84, 1175 0x1eb00, 0x1eb84,
1073 0x1ebc0, 0x1ebc8, 1176 0x1ebc0, 0x1ebc8,
1074 0x1ec40, 0x1ec4c, 1177 0x1ec40, 0x1ec4c,
1075 0x1ee40, 0x1ee8c, 1178 0x1ee84, 0x1ee8c,
1076 0x1eec0, 0x1eec0, 1179 0x1eec0, 0x1eec0,
1077 0x1eee0, 0x1eee0, 1180 0x1eee0, 0x1eee0,
1078 0x1ef00, 0x1ef84, 1181 0x1ef00, 0x1ef84,
1079 0x1efc0, 0x1efc8, 1182 0x1efc0, 0x1efc8,
1080 0x1f040, 0x1f04c, 1183 0x1f040, 0x1f04c,
1081 0x1f240, 0x1f28c, 1184 0x1f284, 0x1f28c,
1082 0x1f2c0, 0x1f2c0, 1185 0x1f2c0, 0x1f2c0,
1083 0x1f2e0, 0x1f2e0, 1186 0x1f2e0, 0x1f2e0,
1084 0x1f300, 0x1f384, 1187 0x1f300, 0x1f384,
1085 0x1f3c0, 0x1f3c8, 1188 0x1f3c0, 0x1f3c8,
1086 0x1f440, 0x1f44c, 1189 0x1f440, 0x1f44c,
1087 0x1f640, 0x1f68c, 1190 0x1f684, 0x1f68c,
1088 0x1f6c0, 0x1f6c0, 1191 0x1f6c0, 0x1f6c0,
1089 0x1f6e0, 0x1f6e0, 1192 0x1f6e0, 0x1f6e0,
1090 0x1f700, 0x1f784, 1193 0x1f700, 0x1f784,
1091 0x1f7c0, 0x1f7c8, 1194 0x1f7c0, 0x1f7c8,
1092 0x1f840, 0x1f84c, 1195 0x1f840, 0x1f84c,
1093 0x1fa40, 0x1fa8c, 1196 0x1fa84, 0x1fa8c,
1094 0x1fac0, 0x1fac0, 1197 0x1fac0, 0x1fac0,
1095 0x1fae0, 0x1fae0, 1198 0x1fae0, 0x1fae0,
1096 0x1fb00, 0x1fb84, 1199 0x1fb00, 0x1fb84,
1097 0x1fbc0, 0x1fbc8, 1200 0x1fbc0, 0x1fbc8,
1098 0x1fc40, 0x1fc4c, 1201 0x1fc40, 0x1fc4c,
1099 0x1fe40, 0x1fe8c, 1202 0x1fe84, 0x1fe8c,
1100 0x1fec0, 0x1fec0, 1203 0x1fec0, 0x1fec0,
1101 0x1fee0, 0x1fee0, 1204 0x1fee0, 0x1fee0,
1102 0x1ff00, 0x1ff84, 1205 0x1ff00, 0x1ff84,
@@ -1217,16 +1320,18 @@ static int restart_autoneg(struct net_device *dev)
1217 return -EAGAIN; 1320 return -EAGAIN;
1218 if (p->link_cfg.autoneg != AUTONEG_ENABLE) 1321 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1219 return -EINVAL; 1322 return -EINVAL;
1220 t4_restart_aneg(p->adapter, 0, p->tx_chan); 1323 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1221 return 0; 1324 return 0;
1222} 1325}
1223 1326
1224static int identify_port(struct net_device *dev, u32 data) 1327static int identify_port(struct net_device *dev, u32 data)
1225{ 1328{
1329 struct adapter *adap = netdev2adap(dev);
1330
1226 if (data == 0) 1331 if (data == 0)
1227 data = 2; /* default to 2 seconds */ 1332 data = 2; /* default to 2 seconds */
1228 1333
1229 return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid, 1334 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
1230 data * 5); 1335 data * 5);
1231} 1336}
1232 1337
@@ -1234,7 +1339,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1234{ 1339{
1235 unsigned int v = 0; 1340 unsigned int v = 0;
1236 1341
1237 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) { 1342 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1343 type == FW_PORT_TYPE_BT_XAUI) {
1238 v |= SUPPORTED_TP; 1344 v |= SUPPORTED_TP;
1239 if (caps & FW_PORT_CAP_SPEED_100M) 1345 if (caps & FW_PORT_CAP_SPEED_100M)
1240 v |= SUPPORTED_100baseT_Full; 1346 v |= SUPPORTED_100baseT_Full;
@@ -1250,7 +1356,10 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1250 v |= SUPPORTED_10000baseKX4_Full; 1356 v |= SUPPORTED_10000baseKX4_Full;
1251 } else if (type == FW_PORT_TYPE_KR) 1357 } else if (type == FW_PORT_TYPE_KR)
1252 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; 1358 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1253 else if (type == FW_PORT_TYPE_FIBER) 1359 else if (type == FW_PORT_TYPE_BP_AP)
1360 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
1361 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1362 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1254 v |= SUPPORTED_FIBRE; 1363 v |= SUPPORTED_FIBRE;
1255 1364
1256 if (caps & FW_PORT_CAP_ANEG) 1365 if (caps & FW_PORT_CAP_ANEG)
@@ -1276,13 +1385,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1276 const struct port_info *p = netdev_priv(dev); 1385 const struct port_info *p = netdev_priv(dev);
1277 1386
1278 if (p->port_type == FW_PORT_TYPE_BT_SGMII || 1387 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1388 p->port_type == FW_PORT_TYPE_BT_XFI ||
1279 p->port_type == FW_PORT_TYPE_BT_XAUI) 1389 p->port_type == FW_PORT_TYPE_BT_XAUI)
1280 cmd->port = PORT_TP; 1390 cmd->port = PORT_TP;
1281 else if (p->port_type == FW_PORT_TYPE_FIBER) 1391 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1392 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1282 cmd->port = PORT_FIBRE; 1393 cmd->port = PORT_FIBRE;
1283 else if (p->port_type == FW_PORT_TYPE_TWINAX) 1394 else if (p->port_type == FW_PORT_TYPE_SFP) {
1284 cmd->port = PORT_DA; 1395 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1285 else 1396 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1397 cmd->port = PORT_DA;
1398 else
1399 cmd->port = PORT_FIBRE;
1400 } else
1286 cmd->port = PORT_OTHER; 1401 cmd->port = PORT_OTHER;
1287 1402
1288 if (p->mdio_addr >= 0) { 1403 if (p->mdio_addr >= 0) {
@@ -1356,7 +1471,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1356 lc->autoneg = cmd->autoneg; 1471 lc->autoneg = cmd->autoneg;
1357 1472
1358 if (netif_running(dev)) 1473 if (netif_running(dev))
1359 return t4_link_start(p->adapter, 0, p->tx_chan, lc); 1474 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1475 lc);
1360 return 0; 1476 return 0;
1361} 1477}
1362 1478
@@ -1388,7 +1504,8 @@ static int set_pauseparam(struct net_device *dev,
1388 if (epause->tx_pause) 1504 if (epause->tx_pause)
1389 lc->requested_fc |= PAUSE_TX; 1505 lc->requested_fc |= PAUSE_TX;
1390 if (netif_running(dev)) 1506 if (netif_running(dev))
1391 return t4_link_start(p->adapter, 0, p->tx_chan, lc); 1507 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1508 lc);
1392 return 0; 1509 return 0;
1393} 1510}
1394 1511
@@ -1520,7 +1637,8 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1520 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 1637 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1521 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | 1638 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1522 FW_PARAMS_PARAM_YZ(q->cntxt_id); 1639 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1523 err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx); 1640 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1641 &new_idx);
1524 if (err) 1642 if (err)
1525 return err; 1643 return err;
1526 } 1644 }
@@ -1708,27 +1826,114 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1708 return err; 1826 return err;
1709} 1827}
1710 1828
1829#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1830
1711static int set_tso(struct net_device *dev, u32 value) 1831static int set_tso(struct net_device *dev, u32 value)
1712{ 1832{
1713 if (value) 1833 if (value)
1714 dev->features |= NETIF_F_TSO | NETIF_F_TSO6; 1834 dev->features |= TSO_FLAGS;
1715 else 1835 else
1716 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 1836 dev->features &= ~TSO_FLAGS;
1717 return 0; 1837 return 0;
1718} 1838}
1719 1839
1720static int set_flags(struct net_device *dev, u32 flags) 1840static int set_flags(struct net_device *dev, u32 flags)
1721{ 1841{
1722 if (flags & ~ETH_FLAG_RXHASH) 1842 return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
1723 return -EOPNOTSUPP; 1843}
1724 1844
1725 if (flags & ETH_FLAG_RXHASH) 1845static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1726 dev->features |= NETIF_F_RXHASH; 1846{
1727 else 1847 const struct port_info *pi = netdev_priv(dev);
1728 dev->features &= ~NETIF_F_RXHASH; 1848 unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1849
1850 p->size = pi->rss_size;
1851 while (n--)
1852 p->ring_index[n] = pi->rss[n];
1729 return 0; 1853 return 0;
1730} 1854}
1731 1855
1856static int set_rss_table(struct net_device *dev,
1857 const struct ethtool_rxfh_indir *p)
1858{
1859 unsigned int i;
1860 struct port_info *pi = netdev_priv(dev);
1861
1862 if (p->size != pi->rss_size)
1863 return -EINVAL;
1864 for (i = 0; i < p->size; i++)
1865 if (p->ring_index[i] >= pi->nqsets)
1866 return -EINVAL;
1867 for (i = 0; i < p->size; i++)
1868 pi->rss[i] = p->ring_index[i];
1869 if (pi->adapter->flags & FULL_INIT_DONE)
1870 return write_rss(pi, pi->rss);
1871 return 0;
1872}
1873
1874static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1875 void *rules)
1876{
1877 const struct port_info *pi = netdev_priv(dev);
1878
1879 switch (info->cmd) {
1880 case ETHTOOL_GRXFH: {
1881 unsigned int v = pi->rss_mode;
1882
1883 info->data = 0;
1884 switch (info->flow_type) {
1885 case TCP_V4_FLOW:
1886 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1887 info->data = RXH_IP_SRC | RXH_IP_DST |
1888 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1889 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1890 info->data = RXH_IP_SRC | RXH_IP_DST;
1891 break;
1892 case UDP_V4_FLOW:
1893 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1894 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1895 info->data = RXH_IP_SRC | RXH_IP_DST |
1896 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1897 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1898 info->data = RXH_IP_SRC | RXH_IP_DST;
1899 break;
1900 case SCTP_V4_FLOW:
1901 case AH_ESP_V4_FLOW:
1902 case IPV4_FLOW:
1903 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1904 info->data = RXH_IP_SRC | RXH_IP_DST;
1905 break;
1906 case TCP_V6_FLOW:
1907 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1908 info->data = RXH_IP_SRC | RXH_IP_DST |
1909 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1910 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1911 info->data = RXH_IP_SRC | RXH_IP_DST;
1912 break;
1913 case UDP_V6_FLOW:
1914 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1915 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1916 info->data = RXH_IP_SRC | RXH_IP_DST |
1917 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1918 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1919 info->data = RXH_IP_SRC | RXH_IP_DST;
1920 break;
1921 case SCTP_V6_FLOW:
1922 case AH_ESP_V6_FLOW:
1923 case IPV6_FLOW:
1924 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1925 info->data = RXH_IP_SRC | RXH_IP_DST;
1926 break;
1927 }
1928 return 0;
1929 }
1930 case ETHTOOL_GRXRINGS:
1931 info->data = pi->nqsets;
1932 return 0;
1933 }
1934 return -EOPNOTSUPP;
1935}
1936
1732static struct ethtool_ops cxgb_ethtool_ops = { 1937static struct ethtool_ops cxgb_ethtool_ops = {
1733 .get_settings = get_settings, 1938 .get_settings = get_settings,
1734 .set_settings = set_settings, 1939 .set_settings = set_settings,
@@ -1760,6 +1965,9 @@ static struct ethtool_ops cxgb_ethtool_ops = {
1760 .set_wol = set_wol, 1965 .set_wol = set_wol,
1761 .set_tso = set_tso, 1966 .set_tso = set_tso,
1762 .set_flags = set_flags, 1967 .set_flags = set_flags,
1968 .get_rxnfc = get_rxnfc,
1969 .get_rxfh_indir = get_rss_table,
1970 .set_rxfh_indir = set_rss_table,
1763 .flash_device = set_flash, 1971 .flash_device = set_flash,
1764}; 1972};
1765 1973
@@ -2306,9 +2514,11 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
2306 lli.adapter_type = adap->params.rev; 2514 lli.adapter_type = adap->params.rev;
2307 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); 2515 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2308 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( 2516 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2309 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); 2517 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2518 (adap->fn * 4));
2310 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 2519 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2311 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); 2520 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2521 (adap->fn * 4));
2312 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); 2522 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2313 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); 2523 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2314 lli.fw_vers = adap->params.fw_vers; 2524 lli.fw_vers = adap->params.fw_vers;
@@ -2483,6 +2693,7 @@ static void cxgb_down(struct adapter *adapter)
2483 t4_intr_disable(adapter); 2693 t4_intr_disable(adapter);
2484 cancel_work_sync(&adapter->tid_release_task); 2694 cancel_work_sync(&adapter->tid_release_task);
2485 adapter->tid_release_task_busy = false; 2695 adapter->tid_release_task_busy = false;
2696 adapter->tid_release_head = NULL;
2486 2697
2487 if (adapter->flags & USING_MSIX) { 2698 if (adapter->flags & USING_MSIX) {
2488 free_msix_queue_irqs(adapter); 2699 free_msix_queue_irqs(adapter);
@@ -2511,9 +2722,10 @@ static int cxgb_open(struct net_device *dev)
2511 } 2722 }
2512 2723
2513 dev->real_num_tx_queues = pi->nqsets; 2724 dev->real_num_tx_queues = pi->nqsets;
2514 link_start(dev); 2725 err = link_start(dev);
2515 netif_tx_start_all_queues(dev); 2726 if (!err)
2516 return 0; 2727 netif_tx_start_all_queues(dev);
2728 return err;
2517} 2729}
2518 2730
2519static int cxgb_close(struct net_device *dev) 2731static int cxgb_close(struct net_device *dev)
@@ -2523,15 +2735,15 @@ static int cxgb_close(struct net_device *dev)
2523 2735
2524 netif_tx_stop_all_queues(dev); 2736 netif_tx_stop_all_queues(dev);
2525 netif_carrier_off(dev); 2737 netif_carrier_off(dev);
2526 return t4_enable_vi(adapter, 0, pi->viid, false, false); 2738 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2527} 2739}
2528 2740
2529static struct net_device_stats *cxgb_get_stats(struct net_device *dev) 2741static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2742 struct rtnl_link_stats64 *ns)
2530{ 2743{
2531 struct port_stats stats; 2744 struct port_stats stats;
2532 struct port_info *p = netdev_priv(dev); 2745 struct port_info *p = netdev_priv(dev);
2533 struct adapter *adapter = p->adapter; 2746 struct adapter *adapter = p->adapter;
2534 struct net_device_stats *ns = &dev->stats;
2535 2747
2536 spin_lock(&adapter->stats_lock); 2748 spin_lock(&adapter->stats_lock);
2537 t4_get_port_stats(adapter, p->tx_chan, &stats); 2749 t4_get_port_stats(adapter, p->tx_chan, &stats);
@@ -2570,6 +2782,7 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
2570 2782
2571static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 2783static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2572{ 2784{
2785 unsigned int mbox;
2573 int ret = 0, prtad, devad; 2786 int ret = 0, prtad, devad;
2574 struct port_info *pi = netdev_priv(dev); 2787 struct port_info *pi = netdev_priv(dev);
2575 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; 2788 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
@@ -2592,11 +2805,12 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2592 } else 2805 } else
2593 return -EINVAL; 2806 return -EINVAL;
2594 2807
2808 mbox = pi->adapter->fn;
2595 if (cmd == SIOCGMIIREG) 2809 if (cmd == SIOCGMIIREG)
2596 ret = t4_mdio_rd(pi->adapter, 0, prtad, devad, 2810 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2597 data->reg_num, &data->val_out); 2811 data->reg_num, &data->val_out);
2598 else 2812 else
2599 ret = t4_mdio_wr(pi->adapter, 0, prtad, devad, 2813 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2600 data->reg_num, data->val_in); 2814 data->reg_num, data->val_in);
2601 break; 2815 break;
2602 default: 2816 default:
@@ -2618,8 +2832,8 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2618 2832
2619 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ 2833 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2620 return -EINVAL; 2834 return -EINVAL;
2621 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1, 2835 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2622 true); 2836 -1, -1, -1, true);
2623 if (!ret) 2837 if (!ret)
2624 dev->mtu = new_mtu; 2838 dev->mtu = new_mtu;
2625 return ret; 2839 return ret;
@@ -2634,8 +2848,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2634 if (!is_valid_ether_addr(addr->sa_data)) 2848 if (!is_valid_ether_addr(addr->sa_data))
2635 return -EINVAL; 2849 return -EINVAL;
2636 2850
2637 ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt, 2851 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2638 addr->sa_data, true, true); 2852 pi->xact_addr_filt, addr->sa_data, true, true);
2639 if (ret < 0) 2853 if (ret < 0)
2640 return ret; 2854 return ret;
2641 2855
@@ -2649,8 +2863,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2649 struct port_info *pi = netdev_priv(dev); 2863 struct port_info *pi = netdev_priv(dev);
2650 2864
2651 pi->vlan_grp = grp; 2865 pi->vlan_grp = grp;
2652 t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL, 2866 t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
2653 true); 2867 grp != NULL, true);
2654} 2868}
2655 2869
2656#ifdef CONFIG_NET_POLL_CONTROLLER 2870#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2674,7 +2888,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
2674 .ndo_open = cxgb_open, 2888 .ndo_open = cxgb_open,
2675 .ndo_stop = cxgb_close, 2889 .ndo_stop = cxgb_close,
2676 .ndo_start_xmit = t4_eth_xmit, 2890 .ndo_start_xmit = t4_eth_xmit,
2677 .ndo_get_stats = cxgb_get_stats, 2891 .ndo_get_stats64 = cxgb_get_stats,
2678 .ndo_set_rx_mode = cxgb_set_rxmode, 2892 .ndo_set_rx_mode = cxgb_set_rxmode,
2679 .ndo_set_mac_address = cxgb_set_mac_addr, 2893 .ndo_set_mac_address = cxgb_set_mac_addr,
2680 .ndo_validate_addr = eth_validate_addr, 2894 .ndo_validate_addr = eth_validate_addr,
@@ -2707,6 +2921,76 @@ static void setup_memwin(struct adapter *adap)
2707 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 2921 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2708 (bar0 + MEMWIN2_BASE) | BIR(0) | 2922 (bar0 + MEMWIN2_BASE) | BIR(0) |
2709 WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 2923 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2924 if (adap->vres.ocq.size) {
2925 unsigned int start, sz_kb;
2926
2927 start = pci_resource_start(adap->pdev, 2) +
2928 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2929 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2930 t4_write_reg(adap,
2931 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2932 start | BIR(1) | WINDOW(ilog2(sz_kb)));
2933 t4_write_reg(adap,
2934 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2935 adap->vres.ocq.start);
2936 t4_read_reg(adap,
2937 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2938 }
2939}
2940
2941static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2942{
2943 u32 v;
2944 int ret;
2945
2946 /* get device capabilities */
2947 memset(c, 0, sizeof(*c));
2948 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2949 FW_CMD_REQUEST | FW_CMD_READ);
2950 c->retval_len16 = htonl(FW_LEN16(*c));
2951 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
2952 if (ret < 0)
2953 return ret;
2954
2955 /* select capabilities we'll be using */
2956 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2957 if (!vf_acls)
2958 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2959 else
2960 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2961 } else if (vf_acls) {
2962 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2963 return ret;
2964 }
2965 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2966 FW_CMD_REQUEST | FW_CMD_WRITE);
2967 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
2968 if (ret < 0)
2969 return ret;
2970
2971 ret = t4_config_glbl_rss(adap, adap->fn,
2972 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2973 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2974 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2975 if (ret < 0)
2976 return ret;
2977
2978 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
2979 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2980 if (ret < 0)
2981 return ret;
2982
2983 t4_sge_init(adap);
2984
2985 /* tweak some settings */
2986 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2987 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2988 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2989 v = t4_read_reg(adap, TP_PIO_DATA);
2990 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2991
2992 /* get basic stuff going */
2993 return t4_early_init(adap, adap->fn);
2710} 2994}
2711 2995
2712/* 2996/*
@@ -2734,7 +3018,7 @@ static int adap_init0(struct adapter *adap)
2734 return ret; 3018 return ret;
2735 3019
2736 /* contact FW, request master */ 3020 /* contact FW, request master */
2737 ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state); 3021 ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
2738 if (ret < 0) { 3022 if (ret < 0) {
2739 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", 3023 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
2740 ret); 3024 ret);
@@ -2742,44 +3026,7 @@ static int adap_init0(struct adapter *adap)
2742 } 3026 }
2743 3027
2744 /* reset device */ 3028 /* reset device */
2745 ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST); 3029 ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
2746 if (ret < 0)
2747 goto bye;
2748
2749 /* get device capabilities */
2750 memset(&c, 0, sizeof(c));
2751 c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2752 FW_CMD_REQUEST | FW_CMD_READ);
2753 c.retval_len16 = htonl(FW_LEN16(c));
2754 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2755 if (ret < 0)
2756 goto bye;
2757
2758 /* select capabilities we'll be using */
2759 if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2760 if (!vf_acls)
2761 c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2762 else
2763 c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2764 } else if (vf_acls) {
2765 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2766 goto bye;
2767 }
2768 c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2769 FW_CMD_REQUEST | FW_CMD_WRITE);
2770 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
2771 if (ret < 0)
2772 goto bye;
2773
2774 ret = t4_config_glbl_rss(adap, 0,
2775 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2776 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2777 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2778 if (ret < 0)
2779 goto bye;
2780
2781 ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
2782 FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2783 if (ret < 0) 3030 if (ret < 0)
2784 goto bye; 3031 goto bye;
2785 3032
@@ -2790,27 +3037,31 @@ static int adap_init0(struct adapter *adap)
2790 for (v = 1; v < SGE_NCOUNTERS; v++) 3037 for (v = 1; v < SGE_NCOUNTERS; v++)
2791 adap->sge.counter_val[v] = min(intr_cnt[v - 1], 3038 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
2792 THRESHOLD_3_MASK); 3039 THRESHOLD_3_MASK);
2793 t4_sge_init(adap); 3040#define FW_PARAM_DEV(param) \
3041 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3042 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2794 3043
2795 /* get basic stuff going */ 3044 params[0] = FW_PARAM_DEV(CCLK);
2796 ret = t4_early_init(adap, 0); 3045 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
2797 if (ret < 0) 3046 if (ret < 0)
2798 goto bye; 3047 goto bye;
3048 adap->params.vpd.cclk = val[0];
2799 3049
2800#define FW_PARAM_DEV(param) \ 3050 ret = adap_init1(adap, &c);
2801 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3051 if (ret < 0)
2802 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3052 goto bye;
2803 3053
2804#define FW_PARAM_PFVF(param) \ 3054#define FW_PARAM_PFVF(param) \
2805 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3055 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2806 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 3056 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3057 FW_PARAMS_PARAM_Y(adap->fn))
2807 3058
2808 params[0] = FW_PARAM_DEV(PORTVEC); 3059 params[0] = FW_PARAM_DEV(PORTVEC);
2809 params[1] = FW_PARAM_PFVF(L2T_START); 3060 params[1] = FW_PARAM_PFVF(L2T_START);
2810 params[2] = FW_PARAM_PFVF(L2T_END); 3061 params[2] = FW_PARAM_PFVF(L2T_END);
2811 params[3] = FW_PARAM_PFVF(FILTER_START); 3062 params[3] = FW_PARAM_PFVF(FILTER_START);
2812 params[4] = FW_PARAM_PFVF(FILTER_END); 3063 params[4] = FW_PARAM_PFVF(FILTER_END);
2813 ret = t4_query_params(adap, 0, 0, 0, 5, params, val); 3064 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
2814 if (ret < 0) 3065 if (ret < 0)
2815 goto bye; 3066 goto bye;
2816 port_vec = val[0]; 3067 port_vec = val[0];
@@ -2825,7 +3076,8 @@ static int adap_init0(struct adapter *adap)
2825 params[3] = FW_PARAM_PFVF(TDDP_START); 3076 params[3] = FW_PARAM_PFVF(TDDP_START);
2826 params[4] = FW_PARAM_PFVF(TDDP_END); 3077 params[4] = FW_PARAM_PFVF(TDDP_END);
2827 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3078 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2828 ret = t4_query_params(adap, 0, 0, 0, 6, params, val); 3079 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3080 val);
2829 if (ret < 0) 3081 if (ret < 0)
2830 goto bye; 3082 goto bye;
2831 adap->tids.ntids = val[0]; 3083 adap->tids.ntids = val[0];
@@ -2844,7 +3096,8 @@ static int adap_init0(struct adapter *adap)
2844 params[3] = FW_PARAM_PFVF(RQ_END); 3096 params[3] = FW_PARAM_PFVF(RQ_END);
2845 params[4] = FW_PARAM_PFVF(PBL_START); 3097 params[4] = FW_PARAM_PFVF(PBL_START);
2846 params[5] = FW_PARAM_PFVF(PBL_END); 3098 params[5] = FW_PARAM_PFVF(PBL_END);
2847 ret = t4_query_params(adap, 0, 0, 0, 6, params, val); 3099 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3100 val);
2848 if (ret < 0) 3101 if (ret < 0)
2849 goto bye; 3102 goto bye;
2850 adap->vres.stag.start = val[0]; 3103 adap->vres.stag.start = val[0];
@@ -2853,11 +3106,29 @@ static int adap_init0(struct adapter *adap)
2853 adap->vres.rq.size = val[3] - val[2] + 1; 3106 adap->vres.rq.size = val[3] - val[2] + 1;
2854 adap->vres.pbl.start = val[4]; 3107 adap->vres.pbl.start = val[4];
2855 adap->vres.pbl.size = val[5] - val[4] + 1; 3108 adap->vres.pbl.size = val[5] - val[4] + 1;
3109
3110 params[0] = FW_PARAM_PFVF(SQRQ_START);
3111 params[1] = FW_PARAM_PFVF(SQRQ_END);
3112 params[2] = FW_PARAM_PFVF(CQ_START);
3113 params[3] = FW_PARAM_PFVF(CQ_END);
3114 params[4] = FW_PARAM_PFVF(OCQ_START);
3115 params[5] = FW_PARAM_PFVF(OCQ_END);
3116 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3117 val);
3118 if (ret < 0)
3119 goto bye;
3120 adap->vres.qp.start = val[0];
3121 adap->vres.qp.size = val[1] - val[0] + 1;
3122 adap->vres.cq.start = val[2];
3123 adap->vres.cq.size = val[3] - val[2] + 1;
3124 adap->vres.ocq.start = val[4];
3125 adap->vres.ocq.size = val[5] - val[4] + 1;
2856 } 3126 }
2857 if (c.iscsicaps) { 3127 if (c.iscsicaps) {
2858 params[0] = FW_PARAM_PFVF(ISCSI_START); 3128 params[0] = FW_PARAM_PFVF(ISCSI_START);
2859 params[1] = FW_PARAM_PFVF(ISCSI_END); 3129 params[1] = FW_PARAM_PFVF(ISCSI_END);
2860 ret = t4_query_params(adap, 0, 0, 0, 2, params, val); 3130 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3131 val);
2861 if (ret < 0) 3132 if (ret < 0)
2862 goto bye; 3133 goto bye;
2863 adap->vres.iscsi.start = val[0]; 3134 adap->vres.iscsi.start = val[0];
@@ -2877,12 +3148,41 @@ static int adap_init0(struct adapter *adap)
2877 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 3148 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
2878 adap->params.b_wnd); 3149 adap->params.b_wnd);
2879 3150
2880 /* tweak some settings */ 3151#ifdef CONFIG_PCI_IOV
2881 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); 3152 /*
2882 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); 3153 * Provision resource limits for Virtual Functions. We currently
2883 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); 3154 * grant them all the same static resource limits except for the Port
2884 v = t4_read_reg(adap, TP_PIO_DATA); 3155 * Access Rights Mask which we're assigning based on the PF. All of
2885 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); 3156 * the static provisioning stuff for both the PF and VF really needs
3157 * to be managed in a persistent manner for each device which the
3158 * firmware controls.
3159 */
3160 {
3161 int pf, vf;
3162
3163 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3164 if (num_vf[pf] <= 0)
3165 continue;
3166
3167 /* VF numbering starts at 1! */
3168 for (vf = 1; vf <= num_vf[pf]; vf++) {
3169 ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3170 VFRES_NEQ, VFRES_NETHCTRL,
3171 VFRES_NIQFLINT, VFRES_NIQ,
3172 VFRES_TC, VFRES_NVI,
3173 FW_PFVF_CMD_CMASK_MASK,
3174 pfvfres_pmask(adap, pf, vf),
3175 VFRES_NEXACTF,
3176 VFRES_R_CAPS, VFRES_WX_CAPS);
3177 if (ret < 0)
3178 dev_warn(adap->pdev_dev, "failed to "
3179 "provision pf/vf=%d/%d; "
3180 "err=%d\n", pf, vf, ret);
3181 }
3182 }
3183 }
3184#endif
3185
2886 setup_memwin(adap); 3186 setup_memwin(adap);
2887 return 0; 3187 return 0;
2888 3188
@@ -2892,10 +3192,114 @@ static int adap_init0(struct adapter *adap)
2892 * commands. 3192 * commands.
2893 */ 3193 */
2894bye: if (ret != -ETIMEDOUT && ret != -EIO) 3194bye: if (ret != -ETIMEDOUT && ret != -EIO)
2895 t4_fw_bye(adap, 0); 3195 t4_fw_bye(adap, adap->fn);
2896 return ret; 3196 return ret;
2897} 3197}
2898 3198
3199/* EEH callbacks */
3200
3201static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3202 pci_channel_state_t state)
3203{
3204 int i;
3205 struct adapter *adap = pci_get_drvdata(pdev);
3206
3207 if (!adap)
3208 goto out;
3209
3210 rtnl_lock();
3211 adap->flags &= ~FW_OK;
3212 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3213 for_each_port(adap, i) {
3214 struct net_device *dev = adap->port[i];
3215
3216 netif_device_detach(dev);
3217 netif_carrier_off(dev);
3218 }
3219 if (adap->flags & FULL_INIT_DONE)
3220 cxgb_down(adap);
3221 rtnl_unlock();
3222 pci_disable_device(pdev);
3223out: return state == pci_channel_io_perm_failure ?
3224 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3225}
3226
3227static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3228{
3229 int i, ret;
3230 struct fw_caps_config_cmd c;
3231 struct adapter *adap = pci_get_drvdata(pdev);
3232
3233 if (!adap) {
3234 pci_restore_state(pdev);
3235 pci_save_state(pdev);
3236 return PCI_ERS_RESULT_RECOVERED;
3237 }
3238
3239 if (pci_enable_device(pdev)) {
3240 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3241 return PCI_ERS_RESULT_DISCONNECT;
3242 }
3243
3244 pci_set_master(pdev);
3245 pci_restore_state(pdev);
3246 pci_save_state(pdev);
3247 pci_cleanup_aer_uncorrect_error_status(pdev);
3248
3249 if (t4_wait_dev_ready(adap) < 0)
3250 return PCI_ERS_RESULT_DISCONNECT;
3251 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
3252 return PCI_ERS_RESULT_DISCONNECT;
3253 adap->flags |= FW_OK;
3254 if (adap_init1(adap, &c))
3255 return PCI_ERS_RESULT_DISCONNECT;
3256
3257 for_each_port(adap, i) {
3258 struct port_info *p = adap2pinfo(adap, i);
3259
3260 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3261 NULL, NULL);
3262 if (ret < 0)
3263 return PCI_ERS_RESULT_DISCONNECT;
3264 p->viid = ret;
3265 p->xact_addr_filt = -1;
3266 }
3267
3268 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3269 adap->params.b_wnd);
3270 setup_memwin(adap);
3271 if (cxgb_up(adap))
3272 return PCI_ERS_RESULT_DISCONNECT;
3273 return PCI_ERS_RESULT_RECOVERED;
3274}
3275
3276static void eeh_resume(struct pci_dev *pdev)
3277{
3278 int i;
3279 struct adapter *adap = pci_get_drvdata(pdev);
3280
3281 if (!adap)
3282 return;
3283
3284 rtnl_lock();
3285 for_each_port(adap, i) {
3286 struct net_device *dev = adap->port[i];
3287
3288 if (netif_running(dev)) {
3289 link_start(dev);
3290 cxgb_set_rxmode(dev);
3291 }
3292 netif_device_attach(dev);
3293 }
3294 rtnl_unlock();
3295}
3296
3297static struct pci_error_handlers cxgb4_eeh = {
3298 .error_detected = eeh_err_detected,
3299 .slot_reset = eeh_slot_reset,
3300 .resume = eeh_resume,
3301};
3302
2899static inline bool is_10g_port(const struct link_config *lc) 3303static inline bool is_10g_port(const struct link_config *lc)
2900{ 3304{
2901 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; 3305 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
@@ -3076,10 +3480,27 @@ static int __devinit enable_msix(struct adapter *adap)
3076 3480
3077#undef EXTRA_VECS 3481#undef EXTRA_VECS
3078 3482
3483static int __devinit init_rss(struct adapter *adap)
3484{
3485 unsigned int i, j;
3486
3487 for_each_port(adap, i) {
3488 struct port_info *pi = adap2pinfo(adap, i);
3489
3490 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3491 if (!pi->rss)
3492 return -ENOMEM;
3493 for (j = 0; j < pi->rss_size; j++)
3494 pi->rss[j] = j % pi->nqsets;
3495 }
3496 return 0;
3497}
3498
3079static void __devinit print_port_info(struct adapter *adap) 3499static void __devinit print_port_info(struct adapter *adap)
3080{ 3500{
3081 static const char *base[] = { 3501 static const char *base[] = {
3082 "R", "KX4", "T", "KX", "T", "KR", "CX4" 3502 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3503 "KX", "KR", "KR SFP+", "KR FEC"
3083 }; 3504 };
3084 3505
3085 int i; 3506 int i;
@@ -3121,7 +3542,31 @@ static void __devinit print_port_info(struct adapter *adap)
3121 } 3542 }
3122} 3543}
3123 3544
3124#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\ 3545/*
3546 * Free the following resources:
3547 * - memory used for tables
3548 * - MSI/MSI-X
3549 * - net devices
3550 * - resources FW is holding for us
3551 */
3552static void free_some_resources(struct adapter *adapter)
3553{
3554 unsigned int i;
3555
3556 t4_free_mem(adapter->l2t);
3557 t4_free_mem(adapter->tids.tid_tab);
3558 disable_msi(adapter);
3559
3560 for_each_port(adapter, i)
3561 if (adapter->port[i]) {
3562 kfree(adap2pinfo(adapter, i)->rss);
3563 free_netdev(adapter->port[i]);
3564 }
3565 if (adapter->flags & FW_OK)
3566 t4_fw_bye(adapter, adapter->fn);
3567}
3568
3569#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3125 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 3570 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3126 3571
3127static int __devinit init_one(struct pci_dev *pdev, 3572static int __devinit init_one(struct pci_dev *pdev,
@@ -3141,10 +3586,12 @@ static int __devinit init_one(struct pci_dev *pdev,
3141 return err; 3586 return err;
3142 } 3587 }
3143 3588
3144 /* We control everything through PF 0 */ 3589 /* We control everything through one PF */
3145 func = PCI_FUNC(pdev->devfn); 3590 func = PCI_FUNC(pdev->devfn);
3146 if (func > 0) 3591 if (func != ent->driver_data) {
3592 pci_save_state(pdev); /* to restore SR-IOV later */
3147 goto sriov; 3593 goto sriov;
3594 }
3148 3595
3149 err = pci_enable_device(pdev); 3596 err = pci_enable_device(pdev);
3150 if (err) { 3597 if (err) {
@@ -3187,6 +3634,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3187 3634
3188 adapter->pdev = pdev; 3635 adapter->pdev = pdev;
3189 adapter->pdev_dev = &pdev->dev; 3636 adapter->pdev_dev = &pdev->dev;
3637 adapter->fn = func;
3190 adapter->name = pci_name(pdev); 3638 adapter->name = pci_name(pdev);
3191 adapter->msg_enable = dflt_msg_enable; 3639 adapter->msg_enable = dflt_msg_enable;
3192 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); 3640 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -3225,7 +3673,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3225 netif_tx_stop_all_queues(netdev); 3673 netif_tx_stop_all_queues(netdev);
3226 netdev->irq = pdev->irq; 3674 netdev->irq = pdev->irq;
3227 3675
3228 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; 3676 netdev->features |= NETIF_F_SG | TSO_FLAGS;
3229 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3677 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3230 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma; 3678 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
3231 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3679 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -3238,7 +3686,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3238 pci_set_drvdata(pdev, adapter); 3686 pci_set_drvdata(pdev, adapter);
3239 3687
3240 if (adapter->flags & FW_OK) { 3688 if (adapter->flags & FW_OK) {
3241 err = t4_port_init(adapter, 0, 0, 0); 3689 err = t4_port_init(adapter, func, func, 0);
3242 if (err) 3690 if (err)
3243 goto out_free_dev; 3691 goto out_free_dev;
3244 } 3692 }
@@ -3262,6 +3710,16 @@ static int __devinit init_one(struct pci_dev *pdev,
3262 adapter->params.offload = 0; 3710 adapter->params.offload = 0;
3263 } 3711 }
3264 3712
3713 /* See what interrupts we'll be using */
3714 if (msi > 1 && enable_msix(adapter) == 0)
3715 adapter->flags |= USING_MSIX;
3716 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3717 adapter->flags |= USING_MSI;
3718
3719 err = init_rss(adapter);
3720 if (err)
3721 goto out_free_dev;
3722
3265 /* 3723 /*
3266 * The card is now ready to go. If any errors occur during device 3724 * The card is now ready to go. If any errors occur during device
3267 * registration we do not fail the whole card but rather proceed only 3725 * registration we do not fail the whole card but rather proceed only
@@ -3297,12 +3755,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3297 setup_debugfs(adapter); 3755 setup_debugfs(adapter);
3298 } 3756 }
3299 3757
3300 /* See what interrupts we'll be using */
3301 if (msi > 1 && enable_msix(adapter) == 0)
3302 adapter->flags |= USING_MSIX;
3303 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3304 adapter->flags |= USING_MSI;
3305
3306 if (is_offload(adapter)) 3758 if (is_offload(adapter))
3307 attach_ulds(adapter); 3759 attach_ulds(adapter);
3308 3760
@@ -3319,13 +3771,7 @@ sriov:
3319 return 0; 3771 return 0;
3320 3772
3321 out_free_dev: 3773 out_free_dev:
3322 t4_free_mem(adapter->tids.tid_tab); 3774 free_some_resources(adapter);
3323 t4_free_mem(adapter->l2t);
3324 for_each_port(adapter, i)
3325 if (adapter->port[i])
3326 free_netdev(adapter->port[i]);
3327 if (adapter->flags & FW_OK)
3328 t4_fw_bye(adapter, 0);
3329 out_unmap_bar: 3775 out_unmap_bar:
3330 iounmap(adapter->regs); 3776 iounmap(adapter->regs);
3331 out_free_adapter: 3777 out_free_adapter:
@@ -3360,16 +3806,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
3360 3806
3361 if (adapter->flags & FULL_INIT_DONE) 3807 if (adapter->flags & FULL_INIT_DONE)
3362 cxgb_down(adapter); 3808 cxgb_down(adapter);
3363 t4_free_mem(adapter->l2t);
3364 t4_free_mem(adapter->tids.tid_tab);
3365 disable_msi(adapter);
3366
3367 for_each_port(adapter, i)
3368 if (adapter->port[i])
3369 free_netdev(adapter->port[i]);
3370 3809
3371 if (adapter->flags & FW_OK) 3810 free_some_resources(adapter);
3372 t4_fw_bye(adapter, 0);
3373 iounmap(adapter->regs); 3811 iounmap(adapter->regs);
3374 kfree(adapter); 3812 kfree(adapter);
3375 pci_disable_pcie_error_reporting(pdev); 3813 pci_disable_pcie_error_reporting(pdev);
@@ -3385,6 +3823,7 @@ static struct pci_driver cxgb4_driver = {
3385 .id_table = cxgb4_pci_tbl, 3823 .id_table = cxgb4_pci_tbl,
3386 .probe = init_one, 3824 .probe = init_one,
3387 .remove = __devexit_p(remove_one), 3825 .remove = __devexit_p(remove_one),
3826 .err_handler = &cxgb4_eeh,
3388}; 3827};
3389 3828
3390static int __init cxgb4_init_module(void) 3829static int __init cxgb4_init_module(void)
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 5b98546ac92d..85d74e751ce0 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -185,8 +185,14 @@ struct cxgb4_virt_res { /* virtualized HW resources */
185 struct cxgb4_range stag; 185 struct cxgb4_range stag;
186 struct cxgb4_range rq; 186 struct cxgb4_range rq;
187 struct cxgb4_range pbl; 187 struct cxgb4_range pbl;
188 struct cxgb4_range qp;
189 struct cxgb4_range cq;
190 struct cxgb4_range ocq;
188}; 191};
189 192
193#define OCQ_WIN_OFFSET(pdev, vres) \
194 (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
195
190/* 196/*
191 * Block of information the LLD provides to ULDs attaching to a device. 197 * Block of information the LLD provides to ULDs attaching to a device.
192 */ 198 */
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index 9f96724a133a..e8f0f55e9d08 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -310,6 +310,13 @@ static void t4_l2e_free(struct l2t_entry *e)
310 neigh_release(e->neigh); 310 neigh_release(e->neigh);
311 e->neigh = NULL; 311 e->neigh = NULL;
312 } 312 }
313 while (e->arpq_head) {
314 struct sk_buff *skb = e->arpq_head;
315
316 e->arpq_head = skb->next;
317 kfree_skb(skb);
318 }
319 e->arpq_tail = NULL;
313 } 320 }
314 spin_unlock_bh(&e->lock); 321 spin_unlock_bh(&e->lock);
315 322
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index d1f8f225e45a..bf38cfc57565 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -938,16 +938,16 @@ out_free: dev_kfree_skb(skb);
938 938
939 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | 939 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
940 FW_WR_IMMDLEN(sizeof(*lso))); 940 FW_WR_IMMDLEN(sizeof(*lso)));
941 lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | 941 lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
942 LSO_FIRST_SLICE | LSO_LAST_SLICE | 942 LSO_FIRST_SLICE | LSO_LAST_SLICE |
943 LSO_IPV6(v6) | 943 LSO_IPV6(v6) |
944 LSO_ETHHDR_LEN(eth_xtra_len / 4) | 944 LSO_ETHHDR_LEN(eth_xtra_len / 4) |
945 LSO_IPHDR_LEN(l3hdr_len / 4) | 945 LSO_IPHDR_LEN(l3hdr_len / 4) |
946 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); 946 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
947 lso->ipid_ofst = htons(0); 947 lso->c.ipid_ofst = htons(0);
948 lso->mss = htons(ssi->gso_size); 948 lso->c.mss = htons(ssi->gso_size);
949 lso->seqno_offset = htonl(0); 949 lso->c.seqno_offset = htonl(0);
950 lso->len = htonl(skb->len); 950 lso->c.len = htonl(skb->len);
951 cpl = (void *)(lso + 1); 951 cpl = (void *)(lso + 1);
952 cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 952 cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
953 TXPKT_IPHDR_LEN(l3hdr_len) | 953 TXPKT_IPHDR_LEN(l3hdr_len) |
@@ -1593,14 +1593,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1593 1593
1594 if (csum_ok && (pi->rx_offload & RX_CSO) && 1594 if (csum_ok && (pi->rx_offload & RX_CSO) &&
1595 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { 1595 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1596 if (!pkt->ip_frag) 1596 if (!pkt->ip_frag) {
1597 skb->ip_summed = CHECKSUM_UNNECESSARY; 1597 skb->ip_summed = CHECKSUM_UNNECESSARY;
1598 else { 1598 rxq->stats.rx_cso++;
1599 } else if (pkt->l2info & htonl(RXF_IP)) {
1599 __sum16 c = (__force __sum16)pkt->csum; 1600 __sum16 c = (__force __sum16)pkt->csum;
1600 skb->csum = csum_unfold(c); 1601 skb->csum = csum_unfold(c);
1601 skb->ip_summed = CHECKSUM_COMPLETE; 1602 skb->ip_summed = CHECKSUM_COMPLETE;
1603 rxq->stats.rx_cso++;
1602 } 1604 }
1603 rxq->stats.rx_cso++;
1604 } else 1605 } else
1605 skb->ip_summed = CHECKSUM_NONE; 1606 skb->ip_summed = CHECKSUM_NONE;
1606 1607
@@ -1718,7 +1719,7 @@ static int process_responses(struct sge_rspq *q, int budget)
1718 free_rx_bufs(q->adap, &rxq->fl, 1); 1719 free_rx_bufs(q->adap, &rxq->fl, 1);
1719 q->offset = 0; 1720 q->offset = 0;
1720 } 1721 }
1721 len &= RSPD_LEN; 1722 len = RSPD_LEN(len);
1722 } 1723 }
1723 si.tot_len = len; 1724 si.tot_len = len;
1724 1725
@@ -1998,7 +1999,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1998 memset(&c, 0, sizeof(c)); 1999 memset(&c, 0, sizeof(c));
1999 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | 2000 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2000 FW_CMD_WRITE | FW_CMD_EXEC | 2001 FW_CMD_WRITE | FW_CMD_EXEC |
2001 FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0)); 2002 FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
2002 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | 2003 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
2003 FW_LEN16(c)); 2004 FW_LEN16(c));
2004 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2005 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
@@ -2030,7 +2031,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2030 c.fl0addr = cpu_to_be64(fl->addr); 2031 c.fl0addr = cpu_to_be64(fl->addr);
2031 } 2032 }
2032 2033
2033 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); 2034 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2034 if (ret) 2035 if (ret)
2035 goto err; 2036 goto err;
2036 2037
@@ -2109,7 +2110,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2109 memset(&c, 0, sizeof(c)); 2110 memset(&c, 0, sizeof(c));
2110 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | 2111 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2111 FW_CMD_WRITE | FW_CMD_EXEC | 2112 FW_CMD_WRITE | FW_CMD_EXEC |
2112 FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0)); 2113 FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2113 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | 2114 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2114 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 2115 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2115 c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); 2116 c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
@@ -2122,7 +2123,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2122 FW_EQ_ETH_CMD_EQSIZE(nentries)); 2123 FW_EQ_ETH_CMD_EQSIZE(nentries));
2123 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 2124 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2124 2125
2125 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); 2126 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2126 if (ret) { 2127 if (ret) {
2127 kfree(txq->q.sdesc); 2128 kfree(txq->q.sdesc);
2128 txq->q.sdesc = NULL; 2129 txq->q.sdesc = NULL;
@@ -2159,7 +2160,8 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2159 2160
2160 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | 2161 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2161 FW_CMD_WRITE | FW_CMD_EXEC | 2162 FW_CMD_WRITE | FW_CMD_EXEC |
2162 FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0)); 2163 FW_EQ_CTRL_CMD_PFN(adap->fn) |
2164 FW_EQ_CTRL_CMD_VFN(0));
2163 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | 2165 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2164 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 2166 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2165 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); 2167 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
@@ -2173,7 +2175,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2173 FW_EQ_CTRL_CMD_EQSIZE(nentries)); 2175 FW_EQ_CTRL_CMD_EQSIZE(nentries));
2174 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 2176 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2175 2177
2176 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); 2178 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2177 if (ret) { 2179 if (ret) {
2178 dma_free_coherent(adap->pdev_dev, 2180 dma_free_coherent(adap->pdev_dev,
2179 nentries * sizeof(struct tx_desc), 2181 nentries * sizeof(struct tx_desc),
@@ -2209,7 +2211,8 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2209 memset(&c, 0, sizeof(c)); 2211 memset(&c, 0, sizeof(c));
2210 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | 2212 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2211 FW_CMD_WRITE | FW_CMD_EXEC | 2213 FW_CMD_WRITE | FW_CMD_EXEC |
2212 FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0)); 2214 FW_EQ_OFLD_CMD_PFN(adap->fn) |
2215 FW_EQ_OFLD_CMD_VFN(0));
2213 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | 2216 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2214 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 2217 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2215 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | 2218 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
@@ -2221,7 +2224,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2221 FW_EQ_OFLD_CMD_EQSIZE(nentries)); 2224 FW_EQ_OFLD_CMD_EQSIZE(nentries));
2222 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 2225 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2223 2226
2224 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); 2227 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2225 if (ret) { 2228 if (ret) {
2226 kfree(txq->q.sdesc); 2229 kfree(txq->q.sdesc);
2227 txq->q.sdesc = NULL; 2230 txq->q.sdesc = NULL;
@@ -2257,8 +2260,8 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2257 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 2260 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2258 2261
2259 adap->sge.ingr_map[rq->cntxt_id] = NULL; 2262 adap->sge.ingr_map[rq->cntxt_id] = NULL;
2260 t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, 2263 t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2261 0xffff); 2264 rq->cntxt_id, fl_id, 0xffff);
2262 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 2265 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2263 rq->desc, rq->phys_addr); 2266 rq->desc, rq->phys_addr);
2264 netif_napi_del(&rq->napi); 2267 netif_napi_del(&rq->napi);
@@ -2295,7 +2298,8 @@ void t4_free_sge_resources(struct adapter *adap)
2295 if (eq->rspq.desc) 2298 if (eq->rspq.desc)
2296 free_rspq_fl(adap, &eq->rspq, &eq->fl); 2299 free_rspq_fl(adap, &eq->rspq, &eq->fl);
2297 if (etq->q.desc) { 2300 if (etq->q.desc) {
2298 t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id); 2301 t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2302 etq->q.cntxt_id);
2299 free_tx_desc(adap, &etq->q, etq->q.in_use, true); 2303 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2300 kfree(etq->q.sdesc); 2304 kfree(etq->q.sdesc);
2301 free_txq(adap, &etq->q); 2305 free_txq(adap, &etq->q);
@@ -2318,7 +2322,8 @@ void t4_free_sge_resources(struct adapter *adap)
2318 2322
2319 if (q->q.desc) { 2323 if (q->q.desc) {
2320 tasklet_kill(&q->qresume_tsk); 2324 tasklet_kill(&q->qresume_tsk);
2321 t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id); 2325 t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2326 q->q.cntxt_id);
2322 free_tx_desc(adap, &q->q, q->q.in_use, false); 2327 free_tx_desc(adap, &q->q, q->q.in_use, false);
2323 kfree(q->q.sdesc); 2328 kfree(q->q.sdesc);
2324 __skb_queue_purge(&q->sendq); 2329 __skb_queue_purge(&q->sendq);
@@ -2332,7 +2337,8 @@ void t4_free_sge_resources(struct adapter *adap)
2332 2337
2333 if (cq->q.desc) { 2338 if (cq->q.desc) {
2334 tasklet_kill(&cq->qresume_tsk); 2339 tasklet_kill(&cq->qresume_tsk);
2335 t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id); 2340 t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2341 cq->q.cntxt_id);
2336 __skb_queue_purge(&cq->sendq); 2342 __skb_queue_purge(&cq->sendq);
2337 free_txq(adap, &cq->q); 2343 free_txq(adap, &cq->q);
2338 } 2344 }
@@ -2400,6 +2406,7 @@ void t4_sge_stop(struct adapter *adap)
2400 */ 2406 */
2401void t4_sge_init(struct adapter *adap) 2407void t4_sge_init(struct adapter *adap)
2402{ 2408{
2409 unsigned int i, v;
2403 struct sge *s = &adap->sge; 2410 struct sge *s = &adap->sge;
2404 unsigned int fl_align_log = ilog2(FL_ALIGN); 2411 unsigned int fl_align_log = ilog2(FL_ALIGN);
2405 2412
@@ -2408,8 +2415,10 @@ void t4_sge_init(struct adapter *adap)
2408 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | 2415 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
2409 RXPKTCPLMODE | 2416 RXPKTCPLMODE |
2410 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); 2417 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
2411 t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK, 2418
2412 HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); 2419 for (i = v = 0; i < 32; i += 4)
2420 v |= (PAGE_SHIFT - 10) << i;
2421 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
2413 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); 2422 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
2414#if FL_PG_ORDER > 0 2423#if FL_PG_ORDER > 0
2415 t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); 2424 t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index da272a98fdbc..9e1a4b49b47a 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -221,6 +221,13 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
221 if ((size & 15) || size > MBOX_LEN) 221 if ((size & 15) || size > MBOX_LEN)
222 return -EINVAL; 222 return -EINVAL;
223 223
224 /*
225 * If the device is off-line, as in EEH, commands will time out.
226 * Fail them early so we don't waste time waiting.
227 */
228 if (adap->pdev->error_state != pci_channel_io_normal)
229 return -EIO;
230
224 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 231 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
225 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 232 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
226 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 233 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
@@ -449,12 +456,10 @@ enum {
449 SF_RD_STATUS = 5, /* read status register */ 456 SF_RD_STATUS = 5, /* read status register */
450 SF_WR_ENABLE = 6, /* enable writes */ 457 SF_WR_ENABLE = 6, /* enable writes */
451 SF_RD_DATA_FAST = 0xb, /* read flash */ 458 SF_RD_DATA_FAST = 0xb, /* read flash */
459 SF_RD_ID = 0x9f, /* read ID */
452 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 460 SF_ERASE_SECTOR = 0xd8, /* erase sector */
453 461
454 FW_START_SEC = 8, /* first flash sector for FW */ 462 FW_MAX_SIZE = 512 * 1024,
455 FW_END_SEC = 15, /* last flash sector for FW */
456 FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
457 FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
458}; 463};
459 464
460/** 465/**
@@ -558,7 +563,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
558{ 563{
559 int ret; 564 int ret;
560 565
561 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) 566 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
562 return -EINVAL; 567 return -EINVAL;
563 568
564 addr = swab32(addr) | SF_RD_DATA_FAST; 569 addr = swab32(addr) | SF_RD_DATA_FAST;
@@ -596,7 +601,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
596 u32 buf[64]; 601 u32 buf[64];
597 unsigned int i, c, left, val, offset = addr & 0xff; 602 unsigned int i, c, left, val, offset = addr & 0xff;
598 603
599 if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE) 604 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
600 return -EINVAL; 605 return -EINVAL;
601 606
602 val = swab32(addr) | SF_PROG_PAGE; 607 val = swab32(addr) | SF_PROG_PAGE;
@@ -614,7 +619,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
614 if (ret) 619 if (ret)
615 goto unlock; 620 goto unlock;
616 } 621 }
617 ret = flash_wait_op(adapter, 5, 1); 622 ret = flash_wait_op(adapter, 8, 1);
618 if (ret) 623 if (ret)
619 goto unlock; 624 goto unlock;
620 625
@@ -647,9 +652,8 @@ unlock:
647 */ 652 */
648static int get_fw_version(struct adapter *adapter, u32 *vers) 653static int get_fw_version(struct adapter *adapter, u32 *vers)
649{ 654{
650 return t4_read_flash(adapter, 655 return t4_read_flash(adapter, adapter->params.sf_fw_start +
651 FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1, 656 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
652 vers, 0);
653} 657}
654 658
655/** 659/**
@@ -661,8 +665,8 @@ static int get_fw_version(struct adapter *adapter, u32 *vers)
661 */ 665 */
662static int get_tp_version(struct adapter *adapter, u32 *vers) 666static int get_tp_version(struct adapter *adapter, u32 *vers)
663{ 667{
664 return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr, 668 return t4_read_flash(adapter, adapter->params.sf_fw_start +
665 tp_microcode_ver), 669 offsetof(struct fw_hdr, tp_microcode_ver),
666 1, vers, 0); 670 1, vers, 0);
667} 671}
668 672
@@ -684,9 +688,9 @@ int t4_check_fw_version(struct adapter *adapter)
684 if (!ret) 688 if (!ret)
685 ret = get_tp_version(adapter, &adapter->params.tp_vers); 689 ret = get_tp_version(adapter, &adapter->params.tp_vers);
686 if (!ret) 690 if (!ret)
687 ret = t4_read_flash(adapter, 691 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
688 FW_IMG_START + offsetof(struct fw_hdr, intfver_nic), 692 offsetof(struct fw_hdr, intfver_nic),
689 2, api_vers, 1); 693 2, api_vers, 1);
690 if (ret) 694 if (ret)
691 return ret; 695 return ret;
692 696
@@ -726,7 +730,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
726 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 730 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
727 (ret = sf1_write(adapter, 4, 0, 1, 731 (ret = sf1_write(adapter, 4, 0, 1,
728 SF_ERASE_SECTOR | (start << 8))) != 0 || 732 SF_ERASE_SECTOR | (start << 8))) != 0 ||
729 (ret = flash_wait_op(adapter, 5, 500)) != 0) { 733 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
730 dev_err(adapter->pdev_dev, 734 dev_err(adapter->pdev_dev,
731 "erase of flash sector %d failed, error %d\n", 735 "erase of flash sector %d failed, error %d\n",
732 start, ret); 736 start, ret);
@@ -754,6 +758,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
754 u8 first_page[SF_PAGE_SIZE]; 758 u8 first_page[SF_PAGE_SIZE];
755 const u32 *p = (const u32 *)fw_data; 759 const u32 *p = (const u32 *)fw_data;
756 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 760 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
761 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
762 unsigned int fw_img_start = adap->params.sf_fw_start;
763 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
757 764
758 if (!size) { 765 if (!size) {
759 dev_err(adap->pdev_dev, "FW image has no data\n"); 766 dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -784,8 +791,8 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
784 return -EINVAL; 791 return -EINVAL;
785 } 792 }
786 793
787 i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */ 794 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
788 ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1); 795 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
789 if (ret) 796 if (ret)
790 goto out; 797 goto out;
791 798
@@ -796,11 +803,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
796 */ 803 */
797 memcpy(first_page, fw_data, SF_PAGE_SIZE); 804 memcpy(first_page, fw_data, SF_PAGE_SIZE);
798 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); 805 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
799 ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page); 806 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
800 if (ret) 807 if (ret)
801 goto out; 808 goto out;
802 809
803 addr = FW_IMG_START; 810 addr = fw_img_start;
804 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 811 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
805 addr += SF_PAGE_SIZE; 812 addr += SF_PAGE_SIZE;
806 fw_data += SF_PAGE_SIZE; 813 fw_data += SF_PAGE_SIZE;
@@ -810,7 +817,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
810 } 817 }
811 818
812 ret = t4_write_flash(adap, 819 ret = t4_write_flash(adap,
813 FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 820 fw_img_start + offsetof(struct fw_hdr, fw_ver),
814 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); 821 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
815out: 822out:
816 if (ret) 823 if (ret)
@@ -1128,6 +1135,7 @@ static void cim_intr_handler(struct adapter *adapter)
1128static void ulprx_intr_handler(struct adapter *adapter) 1135static void ulprx_intr_handler(struct adapter *adapter)
1129{ 1136{
1130 static struct intr_info ulprx_intr_info[] = { 1137 static struct intr_info ulprx_intr_info[] = {
1138 { 0x1800000, "ULPRX context error", -1, 1 },
1131 { 0x7fffff, "ULPRX parity error", -1, 1 }, 1139 { 0x7fffff, "ULPRX parity error", -1, 1 },
1132 { 0 } 1140 { 0 }
1133 }; 1141 };
@@ -1436,7 +1444,7 @@ static void pl_intr_handler(struct adapter *adap)
1436 t4_fatal_err(adap); 1444 t4_fatal_err(adap);
1437} 1445}
1438 1446
1439#define PF_INTR_MASK (PFSW | PFCIM) 1447#define PF_INTR_MASK (PFSW)
1440#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ 1448#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1441 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ 1449 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1442 CPL_SWITCH | SGE | ULP_TX) 1450 CPL_SWITCH | SGE | ULP_TX)
@@ -2510,7 +2518,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2510 c.retval_len16 = htonl(FW_LEN16(c)); 2518 c.retval_len16 = htonl(FW_LEN16(c));
2511 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | 2519 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2512 FW_PFVF_CMD_NIQ(rxq)); 2520 FW_PFVF_CMD_NIQ(rxq));
2513 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | 2521 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2514 FW_PFVF_CMD_PMASK(pmask) | 2522 FW_PFVF_CMD_PMASK(pmask) |
2515 FW_PFVF_CMD_NEQ(txq)); 2523 FW_PFVF_CMD_NEQ(txq));
2516 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | 2524 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
@@ -2572,7 +2580,7 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2572 } 2580 }
2573 if (rss_size) 2581 if (rss_size)
2574 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); 2582 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2575 return ntohs(c.viid_pkd); 2583 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
2576} 2584}
2577 2585
2578/** 2586/**
@@ -2595,7 +2603,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2595 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) | 2603 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2596 FW_VI_CMD_VFN(vf)); 2604 FW_VI_CMD_VFN(vf));
2597 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c)); 2605 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2598 c.viid_pkd = htons(FW_VI_CMD_VIID(viid)); 2606 c.type_viid = htons(FW_VI_CMD_VIID(viid));
2599 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2607 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2600} 2608}
2601 2609
@@ -3045,7 +3053,7 @@ static void __devinit init_link_config(struct link_config *lc,
3045 } 3053 }
3046} 3054}
3047 3055
3048static int __devinit wait_dev_ready(struct adapter *adap) 3056int t4_wait_dev_ready(struct adapter *adap)
3049{ 3057{
3050 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) 3058 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3051 return 0; 3059 return 0;
@@ -3053,6 +3061,33 @@ static int __devinit wait_dev_ready(struct adapter *adap)
3053 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; 3061 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3054} 3062}
3055 3063
3064static int __devinit get_flash_params(struct adapter *adap)
3065{
3066 int ret;
3067 u32 info;
3068
3069 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3070 if (!ret)
3071 ret = sf1_read(adap, 3, 0, 1, &info);
3072 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3073 if (ret)
3074 return ret;
3075
3076 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3077 return -EINVAL;
3078 info >>= 16; /* log2 of size */
3079 if (info >= 0x14 && info < 0x18)
3080 adap->params.sf_nsec = 1 << (info - 16);
3081 else if (info == 0x18)
3082 adap->params.sf_nsec = 64;
3083 else
3084 return -EINVAL;
3085 adap->params.sf_size = 1 << info;
3086 adap->params.sf_fw_start =
3087 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3088 return 0;
3089}
3090
3056/** 3091/**
3057 * t4_prep_adapter - prepare SW and HW for operation 3092 * t4_prep_adapter - prepare SW and HW for operation
3058 * @adapter: the adapter 3093 * @adapter: the adapter
@@ -3066,13 +3101,19 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
3066{ 3101{
3067 int ret; 3102 int ret;
3068 3103
3069 ret = wait_dev_ready(adapter); 3104 ret = t4_wait_dev_ready(adapter);
3070 if (ret < 0) 3105 if (ret < 0)
3071 return ret; 3106 return ret;
3072 3107
3073 get_pci_mode(adapter, &adapter->params.pci); 3108 get_pci_mode(adapter, &adapter->params.pci);
3074 adapter->params.rev = t4_read_reg(adapter, PL_REV); 3109 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3075 3110
3111 ret = get_flash_params(adapter);
3112 if (ret < 0) {
3113 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3114 return ret;
3115 }
3116
3076 ret = get_vpd_params(adapter, &adapter->params.vpd); 3117 ret = get_vpd_params(adapter, &adapter->params.vpd);
3077 if (ret < 0) 3118 if (ret < 0)
3078 return ret; 3119 return ret;
@@ -3092,8 +3133,10 @@ int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3092 u8 addr[6]; 3133 u8 addr[6];
3093 int ret, i, j = 0; 3134 int ret, i, j = 0;
3094 struct fw_port_cmd c; 3135 struct fw_port_cmd c;
3136 struct fw_rss_vi_config_cmd rvc;
3095 3137
3096 memset(&c, 0, sizeof(c)); 3138 memset(&c, 0, sizeof(c));
3139 memset(&rvc, 0, sizeof(rvc));
3097 3140
3098 for_each_port(adap, i) { 3141 for_each_port(adap, i) {
3099 unsigned int rss_size; 3142 unsigned int rss_size;
@@ -3122,12 +3165,22 @@ int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3122 p->rss_size = rss_size; 3165 p->rss_size = rss_size;
3123 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); 3166 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3124 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); 3167 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3168 adap->port[i]->dev_id = j;
3125 3169
3126 ret = ntohl(c.u.info.lstatus_to_modtype); 3170 ret = ntohl(c.u.info.lstatus_to_modtype);
3127 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 3171 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3128 FW_PORT_CMD_MDIOADDR_GET(ret) : -1; 3172 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3129 p->port_type = FW_PORT_CMD_PTYPE_GET(ret); 3173 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3130 p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret); 3174 p->mod_type = FW_PORT_MOD_TYPE_NA;
3175
3176 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
3177 FW_CMD_REQUEST | FW_CMD_READ |
3178 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
3179 rvc.retval_len16 = htonl(FW_LEN16(rvc));
3180 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
3181 if (ret)
3182 return ret;
3183 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
3131 3184
3132 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); 3185 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3133 j++; 3186 j++;
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 025623285c93..10a055565776 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -57,8 +57,6 @@ enum {
57 57
58enum { 58enum {
59 SF_PAGE_SIZE = 256, /* serial flash page size */ 59 SF_PAGE_SIZE = 256, /* serial flash page size */
60 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
61 SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
62}; 60};
63 61
64enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ 62enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -69,6 +67,45 @@ enum {
69 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ 67 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
70 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ 68 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
71 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ 69 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
70
71 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
72 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
73
74 SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
75
76 SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */
77 SGE_INTRDST_IQ = 1, /* destination is an ingress queue */
78
79 SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */
80 SGE_UPDATEDEL_INTR = 1, /* interrupt */
81 SGE_UPDATEDEL_STPG = 2, /* status page */
82 SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */
83
84 SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */
85 SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */
86 SGE_HOSTFCMODE_STPG = 2, /* sent to status page */
87 SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */
88
89 SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
90 SGE_FETCHBURSTMIN_32B = 1,
91 SGE_FETCHBURSTMIN_64B = 2,
92 SGE_FETCHBURSTMIN_128B = 3,
93
94 SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
95 SGE_FETCHBURSTMAX_128B = 1,
96 SGE_FETCHBURSTMAX_256B = 2,
97 SGE_FETCHBURSTMAX_512B = 3,
98
99 SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
100 SGE_CIDXFLUSHTHRESH_2 = 1,
101 SGE_CIDXFLUSHTHRESH_4 = 2,
102 SGE_CIDXFLUSHTHRESH_8 = 3,
103 SGE_CIDXFLUSHTHRESH_16 = 4,
104 SGE_CIDXFLUSHTHRESH_32 = 5,
105 SGE_CIDXFLUSHTHRESH_64 = 6,
106 SGE_CIDXFLUSHTHRESH_128 = 7,
107
108 SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
72}; 109};
73 110
74struct sge_qstat { /* data written to SGE queue status entries */ 111struct sge_qstat { /* data written to SGE queue status entries */
@@ -90,11 +127,13 @@ struct rsp_ctrl {
90}; 127};
91 128
92#define RSPD_NEWBUF 0x80000000U 129#define RSPD_NEWBUF 0x80000000U
93#define RSPD_LEN 0x7fffffffU 130#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
131#define RSPD_QID(x) RSPD_LEN(x)
94 132
95#define RSPD_GEN(x) ((x) >> 7) 133#define RSPD_GEN(x) ((x) >> 7)
96#define RSPD_TYPE(x) (((x) >> 4) & 3) 134#define RSPD_TYPE(x) (((x) >> 4) & 3)
97 135
98#define QINTR_CNT_EN 0x1 136#define QINTR_CNT_EN 0x1
99#define QINTR_TIMER_IDX(x) ((x) << 1) 137#define QINTR_TIMER_IDX(x) ((x) << 1)
138#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
100#endif /* __T4_HW_H */ 139#endif /* __T4_HW_H */
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index 7a981b81afaf..a550d0c706f3 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -443,8 +443,7 @@ struct cpl_tx_pkt {
443 443
444#define cpl_tx_pkt_xt cpl_tx_pkt 444#define cpl_tx_pkt_xt cpl_tx_pkt
445 445
446struct cpl_tx_pkt_lso { 446struct cpl_tx_pkt_lso_core {
447 WR_HDR;
448 __be32 lso_ctrl; 447 __be32 lso_ctrl;
449#define LSO_TCPHDR_LEN(x) ((x) << 0) 448#define LSO_TCPHDR_LEN(x) ((x) << 0)
450#define LSO_IPHDR_LEN(x) ((x) << 4) 449#define LSO_IPHDR_LEN(x) ((x) << 4)
@@ -460,6 +459,12 @@ struct cpl_tx_pkt_lso {
460 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ 459 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
461}; 460};
462 461
462struct cpl_tx_pkt_lso {
463 WR_HDR;
464 struct cpl_tx_pkt_lso_core c;
465 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
466};
467
463struct cpl_iscsi_hdr { 468struct cpl_iscsi_hdr {
464 union opcode_tid ot; 469 union opcode_tid ot;
465 __be16 pdu_len_ddp; 470 __be16 pdu_len_ddp;
@@ -524,6 +529,8 @@ struct cpl_rx_pkt {
524 __be32 l2info; 529 __be32 l2info;
525#define RXF_UDP (1 << 22) 530#define RXF_UDP (1 << 22)
526#define RXF_TCP (1 << 23) 531#define RXF_TCP (1 << 23)
532#define RXF_IP (1 << 24)
533#define RXF_IP6 (1 << 25)
527 __be16 hdr_len; 534 __be16 hdr_len;
528 __be16 err_vec; 535 __be16 err_vec;
529}; 536};
@@ -623,6 +630,11 @@ struct cpl_fw6_msg {
623 __be64 data[4]; 630 __be64 data[4];
624}; 631};
625 632
633/* cpl_fw6_msg.type values */
634enum {
635 FW6_TYPE_CMD_RPL = 0,
636};
637
626enum { 638enum {
627 ULP_TX_MEM_READ = 2, 639 ULP_TX_MEM_READ = 2,
628 ULP_TX_MEM_WRITE = 3, 640 ULP_TX_MEM_WRITE = 3,
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
index 5ed56483cbc2..0adc5bcec7c4 100644
--- a/drivers/net/cxgb4/t4_regs.h
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -93,12 +93,15 @@
93#define PKTSHIFT_MASK 0x00001c00U 93#define PKTSHIFT_MASK 0x00001c00U
94#define PKTSHIFT_SHIFT 10 94#define PKTSHIFT_SHIFT 10
95#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) 95#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
96#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
96#define INGPCIEBOUNDARY_MASK 0x00000380U 97#define INGPCIEBOUNDARY_MASK 0x00000380U
97#define INGPCIEBOUNDARY_SHIFT 7 98#define INGPCIEBOUNDARY_SHIFT 7
98#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) 99#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
99#define INGPADBOUNDARY_MASK 0x00000070U 100#define INGPADBOUNDARY_MASK 0x00000070U
100#define INGPADBOUNDARY_SHIFT 4 101#define INGPADBOUNDARY_SHIFT 4
101#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) 102#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
103#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
104 >> INGPADBOUNDARY_SHIFT)
102#define EGRPCIEBOUNDARY_MASK 0x0000000eU 105#define EGRPCIEBOUNDARY_MASK 0x0000000eU
103#define EGRPCIEBOUNDARY_SHIFT 1 106#define EGRPCIEBOUNDARY_SHIFT 1
104#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) 107#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
@@ -229,6 +232,7 @@
229#define WINDOW_MASK 0x000000ffU 232#define WINDOW_MASK 0x000000ffU
230#define WINDOW_SHIFT 0 233#define WINDOW_SHIFT 0
231#define WINDOW(x) ((x) << WINDOW_SHIFT) 234#define WINDOW(x) ((x) << WINDOW_SHIFT)
235#define PCIE_MEM_ACCESS_OFFSET 0x306c
232 236
233#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 237#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
234#define RNPP 0x80000000U 238#define RNPP 0x80000000U
@@ -326,6 +330,9 @@
326 330
327#define EDC_1_BASE_ADDR 0x7980 331#define EDC_1_BASE_ADDR 0x7980
328 332
333#define CIM_BOOT_CFG 0x7b00
334#define BOOTADDR_MASK 0xffffff00U
335
329#define CIM_PF_MAILBOX_DATA 0x240 336#define CIM_PF_MAILBOX_DATA 0x240
330#define CIM_PF_MAILBOX_CTRL 0x280 337#define CIM_PF_MAILBOX_CTRL 0x280
331#define MBMSGVALID 0x00000008U 338#define MBMSGVALID 0x00000008U
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 63991d68950e..0969f2fbc1b0 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -71,6 +71,7 @@ struct fw_wr_hdr {
71#define FW_WR_ATOMIC(x) ((x) << 23) 71#define FW_WR_ATOMIC(x) ((x) << 23)
72#define FW_WR_FLUSH(x) ((x) << 22) 72#define FW_WR_FLUSH(x) ((x) << 22)
73#define FW_WR_COMPL(x) ((x) << 21) 73#define FW_WR_COMPL(x) ((x) << 21)
74#define FW_WR_IMMDLEN_MASK 0xff
74#define FW_WR_IMMDLEN(x) ((x) << 0) 75#define FW_WR_IMMDLEN(x) ((x) << 0)
75 76
76#define FW_WR_EQUIQ (1U << 31) 77#define FW_WR_EQUIQ (1U << 31)
@@ -447,7 +448,9 @@ enum fw_params_param_dev {
447 FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, 448 FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
448 FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, 449 FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
449 FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, 450 FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
450 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A 451 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
452 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
453 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
451}; 454};
452 455
453/* 456/*
@@ -475,7 +478,15 @@ enum fw_params_param_pfvf {
475 FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, 478 FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
476 FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, 479 FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
477 FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, 480 FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
481 FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
482 FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16,
483 FW_PARAMS_PARAM_PFVF_CQ_START = 0x17,
484 FW_PARAMS_PARAM_PFVF_CQ_END = 0x18,
478 FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, 485 FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
486 FW_PARAMS_PARAM_PFVF_VIID = 0x24,
487 FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
488 FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
489 FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
479}; 490};
480 491
481/* 492/*
@@ -512,7 +523,7 @@ struct fw_pfvf_cmd {
512 __be32 op_to_vfn; 523 __be32 op_to_vfn;
513 __be32 retval_len16; 524 __be32 retval_len16;
514 __be32 niqflint_niq; 525 __be32 niqflint_niq;
515 __be32 cmask_to_neq; 526 __be32 type_to_neq;
516 __be32 tc_to_nexactf; 527 __be32 tc_to_nexactf;
517 __be32 r_caps_to_nethctrl; 528 __be32 r_caps_to_nethctrl;
518 __be16 nricq; 529 __be16 nricq;
@@ -529,11 +540,16 @@ struct fw_pfvf_cmd {
529#define FW_PFVF_CMD_NIQ(x) ((x) << 0) 540#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
530#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) 541#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
531 542
543#define FW_PFVF_CMD_TYPE (1 << 31)
544#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1)
545
532#define FW_PFVF_CMD_CMASK(x) ((x) << 24) 546#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
533#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf) 547#define FW_PFVF_CMD_CMASK_MASK 0xf
548#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK)
534 549
535#define FW_PFVF_CMD_PMASK(x) ((x) << 20) 550#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
536#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf) 551#define FW_PFVF_CMD_PMASK_MASK 0xf
552#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK)
537 553
538#define FW_PFVF_CMD_NEQ(x) ((x) << 0) 554#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
539#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) 555#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
@@ -686,6 +702,7 @@ struct fw_eq_eth_cmd {
686#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) 702#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
687#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) 703#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
688#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) 704#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
705#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
689 706
690#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) 707#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
691#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) 708#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
@@ -804,16 +821,16 @@ struct fw_eq_ofld_cmd {
804struct fw_vi_cmd { 821struct fw_vi_cmd {
805 __be32 op_to_vfn; 822 __be32 op_to_vfn;
806 __be32 alloc_to_len16; 823 __be32 alloc_to_len16;
807 __be16 viid_pkd; 824 __be16 type_viid;
808 u8 mac[6]; 825 u8 mac[6];
809 u8 portid_pkd; 826 u8 portid_pkd;
810 u8 nmac; 827 u8 nmac;
811 u8 nmac0[6]; 828 u8 nmac0[6];
812 __be16 rsssize_pkd; 829 __be16 rsssize_pkd;
813 u8 nmac1[6]; 830 u8 nmac1[6];
814 __be16 r7; 831 __be16 idsiiq_pkd;
815 u8 nmac2[6]; 832 u8 nmac2[6];
816 __be16 r8; 833 __be16 idseiq_pkd;
817 u8 nmac3[6]; 834 u8 nmac3[6];
818 __be64 r9; 835 __be64 r9;
819 __be64 r10; 836 __be64 r10;
@@ -824,13 +841,16 @@ struct fw_vi_cmd {
824#define FW_VI_CMD_ALLOC (1U << 31) 841#define FW_VI_CMD_ALLOC (1U << 31)
825#define FW_VI_CMD_FREE (1U << 30) 842#define FW_VI_CMD_FREE (1U << 30)
826#define FW_VI_CMD_VIID(x) ((x) << 0) 843#define FW_VI_CMD_VIID(x) ((x) << 0)
844#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
827#define FW_VI_CMD_PORTID(x) ((x) << 4) 845#define FW_VI_CMD_PORTID(x) ((x) << 4)
846#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf)
828#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) 847#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
829 848
830/* Special VI_MAC command index ids */ 849/* Special VI_MAC command index ids */
831#define FW_VI_MAC_ADD_MAC 0x3FF 850#define FW_VI_MAC_ADD_MAC 0x3FF
832#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE 851#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
833#define FW_VI_MAC_MAC_BASED_FREE 0x3FD 852#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
853#define FW_CLS_TCAM_NUM_ENTRIES 336
834 854
835enum fw_vi_mac_smac { 855enum fw_vi_mac_smac {
836 FW_VI_MAC_MPS_TCAM_ENTRY, 856 FW_VI_MAC_MPS_TCAM_ENTRY,
@@ -881,6 +901,7 @@ struct fw_vi_rxmode_cmd {
881}; 901};
882 902
883#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) 903#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
904#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff
884#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) 905#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
885#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 906#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
886#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) 907#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
@@ -1136,6 +1157,11 @@ struct fw_port_cmd {
1136 __be32 lstatus_to_modtype; 1157 __be32 lstatus_to_modtype;
1137 __be16 pcap; 1158 __be16 pcap;
1138 __be16 acap; 1159 __be16 acap;
1160 __be16 mtu;
1161 __u8 cbllen;
1162 __u8 r9;
1163 __be32 r10;
1164 __be64 r11;
1139 } info; 1165 } info;
1140 struct fw_port_ppp { 1166 struct fw_port_ppp {
1141 __be32 pppen_to_ncsich; 1167 __be32 pppen_to_ncsich;
@@ -1161,6 +1187,7 @@ struct fw_port_cmd {
1161#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) 1187#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
1162 1188
1163#define FW_PORT_CMD_ACTION(x) ((x) << 16) 1189#define FW_PORT_CMD_ACTION(x) ((x) << 16)
1190#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff)
1164 1191
1165#define FW_PORT_CMD_CTLBF(x) ((x) << 10) 1192#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
1166#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) 1193#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
@@ -1196,14 +1223,17 @@ struct fw_port_cmd {
1196#define FW_PORT_CMD_NCSICH(x) ((x) << 4) 1223#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
1197 1224
1198enum fw_port_type { 1225enum fw_port_type {
1199 FW_PORT_TYPE_FIBER, 1226 FW_PORT_TYPE_FIBER_XFI,
1200 FW_PORT_TYPE_KX4, 1227 FW_PORT_TYPE_FIBER_XAUI,
1201 FW_PORT_TYPE_BT_SGMII, 1228 FW_PORT_TYPE_BT_SGMII,
1202 FW_PORT_TYPE_KX, 1229 FW_PORT_TYPE_BT_XFI,
1203 FW_PORT_TYPE_BT_XAUI, 1230 FW_PORT_TYPE_BT_XAUI,
1204 FW_PORT_TYPE_KR, 1231 FW_PORT_TYPE_KX4,
1205 FW_PORT_TYPE_CX4, 1232 FW_PORT_TYPE_CX4,
1206 FW_PORT_TYPE_TWINAX, 1233 FW_PORT_TYPE_KX,
1234 FW_PORT_TYPE_KR,
1235 FW_PORT_TYPE_SFP,
1236 FW_PORT_TYPE_BP_AP,
1207 1237
1208 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK 1238 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
1209}; 1239};
@@ -1213,6 +1243,9 @@ enum fw_port_module_type {
1213 FW_PORT_MOD_TYPE_LR, 1243 FW_PORT_MOD_TYPE_LR,
1214 FW_PORT_MOD_TYPE_SR, 1244 FW_PORT_MOD_TYPE_SR,
1215 FW_PORT_MOD_TYPE_ER, 1245 FW_PORT_MOD_TYPE_ER,
1246 FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
1247 FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
1248 FW_PORT_MOD_TYPE_LRM,
1216 1249
1217 FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK 1250 FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
1218}; 1251};
@@ -1469,6 +1502,7 @@ struct fw_rss_glb_config_cmd {
1469}; 1502};
1470 1503
1471#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) 1504#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
1505#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf)
1472 1506
1473#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 1507#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
1474#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 1508#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
@@ -1485,13 +1519,14 @@ struct fw_rss_vi_config_cmd {
1485 } manual; 1519 } manual;
1486 struct fw_rss_vi_config_basicvirtual { 1520 struct fw_rss_vi_config_basicvirtual {
1487 __be32 r6; 1521 __be32 r6;
1488 __be32 defaultq_to_ip4udpen; 1522 __be32 defaultq_to_udpen;
1489#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) 1523#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
1524#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff)
1490#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) 1525#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
1491#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) 1526#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
1492#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) 1527#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
1493#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) 1528#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
1494#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0) 1529#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0)
1495 __be64 r9; 1530 __be64 r9;
1496 __be64 r10; 1531 __be64 r10;
1497 } basicvirtual; 1532 } basicvirtual;
diff --git a/drivers/net/cxgb4vf/Makefile b/drivers/net/cxgb4vf/Makefile
new file mode 100644
index 000000000000..d72ee26cb4c7
--- /dev/null
+++ b/drivers/net/cxgb4vf/Makefile
@@ -0,0 +1,7 @@
1#
2# Chelsio T4 SR-IOV Virtual Function Driver
3#
4
5obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
6
7cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
new file mode 100644
index 000000000000..8ea01962e045
--- /dev/null
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -0,0 +1,540 @@
1/*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36/*
37 * This file should not be included directly. Include t4vf_common.h instead.
38 */
39
40#ifndef __CXGB4VF_ADAPTER_H__
41#define __CXGB4VF_ADAPTER_H__
42
43#include <linux/pci.h>
44#include <linux/spinlock.h>
45#include <linux/skbuff.h>
46#include <linux/if_ether.h>
47#include <linux/netdevice.h>
48
49#include "../cxgb4/t4_hw.h"
50
51/*
52 * Constants of the implementation.
53 */
54enum {
55 MAX_NPORTS = 1, /* max # of "ports" */
56 MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */
57 MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,
58
59 /*
60 * MSI-X interrupt index usage.
61 */
62 MSIX_FW = 0, /* MSI-X index for firmware Q */
63 MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */
64 MSIX_EXTRAS = 1,
65 MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
66
67 /*
68 * The maximum number of Ingress and Egress Queues is determined by
69 * the maximum number of "Queue Sets" which we support plus any
70 * ancillary queues. Each "Queue Set" requires one Ingress Queue
71 * for RX Packet Ingress Event notifications and two Egress Queues for
72 * a Free List and an Ethernet TX list.
73 */
74 INGQ_EXTRAS = 2, /* firmware event queue and */
75 /* forwarded interrupts */
76 MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
77 MAX_EGRQ = MAX_ETH_QSETS*2,
78};
79
80/*
81 * Forward structure definition references.
82 */
83struct adapter;
84struct sge_eth_rxq;
85struct sge_rspq;
86
87/*
88 * Per-"port" information. This is really per-Virtual Interface information
89 * but the use of the "port" nomanclature makes it easier to go back and forth
90 * between the PF and VF drivers ...
91 */
92struct port_info {
93 struct adapter *adapter; /* our adapter */
94 struct vlan_group *vlan_grp; /* out VLAN group */
95 u16 viid; /* virtual interface ID */
96 s16 xact_addr_filt; /* index of our MAC address filter */
97 u16 rss_size; /* size of VI's RSS table slice */
98 u8 pidx; /* index into adapter port[] */
99 u8 port_id; /* physical port ID */
100 u8 rx_offload; /* CSO, etc. */
101 u8 nqsets; /* # of "Queue Sets" */
102 u8 first_qset; /* index of first "Queue Set" */
103 struct link_config link_cfg; /* physical port configuration */
104};
105
106/* port_info.rx_offload flags */
107enum {
108 RX_CSO = 1 << 0,
109};
110
111/*
112 * Scatter Gather Engine resources for the "adapter". Our ingress and egress
113 * queues are organized into "Queue Sets" with one ingress and one egress
114 * queue per Queue Set. These Queue Sets are aportionable between the "ports"
115 * (Virtual Interfaces). One extra ingress queue is used to receive
116 * asynchronous messages from the firmware. Note that the "Queue IDs" that we
117 * use here are really "Relative Queue IDs" which are returned as part of the
118 * firmware command to allocate queues. These queue IDs are relative to the
119 * absolute Queue ID base of the section of the Queue ID space allocated to
120 * the PF/VF.
121 */
122
123/*
124 * SGE free-list queue state.
125 */
126struct rx_sw_desc;
127struct sge_fl {
128 unsigned int avail; /* # of available RX buffers */
129 unsigned int pend_cred; /* new buffers since last FL DB ring */
130 unsigned int cidx; /* consumer index */
131 unsigned int pidx; /* producer index */
132 unsigned long alloc_failed; /* # of buffer allocation failures */
133 unsigned long large_alloc_failed;
134 unsigned long starving; /* # of times FL was found starving */
135
136 /*
137 * Write-once/infrequently fields.
138 * -------------------------------
139 */
140
141 unsigned int cntxt_id; /* SGE relative QID for the free list */
142 unsigned int abs_id; /* SGE absolute QID for the free list */
143 unsigned int size; /* capacity of free list */
144 struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */
145 __be64 *desc; /* address of HW RX descriptor ring */
146 dma_addr_t addr; /* PCI bus address of hardware ring */
147};
148
149/*
150 * An ingress packet gather list.
151 */
152struct pkt_gl {
153 skb_frag_t frags[MAX_SKB_FRAGS];
154 void *va; /* virtual address of first byte */
155 unsigned int nfrags; /* # of fragments */
156 unsigned int tot_len; /* total length of fragments */
157};
158
159typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
160 const struct pkt_gl *);
161
162/*
163 * State for an SGE Response Queue.
164 */
165struct sge_rspq {
166 struct napi_struct napi; /* NAPI scheduling control */
167 const __be64 *cur_desc; /* current descriptor in queue */
168 unsigned int cidx; /* consumer index */
169 u8 gen; /* current generation bit */
170 u8 next_intr_params; /* holdoff params for next interrupt */
171 int offset; /* offset into current FL buffer */
172
173 unsigned int unhandled_irqs; /* bogus interrupts */
174
175 /*
176 * Write-once/infrequently fields.
177 * -------------------------------
178 */
179
180 u8 intr_params; /* interrupt holdoff parameters */
181 u8 pktcnt_idx; /* interrupt packet threshold */
182 u8 idx; /* queue index within its group */
183 u16 cntxt_id; /* SGE rel QID for the response Q */
184 u16 abs_id; /* SGE abs QID for the response Q */
185 __be64 *desc; /* address of hardware response ring */
186 dma_addr_t phys_addr; /* PCI bus address of ring */
187 unsigned int iqe_len; /* entry size */
188 unsigned int size; /* capcity of response Q */
189 struct adapter *adapter; /* our adapter */
190 struct net_device *netdev; /* associated net device */
191 rspq_handler_t handler; /* the handler for this response Q */
192};
193
194/*
195 * Ethernet queue statistics
196 */
197struct sge_eth_stats {
198 unsigned long pkts; /* # of ethernet packets */
199 unsigned long lro_pkts; /* # of LRO super packets */
200 unsigned long lro_merged; /* # of wire packets merged by LRO */
201 unsigned long rx_cso; /* # of Rx checksum offloads */
202 unsigned long vlan_ex; /* # of Rx VLAN extractions */
203 unsigned long rx_drops; /* # of packets dropped due to no mem */
204};
205
206/*
207 * State for an Ethernet Receive Queue.
208 */
209struct sge_eth_rxq {
210 struct sge_rspq rspq; /* Response Queue */
211 struct sge_fl fl; /* Free List */
212 struct sge_eth_stats stats; /* receive statistics */
213};
214
215/*
216 * SGE Transmit Queue state. This contains all of the resources associated
217 * with the hardware status of a TX Queue which is a circular ring of hardware
218 * TX Descriptors. For convenience, it also contains a pointer to a parallel
219 * "Software Descriptor" array but we don't know anything about it here other
220 * than its type name.
221 */
222struct tx_desc {
223 /*
224 * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
225 * hardware: Sizes, Producer and Consumer indices, etc.
226 */
227 __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
228};
struct tx_sw_desc;		/* opaque; defined and managed in sge.c */
struct sge_txq {
	/* Hot fields, updated in the TX fast path. */
	unsigned int in_use;		/* # of in-use TX descriptors */
	unsigned int size;		/* # of descriptors */
	unsigned int cidx;		/* SW consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long stops;		/* # of times queue has been stopped */
	unsigned long restarts;		/* # of queue restarts */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	unsigned int cntxt_id;		/* SGE relative QID for the TX Q */
	unsigned int abs_id;		/* SGE absolute QID for the TX Q */
	struct tx_desc *desc;		/* address of HW TX descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW TX descriptor ring */
	struct sge_qstat *stat;		/* queue status entry */
	dma_addr_t phys_addr;		/* PCI bus address of hardware ring */
};
250
/*
 * State for an Ethernet Transmit Queue: the generic SGE TX Queue, the
 * netdev TX queue it is bound to, and per-queue soft TX statistics.
 */
struct sge_eth_txq {
	struct sge_txq q;		/* SGE TX Queue */
	struct netdev_queue *txq;	/* associated netdev TX queue */
	unsigned long tso;		/* # of TSO requests */
	unsigned long tx_cso;		/* # of TX checksum offloads */
	unsigned long vlan_ins;		/* # of TX VLAN insertions */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
};
262
/*
 * The complete set of Scatter/Gather Engine resources.
 *
 * NOTE: the ____cacheline_aligned_in_smp annotations below are part of the
 * layout contract (they keep the two extra ingress queues off the same
 * cache lines as the hot Queue Set arrays); don't reorder these fields
 * casually.
 */
struct sge {
	/*
	 * Our "Queue Sets" ...
	 */
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];

	/*
	 * Extra ingress queues for asynchronous firmware events and
	 * forwarded interrupts (when in MSI mode).
	 */
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	/*
	 * State for managing "starving Free Lists" -- Free Lists which have
	 * fallen below a certain threshold of buffers available to the
	 * hardware and attempts to refill them up to that threshold have
	 * failed.  We have a regular "slow tick" timer process which will
	 * make periodic attempts to refill these starving Free Lists ...
	 */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	struct timer_list rx_timer;

	/*
	 * State for cleaning up completed TX descriptors.
	 */
	struct timer_list tx_timer;

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	u16 max_ethqsets;		/* # of available Ethernet queue sets */
	u16 ethqsets;			/* # of active Ethernet queue sets */
	u16 ethtxq_rover;		/* Tx queue to clean up next */
	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */

	/*
	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
	 * The absolute Queue IDs are in a compact range which start at a
	 * [potentially large] Base Queue ID.  We perform the reverse map by
	 * first converting the Absolute Queue ID into a Relative Queue ID by
	 * subtracting off the Base Queue ID and then use a Relative Queue ID
	 * indexed table to get the pointer to the corresponding software
	 * queue structure.
	 */
	unsigned int egr_base;		/* first absolute Egress Queue ID */
	unsigned int ingr_base;		/* first absolute Ingress Queue ID */
	void *egr_map[MAX_EGRQ];	/* entries point at sge_txq or sge_fl */
	struct sge_rspq *ingr_map[MAX_INGQ];
};
322
/*
 * Utility macros to convert Absolute- to Relative-Queue indices and Egress-
 * and Ingress-Queues.  The EQ_MAP() and IQ_MAP() macros which provide
 * pointers to Ingress- and Egress-Queues can be used as both L- and R-values
 * (i.e. both for installing a mapping and for looking one up).
 */
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))

#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])

/*
 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer: the
 * iteration variable counts Queue Sets, each of which has RX *and* TX
 * queues).
 */
#define for_each_ethrxq(sge, iter) \
	for (iter = 0; iter < (sge)->ethqsets; iter++)
339
/*
 * Per-"adapter" (Virtual Function) information.  One of these exists for
 * each PCI VF; the "ports" are the Virtual Interfaces provisioned on it.
 */
struct adapter {
	/* PCI resources */
	void __iomem *regs;		/* mapped BAR registers */
	struct pci_dev *pdev;
	struct device *pdev_dev;	/* &pdev->dev, for dev_err() et al. */

	/* "adapter" resources */
	unsigned long registered_device_map;	/* bitmap of registered ports */
	unsigned long open_device_map;		/* bitmap of open ports */
	unsigned long flags;			/* FULL_INIT_DONE, USING_MSI*, ... */
	struct adapter_params params;

	/* queue and interrupt resources */
	struct {
		unsigned short vec;	/* IRQ vector number */
		char desc[22];		/* name shown in /proc/interrupts */
	} msix_info[MSIX_ENTRIES];
	struct sge sge;

	/* Linux network device resources */
	struct net_device *port[MAX_NPORTS];
	const char *name;		/* PCI device name */
	unsigned int msg_enable;	/* NETIF_MSG_* verbosity bitmap */

	/* debugfs resources */
	struct dentry *debugfs_root;

	/* various locks */
	spinlock_t stats_lock;		/* serializes firmware stats reads */
};
373
enum { /* adapter flags (struct adapter.flags) */
	FULL_INIT_DONE     = (1UL << 0),	/* one-time queue setup complete */
	USING_MSI          = (1UL << 1),	/* MSI interrupts in use */
	USING_MSIX         = (1UL << 2),	/* MSI-X interrupts in use */
	QUEUES_BOUND       = (1UL << 3),	/* queues bound to interrupts */
};
380
381/*
382 * The following register read/write routine definitions are required by
383 * the common code.
384 */
385
/**
 *	t4_read_reg - read a HW register
 *	@adapter: the adapter
 *	@reg_addr: the register address (byte offset into the mapped BAR)
 *
 *	Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	return readl(adapter->regs + reg_addr);
}
397
/**
 *	t4_write_reg - write a HW register
 *	@adapter: the adapter
 *	@reg_addr: the register address (byte offset into the mapped BAR)
 *	@val: the value to write
 *
 *	Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	writel(val, adapter->regs + reg_addr);
}
410
/*
 * Fallback 64-bit MMIO accessors for platforms that don't provide native
 * readq/writeq (e.g. 32-bit architectures).  Note that these are built from
 * two 32-bit accesses (low word first) and are therefore NOT atomic with
 * respect to the device; only use them on registers where that is safe.
 */
#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif
423
/**
 *	t4_read_reg64 - read a 64-bit HW register
 *	@adapter: the adapter
 *	@reg_addr: the register address (byte offset into the mapped BAR)
 *
 *	Returns the 64-bit value of the given HW register.  May be split
 *	into two 32-bit reads via the readq fallback above.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
	return readq(adapter->regs + reg_addr);
}
435
/**
 *	t4_write_reg64 - write a 64-bit HW register
 *	@adapter: the adapter
 *	@reg_addr: the register address (byte offset into the mapped BAR)
 *	@val: the value to write
 *
 *	Write a 64-bit value into the given HW register.  May be split into
 *	two 32-bit writes via the writeq fallback above.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	writeq(val, adapter->regs + reg_addr);
}
449
/**
 *	port_name - return the string name of a port
 *	@adapter: the adapter
 *	@pidx: the port index (must reference a registered net device)
 *
 *	Return the string name of the selected port's net device.
 */
static inline const char *port_name(struct adapter *adapter, int pidx)
{
	return adapter->port[pidx]->name;
}
461
/**
 *	t4_os_set_hw_addr - store a port's MAC address in SW
 *	@adapter: the adapter
 *	@pidx: the port index
 *	@hw_addr: the Ethernet address (ETH_ALEN bytes)
 *
 *	Store the Ethernet address of the given port in SW.  Called by the
 *	common code when it retrieves a port's Ethernet address from EEPROM.
 *	Both the current (dev_addr) and permanent (perm_addr) addresses are
 *	updated.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
				     u8 hw_addr[])
{
	memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
	memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
}
477
/**
 *	netdev2pinfo - return the port_info structure associated with a net_device
 *	@dev: the netdev
 *
 *	Return the struct port_info associated with a net_device (the
 *	port_info is the netdev's private data area).
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}
488
/**
 *	adap2pinfo - return the port_info of a port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Return the port_info structure for the pidx'th port of the adapter.
 */
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
	return netdev_priv(adapter->port[pidx]);
}
500
/**
 *	netdev2adap - return the adapter structure associated with a net_device
 *	@dev: the netdev
 *
 *	Return the struct adapter associated with a net_device, via the
 *	netdev's port_info.
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}
511
512/*
513 * OS "Callback" function declarations. These are functions that the OS code
514 * is "contracted" to provide for the common code.
515 */
516void t4vf_os_link_changed(struct adapter *, int, int);
517
518/*
519 * SGE function prototype declarations.
520 */
521int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
522 struct net_device *, int,
523 struct sge_fl *, rspq_handler_t);
524int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
525 struct net_device *, struct netdev_queue *,
526 unsigned int);
527void t4vf_free_sge_resources(struct adapter *);
528
529int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
530int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
531 const struct pkt_gl *);
532
533irq_handler_t t4vf_intr_handler(struct adapter *);
534irqreturn_t t4vf_sge_intr_msix(int, void *);
535
536int t4vf_sge_init(struct adapter *);
537void t4vf_sge_start(struct adapter *);
538void t4vf_sge_stop(struct adapter *);
539
540#endif /* __CXGB4VF_ADAPTER_H__ */
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
new file mode 100644
index 000000000000..a16563219ac9
--- /dev/null
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -0,0 +1,2888 @@
1/*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/version.h>
37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/init.h>
40#include <linux/pci.h>
41#include <linux/dma-mapping.h>
42#include <linux/netdevice.h>
43#include <linux/etherdevice.h>
44#include <linux/debugfs.h>
45#include <linux/ethtool.h>
46
47#include "t4vf_common.h"
48#include "t4vf_defs.h"
49
50#include "../cxgb4/t4_regs.h"
51#include "../cxgb4/t4_msg.h"
52
53/*
54 * Generic information about the driver.
55 */
56#define DRV_VERSION "1.0.0"
57#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
58
59/*
60 * Module Parameters.
61 * ==================
62 */
63
64/*
65 * Default ethtool "message level" for adapters.
66 */
67#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
68 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
69 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
70
71static int dflt_msg_enable = DFLT_MSG_ENABLE;
72
73module_param(dflt_msg_enable, int, 0644);
74MODULE_PARM_DESC(dflt_msg_enable,
75 "default adapter ethtool message level bitmap");
76
77/*
78 * The driver uses the best interrupt scheme available on a platform in the
79 * order MSI-X then MSI. This parameter determines which of these schemes the
80 * driver may consider as follows:
81 *
82 * msi = 2: choose from among MSI-X and MSI
83 * msi = 1: only consider MSI interrupts
84 *
85 * Note that unlike the Physical Function driver, this Virtual Function driver
86 * does _not_ support legacy INTx interrupts (this limitation is mandated by
87 * the PCI-E SR-IOV standard).
88 */
89#define MSI_MSIX 2
90#define MSI_MSI 1
91#define MSI_DEFAULT MSI_MSIX
92
93static int msi = MSI_DEFAULT;
94
95module_param(msi, int, 0644);
96MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
97
98/*
99 * Fundamental constants.
100 * ======================
101 */
102
103enum {
104 MAX_TXQ_ENTRIES = 16384,
105 MAX_RSPQ_ENTRIES = 16384,
106 MAX_RX_BUFFERS = 16384,
107
108 MIN_TXQ_ENTRIES = 32,
109 MIN_RSPQ_ENTRIES = 128,
110 MIN_FL_ENTRIES = 16,
111
112 /*
113 * For purposes of manipulating the Free List size we need to
114 * recognize that Free Lists are actually Egress Queues (the host
115 * produces free buffers which the hardware consumes), Egress Queues
116 * indices are all in units of Egress Context Units bytes, and free
117 * list entries are 64-bit PCI DMA addresses. And since the state of
118 * the Producer Index == the Consumer Index implies an EMPTY list, we
119 * always have at least one Egress Unit's worth of Free List entries
120 * unused. See sge.c for more details ...
121 */
122 EQ_UNIT = SGE_EQ_IDXSIZE,
123 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
124 MIN_FL_RESID = FL_PER_EQ_UNIT,
125};
126
127/*
128 * Global driver state.
129 * ====================
130 */
131
132static struct dentry *cxgb4vf_debugfs_root;
133
134/*
135 * OS "Callback" functions.
136 * ========================
137 */
138
139/*
140 * The link status has changed on the indicated "port" (Virtual Interface).
141 */
142void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
143{
144 struct net_device *dev = adapter->port[pidx];
145
146 /*
147 * If the port is disabled or the current recorded "link up"
148 * status matches the new status, just return.
149 */
150 if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
151 return;
152
153 /*
154 * Tell the OS that the link status has changed and print a short
155 * informative message on the console about the event.
156 */
157 if (link_ok) {
158 const char *s;
159 const char *fc;
160 const struct port_info *pi = netdev_priv(dev);
161
162 netif_carrier_on(dev);
163
164 switch (pi->link_cfg.speed) {
165 case SPEED_10000:
166 s = "10Gbps";
167 break;
168
169 case SPEED_1000:
170 s = "1000Mbps";
171 break;
172
173 case SPEED_100:
174 s = "100Mbps";
175 break;
176
177 default:
178 s = "unknown";
179 break;
180 }
181
182 switch (pi->link_cfg.fc) {
183 case PAUSE_RX:
184 fc = "RX";
185 break;
186
187 case PAUSE_TX:
188 fc = "TX";
189 break;
190
191 case PAUSE_RX|PAUSE_TX:
192 fc = "RX/TX";
193 break;
194
195 default:
196 fc = "no";
197 break;
198 }
199
200 printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
201 dev->name, s, fc);
202 } else {
203 netif_carrier_off(dev);
204 printk(KERN_INFO "%s: link down\n", dev->name);
205 }
206}
207
208/*
209 * Net device operations.
210 * ======================
211 */
212
/*
 * Record our new VLAN Group and enable/disable hardware VLAN Tag extraction
 * based on whether the specified VLAN Group pointer is NULL or not.
 *
 * NOTE(review): the -1 arguments presumably mean "leave this rx-mode
 * parameter unchanged" and the final 0 selects a non-sleeping firmware
 * call -- confirm against t4vf_set_rxmode()'s definition.
 */
static void cxgb4vf_vlan_rx_register(struct net_device *dev,
				     struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, grp != NULL, 0);
}
225
/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface): program the MTU, install the MAC address and enable the
 * Virtual Interface in the firmware.  Returns 0 on success or a negative
 * error code.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, -1,
			      true);
	if (ret == 0) {
		/*
		 * t4vf_change_mac() appears to return the (non-negative)
		 * index of the exact-match filter it installed; remember it
		 * for subsequent MAC changes and collapse success to 0.
		 */
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}
259
260/*
261 * Name the MSI-X interrupts.
262 */
263static void name_msix_vecs(struct adapter *adapter)
264{
265 int namelen = sizeof(adapter->msix_info[0].desc) - 1;
266 int pidx;
267
268 /*
269 * Firmware events.
270 */
271 snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
272 "%s-FWeventq", adapter->name);
273 adapter->msix_info[MSIX_FW].desc[namelen] = 0;
274
275 /*
276 * Ethernet queues.
277 */
278 for_each_port(adapter, pidx) {
279 struct net_device *dev = adapter->port[pidx];
280 const struct port_info *pi = netdev_priv(dev);
281 int qs, msi;
282
283 for (qs = 0, msi = MSIX_NIQFLINT;
284 qs < pi->nqsets;
285 qs++, msi++) {
286 snprintf(adapter->msix_info[msi].desc, namelen,
287 "%s-%d", dev->name, qs);
288 adapter->msix_info[msi].desc[namelen] = 0;
289 }
290 }
291}
292
/*
 * Request all of our MSI-X resources: one IRQ for the firmware event queue
 * and one per Ethernet Queue Set.  On failure, every IRQ acquired so far is
 * released and the error from request_irq() is returned.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.  Note that msi is incremented only after a
	 * successful request_irq(), so on failure it indexes one past the
	 * last vector actually acquired.
	 */
	msi = MSIX_NIQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	/*
	 * Unwind in reverse: rxq is the index of the queue whose request
	 * failed, so --rxq/--msi step back through the successfully
	 * requested vectors, then the firmware event IRQ is released.
	 */
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}
330
/*
 * Free our MSI-X resources: the firmware event queue IRQ and one IRQ per
 * Ethernet Queue Set, in the same order they were requested.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_NIQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}
345
/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts (CIDXINC(0) advances the consumer index by
	 * nothing; SEINTARM arms the interrupt with the queue's holdoff
	 * parameters).
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC(0) |
		     SEINTARM(rspq->intr_params) |
		     INGRESSQID(rspq->cntxt_id));
}
362
/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues:
 * every Ethernet Queue Set's response queue plus the firmware event queue.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.  (The intrq
	 * only exists in MSI mode, where it collects forwarded interrupts
	 * from the other ingress queues.)
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC(0) |
			     SEINTARM(s->intrq.intr_params) |
			     INGRESSQID(s->intrq.cntxt_id));

}
386
/*
 * Wait until all NAPI handlers are descheduled.  napi_disable() blocks
 * until any in-flight poll of the queue has completed.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}
399
/*
 * Response queue handler for the firmware event queue.  Dispatches on the
 * CPL opcode in the RSS header preceding the message body.  Always returns
 * 0; unexpected opcodes are logged and dropped.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 * Only command replies are handled here; other FW6 message
		 * types are silently ignored.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message.  We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = (void *)cpl;
		unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}
484
/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initializes them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.  On any failure,
 * everything allocated so far is torn down via t4vf_free_sge_resources().
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...  Note that msix advances continuously across
	 * ports, matching the request_irq() order used later.
	 */
	msix = MSIX_NIQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.  The Base Queue IDs
	 * are derived from the first queue set's absolute/relative ID pair.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}
592
/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.  Returns 0 or a negative firmware
 * error code.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		/* The RSS table entries are the absolute Queue IDs of this
		 * port's response queues. */
		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.  (Only
		 * the Basic Virtual mode needs extra work; other modes fall
		 * through with no additional configuration.)
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;
				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}
652
/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)  Returns 0 or a negative
 * error code.
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			/* undo the queue allocation so the next attempt
			 * starts from a clean slate */
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);
	return 0;
}
707
/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
 * Driver.)  Undoes the interrupt acquisition and NAPI enabling done by
 * adapter_up(); the SGE queues themselves persist (FULL_INIT_DONE).
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}
728
729/*
730 * Start up a net device.
731 */
732static int cxgb4vf_open(struct net_device *dev)
733{
734 int err;
735 struct port_info *pi = netdev_priv(dev);
736 struct adapter *adapter = pi->adapter;
737
738 /*
739 * If this is the first interface that we're opening on the "adapter",
740 * bring the "adapter" up now.
741 */
742 if (adapter->open_device_map == 0) {
743 err = adapter_up(adapter);
744 if (err)
745 return err;
746 }
747
748 /*
749 * Note that this interface is up and start everything up ...
750 */
751 dev->real_num_tx_queues = pi->nqsets;
752 set_bit(pi->port_id, &adapter->open_device_map);
753 link_start(dev);
754 netif_tx_start_all_queues(dev);
755 return 0;
756}
757
758/*
759 * Shut down a net device. This routine is called "cxgb_close" in the PF
760 * Driver ...
761 */
762static int cxgb4vf_stop(struct net_device *dev)
763{
764 int ret;
765 struct port_info *pi = netdev_priv(dev);
766 struct adapter *adapter = pi->adapter;
767
768 netif_tx_stop_all_queues(dev);
769 netif_carrier_off(dev);
770 ret = t4vf_enable_vi(adapter, pi->viid, false, false);
771 pi->link_cfg.link_ok = 0;
772
773 clear_bit(pi->port_id, &adapter->open_device_map);
774 if (adapter->open_device_map == 0)
775 adapter_down(adapter);
776 return 0;
777}
778
/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 * Reads the firmware port statistics under stats_lock; on error the netdev
 * stats are left zeroed rather than reporting stale data.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	/* Fold the per-class (broadcast/multicast/unicast/offload)
	 * firmware counters into the aggregate ifconfig counters. */
	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}
812
813/*
814 * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
815 * array of addrss pointers and return the number collected.
816 */
817static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
818 const u8 **addr,
819 unsigned int maxaddrs)
820{
821 unsigned int naddr = 0;
822 const struct netdev_hw_addr *ha;
823
824 for_each_dev_addr(dev, ha) {
825 addr[naddr++] = ha->addr;
826 if (naddr >= maxaddrs)
827 break;
828 }
829 return naddr;
830}
831
832/*
833 * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
834 * array of addrss pointers and return the number collected.
835 */
836static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
837 const u8 **addr,
838 unsigned int maxaddrs)
839{
840 unsigned int naddr = 0;
841 const struct netdev_hw_addr *ha;
842
843 netdev_for_each_mc_addr(ha, dev) {
844 addr[naddr++] = ha->addr;
845 if (naddr >= maxaddrs)
846 break;
847 }
848 return naddr;
849}
850
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 *
 * Returns 0 on success or a negative error from one of the firmware calls.
 * Note the filter arrays are sized at 7 entries; any addresses beyond that
 * fall into the hash filters returned via uhash/mhash.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	/*
	 * "free" asks the firmware to release previously allocated exact
	 * filters on the FIRST allocation call only; it is cleared after
	 * the unicast call so the multicast call below doesn't throw away
	 * the unicast filters just installed.
	 */
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct port_info *pi = netdev_priv(dev);

	/* first do the secondary unicast addresses */
	naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
	if (naddr > 0) {
		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
					  naddr, addr, filt_idx, &uhash, sleep);
		if (ret < 0)
			return ret;

		free = false;
	}

	/* next set up the multicast addresses */
	naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
	if (naddr > 0) {
		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
					  naddr, addr, filt_idx, &mhash, sleep);
		if (ret < 0)
			return ret;
	}

	/* finally program the combined unicast/multicast hash filter */
	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
				  uhash | mhash, sleep);
}
888
889/*
890 * Set RX properties of a port, such as promiscruity, address filters, and MTU.
891 * If @mtu is -1 it is left unchanged.
892 */
893static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
894{
895 int ret;
896 struct port_info *pi = netdev_priv(dev);
897
898 ret = set_addr_filters(dev, sleep_ok);
899 if (ret == 0)
900 ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
901 (dev->flags & IFF_PROMISC) != 0,
902 (dev->flags & IFF_ALLMULTI) != 0,
903 1, -1, sleep_ok);
904 return ret;
905}
906
907/*
908 * Set the current receive modes on the device.
909 */
910static void cxgb4vf_set_rxmode(struct net_device *dev)
911{
912 /* unfortunately we can't return errors to the stack */
913 set_rxmode(dev, -1, false);
914}
915
916/*
917 * Find the entry in the interrupt holdoff timer value array which comes
918 * closest to the specified interrupt holdoff value.
919 */
920static int closest_timer(const struct sge *s, int us)
921{
922 int i, timer_idx = 0, min_delta = INT_MAX;
923
924 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
925 int delta = us - s->timer_val[i];
926 if (delta < 0)
927 delta = -delta;
928 if (delta < min_delta) {
929 min_delta = delta;
930 timer_idx = i;
931 }
932 }
933 return timer_idx;
934}
935
936static int closest_thres(const struct sge *s, int thres)
937{
938 int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
939
940 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
941 delta = thres - s->counter_val[i];
942 if (delta < 0)
943 delta = -delta;
944 if (delta < min_delta) {
945 min_delta = delta;
946 pktcnt_idx = i;
947 }
948 }
949 return pktcnt_idx;
950}
951
952/*
953 * Return a queue's interrupt hold-off time in us. 0 means no timer.
954 */
955static unsigned int qtimer_val(const struct adapter *adapter,
956 const struct sge_rspq *rspq)
957{
958 unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
959
960 return timer_idx < SGE_NTIMERS
961 ? adapter->sge.timer_val[timer_idx]
962 : 0;
963}
964
/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adapter: the adapter
 *	@rspq: the RX response queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an RX response queue's interrupt hold-off time and packet count.
 *	At least one of the two needs to be enabled for the queue to generate
 *	interrupts.
 *
 *	Returns 0 on success or a negative error from t4vf_set_params().
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		/*
		 * Only issue the firmware command when the queue exists and
		 * the index actually changes; otherwise just record it so
		 * the queue is created with the right value later.
		 */
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.  us == 0 selects the special "restart counter" entry.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
			     (cnt > 0 ? QINTR_CNT_EN : 0));
	return 0;
}
1027
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/* chip version 4, revision 0x3f (cxgb4vf) */
	const unsigned int chip_version = 4;
	const unsigned int chip_revision = 0x3f;

	return chip_version | (chip_revision << 10);
}
1040
1041/*
1042 * Execute the specified ioctl command.
1043 */
1044static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1045{
1046 int ret = 0;
1047
1048 switch (cmd) {
1049 /*
1050 * The VF Driver doesn't have access to any of the other
1051 * common Ethernet device ioctl()'s (like reading/writing
1052 * PHY registers, etc.
1053 */
1054
1055 default:
1056 ret = -EOPNOTSUPP;
1057 break;
1058 }
1059 return ret;
1060}
1061
1062/*
1063 * Change the device's MTU.
1064 */
1065static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1066{
1067 int ret;
1068 struct port_info *pi = netdev_priv(dev);
1069
1070 /* accommodate SACK */
1071 if (new_mtu < 81)
1072 return -EINVAL;
1073
1074 ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1075 -1, -1, -1, -1, true);
1076 if (!ret)
1077 dev->mtu = new_mtu;
1078 return ret;
1079}
1080
1081/*
1082 * Change the devices MAC address.
1083 */
1084static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1085{
1086 int ret;
1087 struct sockaddr *addr = _addr;
1088 struct port_info *pi = netdev_priv(dev);
1089
1090 if (!is_valid_ether_addr(addr->sa_data))
1091 return -EINVAL;
1092
1093 ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
1094 addr->sa_data, true);
1095 if (ret < 0)
1096 return ret;
1097
1098 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1099 pi->xact_addr_filt = ret;
1100 return 0;
1101}
1102
1103/*
1104 * Return a TX Queue on which to send the specified skb.
1105 */
1106static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
1107{
1108 /*
1109 * XXX For now just use the default hash but we probably want to
1110 * XXX look at other possibilities ...
1111 */
1112 return skb_tx_hash(dev, skb);
1113}
1114
1115#ifdef CONFIG_NET_POLL_CONTROLLER
1116/*
1117 * Poll all of our receive queues. This is called outside of normal interrupt
1118 * context.
1119 */
1120static void cxgb4vf_poll_controller(struct net_device *dev)
1121{
1122 struct port_info *pi = netdev_priv(dev);
1123 struct adapter *adapter = pi->adapter;
1124
1125 if (adapter->flags & USING_MSIX) {
1126 struct sge_eth_rxq *rxq;
1127 int nqsets;
1128
1129 rxq = &adapter->sge.ethrxq[pi->first_qset];
1130 for (nqsets = pi->nqsets; nqsets; nqsets--) {
1131 t4vf_sge_intr_msix(0, &rxq->rspq);
1132 rxq++;
1133 }
1134 } else
1135 t4vf_intr_handler(adapter)(0, adapter);
1136}
1137#endif
1138
1139/*
1140 * Ethtool operations.
1141 * ===================
1142 *
1143 * Note that we don't support any ethtool operations which change the physical
1144 * state of the port to which we're linked.
1145 */
1146
1147/*
1148 * Return current port link settings.
1149 */
1150static int cxgb4vf_get_settings(struct net_device *dev,
1151 struct ethtool_cmd *cmd)
1152{
1153 const struct port_info *pi = netdev_priv(dev);
1154
1155 cmd->supported = pi->link_cfg.supported;
1156 cmd->advertising = pi->link_cfg.advertising;
1157 cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
1158 cmd->duplex = DUPLEX_FULL;
1159
1160 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1161 cmd->phy_address = pi->port_id;
1162 cmd->transceiver = XCVR_EXTERNAL;
1163 cmd->autoneg = pi->link_cfg.autoneg;
1164 cmd->maxtxpkt = 0;
1165 cmd->maxrxpkt = 0;
1166 return 0;
1167}
1168
1169/*
1170 * Return our driver information.
1171 */
1172static void cxgb4vf_get_drvinfo(struct net_device *dev,
1173 struct ethtool_drvinfo *drvinfo)
1174{
1175 struct adapter *adapter = netdev2adap(dev);
1176
1177 strcpy(drvinfo->driver, KBUILD_MODNAME);
1178 strcpy(drvinfo->version, DRV_VERSION);
1179 strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
1180 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1181 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1182 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
1183 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
1184 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
1185 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
1186 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
1187 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
1188 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
1189 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
1190}
1191
/*
 * Return current adapter message level (a bitmask of NETIF_MSG_* flags
 * shared by all ports on the adapter).
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}
1199
/*
 * Set current adapter message level.  Note that this is adapter-wide:
 * setting it through any one port affects all ports on the adapter.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}
1207
1208/*
1209 * Return the device's current Queue Set ring size parameters along with the
1210 * allowed maximum values. Since ethtool doesn't understand the concept of
1211 * multi-queue devices, we just return the current values associated with the
1212 * first Queue Set.
1213 */
1214static void cxgb4vf_get_ringparam(struct net_device *dev,
1215 struct ethtool_ringparam *rp)
1216{
1217 const struct port_info *pi = netdev_priv(dev);
1218 const struct sge *s = &pi->adapter->sge;
1219
1220 rp->rx_max_pending = MAX_RX_BUFFERS;
1221 rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1222 rp->rx_jumbo_max_pending = 0;
1223 rp->tx_max_pending = MAX_TXQ_ENTRIES;
1224
1225 rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1226 rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1227 rp->rx_jumbo_pending = 0;
1228 rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1229}
1230
1231/*
1232 * Set the Queue Set ring size parameters for the device. Again, since
1233 * ethtool doesn't allow for the concept of multiple queues per device, we'll
1234 * apply these new values across all of the Queue Sets associated with the
1235 * device -- after vetting them of course!
1236 */
1237static int cxgb4vf_set_ringparam(struct net_device *dev,
1238 struct ethtool_ringparam *rp)
1239{
1240 const struct port_info *pi = netdev_priv(dev);
1241 struct adapter *adapter = pi->adapter;
1242 struct sge *s = &adapter->sge;
1243 int qs;
1244
1245 if (rp->rx_pending > MAX_RX_BUFFERS ||
1246 rp->rx_jumbo_pending ||
1247 rp->tx_pending > MAX_TXQ_ENTRIES ||
1248 rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1249 rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1250 rp->rx_pending < MIN_FL_ENTRIES ||
1251 rp->tx_pending < MIN_TXQ_ENTRIES)
1252 return -EINVAL;
1253
1254 if (adapter->flags & FULL_INIT_DONE)
1255 return -EBUSY;
1256
1257 for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1258 s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1259 s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1260 s->ethtxq[qs].q.size = rp->tx_pending;
1261 }
1262 return 0;
1263}
1264
1265/*
1266 * Return the interrupt holdoff timer and count for the first Queue Set on the
1267 * device. Our extension ioctl() (the cxgbtool interface) allows the
1268 * interrupt holdoff timer to be read on all of the device's Queue Sets.
1269 */
1270static int cxgb4vf_get_coalesce(struct net_device *dev,
1271 struct ethtool_coalesce *coalesce)
1272{
1273 const struct port_info *pi = netdev_priv(dev);
1274 const struct adapter *adapter = pi->adapter;
1275 const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1276
1277 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1278 coalesce->rx_max_coalesced_frames =
1279 ((rspq->intr_params & QINTR_CNT_EN)
1280 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1281 : 0);
1282 return 0;
1283}
1284
1285/*
1286 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1287 * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
1288 * the interrupt holdoff timer on any of the device's Queue Sets.
1289 */
1290static int cxgb4vf_set_coalesce(struct net_device *dev,
1291 struct ethtool_coalesce *coalesce)
1292{
1293 const struct port_info *pi = netdev_priv(dev);
1294 struct adapter *adapter = pi->adapter;
1295
1296 return set_rxq_intr_params(adapter,
1297 &adapter->sge.ethrxq[pi->first_qset].rspq,
1298 coalesce->rx_coalesce_usecs,
1299 coalesce->rx_max_coalesced_frames);
1300}
1301
1302/*
1303 * Report current port link pause parameter settings.
1304 */
1305static void cxgb4vf_get_pauseparam(struct net_device *dev,
1306 struct ethtool_pauseparam *pauseparam)
1307{
1308 struct port_info *pi = netdev_priv(dev);
1309
1310 pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1311 pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
1312 pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
1313}
1314
1315/*
1316 * Return whether RX Checksum Offloading is currently enabled for the device.
1317 */
1318static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
1319{
1320 struct port_info *pi = netdev_priv(dev);
1321
1322 return (pi->rx_offload & RX_CSO) != 0;
1323}
1324
1325/*
1326 * Turn RX Checksum Offloading on or off for the device.
1327 */
1328static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
1329{
1330 struct port_info *pi = netdev_priv(dev);
1331
1332 if (csum)
1333 pi->rx_offload |= RX_CSO;
1334 else
1335 pi->rx_offload &= ~RX_CSO;
1336 return 0;
1337}
1338
/*
 * Identify the port by blinking the port's LED.
 *
 * NOTE(review): the requested duration "id" is ignored; the firmware is
 * always asked to blink the LED 5 times.
 */
static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
{
	struct port_info *pi = netdev_priv(dev);

	return t4vf_identify_port(pi->adapter, pi->viid, 5);
}
1348
/*
 * Port stats maintained per queue of the port.  These are accumulated by
 * collect_sge_port_stats() across all of a port's Queue Sets.  The field
 * order must match the trailing per-queue entries of stats_strings[].
 */
struct queue_port_stats {
	u64 tso;	/* TCP segmentation offload requests */
	u64 tx_csum;	/* TX checksum offloads */
	u64 rx_csum;	/* RX good checksums */
	u64 vlan_ex;	/* VLAN extractions */
	u64 vlan_ins;	/* VLAN insertions */
};
1359
/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats() -- cxgb4vf_get_ethtool_stats() copies both
 * structures directly into the u64 data array.
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes  ",
	"TxBroadcastFrames ",
	"TxMulticastBytes  ",
	"TxMulticastFrames ",
	"TxUnicastBytes    ",
	"TxUnicastFrames   ",
	"TxDroppedFrames   ",
	"TxOffloadBytes    ",
	"TxOffloadFrames   ",
	"RxBroadcastBytes  ",
	"RxBroadcastFrames ",
	"RxMulticastBytes  ",
	"RxMulticastFrames ",
	"RxUnicastBytes    ",
	"RxUnicastFrames   ",
	"RxErrorFrames     ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO               ",
	"TxCsumOffload     ",
	"RxCsumGood        ",
	"VLANextractions   ",
	"VLANinsertions    ",
};
1396
1397/*
1398 * Return the number of statistics in the specified statistics set.
1399 */
1400static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1401{
1402 switch (sset) {
1403 case ETH_SS_STATS:
1404 return ARRAY_SIZE(stats_strings);
1405 default:
1406 return -EOPNOTSUPP;
1407 }
1408 /*NOTREACHED*/
1409}
1410
1411/*
1412 * Return the strings for the specified statistics set.
1413 */
1414static void cxgb4vf_get_strings(struct net_device *dev,
1415 u32 sset,
1416 u8 *data)
1417{
1418 switch (sset) {
1419 case ETH_SS_STATS:
1420 memcpy(data, stats_strings, sizeof(stats_strings));
1421 break;
1422 }
1423}
1424
1425/*
1426 * Small utility routine to accumulate queue statistics across the queues of
1427 * a "port".
1428 */
1429static void collect_sge_port_stats(const struct adapter *adapter,
1430 const struct port_info *pi,
1431 struct queue_port_stats *stats)
1432{
1433 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1434 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1435 int qs;
1436
1437 memset(stats, 0, sizeof(*stats));
1438 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1439 stats->tso += txq->tso;
1440 stats->tx_csum += txq->tx_cso;
1441 stats->rx_csum += rxq->stats.rx_cso;
1442 stats->vlan_ex += rxq->stats.vlan_ex;
1443 stats->vlan_ins += txq->vlan_ins;
1444 }
1445}
1446
/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	/* the firmware port stats are written straight into the u64 array */
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);
	/* on failure report zeros rather than stale buffer contents */
	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

	/*
	 * The per-queue statistics follow directly after the port stats;
	 * this layout must match stats_strings[] above.
	 */
	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
1464
/*
 * Return the size of our register map (in bytes) for "ethtool -d".
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}
1472
/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 *
 * @regbuf points at the start of the full register map buffer; the block's
 * position within it is computed from @start's offset from the start of
 * the VF register map.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0xffff;	/* sentinel for the skipped register */
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}
1493
/*
 * Copy our entire register map into the provided buffer (for "ethtool -d").
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	/* tag the dump with the adapter type/revision */
	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.  Zero first so
	 * gaps between the dumped blocks read as zero.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}
1527
1528/*
1529 * Report current Wake On LAN settings.
1530 */
1531static void cxgb4vf_get_wol(struct net_device *dev,
1532 struct ethtool_wolinfo *wol)
1533{
1534 wol->supported = 0;
1535 wol->wolopts = 0;
1536 memset(&wol->sopass, 0, sizeof(wol->sopass));
1537}
1538
1539/*
1540 * Set TCP Segmentation Offloading feature capabilities.
1541 */
1542static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
1543{
1544 if (tso)
1545 dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1546 else
1547 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1548 return 0;
1549}
1550
/*
 * Ethtool operation table for the VF driver.  Operations which would change
 * the physical state of the port (speed, autoneg, etc.) are intentionally
 * absent; generic ethtool_op_* helpers are used where they suffice.
 */
static struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_settings		= cxgb4vf_get_settings,
	.get_drvinfo		= cxgb4vf_get_drvinfo,
	.get_msglevel		= cxgb4vf_get_msglevel,
	.set_msglevel		= cxgb4vf_set_msglevel,
	.get_ringparam		= cxgb4vf_get_ringparam,
	.set_ringparam		= cxgb4vf_set_ringparam,
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
	.get_rx_csum		= cxgb4vf_get_rx_csum,
	.set_rx_csum		= cxgb4vf_set_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_ipv6_csum,
	.set_sg			= ethtool_op_set_sg,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
	.phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
	.set_tso		= cxgb4vf_set_tso,
};
1575
1576/*
1577 * /sys/kernel/debug/cxgb4vf support code and data.
1578 * ================================================
1579 */
1580
/*
 * Show SGE Queue Set information.  We display QPL Queues Sets per line.
 */
#define QPL	4

static int sge_qinfo_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	/* v is the 1-based record cookie from sge_queue_start()/next() */
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

	/*
	 * Helper macros: print a row label followed by one column per Queue
	 * Set.  They rely on the locals "n" (columns), "qs" (column index),
	 * "txq" and "rxq" being in scope at the expansion site.
	 */
	#define S3(fmt_spec, s, v) \
		do {\
			seq_printf(seq, "%-12s", s); \
			for (qs = 0; qs < n; ++qs) \
				seq_printf(seq, " %16" fmt_spec, v); \
			seq_putc(seq, '\n'); \
		} while (0)
	#define S(s, v)		S3("s", s, v)
	#define T(s, v)		S3("u", s, txq[qs].v)
	#define R(s, v)		S3("u", s, rxq[qs].v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		/* the last row of Queue Sets may be partially populated */
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		S3("d", "Port:",
		   (rxq[qs].rspq.netdev
		    ? ((struct port_info *)
		       netdev_priv(rxq[qs].rspq.netdev))->port_id
		    : -1));
		T("TxQ ID:", q.abs_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ PIdx:", q.pidx);
		T("TxQ CIdx:", q.cidx);
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
		S3("u", "Intr pktcnt:",
		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
		R("RspQ CIdx:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		R("FL ID:", fl.abs_id);
		R("FL size:", fl.size - MIN_FL_RESID);
		R("FL avail:", fl.avail);
		R("FL PIdx:", fl.pidx);
		R("FL CIdx:", fl.cidx);
		return 0;
	}

	/* records after the Ethernet Queue Sets: FW event queue, then the
	 * forwarded interrupt queue (present only in MSI mode) */
	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, evtq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[evtq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, intrq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[intrq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
	}

	#undef R
	#undef T
	#undef S
	#undef S3

	return 0;
}
1674
1675/*
1676 * Return the number of "entries" in our "file". We group the multi-Queue
1677 * sections with QPL Queue Sets per "entry". The sections of the output are:
1678 *
1679 * Ethernet RX/TX Queue Sets
1680 * Firmware Event Queue
1681 * Forwarded Interrupt Queue (if in MSI mode)
1682 */
1683static int sge_queue_entries(const struct adapter *adapter)
1684{
1685 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1686 ((adapter->flags & USING_MSI) != 0);
1687}
1688
1689static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1690{
1691 int entries = sge_queue_entries(seq->private);
1692
1693 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1694}
1695
/* seq_file stop: nothing to release -- no locks or state held across show */
static void sge_queue_stop(struct seq_file *seq, void *v)
{
}
1699
1700static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1701{
1702 int entries = sge_queue_entries(seq->private);
1703
1704 ++*pos;
1705 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1706}
1707
/* seq_file iterator for the "sge_qinfo" debugfs file */
static const struct seq_operations sge_qinfo_seq_ops = {
	.start = sge_queue_start,
	.next  = sge_queue_next,
	.stop  = sge_queue_stop,
	.show  = sge_qinfo_show
};
1714
1715static int sge_qinfo_open(struct inode *inode, struct file *file)
1716{
1717 int res = seq_open(file, &sge_qinfo_seq_ops);
1718
1719 if (!res) {
1720 struct seq_file *seq = file->private_data;
1721 seq->private = inode->i_private;
1722 }
1723 return res;
1724}
1725
/* file operations for the "sge_qinfo" debugfs file */
static const struct file_operations sge_qinfo_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qinfo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
1733
/*
 * Show SGE Queue Set statistics.  We display QPL Queues Sets per line.
 */
/* NOTE(review): QPL is already defined identically above; redefining a
 * macro with the same replacement list is well-defined, but an #undef
 * here would be tidier. */
#define QPL	4

static int sge_qstats_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	/* v is the 1-based record cookie from sge_qstats_start()/next() */
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

	/*
	 * Helper macros: print a row label followed by one column per Queue
	 * Set.  They rely on the locals "n", "qs", "txq" and "rxq" being in
	 * scope at the expansion site.
	 */
	#define S3(fmt, s, v) \
		do { \
			seq_printf(seq, "%-16s", s); \
			for (qs = 0; qs < n; ++qs) \
				seq_printf(seq, " %8" fmt, v); \
			seq_putc(seq, '\n'); \
		} while (0)
	#define S(s, v)		S3("s", s, v)

	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
	#define T(s, v)		T3("lu", s, v)

	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
	#define R(s, v)		R3("lu", s, v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		/* the last row of Queue Sets may be partially populated */
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
		R("RxPackets:", stats.pkts);
		R("RxCSO:", stats.rx_cso);
		R("VLANxtract:", stats.vlan_ex);
		R("LROmerged:", stats.lro_merged);
		R("LROpackets:", stats.lro_pkts);
		R("RxDrops:", stats.rx_drops);
		T("TSO:", tso);
		T("TxCSO:", tx_cso);
		T("VLANins:", vlan_ins);
		T("TxQFull:", q.stops);
		T("TxQRestarts:", q.restarts);
		T("TxMapErr:", mapping_err);
		R("FLAllocErr:", fl.alloc_failed);
		R("FLLrgAlcErr:", fl.large_alloc_failed);
		R("FLStarving:", fl.starving);
		return 0;
	}

	/* records after the Ethernet Queue Sets: FW event queue, then the
	 * forwarded interrupt queue (present only in MSI mode) */
	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   evtq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   intrq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
	}

	#undef R
	#undef T
	#undef S
	#undef R3
	#undef T3
	#undef S3

	return 0;
}
1820
1821/*
1822 * Return the number of "entries" in our "file". We group the multi-Queue
1823 * sections with QPL Queue Sets per "entry". The sections of the output are:
1824 *
1825 * Ethernet RX/TX Queue Sets
1826 * Firmware Event Queue
1827 * Forwarded Interrupt Queue (if in MSI mode)
1828 */
1829static int sge_qstats_entries(const struct adapter *adapter)
1830{
1831 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1832 ((adapter->flags & USING_MSI) != 0);
1833}
1834
1835static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
1836{
1837 int entries = sge_qstats_entries(seq->private);
1838
1839 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1840}
1841
/* seq_file stop: nothing to release -- no locks or state held across show */
static void sge_qstats_stop(struct seq_file *seq, void *v)
{
}
1845
1846static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
1847{
1848 int entries = sge_qstats_entries(seq->private);
1849
1850 (*pos)++;
1851 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1852}
1853
/* seq_file iterator for the "sge_qstats" debugfs file */
static const struct seq_operations sge_qstats_seq_ops = {
	.start = sge_qstats_start,
	.next  = sge_qstats_next,
	.stop  = sge_qstats_stop,
	.show  = sge_qstats_show
};
1860
1861static int sge_qstats_open(struct inode *inode, struct file *file)
1862{
1863 int res = seq_open(file, &sge_qstats_seq_ops);
1864
1865 if (res == 0) {
1866 struct seq_file *seq = file->private_data;
1867 seq->private = inode->i_private;
1868 }
1869 return res;
1870}
1871
/* file operations for the "sge_qstats" debugfs file */
static const struct file_operations sge_qstats_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
1879
1880/*
1881 * Show PCI-E SR-IOV Virtual Function Resource Limits.
1882 */
/*
 * Show PCI-E SR-IOV Virtual Function Resource Limits.
 *
 * Single-shot seq_file show method: dumps each field of the VF resource
 * allocation (adapter->params.vfres) as one "description (field): value"
 * line.  The S() helper macro embeds the structure field name into the
 * label via stringization.
 */
static int resources_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct vf_resources *vfres = &adapter->params.vfres;

	#define S(desc, fmt, var) \
		seq_printf(seq, "%-60s " fmt "\n", \
			   desc " (" #var "):", vfres->var)

	S("Virtual Interfaces", "%d", nvi);
	S("Egress Queues", "%d", neq);
	S("Ethernet Control", "%d", nethctrl);
	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
	S("Ingress Queues", "%d", niq);
	S("Traffic Class", "%d", tc);
	S("Port Access Rights Mask", "%#x", pmask);
	S("MAC Address Filters", "%d", nexactf);
	S("Firmware Command Read Capabilities", "%#x", r_caps);
	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);

	#undef S

	return 0;
}
1907
1908static int resources_open(struct inode *inode, struct file *file)
1909{
1910 return single_open(file, resources_show, inode->i_private);
1911}
1912
/* file_operations for the "resources" debugfs node (read-only, single_open). */
static const struct file_operations resources_proc_fops = {
	.owner = THIS_MODULE,
	.open = resources_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1920
1921/*
1922 * Show Virtual Interfaces.
1923 */
1924static int interfaces_show(struct seq_file *seq, void *v)
1925{
1926 if (v == SEQ_START_TOKEN) {
1927 seq_puts(seq, "Interface Port VIID\n");
1928 } else {
1929 struct adapter *adapter = seq->private;
1930 int pidx = (uintptr_t)v - 2;
1931 struct net_device *dev = adapter->port[pidx];
1932 struct port_info *pi = netdev_priv(dev);
1933
1934 seq_printf(seq, "%9s %4d %#5x\n",
1935 dev->name, pi->port_id, pi->viid);
1936 }
1937 return 0;
1938}
1939
1940static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
1941{
1942 return pos <= adapter->params.nports
1943 ? (void *)(uintptr_t)(pos + 1)
1944 : NULL;
1945}
1946
1947static void *interfaces_start(struct seq_file *seq, loff_t *pos)
1948{
1949 return *pos
1950 ? interfaces_get_idx(seq->private, *pos)
1951 : SEQ_START_TOKEN;
1952}
1953
1954static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
1955{
1956 (*pos)++;
1957 return interfaces_get_idx(seq->private, *pos);
1958}
1959
/*
 * seq_file "stop" method: no locks or per-iteration state to release.
 */
static void interfaces_stop(struct seq_file *seq, void *v)
{
}
1963
/* seq_file iterator operations for the "interfaces" debugfs node. */
static const struct seq_operations interfaces_seq_ops = {
	.start = interfaces_start,
	.next = interfaces_next,
	.stop = interfaces_stop,
	.show = interfaces_show
};
1970
1971static int interfaces_open(struct inode *inode, struct file *file)
1972{
1973 int res = seq_open(file, &interfaces_seq_ops);
1974
1975 if (res == 0) {
1976 struct seq_file *seq = file->private_data;
1977 seq->private = inode->i_private;
1978 }
1979 return res;
1980}
1981
/* file_operations for the "interfaces" debugfs node (read-only seq_file). */
static const struct file_operations interfaces_proc_fops = {
	.owner = THIS_MODULE,
	.open = interfaces_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
1989
1990/*
1991 * /sys/kernel/debugfs/cxgb4vf/ files list.
1992 */
/*
 * Descriptor for one /sys/kernel/debug/cxgb4vf/<device>/ file: the node's
 * name, its permission bits, and the file_operations which implement it.
 */
struct cxgb4vf_debugfs_entry {
	const char *name; /* name of debugfs node */
	mode_t mode; /* file system mode */
	const struct file_operations *fops;
};
1998
/*
 * Table of the debugfs files created per adapter by setup_debugfs().
 * All nodes are world-readable (S_IRUGO) and read-only.
 */
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
	{ "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
	{ "resources", S_IRUGO, &resources_proc_fops },
	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
};
2005
2006/*
2007 * Module and device initialization and cleanup code.
2008 * ==================================================
2009 */
2010
2011/*
2012 * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2013 * directory (debugfs_root) has already been set up.
2014 */
2015static int __devinit setup_debugfs(struct adapter *adapter)
2016{
2017 int i;
2018
2019 BUG_ON(adapter->debugfs_root == NULL);
2020
2021 /*
2022 * Debugfs support is best effort.
2023 */
2024 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2025 (void)debugfs_create_file(debugfs_files[i].name,
2026 debugfs_files[i].mode,
2027 adapter->debugfs_root,
2028 (void *)adapter,
2029 debugfs_files[i].fops);
2030
2031 return 0;
2032}
2033
2034/*
2035 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2036 * it to our caller to tear down the directory (debugfs_root).
2037 */
/*
 * Tear down any ancillary state associated with the adapter's debugfs
 * files.  The files themselves are removed by the caller via
 * debugfs_remove_recursive() on adapter->debugfs_root, so there is
 * currently nothing for us to do here.
 */
static void __devexit cleanup_debugfs(struct adapter *adapter)
{
	BUG_ON(adapter->debugfs_root == NULL);

	/*
	 * Unlike our sister routine cleanup_proc(), we don't need to remove
	 * individual entries because a call will be made to
	 * debugfs_remove_recursive(). We just need to clean up any ancillary
	 * persistent state.
	 */
	/* nothing to do */
}
2050
2051/*
2052 * Perform early "adapter" initialization. This is where we discover what
2053 * adapter parameters we're going to be using and initialize basic adapter
2054 * hardware support.
2055 */
2056static int adap_init0(struct adapter *adapter)
2057{
2058 struct vf_resources *vfres = &adapter->params.vfres;
2059 struct sge_params *sge_params = &adapter->params.sge;
2060 struct sge *s = &adapter->sge;
2061 unsigned int ethqsets;
2062 int err;
2063
2064 /*
2065 * Wait for the device to become ready before proceeding ...
2066 */
2067 err = t4vf_wait_dev_ready(adapter);
2068 if (err) {
2069 dev_err(adapter->pdev_dev, "device didn't become ready:"
2070 " err=%d\n", err);
2071 return err;
2072 }
2073
2074 /*
2075 * Grab basic operational parameters. These will predominantly have
2076 * been set up by the Physical Function Driver or will be hard coded
2077 * into the adapter. We just have to live with them ... Note that
2078 * we _must_ get our VPD parameters before our SGE parameters because
2079 * we need to know the adapter's core clock from the VPD in order to
2080 * properly decode the SGE Timer Values.
2081 */
2082 err = t4vf_get_dev_params(adapter);
2083 if (err) {
2084 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2085 " device parameters: err=%d\n", err);
2086 return err;
2087 }
2088 err = t4vf_get_vpd_params(adapter);
2089 if (err) {
2090 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2091 " VPD parameters: err=%d\n", err);
2092 return err;
2093 }
2094 err = t4vf_get_sge_params(adapter);
2095 if (err) {
2096 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2097 " SGE parameters: err=%d\n", err);
2098 return err;
2099 }
2100 err = t4vf_get_rss_glb_config(adapter);
2101 if (err) {
2102 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2103 " RSS parameters: err=%d\n", err);
2104 return err;
2105 }
2106 if (adapter->params.rss.mode !=
2107 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2108 dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2109 " mode %d\n", adapter->params.rss.mode);
2110 return -EINVAL;
2111 }
2112 err = t4vf_sge_init(adapter);
2113 if (err) {
2114 dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2115 " err=%d\n", err);
2116 return err;
2117 }
2118
2119 /*
2120 * Retrieve our RX interrupt holdoff timer values and counter
2121 * threshold values from the SGE parameters.
2122 */
2123 s->timer_val[0] = core_ticks_to_us(adapter,
2124 TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
2125 s->timer_val[1] = core_ticks_to_us(adapter,
2126 TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
2127 s->timer_val[2] = core_ticks_to_us(adapter,
2128 TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
2129 s->timer_val[3] = core_ticks_to_us(adapter,
2130 TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
2131 s->timer_val[4] = core_ticks_to_us(adapter,
2132 TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
2133 s->timer_val[5] = core_ticks_to_us(adapter,
2134 TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
2135
2136 s->counter_val[0] =
2137 THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
2138 s->counter_val[1] =
2139 THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
2140 s->counter_val[2] =
2141 THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
2142 s->counter_val[3] =
2143 THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
2144
2145 /*
2146 * Grab our Virtual Interface resource allocation, extract the
2147 * features that we're interested in and do a bit of sanity testing on
2148 * what we discover.
2149 */
2150 err = t4vf_get_vfres(adapter);
2151 if (err) {
2152 dev_err(adapter->pdev_dev, "unable to get virtual interface"
2153 " resources: err=%d\n", err);
2154 return err;
2155 }
2156
2157 /*
2158 * The number of "ports" which we support is equal to the number of
2159 * Virtual Interfaces with which we've been provisioned.
2160 */
2161 adapter->params.nports = vfres->nvi;
2162 if (adapter->params.nports > MAX_NPORTS) {
2163 dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
2164 " virtual interfaces\n", MAX_NPORTS,
2165 adapter->params.nports);
2166 adapter->params.nports = MAX_NPORTS;
2167 }
2168
2169 /*
2170 * We need to reserve a number of the ingress queues with Free List
2171 * and Interrupt capabilities for special interrupt purposes (like
2172 * asynchronous firmware messages, or forwarded interrupts if we're
2173 * using MSI). The rest of the FL/Intr-capable ingress queues will be
2174 * matched up one-for-one with Ethernet/Control egress queues in order
2175 * to form "Queue Sets" which will be aportioned between the "ports".
2176 * For each Queue Set, we'll need the ability to allocate two Egress
2177 * Contexts -- one for the Ingress Queue Free List and one for the TX
2178 * Ethernet Queue.
2179 */
2180 ethqsets = vfres->niqflint - INGQ_EXTRAS;
2181 if (vfres->nethctrl != ethqsets) {
2182 dev_warn(adapter->pdev_dev, "unequal number of [available]"
2183 " ingress/egress queues (%d/%d); using minimum for"
2184 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
2185 ethqsets = min(vfres->nethctrl, ethqsets);
2186 }
2187 if (vfres->neq < ethqsets*2) {
2188 dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
2189 " to support Queue Sets (%d); reducing allowed Queue"
2190 " Sets\n", vfres->neq, ethqsets);
2191 ethqsets = vfres->neq/2;
2192 }
2193 if (ethqsets > MAX_ETH_QSETS) {
2194 dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
2195 " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
2196 ethqsets = MAX_ETH_QSETS;
2197 }
2198 if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
2199 dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
2200 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
2201 }
2202 adapter->sge.max_ethqsets = ethqsets;
2203
2204 /*
2205 * Check for various parameter sanity issues. Most checks simply
2206 * result in us using fewer resources than our provissioning but we
2207 * do need at least one "port" with which to work ...
2208 */
2209 if (adapter->sge.max_ethqsets < adapter->params.nports) {
2210 dev_warn(adapter->pdev_dev, "only using %d of %d available"
2211 " virtual interfaces (too few Queue Sets)\n",
2212 adapter->sge.max_ethqsets, adapter->params.nports);
2213 adapter->params.nports = adapter->sge.max_ethqsets;
2214 }
2215 if (adapter->params.nports == 0) {
2216 dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2217 "usable!\n");
2218 return -EINVAL;
2219 }
2220 return 0;
2221}
2222
2223static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2224 u8 pkt_cnt_idx, unsigned int size,
2225 unsigned int iqe_size)
2226{
2227 rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
2228 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
2229 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2230 ? pkt_cnt_idx
2231 : 0);
2232 rspq->iqe_len = iqe_size;
2233 rspq->size = size;
2234}
2235
2236/*
2237 * Perform default configuration of DMA queues depending on the number and
2238 * type of ports we found and the number of available CPUs. Most settings can
2239 * be modified by the admin via ethtool and cxgbtool prior to the adapter
2240 * being brought up for the first time.
2241 */
/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs. Most settings can
 * be modified by the admin via ethtool and cxgbtool prior to the adapter
 * being brought up for the first time.
 *
 * Must be called after the interrupt mode has been chosen (the BUG_ON
 * below enforces this) because the forwarded interrupt queue setup
 * depends on it.
 */
static void __devinit cfg_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int q10g, n10g, qidx, pidx, qs;

	/*
	 * We should not be called till we know how many Queue Sets we can
	 * support. In particular, this means that we need to know what kind
	 * of interrupts we'll be using ...
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);

	/*
	 * Count the number of 10GbE Virtual Interfaces that we have.
	 */
	n10g = 0;
	for_each_port(adapter, pidx)
		n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g == 0)
		q10g = 0;
	else {
		/*
		 * Reserve one Queue Set per 1G port, split the remainder
		 * evenly among the 10G ports, and cap at the CPU count.
		 */
		int n1g = (adapter->params.nports - n10g);
		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
		if (q10g > num_online_cpus())
			q10g = num_online_cpus();
	}

	/*
	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
	 * The layout will be established in setup_sge_queues() when the
	 * adapter is brough up for the first time.
	 */
	qidx = 0;
	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
	s->ethqsets = qidx;

	/*
	 * Set up default Queue Set parameters ... Start off with the
	 * shortest interrupt holdoff timer.
	 */
	for (qs = 0; qs < s->max_ethqsets; qs++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
		struct sge_eth_txq *txq = &s->ethtxq[qs];

		/* 1024-entry RSPQ, timer index 0, no packet-count threshold */
		init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
		rxq->fl.size = 72;
		txq->q.size = 1024;
	}

	/*
	 * The firmware event queue is used for link state changes and
	 * notifications of TX DMA completions.
	 */
	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
		  L1_CACHE_BYTES);

	/*
	 * The forwarded interrupt queue is used when we're in MSI interrupt
	 * mode. In this mode all interrupts associated with RX queues will
	 * be forwarded to a single queue which we'll associate with our MSI
	 * interrupt vector. The messages dropped in the forwarded interrupt
	 * queue will indicate which ingress queue needs servicing ... This
	 * queue needs to be large enough to accommodate all of the ingress
	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
	 * from equalling the CIDX if every ingress queue has an outstanding
	 * interrupt). The queue doesn't need to be any larger because no
	 * ingress queue will ever have more than one outstanding interrupt at
	 * any time ...
	 */
	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
		  L1_CACHE_BYTES);
}
2325
2326/*
2327 * Reduce the number of Ethernet queues across all ports to at most n.
2328 * n provides at least one queue per port.
2329 */
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adapter, int n)
{
	int i;
	struct port_info *pi;

	/*
	 * While we have too many active Ether Queue Sets, interate across the
	 * "ports" and reduce their individual Queue Set allocations.
	 *
	 * Note the loop structure: the inner "break" only exits the
	 * for_each_port() pass; the outer while re-checks the target and
	 * makes another round-robin pass if we're still over budget.  Each
	 * port is never reduced below one Queue Set.
	 */
	BUG_ON(n < adapter->params.nports);
	while (n < adapter->sge.ethqsets)
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adapter->sge.ethqsets--;
				if (adapter->sge.ethqsets <= n)
					break;
			}
		}

	/*
	 * Reassign the starting Queue Sets for each of the "ports" ...
	 * (reuses n as a running Queue Set index; its original value is no
	 * longer needed at this point)
	 */
	n = 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
2361
2362/*
2363 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2364 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2365 * need. Minimally we need one for every Virtual Interface plus those needed
2366 * for our "extras". Note that this process may lower the maximum number of
2367 * allowed Queue Sets ...
2368 */
/*
 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
 * need. Minimally we need one for every Virtual Interface plus those needed
 * for our "extras". Note that this process may lower the maximum number of
 * allowed Queue Sets ...
 *
 * Returns 0 on success (vectors recorded in adapter->msix_info[]), or a
 * non-zero value from pci_enable_msix() when even the minimum couldn't be
 * obtained.
 */
static int __devinit enable_msix(struct adapter *adapter)
{
	int i, err, want, need;
	struct msix_entry entries[MSIX_ENTRIES];
	struct sge *s = &adapter->sge;

	for (i = 0; i < MSIX_ENTRIES; ++i)
		entries[i].entry = i;

	/*
	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
	 * plus those needed for our "extras" (for example, the firmware
	 * message queue). We _need_ at least one "Queue Set" per Virtual
	 * Interface plus those needed for our "extras". So now we get to see
	 * if the song is right ...
	 *
	 * pci_enable_msix() returns 0 on success or, on failure, the number
	 * of vectors that could have been allocated (if positive).  The
	 * loop keeps retrying with that smaller count as long as it still
	 * meets our minimum requirement.
	 */
	want = s->max_ethqsets + MSIX_EXTRAS;
	need = adapter->params.nports + MSIX_EXTRAS;
	while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
		want = err;

	if (err == 0) {
		/*
		 * Success; if we got fewer vectors than our ideal, scale
		 * back the number of Queue Sets accordingly.
		 */
		int nqsets = want - MSIX_EXTRAS;
		if (nqsets < s->max_ethqsets) {
			dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
				 " for %d Queue Sets\n", nqsets);
			s->max_ethqsets = nqsets;
			if (nqsets < s->ethqsets)
				reduce_ethqs(adapter, nqsets);
		}
		for (i = 0; i < want; ++i)
			adapter->msix_info[i].vec = entries[i].vector;
	} else if (err > 0) {
		/* fewer vectors available than we minimally need */
		pci_disable_msix(adapter->pdev);
		dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
			 " not using MSI-X\n", err);
	}
	return err;
}
2408
#ifdef HAVE_NET_DEVICE_OPS
/*
 * Network device operations for the VF ports.  On kernels without
 * net_device_ops (no HAVE_NET_DEVICE_OPS), the probe routine assigns the
 * equivalent individual net_device callbacks instead.
 */
static const struct net_device_ops cxgb4vf_netdev_ops = {
	.ndo_open = cxgb4vf_open,
	.ndo_stop = cxgb4vf_stop,
	.ndo_start_xmit = t4vf_eth_xmit,
	.ndo_get_stats = cxgb4vf_get_stats,
	.ndo_set_rx_mode = cxgb4vf_set_rxmode,
	.ndo_set_mac_address = cxgb4vf_set_mac_addr,
	.ndo_select_queue = cxgb4vf_select_queue,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = cxgb4vf_do_ioctl,
	.ndo_change_mtu = cxgb4vf_change_mtu,
	.ndo_vlan_rx_register = cxgb4vf_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb4vf_poll_controller,
#endif
};
#endif
2427
2428/*
2429 * "Probe" a device: initialize a device and construct all kernel and driver
2430 * state needed to manage the device. This routine is called "init_one" in
2431 * the PF Driver ...
2432 */
static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	static int version_printed;

	int pci_using_dac;
	int err, pidx;
	unsigned int pmask;
	struct adapter *adapter;
	struct port_info *pi;
	struct net_device *netdev;

	/*
	 * Vet our module parameters.
	 */
	if (msi != MSI_MSIX && msi != MSI_MSI) {
		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
			MSI_MSI);
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * Print our driver banner the first time we're called to initialize a
	 * device.
	 */
	if (version_printed == 0) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		version_printed = 1;
	}

	/*
	 * Reserve PCI resources for the device. If we can't get them some
	 * other driver may have already claimed the device ...
	 *
	 * NOTE(review): this returns directly instead of using the goto
	 * unwind; nothing has been acquired yet so that's equivalent, but it
	 * means err_out is only reachable from the parameter check above.
	 */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/*
	 * Initialize generic PCI device state.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_release_regions;
	}

	/*
	 * Set up our DMA mask: try for 64-bit address masking first and
	 * fall back to 32-bit if we can't get 64 bits ...
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err == 0) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
				" coherent allocations\n");
			goto err_disable_device;
		}
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err != 0) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	/*
	 * Enable bus mastering for the device ...
	 */
	pci_set_master(pdev);

	/*
	 * Allocate our adapter data structure and attach it to the device.
	 */
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto err_disable_device;
	}
	pci_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;

	/*
	 * Initialize SMP data synchronization resources.
	 */
	spin_lock_init(&adapter->stats_lock);

	/*
	 * Map our I/O registers in BAR0.
	 */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_adapter;
	}

	/*
	 * Initialize adapter level features.
	 */
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	err = adap_init0(adapter);
	if (err)
		goto err_unmap_bar;

	/*
	 * Allocate our "adapter ports" and stitch everything together.
	 * pmask is consumed bit by bit below: each port claims the lowest
	 * remaining set bit as its physical port number.
	 */
	pmask = adapter->params.vfres.pmask;
	for_each_port(adapter, pidx) {
		int port_id, viid;

		/*
		 * We simplistically allocate our virtual interfaces
		 * sequentially across the port numbers to which we have
		 * access rights. This should be configurable in some manner
		 * ...
		 */
		if (pmask == 0)
			break;
		port_id = ffs(pmask) - 1;
		pmask &= ~(1 << port_id);
		viid = t4vf_alloc_vi(adapter, port_id);
		if (viid < 0) {
			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
				" err=%d\n", port_id, viid);
			err = viid;
			goto err_free_dev;
		}

		/*
		 * Allocate our network device and stitch things together.
		 */
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_PORT_QSETS);
		if (netdev == NULL) {
			dev_err(&pdev->dev, "cannot allocate netdev for"
				" port %d\n", port_id);
			t4vf_free_vi(adapter, viid);
			err = -ENOMEM;
			goto err_free_dev;
		}
		adapter->port[pidx] = netdev;
		SET_NETDEV_DEV(netdev, &pdev->dev);
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->pidx = pidx;
		pi->port_id = port_id;
		pi->viid = viid;

		/*
		 * Initialize the starting state of our "port" and register
		 * it.
		 */
		pi->xact_addr_filt = -1;
		pi->rx_offload = RX_CSO;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;

		netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
				    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
				    NETIF_F_GRO);
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features =
			(netdev->features &
			 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));

#ifdef HAVE_NET_DEVICE_OPS
		netdev->netdev_ops = &cxgb4vf_netdev_ops;
#else
		/* Pre-net_device_ops kernels: wire up callbacks directly. */
		netdev->vlan_rx_register = cxgb4vf_vlan_rx_register;
		netdev->open = cxgb4vf_open;
		netdev->stop = cxgb4vf_stop;
		netdev->hard_start_xmit = t4vf_eth_xmit;
		netdev->get_stats = cxgb4vf_get_stats;
		netdev->set_rx_mode = cxgb4vf_set_rxmode;
		netdev->do_ioctl = cxgb4vf_do_ioctl;
		netdev->change_mtu = cxgb4vf_change_mtu;
		netdev->set_mac_address = cxgb4vf_set_mac_addr;
		netdev->select_queue = cxgb4vf_select_queue;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb4vf_poll_controller;
#endif
#endif
		SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);

		/*
		 * Initialize the hardware/software state for the port.
		 */
		err = t4vf_port_init(adapter, pidx);
		if (err) {
			dev_err(&pdev->dev, "cannot initialize port %d\n",
				pidx);
			goto err_free_dev;
		}
	}

	/*
	 * The "card" is now ready to go. If any errors occur during device
	 * registration we do not fail the whole "card" but rather proceed
	 * only with the ports we manage to register successfully. However we
	 * must register at least one net device.
	 */
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;

		err = register_netdev(netdev);
		if (err) {
			dev_warn(&pdev->dev, "cannot register net device %s,"
				 " skipping\n", netdev->name);
			continue;
		}

		set_bit(pidx, &adapter->registered_device_map);
	}
	if (adapter->registered_device_map == 0) {
		/*
		 * NOTE(review): err here carries the last register_netdev()
		 * failure from the loop above; at least one port exists
		 * (adap_init0 guarantees nports > 0), so err is non-zero.
		 */
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto err_free_dev;
	}

	/*
	 * Set up our debugfs entries.
	 */
	if (cxgb4vf_debugfs_root) {
		adapter->debugfs_root =
			debugfs_create_dir(pci_name(pdev),
					   cxgb4vf_debugfs_root);
		if (adapter->debugfs_root == NULL)
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory");
		else
			setup_debugfs(adapter);
	}

	/*
	 * See what interrupts we'll be using. If we've been configured to
	 * use MSI-X interrupts, try to enable them but fall back to using
	 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
	 * get MSI interrupts we bail with the error.
	 */
	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else {
		err = pci_enable_msi(pdev);
		if (err) {
			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
				" err=%d\n",
				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
			goto err_free_debugfs;
		}
		adapter->flags |= USING_MSI;
	}

	/*
	 * Now that we know how many "ports" we have and what their types are,
	 * and how many Queue Sets we can support, we can configure our queue
	 * resources.
	 */
	cfg_queues(adapter);

	/*
	 * Print a short notice on the existance and configuration of the new
	 * VF network device ...
	 */
	for_each_port(adapter, pidx) {
		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
			 adapter->port[pidx]->name,
			 (adapter->flags & USING_MSIX) ? "MSI-X" :
			 (adapter->flags & USING_MSI)  ? "MSI" : "");
	}

	/*
	 * Return success!
	 */
	return 0;

	/*
	 * Error recovery and exit code. Unwind state that's been created
	 * so far and return the error.  The labels unwind in reverse order
	 * of acquisition; each entry point falls through all cleanups below
	 * it.
	 */

err_free_debugfs:
	if (adapter->debugfs_root) {
		cleanup_debugfs(adapter);
		debugfs_remove_recursive(adapter->debugfs_root);
	}

err_free_dev:
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;
		pi = netdev_priv(netdev);
		t4vf_free_vi(adapter, pi->viid);
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(netdev);
		free_netdev(netdev);
	}

err_unmap_bar:
	iounmap(adapter->regs);

err_free_adapter:
	kfree(adapter);
	pci_set_drvdata(pdev, NULL);

err_disable_device:
	pci_disable_device(pdev);
	pci_clear_master(pdev);

err_release_regions:
	pci_release_regions(pdev);
	/* harmless if already cleared above in the err_free_adapter path */
	pci_set_drvdata(pdev, NULL);

err_out:
	return err;
}
2764
2765/*
2766 * "Remove" a device: tear down all kernel and driver state created in the
2767 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2768 * that this is called "remove_one" in the PF Driver.)
2769 */
static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/*
	 * Tear down driver state associated with device.  The adapter
	 * pointer can be NULL if probe failed partway, so everything
	 * adapter-related is guarded; the PCI-level teardown at the bottom
	 * always runs.
	 */
	if (adapter) {
		int pidx;

		/*
		 * Stop all of our activity. Unregister network port,
		 * disable interrupts, etc.
		 */
		for_each_port(adapter, pidx)
			if (test_bit(pidx, &adapter->registered_device_map))
				unregister_netdev(adapter->port[pidx]);
		t4vf_sge_stop(adapter);
		if (adapter->flags & USING_MSIX) {
			pci_disable_msix(adapter->pdev);
			adapter->flags &= ~USING_MSIX;
		} else if (adapter->flags & USING_MSI) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~USING_MSI;
		}

		/*
		 * Tear down our debugfs entries.
		 */
		if (adapter->debugfs_root) {
			cleanup_debugfs(adapter);
			debugfs_remove_recursive(adapter->debugfs_root);
		}

		/*
		 * Free all of the various resources which we've acquired ...
		 */
		t4vf_free_sge_resources(adapter);
		for_each_port(adapter, pidx) {
			struct net_device *netdev = adapter->port[pidx];
			struct port_info *pi;

			/* ports past the end of pmask were never allocated */
			if (netdev == NULL)
				continue;

			pi = netdev_priv(netdev);
			t4vf_free_vi(adapter, pi->viid);
			free_netdev(netdev);
		}
		iounmap(adapter->regs);
		kfree(adapter);
		pci_set_drvdata(pdev, NULL);
	}

	/*
	 * Disable the device and release its PCI resources.
	 */
	pci_disable_device(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
}
2831
2832/*
2833 * PCI Device registration data structures.
2834 */
/*
 * Shorthand for a Chelsio PCI device table entry; "idx" is the
 * driver-private data stored in the table's driver_data field.
 */
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

/* PCI device IDs of the T4 VFs this driver binds to. */
static struct pci_device_id cxgb4vf_pci_tbl[] = {
	CH_DEVICE(0xb000, 0),	/* PE10K FPGA */
	CH_DEVICE(0x4800, 0),	/* T440-dbg */
	CH_DEVICE(0x4801, 0),	/* T420-cr */
	CH_DEVICE(0x4802, 0),	/* T422-cr */
	{ 0, }
};
2845
2846MODULE_DESCRIPTION(DRV_DESC);
2847MODULE_AUTHOR("Chelsio Communications");
2848MODULE_LICENSE("Dual BSD/GPL");
2849MODULE_VERSION(DRV_VERSION);
2850MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
2851
/* PCI driver registration: binds cxgb4vf_pci_probe/remove to the ID table. */
static struct pci_driver cxgb4vf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4vf_pci_tbl,
	.probe = cxgb4vf_pci_probe,
	.remove = __devexit_p(cxgb4vf_pci_remove),
};
2858
2859/*
2860 * Initialize global driver state.
2861 */
2862static int __init cxgb4vf_module_init(void)
2863{
2864 int ret;
2865
2866 /* Debugfs support is optional, just warn if this fails */
2867 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
2868 if (!cxgb4vf_debugfs_root)
2869 printk(KERN_WARNING KBUILD_MODNAME ": could not create"
2870 " debugfs entry, continuing\n");
2871
2872 ret = pci_register_driver(&cxgb4vf_driver);
2873 if (ret < 0)
2874 debugfs_remove(cxgb4vf_debugfs_root);
2875 return ret;
2876}
2877
2878/*
2879 * Tear down global driver state.
2880 */
/*
 * Tear down global driver state.  Order matters: the PCI driver must be
 * unregistered (quiescing all adapters) before the shared debugfs
 * directory is removed.  debugfs_remove(NULL) is a no-op, so this is safe
 * even if directory creation failed at module init.
 */
static void __exit cxgb4vf_module_exit(void)
{
	pci_unregister_driver(&cxgb4vf_driver);
	debugfs_remove(cxgb4vf_debugfs_root);
}
2886
2887module_init(cxgb4vf_module_init);
2888module_exit(cxgb4vf_module_exit);
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
new file mode 100644
index 000000000000..eb5a1c9cb2d3
--- /dev/null
+++ b/drivers/net/cxgb4vf/sge.c
@@ -0,0 +1,2454 @@
1/*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/ip.h>
41#include <net/ipv6.h>
42#include <net/tcp.h>
43#include <linux/dma-mapping.h>
44
45#include "t4vf_common.h"
46#include "t4vf_defs.h"
47
48#include "../cxgb4/t4_regs.h"
49#include "../cxgb4/t4fw_api.h"
50#include "../cxgb4/t4_msg.h"
51
52/*
53 * Decoded Adapter Parameters.
54 */
55static u32 FL_PG_ORDER; /* large page allocation size */
56static u32 STAT_LEN; /* length of status page at ring end */
57static u32 PKTSHIFT; /* padding between CPL and packet data */
58static u32 FL_ALIGN; /* response queue message alignment */
59
60/*
61 * Constants ...
62 */
63enum {
64 /*
65 * Egress Queue sizes, producer and consumer indices are all in units
66 * of Egress Context Units bytes. Note that as far as the hardware is
67 * concerned, the free list is an Egress Queue (the host produces free
68 * buffers which the hardware consumes) and free list entries are
69 * 64-bit PCI DMA addresses.
70 */
71 EQ_UNIT = SGE_EQ_IDXSIZE,
72 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
73 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
74
75 /*
76 * Max number of TX descriptors we clean up at a time. Should be
77 * modest as freeing skbs isn't cheap and it happens while holding
78 * locks. We just need to free packets faster than they arrive, we
79 * eventually catch up and keep the amortized cost reasonable.
80 */
81 MAX_TX_RECLAIM = 16,
82
83 /*
84 * Max number of Rx buffers we replenish at a time. Again keep this
85 * modest, allocating buffers isn't cheap either.
86 */
87 MAX_RX_REFILL = 16,
88
89 /*
90 * Period of the Rx queue check timer. This timer is infrequent as it
91 * has something to do only when the system experiences severe memory
92 * shortage.
93 */
94 RX_QCHECK_PERIOD = (HZ / 2),
95
96 /*
97 * Period of the TX queue check timer and the maximum number of TX
98 * descriptors to be reclaimed by the TX timer.
99 */
100 TX_QCHECK_PERIOD = (HZ / 2),
101 MAX_TIMER_TX_RECLAIM = 100,
102
103 /*
104 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
105 * timer will attempt to refill it.
106 */
107 FL_STARVE_THRES = 4,
108
109 /*
110 * Suspend an Ethernet TX queue with fewer available descriptors than
111 * this. We always want to have room for a maximum sized packet:
112 * inline immediate data + MAX_SKB_FRAGS. This is the same as
113 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
114 * (see that function and its helpers for a description of the
115 * calculation).
116 */
117 ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
118 ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
119 ((ETHTXQ_MAX_FRAGS-1) & 1) +
120 2),
121 ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
122 sizeof(struct cpl_tx_pkt_lso_core) +
123 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
124 ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
125
126 ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
127
128 /*
129 * Max TX descriptor space we allow for an Ethernet packet to be
130 * inlined into a WR. This is limited by the maximum value which
131 * we can specify for immediate data in the firmware Ethernet TX
132 * Work Request.
133 */
134 MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
135
136 /*
137 * Max size of a WR sent through a control TX queue.
138 */
139 MAX_CTRL_WR_LEN = 256,
140
141 /*
142 * Maximum amount of data which we'll ever need to inline into a
143 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
144 */
145 MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
146 ? MAX_IMM_TX_PKT_LEN
147 : MAX_CTRL_WR_LEN),
148
149 /*
150 * For incoming packets less than RX_COPY_THRES, we copy the data into
151 * an skb rather than referencing the data. We allocate enough
152 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
153 * of the data (header).
154 */
155 RX_COPY_THRES = 256,
156 RX_PULL_LEN = 128,
157};
158
159/*
160 * Can't define this in the above enum because PKTSHIFT isn't a constant in
161 * the VF Driver ...
162 */
163#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
164
165/*
166 * Software state per TX descriptor.
167 */
168struct tx_sw_desc {
169 struct sk_buff *skb; /* socket buffer of TX data source */
170 struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */
171};
172
173/*
174 * Software state per RX Free List descriptor. We keep track of the allocated
175 * FL page, its size, and its PCI DMA address (if the page is mapped). The FL
176 * page size and its PCI DMA mapped state are stored in the low bits of the
177 * PCI DMA address as per below.
178 */
179struct rx_sw_desc {
180 struct page *page; /* Free List page buffer */
181 dma_addr_t dma_addr; /* PCI DMA address (if mapped) */
182 /* and flags (see below) */
183};
184
185/*
186 * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the
187 * SGE also uses the low 4 bits to determine the size of the buffer. It uses
188 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
189 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
190 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
191 * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is
192 * maintained in an inverse sense so the hardware never sees that bit high.
193 */
194enum {
195 RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */
196 RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
197};
198
199/**
200 * get_buf_addr - return DMA buffer address of software descriptor
201 * @sdesc: pointer to the software buffer descriptor
202 *
203 * Return the DMA buffer address of a software descriptor (stripping out
204 * our low-order flag bits).
205 */
206static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
207{
208 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
209}
210
211/**
212 * is_buf_mapped - is buffer mapped for DMA?
213 * @sdesc: pointer to the software buffer descriptor
214 *
215 * Determine whether the buffer associated with a software descriptor in
216 * mapped for DMA or not.
217 */
218static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
219{
220 return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
221}
222
223/**
224 * need_skb_unmap - does the platform need unmapping of sk_buffs?
225 *
226 * Returns true if the platfrom needs sk_buff unmapping. The compiler
227 * optimizes away unecessary code if this returns true.
228 */
229static inline int need_skb_unmap(void)
230{
231#ifdef CONFIG_NEED_DMA_MAP_STATE
232 return 1;
233#else
234 return 0;
235#endif
236}
237
238/**
239 * txq_avail - return the number of available slots in a TX queue
240 * @tq: the TX queue
241 *
242 * Returns the number of available descriptors in a TX queue.
243 */
244static inline unsigned int txq_avail(const struct sge_txq *tq)
245{
246 return tq->size - 1 - tq->in_use;
247}
248
249/**
250 * fl_cap - return the capacity of a Free List
251 * @fl: the Free List
252 *
253 * Returns the capacity of a Free List. The capacity is less than the
254 * size because an Egress Queue Index Unit worth of descriptors needs to
255 * be left unpopulated, otherwise the Producer and Consumer indices PIDX
256 * and CIDX will match and the hardware will think the FL is empty.
257 */
258static inline unsigned int fl_cap(const struct sge_fl *fl)
259{
260 return fl->size - FL_PER_EQ_UNIT;
261}
262
263/**
264 * fl_starving - return whether a Free List is starving.
265 * @fl: the Free List
266 *
267 * Tests specified Free List to see whether the number of buffers
268 * available to the hardware has falled below our "starvation"
269 * threshhold.
270 */
271static inline bool fl_starving(const struct sge_fl *fl)
272{
273 return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
274}
275
276/**
277 * map_skb - map an skb for DMA to the device
278 * @dev: the egress net device
279 * @skb: the packet to map
280 * @addr: a pointer to the base of the DMA mapping array
281 *
282 * Map an skb for DMA to the device and return an array of DMA addresses.
283 */
284static int map_skb(struct device *dev, const struct sk_buff *skb,
285 dma_addr_t *addr)
286{
287 const skb_frag_t *fp, *end;
288 const struct skb_shared_info *si;
289
290 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
291 if (dma_mapping_error(dev, *addr))
292 goto out_err;
293
294 si = skb_shinfo(skb);
295 end = &si->frags[si->nr_frags];
296 for (fp = si->frags; fp < end; fp++) {
297 *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
298 DMA_TO_DEVICE);
299 if (dma_mapping_error(dev, *addr))
300 goto unwind;
301 }
302 return 0;
303
304unwind:
305 while (fp-- > si->frags)
306 dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
307 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
308
309out_err:
310 return -ENOMEM;
311}
312
/*
 * Unmap the DMA buffers described by an SGL previously written into a TX
 * ring by write_sgl().  The first entry (addr0/len0) is the skb's linear
 * data if it had any, otherwise the first page fragment.  The remaining
 * entries are walked in ulptx_sge_pair units, taking care of an SGL that
 * wraps past the ring's status page (tq->stat) back to tq->desc.
 */
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
			/* whole pair lies before the status page */
unmap:
			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)tq->stat) {
			/* pair starts exactly at the wrap point */
			p = (const struct ulptx_sge_pair *)tq->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
			/* lengths before the wrap, both addresses after it */
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			/* lengths and first address before the wrap, second
			 * address after it */
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		/* odd leftover entry, possibly split across the wrap */
		__be64 addr;

		if ((u8 *)p == (u8 *)tq->stat)
			p = (const struct ulptx_sge_pair *)tq->desc;
		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
			? p->addr[0]
			: *(const __be64 *)tq->desc);
		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
			       DMA_TO_DEVICE);
	}
}
373
374/**
375 * free_tx_desc - reclaims TX descriptors and their buffers
376 * @adapter: the adapter
377 * @tq: the TX queue to reclaim descriptors from
378 * @n: the number of descriptors to reclaim
379 * @unmap: whether the buffers should be unmapped for DMA
380 *
381 * Reclaims TX descriptors from an SGE TX queue and frees the associated
382 * TX buffers. Called with the TX queue lock held.
383 */
384static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
385 unsigned int n, bool unmap)
386{
387 struct tx_sw_desc *sdesc;
388 unsigned int cidx = tq->cidx;
389 struct device *dev = adapter->pdev_dev;
390
391 const int need_unmap = need_skb_unmap() && unmap;
392
393 sdesc = &tq->sdesc[cidx];
394 while (n--) {
395 /*
396 * If we kept a reference to the original TX skb, we need to
397 * unmap it from PCI DMA space (if required) and free it.
398 */
399 if (sdesc->skb) {
400 if (need_unmap)
401 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
402 kfree_skb(sdesc->skb);
403 sdesc->skb = NULL;
404 }
405
406 sdesc++;
407 if (++cidx == tq->size) {
408 cidx = 0;
409 sdesc = tq->sdesc;
410 }
411 }
412 tq->cidx = cidx;
413}
414
415/*
416 * Return the number of reclaimable descriptors in a TX queue.
417 */
418static inline int reclaimable(const struct sge_txq *tq)
419{
420 int hw_cidx = be16_to_cpu(tq->stat->cidx);
421 int reclaimable = hw_cidx - tq->cidx;
422 if (reclaimable < 0)
423 reclaimable += tq->size;
424 return reclaimable;
425}
426
427/**
428 * reclaim_completed_tx - reclaims completed TX descriptors
429 * @adapter: the adapter
430 * @tq: the TX queue to reclaim completed descriptors from
431 * @unmap: whether the buffers should be unmapped for DMA
432 *
433 * Reclaims TX descriptors that the SGE has indicated it has processed,
434 * and frees the associated buffers if possible. Called with the TX
435 * queue locked.
436 */
437static inline void reclaim_completed_tx(struct adapter *adapter,
438 struct sge_txq *tq,
439 bool unmap)
440{
441 int avail = reclaimable(tq);
442
443 if (avail) {
444 /*
445 * Limit the amount of clean up work we do at a time to keep
446 * the TX lock hold time O(1).
447 */
448 if (avail > MAX_TX_RECLAIM)
449 avail = MAX_TX_RECLAIM;
450
451 free_tx_desc(adapter, tq, avail, unmap);
452 tq->in_use -= avail;
453 }
454}
455
456/**
457 * get_buf_size - return the size of an RX Free List buffer.
458 * @sdesc: pointer to the software buffer descriptor
459 */
460static inline int get_buf_size(const struct rx_sw_desc *sdesc)
461{
462 return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
463 ? (PAGE_SIZE << FL_PG_ORDER)
464 : PAGE_SIZE;
465}
466
467/**
468 * free_rx_bufs - free RX buffers on an SGE Free List
469 * @adapter: the adapter
470 * @fl: the SGE Free List to free buffers from
471 * @n: how many buffers to free
472 *
473 * Release the next @n buffers on an SGE Free List RX queue. The
474 * buffers must be made inaccessible to hardware before calling this
475 * function.
476 */
477static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
478{
479 while (n--) {
480 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
481
482 if (is_buf_mapped(sdesc))
483 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
484 get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
485 put_page(sdesc->page);
486 sdesc->page = NULL;
487 if (++fl->cidx == fl->size)
488 fl->cidx = 0;
489 fl->avail--;
490 }
491}
492
493/**
494 * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
495 * @adapter: the adapter
496 * @fl: the SGE Free List
497 *
498 * Unmap the current buffer on an SGE Free List RX queue. The
499 * buffer must be made inaccessible to HW before calling this function.
500 *
501 * This is similar to @free_rx_bufs above but does not free the buffer.
502 * Do note that the FL still loses any further access to the buffer.
503 * This is used predominantly to "transfer ownership" of an FL buffer
504 * to another entity (typically an skb's fragment list).
505 */
506static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
507{
508 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
509
510 if (is_buf_mapped(sdesc))
511 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
512 get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
513 sdesc->page = NULL;
514 if (++fl->cidx == fl->size)
515 fl->cidx = 0;
516 fl->avail--;
517}
518
519/**
520 * ring_fl_db - righ doorbell on free list
521 * @adapter: the adapter
522 * @fl: the Free List whose doorbell should be rung ...
523 *
524 * Tell the Scatter Gather Engine that there are new free list entries
525 * available.
526 */
527static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
528{
529 /*
530 * The SGE keeps track of its Producer and Consumer Indices in terms
531 * of Egress Queue Units so we can only tell it about integral numbers
532 * of multiples of Free List Entries per Egress Queue Units ...
533 */
534 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
535 wmb();
536 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
537 DBPRIO |
538 QID(fl->cntxt_id) |
539 PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
540 fl->pend_cred %= FL_PER_EQ_UNIT;
541 }
542}
543
544/**
545 * set_rx_sw_desc - initialize software RX buffer descriptor
546 * @sdesc: pointer to the softwore RX buffer descriptor
547 * @page: pointer to the page data structure backing the RX buffer
548 * @dma_addr: PCI DMA address (possibly with low-bit flags)
549 */
550static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
551 dma_addr_t dma_addr)
552{
553 sdesc->page = page;
554 sdesc->dma_addr = dma_addr;
555}
556
557/*
558 * Support for poisoning RX buffers ...
559 */
560#define POISON_BUF_VAL -1
561
562static inline void poison_buf(struct page *page, size_t sz)
563{
564#if POISON_BUF_VAL >= 0
565 memset(page_address(page), POISON_BUF_VAL, sz);
566#endif
567}
568
569/**
570 * refill_fl - refill an SGE RX buffer ring
571 * @adapter: the adapter
572 * @fl: the Free List ring to refill
573 * @n: the number of new buffers to allocate
574 * @gfp: the gfp flags for the allocations
575 *
576 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
577 * allocated with the supplied gfp flags. The caller must assure that
578 * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
579 * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number
580 * of buffers allocated. If afterwards the queue is found critically low,
581 * mark it as starving in the bitmap of starving FLs.
582 */
583static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
584 int n, gfp_t gfp)
585{
586 struct page *page;
587 dma_addr_t dma_addr;
588 unsigned int cred = fl->avail;
589 __be64 *d = &fl->desc[fl->pidx];
590 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
591
592 /*
593 * Sanity: ensure that the result of adding n Free List buffers
594 * won't result in wrapping the SGE's Producer Index around to
595 * it's Consumer Index thereby indicating an empty Free List ...
596 */
597 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
598
599 /*
600 * If we support large pages, prefer large buffers and fail over to
601 * small pages if we can't allocate large pages to satisfy the refill.
602 * If we don't support large pages, drop directly into the small page
603 * allocation code.
604 */
605 if (FL_PG_ORDER == 0)
606 goto alloc_small_pages;
607
608 while (n) {
609 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
610 FL_PG_ORDER);
611 if (unlikely(!page)) {
612 /*
613 * We've failed inour attempt to allocate a "large
614 * page". Fail over to the "small page" allocation
615 * below.
616 */
617 fl->large_alloc_failed++;
618 break;
619 }
620 poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
621
622 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
623 PAGE_SIZE << FL_PG_ORDER,
624 PCI_DMA_FROMDEVICE);
625 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
626 /*
627 * We've run out of DMA mapping space. Free up the
628 * buffer and return with what we've managed to put
629 * into the free list. We don't want to fail over to
630 * the small page allocation below in this case
631 * because DMA mapping resources are typically
632 * critical resources once they become scarse.
633 */
634 __free_pages(page, FL_PG_ORDER);
635 goto out;
636 }
637 dma_addr |= RX_LARGE_BUF;
638 *d++ = cpu_to_be64(dma_addr);
639
640 set_rx_sw_desc(sdesc, page, dma_addr);
641 sdesc++;
642
643 fl->avail++;
644 if (++fl->pidx == fl->size) {
645 fl->pidx = 0;
646 sdesc = fl->sdesc;
647 d = fl->desc;
648 }
649 n--;
650 }
651
652alloc_small_pages:
653 while (n--) {
654 page = __netdev_alloc_page(adapter->port[0],
655 gfp | __GFP_NOWARN);
656 if (unlikely(!page)) {
657 fl->alloc_failed++;
658 break;
659 }
660 poison_buf(page, PAGE_SIZE);
661
662 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
663 PCI_DMA_FROMDEVICE);
664 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
665 netdev_free_page(adapter->port[0], page);
666 break;
667 }
668 *d++ = cpu_to_be64(dma_addr);
669
670 set_rx_sw_desc(sdesc, page, dma_addr);
671 sdesc++;
672
673 fl->avail++;
674 if (++fl->pidx == fl->size) {
675 fl->pidx = 0;
676 sdesc = fl->sdesc;
677 d = fl->desc;
678 }
679 }
680
681out:
682 /*
683 * Update our accounting state to incorporate the new Free List
684 * buffers, tell the hardware about them and return the number of
685 * bufers which we were able to allocate.
686 */
687 cred = fl->avail - cred;
688 fl->pend_cred += cred;
689 ring_fl_db(adapter, fl);
690
691 if (unlikely(fl_starving(fl))) {
692 smp_wmb();
693 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
694 }
695
696 return cred;
697}
698
699/*
700 * Refill a Free List to its capacity or the Maximum Refill Increment,
701 * whichever is smaller ...
702 */
703static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
704{
705 refill_fl(adapter, fl,
706 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
707 GFP_ATOMIC);
708}
709
710/**
711 * alloc_ring - allocate resources for an SGE descriptor ring
712 * @dev: the PCI device's core device
713 * @nelem: the number of descriptors
714 * @hwsize: the size of each hardware descriptor
715 * @swsize: the size of each software descriptor
716 * @busaddrp: the physical PCI bus address of the allocated ring
717 * @swringp: return address pointer for software ring
718 * @stat_size: extra space in hardware ring for status information
719 *
720 * Allocates resources for an SGE descriptor ring, such as TX queues,
721 * free buffer lists, response queues, etc. Each SGE ring requires
722 * space for its hardware descriptors plus, optionally, space for software
723 * state associated with each hardware entry (the metadata). The function
724 * returns three values: the virtual address for the hardware ring (the
725 * return value of the function), the PCI bus address of the hardware
726 * ring (in *busaddrp), and the address of the software ring (in swringp).
727 * Both the hardware and software rings are returned zeroed out.
728 */
729static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
730 size_t swsize, dma_addr_t *busaddrp, void *swringp,
731 size_t stat_size)
732{
733 /*
734 * Allocate the hardware ring and PCI DMA bus address space for said.
735 */
736 size_t hwlen = nelem * hwsize + stat_size;
737 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
738
739 if (!hwring)
740 return NULL;
741
742 /*
743 * If the caller wants a software ring, allocate it and return a
744 * pointer to it in *swringp.
745 */
746 BUG_ON((swsize != 0) != (swringp != NULL));
747 if (swsize) {
748 void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
749
750 if (!swring) {
751 dma_free_coherent(dev, hwlen, hwring, *busaddrp);
752 return NULL;
753 }
754 *(void **)swringp = swring;
755 }
756
757 /*
758 * Zero out the hardware ring and return its address as our function
759 * value.
760 */
761 memset(hwring, 0, hwlen);
762 return hwring;
763}
764
765/**
766 * sgl_len - calculates the size of an SGL of the given capacity
767 * @n: the number of SGL entries
768 *
769 * Calculates the number of flits (8-byte units) needed for a Direct
770 * Scatter/Gather List that can hold the given number of entries.
771 */
772static inline unsigned int sgl_len(unsigned int n)
773{
774 /*
775 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
776 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
777 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
778 * repeated sequences of { Length[i], Length[i+1], Address[i],
779 * Address[i+1] } (this ensures that all addresses are on 64-bit
780 * boundaries). If N is even, then Length[N+1] should be set to 0 and
781 * Address[N+1] is omitted.
782 *
783 * The following calculation incorporates all of the above. It's
784 * somewhat hard to follow but, briefly: the "+2" accounts for the
785 * first two flits which include the DSGL header, Length0 and
786 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
787 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
788 * finally the "+((n-1)&1)" adds the one remaining flit needed if
789 * (n-1) is odd ...
790 */
791 n--;
792 return (3 * n) / 2 + (n & 1) + 2;
793}
794
795/**
796 * flits_to_desc - returns the num of TX descriptors for the given flits
797 * @flits: the number of flits
798 *
799 * Returns the number of TX descriptors needed for the supplied number
800 * of flits.
801 */
802static inline unsigned int flits_to_desc(unsigned int flits)
803{
804 BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
805 return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
806}
807
808/**
809 * is_eth_imm - can an Ethernet packet be sent as immediate data?
810 * @skb: the packet
811 *
812 * Returns whether an Ethernet packet is small enough to fit completely as
813 * immediate data.
814 */
815static inline int is_eth_imm(const struct sk_buff *skb)
816{
817 /*
818 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
819 * which does not accommodate immediate data. We could dike out all
820 * of the support code for immediate data but that would tie our hands
821 * too much if we ever want to enhace the firmware. It would also
822 * create more differences between the PF and VF Drivers.
823 */
824 return false;
825}
826
827/**
828 * calc_tx_flits - calculate the number of flits for a packet TX WR
829 * @skb: the packet
830 *
831 * Returns the number of flits needed for a TX Work Request for the
832 * given Ethernet packet, including the needed WR and CPL headers.
833 */
834static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
835{
836 unsigned int flits;
837
838 /*
839 * If the skb is small enough, we can pump it out as a work request
840 * with only immediate data. In that case we just have to have the
841 * TX Packet header plus the skb data in the Work Request.
842 */
843 if (is_eth_imm(skb))
844 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
845 sizeof(__be64));
846
847 /*
848 * Otherwise, we're going to have to construct a Scatter gather list
849 * of the skb body and fragments. We also include the flits necessary
850 * for the TX Packet Work Request and CPL. We always have a firmware
851 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
852 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
853 * message or, if we're doing a Large Send Offload, an LSO CPL message
854 * with an embeded TX Packet Write CPL message.
855 */
856 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
857 if (skb_shinfo(skb)->gso_size)
858 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
859 sizeof(struct cpl_tx_pkt_lso_core) +
860 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
861 else
862 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
863 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
864 return flits;
865}
866
867/**
868 * write_sgl - populate a Scatter/Gather List for a packet
869 * @skb: the packet
870 * @tq: the TX queue we are writing into
871 * @sgl: starting location for writing the SGL
872 * @end: points right after the end of the SGL
873 * @start: start offset into skb main-body data to include in the SGL
874 * @addr: the list of DMA bus addresses for the SGL elements
875 *
876 * Generates a Scatter/Gather List for the buffers that make up a packet.
877 * The caller must provide adequate space for the SGL that will be written.
878 * The SGL includes all of the packet's page fragments and the data in its
879 * main body except for the first @start bytes. @pos must be 16-byte
880 * aligned and within a TX descriptor with available space. @end points
881 * write after the end of the SGL but does not account for any potential
882 * wrap around, i.e., @end > @tq->stat.
883 */
884static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
885 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
886 const dma_addr_t *addr)
887{
888 unsigned int i, len;
889 struct ulptx_sge_pair *to;
890 const struct skb_shared_info *si = skb_shinfo(skb);
891 unsigned int nfrags = si->nr_frags;
892 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
893
894 len = skb_headlen(skb) - start;
895 if (likely(len)) {
896 sgl->len0 = htonl(len);
897 sgl->addr0 = cpu_to_be64(addr[0] + start);
898 nfrags++;
899 } else {
900 sgl->len0 = htonl(si->frags[0].size);
901 sgl->addr0 = cpu_to_be64(addr[1]);
902 }
903
904 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
905 ULPTX_NSGE(nfrags));
906 if (likely(--nfrags == 0))
907 return;
908 /*
909 * Most of the complexity below deals with the possibility we hit the
910 * end of the queue in the middle of writing the SGL. For this case
911 * only we create the SGL in a temporary buffer and then copy it.
912 */
913 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
914
915 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
916 to->len[0] = cpu_to_be32(si->frags[i].size);
917 to->len[1] = cpu_to_be32(si->frags[++i].size);
918 to->addr[0] = cpu_to_be64(addr[i]);
919 to->addr[1] = cpu_to_be64(addr[++i]);
920 }
921 if (nfrags) {
922 to->len[0] = cpu_to_be32(si->frags[i].size);
923 to->len[1] = cpu_to_be32(0);
924 to->addr[0] = cpu_to_be64(addr[i + 1]);
925 }
926 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
927 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
928
929 if (likely(part0))
930 memcpy(sgl->sge, buf, part0);
931 part1 = (u8 *)end - (u8 *)tq->stat;
932 memcpy(tq->desc, (u8 *)buf + part0, part1);
933 end = (void *)tq->desc + part1;
934 }
935 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
936 *(u64 *)end = 0;
937}
938
939/**
940 * check_ring_tx_db - check and potentially ring a TX queue's doorbell
941 * @adapter: the adapter
942 * @tq: the TX queue
943 * @n: number of new descriptors to give to HW
944 *
945 * Ring the doorbel for a TX queue.
946 */
947static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
948 int n)
949{
950 /*
951 * Warn if we write doorbells with the wrong priority and write
952 * descriptors before telling HW.
953 */
954 WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
955 wmb();
956 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
957 QID(tq->cntxt_id) | PIDX(n));
958}
959
960/**
961 * inline_tx_skb - inline a packet's data into TX descriptors
962 * @skb: the packet
963 * @tq: the TX queue where the packet will be inlined
964 * @pos: starting position in the TX queue to inline the packet
965 *
966 * Inline a packet's contents directly into TX descriptors, starting at
967 * the given position within the TX DMA ring.
968 * Most of the complexity of this operation is dealing with wrap arounds
969 * in the middle of the packet we want to inline.
970 */
971static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
972 void *pos)
973{
974 u64 *p;
975 int left = (void *)tq->stat - pos;
976
977 if (likely(skb->len <= left)) {
978 if (likely(!skb->data_len))
979 skb_copy_from_linear_data(skb, pos, skb->len);
980 else
981 skb_copy_bits(skb, 0, pos, skb->len);
982 pos += skb->len;
983 } else {
984 skb_copy_bits(skb, 0, pos, left);
985 skb_copy_bits(skb, left, tq->desc, skb->len - left);
986 pos = (void *)tq->desc + (skb->len - left);
987 }
988
989 /* 0-pad to multiple of 16 */
990 p = PTR_ALIGN(pos, 8);
991 if ((uintptr_t)p & 8)
992 *p = 0;
993}
994
995/*
996 * Figure out what HW csum a packet wants and return the appropriate control
997 * bits.
998 */
999static u64 hwcsum(const struct sk_buff *skb)
1000{
1001 int csum_type;
1002 const struct iphdr *iph = ip_hdr(skb);
1003
1004 if (iph->version == 4) {
1005 if (iph->protocol == IPPROTO_TCP)
1006 csum_type = TX_CSUM_TCPIP;
1007 else if (iph->protocol == IPPROTO_UDP)
1008 csum_type = TX_CSUM_UDPIP;
1009 else {
1010nocsum:
1011 /*
1012 * unknown protocol, disable HW csum
1013 * and hope a bad packet is detected
1014 */
1015 return TXPKT_L4CSUM_DIS;
1016 }
1017 } else {
1018 /*
1019 * this doesn't work with extension headers
1020 */
1021 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1022
1023 if (ip6h->nexthdr == IPPROTO_TCP)
1024 csum_type = TX_CSUM_TCPIP6;
1025 else if (ip6h->nexthdr == IPPROTO_UDP)
1026 csum_type = TX_CSUM_UDPIP6;
1027 else
1028 goto nocsum;
1029 }
1030
1031 if (likely(csum_type >= TX_CSUM_TCPIP))
1032 return TXPKT_CSUM_TYPE(csum_type) |
1033 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1034 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1035 else {
1036 int start = skb_transport_offset(skb);
1037
1038 return TXPKT_CSUM_TYPE(csum_type) |
1039 TXPKT_CSUM_START(start) |
1040 TXPKT_CSUM_LOC(start + skb->csum_offset);
1041 }
1042}
1043
1044/*
1045 * Stop an Ethernet TX queue and record that state change.
1046 */
1047static void txq_stop(struct sge_eth_txq *txq)
1048{
1049 netif_tx_stop_queue(txq->txq);
1050 txq->q.stops++;
1051}
1052
1053/*
1054 * Advance our software state for a TX queue by adding n in use descriptors.
1055 */
1056static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1057{
1058 tq->in_use += n;
1059 tq->pidx += n;
1060 if (tq->pidx >= tq->size)
1061 tq->pidx -= tq->size;
1062}
1063
1064/**
1065 * t4vf_eth_xmit - add a packet to an Ethernet TX queue
1066 * @skb: the packet
1067 * @dev: the egress net device
1068 *
1069 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
1070 */
1071int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1072{
1073 u32 wr_mid;
1074 u64 cntrl, *end;
1075 int qidx, credits;
1076 unsigned int flits, ndesc;
1077 struct adapter *adapter;
1078 struct sge_eth_txq *txq;
1079 const struct port_info *pi;
1080 struct fw_eth_tx_pkt_vm_wr *wr;
1081 struct cpl_tx_pkt_core *cpl;
1082 const struct skb_shared_info *ssi;
1083 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1084 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
1085 sizeof(wr->ethmacsrc) +
1086 sizeof(wr->ethtype) +
1087 sizeof(wr->vlantci));
1088
1089 /*
1090 * The chip minimum packet length is 10 octets but the firmware
1091 * command that we are using requires that we copy the Ethernet header
1092 * (including the VLAN tag) into the header so we reject anything
1093 * smaller than that ...
1094 */
1095 if (unlikely(skb->len < fw_hdr_copy_len))
1096 goto out_free;
1097
1098 /*
1099 * Figure out which TX Queue we're going to use.
1100 */
1101 pi = netdev_priv(dev);
1102 adapter = pi->adapter;
1103 qidx = skb_get_queue_mapping(skb);
1104 BUG_ON(qidx >= pi->nqsets);
1105 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1106
1107 /*
1108 * Take this opportunity to reclaim any TX Descriptors whose DMA
1109 * transfers have completed.
1110 */
1111 reclaim_completed_tx(adapter, &txq->q, true);
1112
1113 /*
1114 * Calculate the number of flits and TX Descriptors we're going to
1115 * need along with how many TX Descriptors will be left over after
1116 * we inject our Work Request.
1117 */
1118 flits = calc_tx_flits(skb);
1119 ndesc = flits_to_desc(flits);
1120 credits = txq_avail(&txq->q) - ndesc;
1121
1122 if (unlikely(credits < 0)) {
1123 /*
1124 * Not enough room for this packet's Work Request. Stop the
1125 * TX Queue and return a "busy" condition. The queue will get
1126 * started later on when the firmware informs us that space
1127 * has opened up.
1128 */
1129 txq_stop(txq);
1130 dev_err(adapter->pdev_dev,
1131 "%s: TX ring %u full while queue awake!\n",
1132 dev->name, qidx);
1133 return NETDEV_TX_BUSY;
1134 }
1135
1136 if (!is_eth_imm(skb) &&
1137 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1138 /*
1139 * We need to map the skb into PCI DMA space (because it can't
1140 * be in-lined directly into the Work Request) and the mapping
1141 * operation failed. Record the error and drop the packet.
1142 */
1143 txq->mapping_err++;
1144 goto out_free;
1145 }
1146
1147 wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
1148 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1149 /*
1150 * After we're done injecting the Work Request for this
1151 * packet, we'll be below our "stop threshhold" so stop the TX
1152 * Queue now and schedule a request for an SGE Egress Queue
1153 * Update message. The queue will get started later on when
1154 * the firmware processes this Work Request and sends us an
1155 * Egress Queue Status Update message indicating that space
1156 * has opened up.
1157 */
1158 txq_stop(txq);
1159 wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
1160 }
1161
1162 /*
1163 * Start filling in our Work Request. Note that we do _not_ handle
1164 * the WR Header wrapping around the TX Descriptor Ring. If our
1165 * maximum header size ever exceeds one TX Descriptor, we'll need to
1166 * do something else here.
1167 */
1168 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1169 wr = (void *)&txq->q.desc[txq->q.pidx];
1170 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1171 wr->r3[0] = cpu_to_be64(0);
1172 wr->r3[1] = cpu_to_be64(0);
1173 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1174 end = (u64 *)wr + flits;
1175
1176 /*
1177 * If this is a Large Send Offload packet we'll put in an LSO CPL
1178 * message with an encapsulated TX Packet CPL message. Otherwise we
1179 * just use a TX Packet CPL message.
1180 */
1181 ssi = skb_shinfo(skb);
1182 if (ssi->gso_size) {
1183 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1184 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1185 int l3hdr_len = skb_network_header_len(skb);
1186 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1187
1188 wr->op_immdlen =
1189 cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
1190 FW_WR_IMMDLEN(sizeof(*lso) +
1191 sizeof(*cpl)));
1192 /*
1193 * Fill in the LSO CPL message.
1194 */
1195 lso->lso_ctrl =
1196 cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
1197 LSO_FIRST_SLICE |
1198 LSO_LAST_SLICE |
1199 LSO_IPV6(v6) |
1200 LSO_ETHHDR_LEN(eth_xtra_len/4) |
1201 LSO_IPHDR_LEN(l3hdr_len/4) |
1202 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1203 lso->ipid_ofst = cpu_to_be16(0);
1204 lso->mss = cpu_to_be16(ssi->gso_size);
1205 lso->seqno_offset = cpu_to_be32(0);
1206 lso->len = cpu_to_be32(skb->len);
1207
1208 /*
1209 * Set up TX Packet CPL pointer, control word and perform
1210 * accounting.
1211 */
1212 cpl = (void *)(lso + 1);
1213 cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1214 TXPKT_IPHDR_LEN(l3hdr_len) |
1215 TXPKT_ETHHDR_LEN(eth_xtra_len));
1216 txq->tso++;
1217 txq->tx_cso += ssi->gso_segs;
1218 } else {
1219 int len;
1220
1221 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1222 wr->op_immdlen =
1223 cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
1224 FW_WR_IMMDLEN(len));
1225
1226 /*
1227 * Set up TX Packet CPL pointer, control word and perform
1228 * accounting.
1229 */
1230 cpl = (void *)(wr + 1);
1231 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1232 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1233 txq->tx_cso++;
1234 } else
1235 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1236 }
1237
1238 /*
1239 * If there's a VLAN tag present, add that to the list of things to
1240 * do in this Work Request.
1241 */
1242 if (vlan_tx_tag_present(skb)) {
1243 txq->vlan_ins++;
1244 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
1245 }
1246
1247 /*
1248 * Fill in the TX Packet CPL message header.
1249 */
1250 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1251 TXPKT_INTF(pi->port_id) |
1252 TXPKT_PF(0));
1253 cpl->pack = cpu_to_be16(0);
1254 cpl->len = cpu_to_be16(skb->len);
1255 cpl->ctrl1 = cpu_to_be64(cntrl);
1256
1257#ifdef T4_TRACE
1258 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1259 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1260 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1261#endif
1262
1263 /*
1264 * Fill in the body of the TX Packet CPL message with either in-lined
1265 * data or a Scatter/Gather List.
1266 */
1267 if (is_eth_imm(skb)) {
1268 /*
1269 * In-line the packet's data and free the skb since we don't
1270 * need it any longer.
1271 */
1272 inline_tx_skb(skb, &txq->q, cpl + 1);
1273 dev_kfree_skb(skb);
1274 } else {
1275 /*
1276 * Write the skb's Scatter/Gather list into the TX Packet CPL
1277 * message and retain a pointer to the skb so we can free it
1278 * later when its DMA completes. (We store the skb pointer
1279 * in the Software Descriptor corresponding to the last TX
1280 * Descriptor used by the Work Request.)
1281 *
1282 * The retained skb will be freed when the corresponding TX
1283 * Descriptors are reclaimed after their DMAs complete.
1284 * However, this could take quite a while since, in general,
1285 * the hardware is set up to be lazy about sending DMA
1286 * completion notifications to us and we mostly perform TX
1287 * reclaims in the transmit routine.
1288 *
1289 * This is good for performamce but means that we rely on new
1290 * TX packets arriving to run the destructors of completed
1291 * packets, which open up space in their sockets' send queues.
1292 * Sometimes we do not get such new packets causing TX to
1293 * stall. A single UDP transmitter is a good example of this
1294 * situation. We have a clean up timer that periodically
1295 * reclaims completed packets but it doesn't run often enough
1296 * (nor do we want it to) to prevent lengthy stalls. A
1297 * solution to this problem is to run the destructor early,
1298 * after the packet is queued but before it's DMAd. A con is
1299 * that we lie to socket memory accounting, but the amount of
1300 * extra memory is reasonable (limited by the number of TX
1301 * descriptors), the packets do actually get freed quickly by
1302 * new packets almost always, and for protocols like TCP that
1303 * wait for acks to really free up the data the extra memory
1304 * is even less. On the positive side we run the destructors
1305 * on the sending CPU rather than on a potentially different
1306 * completing CPU, usually a good thing.
1307 *
1308 * Run the destructor before telling the DMA engine about the
1309 * packet to make sure it doesn't complete and get freed
1310 * prematurely.
1311 */
1312 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1313 struct sge_txq *tq = &txq->q;
1314 int last_desc;
1315
1316 /*
1317 * If the Work Request header was an exact multiple of our TX
1318 * Descriptor length, then it's possible that the starting SGL
1319 * pointer lines up exactly with the end of our TX Descriptor
1320 * ring. If that's the case, wrap around to the beginning
1321 * here ...
1322 */
1323 if (unlikely((void *)sgl == (void *)tq->stat)) {
1324 sgl = (void *)tq->desc;
1325 end = (void *)((void *)tq->desc +
1326 ((void *)end - (void *)tq->stat));
1327 }
1328
1329 write_sgl(skb, tq, sgl, end, 0, addr);
1330 skb_orphan(skb);
1331
1332 last_desc = tq->pidx + ndesc - 1;
1333 if (last_desc >= tq->size)
1334 last_desc -= tq->size;
1335 tq->sdesc[last_desc].skb = skb;
1336 tq->sdesc[last_desc].sgl = sgl;
1337 }
1338
1339 /*
1340 * Advance our internal TX Queue state, tell the hardware about
1341 * the new TX descriptors and return success.
1342 */
1343 txq_advance(&txq->q, ndesc);
1344 dev->trans_start = jiffies;
1345 ring_tx_db(adapter, &txq->q, ndesc);
1346 return NETDEV_TX_OK;
1347
1348out_free:
1349 /*
1350 * An error of some sort happened. Free the TX skb and tell the
1351 * OS that we've "dealt" with the packet ...
1352 */
1353 dev_kfree_skb(skb);
1354 return NETDEV_TX_OK;
1355}
1356
1357/**
1358 * t4vf_pktgl_free - free a packet gather list
1359 * @gl: the gather list
1360 *
1361 * Releases the pages of a packet gather list. We do not own the last
1362 * page on the list and do not free it.
1363 */
1364void t4vf_pktgl_free(const struct pkt_gl *gl)
1365{
1366 int frag;
1367
1368 frag = gl->nfrags - 1;
1369 while (frag--)
1370 put_page(gl->frags[frag].page);
1371}
1372
1373/**
1374 * copy_frags - copy fragments from gather list into skb_shared_info
1375 * @si: destination skb shared info structure
1376 * @gl: source internal packet gather list
1377 * @offset: packet start offset in first page
1378 *
1379 * Copy an internal packet gather list into a Linux skb_shared_info
1380 * structure.
1381 */
1382static inline void copy_frags(struct skb_shared_info *si,
1383 const struct pkt_gl *gl,
1384 unsigned int offset)
1385{
1386 unsigned int n;
1387
1388 /* usually there's just one frag */
1389 si->frags[0].page = gl->frags[0].page;
1390 si->frags[0].page_offset = gl->frags[0].page_offset + offset;
1391 si->frags[0].size = gl->frags[0].size - offset;
1392 si->nr_frags = gl->nfrags;
1393
1394 n = gl->nfrags - 1;
1395 if (n)
1396 memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
1397
1398 /* get a reference to the last page, we don't own it */
1399 get_page(gl->frags[n].page);
1400}
1401
1402/**
1403 * do_gro - perform Generic Receive Offload ingress packet processing
1404 * @rxq: ingress RX Ethernet Queue
1405 * @gl: gather list for ingress packet
1406 * @pkt: CPL header for last packet fragment
1407 *
1408 * Perform Generic Receive Offload (GRO) ingress packet processing.
1409 * We use the standard Linux GRO interfaces for this.
1410 */
1411static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1412 const struct cpl_rx_pkt *pkt)
1413{
1414 int ret;
1415 struct sk_buff *skb;
1416
1417 skb = napi_get_frags(&rxq->rspq.napi);
1418 if (unlikely(!skb)) {
1419 t4vf_pktgl_free(gl);
1420 rxq->stats.rx_drops++;
1421 return;
1422 }
1423
1424 copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
1425 skb->len = gl->tot_len - PKTSHIFT;
1426 skb->data_len = skb->len;
1427 skb->truesize += skb->data_len;
1428 skb->ip_summed = CHECKSUM_UNNECESSARY;
1429 skb_record_rx_queue(skb, rxq->rspq.idx);
1430
1431 if (unlikely(pkt->vlan_ex)) {
1432 struct port_info *pi = netdev_priv(rxq->rspq.netdev);
1433 struct vlan_group *grp = pi->vlan_grp;
1434
1435 rxq->stats.vlan_ex++;
1436 if (likely(grp)) {
1437 ret = vlan_gro_frags(&rxq->rspq.napi, grp,
1438 be16_to_cpu(pkt->vlan));
1439 goto stats;
1440 }
1441 }
1442 ret = napi_gro_frags(&rxq->rspq.napi);
1443
1444stats:
1445 if (ret == GRO_HELD)
1446 rxq->stats.lro_pkts++;
1447 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1448 rxq->stats.lro_merged++;
1449 rxq->stats.pkts++;
1450 rxq->stats.rx_cso++;
1451}
1452
1453/**
1454 * t4vf_ethrx_handler - process an ingress ethernet packet
1455 * @rspq: the response queue that received the packet
1456 * @rsp: the response queue descriptor holding the RX_PKT message
1457 * @gl: the gather list of packet fragments
1458 *
1459 * Process an ingress ethernet packet and deliver it to the stack.
1460 */
1461int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1462 const struct pkt_gl *gl)
1463{
1464 struct sk_buff *skb;
1465 struct port_info *pi;
1466 struct skb_shared_info *ssi;
1467 const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
1468 bool csum_ok = pkt->csum_calc && !pkt->err_vec;
1469 unsigned int len = be16_to_cpu(pkt->len);
1470 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1471
1472 /*
1473 * If this is a good TCP packet and we have Generic Receive Offload
1474 * enabled, handle the packet in the GRO path.
1475 */
1476 if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
1477 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1478 !pkt->ip_frag) {
1479 do_gro(rxq, gl, pkt);
1480 return 0;
1481 }
1482
1483 /*
1484 * If the ingress packet is small enough, allocate an skb large enough
1485 * for all of the data and copy it inline. Otherwise, allocate an skb
1486 * with enough room to pull in the header and reference the rest of
1487 * the data via the skb fragment list.
1488 */
1489 if (len <= RX_COPY_THRES) {
1490 /* small packets have only one fragment */
1491 skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
1492 if (!skb)
1493 goto nomem;
1494 __skb_put(skb, gl->frags[0].size);
1495 skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
1496 } else {
1497 skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
1498 if (!skb)
1499 goto nomem;
1500 __skb_put(skb, RX_PKT_PULL_LEN);
1501 skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
1502
1503 ssi = skb_shinfo(skb);
1504 ssi->frags[0].page = gl->frags[0].page;
1505 ssi->frags[0].page_offset = (gl->frags[0].page_offset +
1506 RX_PKT_PULL_LEN);
1507 ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
1508 if (gl->nfrags > 1)
1509 memcpy(&ssi->frags[1], &gl->frags[1],
1510 (gl->nfrags-1) * sizeof(skb_frag_t));
1511 ssi->nr_frags = gl->nfrags;
1512 skb->len = len + PKTSHIFT;
1513 skb->data_len = skb->len - RX_PKT_PULL_LEN;
1514 skb->truesize += skb->data_len;
1515
1516 /* Get a reference for the last page, we don't own it */
1517 get_page(gl->frags[gl->nfrags - 1].page);
1518 }
1519
1520 __skb_pull(skb, PKTSHIFT);
1521 skb->protocol = eth_type_trans(skb, rspq->netdev);
1522 skb_record_rx_queue(skb, rspq->idx);
1523 skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */
1524 pi = netdev_priv(skb->dev);
1525 rxq->stats.pkts++;
1526
1527 if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec &&
1528 (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
1529 if (!pkt->ip_frag)
1530 skb->ip_summed = CHECKSUM_UNNECESSARY;
1531 else {
1532 __sum16 c = (__force __sum16)pkt->csum;
1533 skb->csum = csum_unfold(c);
1534 skb->ip_summed = CHECKSUM_COMPLETE;
1535 }
1536 rxq->stats.rx_cso++;
1537 } else
1538 skb->ip_summed = CHECKSUM_NONE;
1539
1540 if (unlikely(pkt->vlan_ex)) {
1541 struct vlan_group *grp = pi->vlan_grp;
1542
1543 rxq->stats.vlan_ex++;
1544 if (likely(grp))
1545 vlan_hwaccel_receive_skb(skb, grp,
1546 be16_to_cpu(pkt->vlan));
1547 else
1548 dev_kfree_skb_any(skb);
1549 } else
1550 netif_receive_skb(skb);
1551
1552 return 0;
1553
1554nomem:
1555 t4vf_pktgl_free(gl);
1556 rxq->stats.rx_drops++;
1557 return 0;
1558}
1559
1560/**
1561 * is_new_response - check if a response is newly written
1562 * @rc: the response control descriptor
1563 * @rspq: the response queue
1564 *
1565 * Returns true if a response descriptor contains a yet unprocessed
1566 * response.
1567 */
1568static inline bool is_new_response(const struct rsp_ctrl *rc,
1569 const struct sge_rspq *rspq)
1570{
1571 return RSPD_GEN(rc->type_gen) == rspq->gen;
1572}
1573
1574/**
1575 * restore_rx_bufs - put back a packet's RX buffers
1576 * @gl: the packet gather list
1577 * @fl: the SGE Free List
1578 * @nfrags: how many fragments in @si
1579 *
1580 * Called when we find out that the current packet, @si, can't be
1581 * processed right away for some reason. This is a very rare event and
1582 * there's no effort to make this suspension/resumption process
1583 * particularly efficient.
1584 *
1585 * We implement the suspension by putting all of the RX buffers associated
1586 * with the current packet back on the original Free List. The buffers
1587 * have already been unmapped and are left unmapped, we mark them as
1588 * unmapped in order to prevent further unmapping attempts. (Effectively
1589 * this function undoes the series of @unmap_rx_buf calls which were done
1590 * to create the current packet's gather list.) This leaves us ready to
1591 * restart processing of the packet the next time we start processing the
1592 * RX Queue ...
1593 */
1594static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1595 int frags)
1596{
1597 struct rx_sw_desc *sdesc;
1598
1599 while (frags--) {
1600 if (fl->cidx == 0)
1601 fl->cidx = fl->size - 1;
1602 else
1603 fl->cidx--;
1604 sdesc = &fl->sdesc[fl->cidx];
1605 sdesc->page = gl->frags[frags].page;
1606 sdesc->dma_addr |= RX_UNMAPPED_BUF;
1607 fl->avail++;
1608 }
1609}
1610
1611/**
1612 * rspq_next - advance to the next entry in a response queue
1613 * @rspq: the queue
1614 *
1615 * Updates the state of a response queue to advance it to the next entry.
1616 */
1617static inline void rspq_next(struct sge_rspq *rspq)
1618{
1619 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1620 if (unlikely(++rspq->cidx == rspq->size)) {
1621 rspq->cidx = 0;
1622 rspq->gen ^= 1;
1623 rspq->cur_desc = rspq->desc;
1624 }
1625}
1626
1627/**
1628 * process_responses - process responses from an SGE response queue
1629 * @rspq: the ingress response queue to process
1630 * @budget: how many responses can be processed in this round
1631 *
1632 * Process responses from a Scatter Gather Engine response queue up to
1633 * the supplied budget. Responses include received packets as well as
1634 * control messages from firmware or hardware.
1635 *
1636 * Additionally choose the interrupt holdoff time for the next interrupt
1637 * on this queue. If the system is under memory shortage use a fairly
1638 * long delay to help recovery.
1639 */
1640int process_responses(struct sge_rspq *rspq, int budget)
1641{
1642 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1643 int budget_left = budget;
1644
1645 while (likely(budget_left)) {
1646 int ret, rsp_type;
1647 const struct rsp_ctrl *rc;
1648
1649 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1650 if (!is_new_response(rc, rspq))
1651 break;
1652
1653 /*
1654 * Figure out what kind of response we've received from the
1655 * SGE.
1656 */
1657 rmb();
1658 rsp_type = RSPD_TYPE(rc->type_gen);
1659 if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1660 skb_frag_t *fp;
1661 struct pkt_gl gl;
1662 const struct rx_sw_desc *sdesc;
1663 u32 bufsz, frag;
1664 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1665
1666 /*
1667 * If we get a "new buffer" message from the SGE we
1668 * need to move on to the next Free List buffer.
1669 */
1670 if (len & RSPD_NEWBUF) {
1671 /*
1672 * We get one "new buffer" message when we
1673 * first start up a queue so we need to ignore
1674 * it when our offset into the buffer is 0.
1675 */
1676 if (likely(rspq->offset > 0)) {
1677 free_rx_bufs(rspq->adapter, &rxq->fl,
1678 1);
1679 rspq->offset = 0;
1680 }
1681 len = RSPD_LEN(len);
1682 }
1683
1684 /*
1685 * Gather packet fragments.
1686 */
1687 for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1688 BUG_ON(frag >= MAX_SKB_FRAGS);
1689 BUG_ON(rxq->fl.avail == 0);
1690 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1691 bufsz = get_buf_size(sdesc);
1692 fp->page = sdesc->page;
1693 fp->page_offset = rspq->offset;
1694 fp->size = min(bufsz, len);
1695 len -= fp->size;
1696 if (!len)
1697 break;
1698 unmap_rx_buf(rspq->adapter, &rxq->fl);
1699 }
1700 gl.nfrags = frag+1;
1701
1702 /*
1703 * Last buffer remains mapped so explicitly make it
1704 * coherent for CPU access and start preloading first
1705 * cache line ...
1706 */
1707 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1708 get_buf_addr(sdesc),
1709 fp->size, DMA_FROM_DEVICE);
1710 gl.va = (page_address(gl.frags[0].page) +
1711 gl.frags[0].page_offset);
1712 prefetch(gl.va);
1713
1714 /*
1715 * Hand the new ingress packet to the handler for
1716 * this Response Queue.
1717 */
1718 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1719 if (likely(ret == 0))
1720 rspq->offset += ALIGN(fp->size, FL_ALIGN);
1721 else
1722 restore_rx_bufs(&gl, &rxq->fl, frag);
1723 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1724 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1725 } else {
1726 WARN_ON(rsp_type > RSP_TYPE_CPL);
1727 ret = 0;
1728 }
1729
1730 if (unlikely(ret)) {
1731 /*
1732 * Couldn't process descriptor, back off for recovery.
1733 * We use the SGE's last timer which has the longest
1734 * interrupt coalescing value ...
1735 */
1736 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1737 rspq->next_intr_params =
1738 QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
1739 break;
1740 }
1741
1742 rspq_next(rspq);
1743 budget_left--;
1744 }
1745
1746 /*
1747 * If this is a Response Queue with an associated Free List and
1748 * at least two Egress Queue units available in the Free List
1749 * for new buffer pointers, refill the Free List.
1750 */
1751 if (rspq->offset >= 0 &&
1752 rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1753 __refill_fl(rspq->adapter, &rxq->fl);
1754 return budget - budget_left;
1755}
1756
1757/**
1758 * napi_rx_handler - the NAPI handler for RX processing
1759 * @napi: the napi instance
1760 * @budget: how many packets we can process in this round
1761 *
1762 * Handler for new data events when using NAPI. This does not need any
1763 * locking or protection from interrupts as data interrupts are off at
1764 * this point and other adapter interrupts do not interfere (the latter
1765 * in not a concern at all with MSI-X as non-data interrupts then have
1766 * a separate handler).
1767 */
1768static int napi_rx_handler(struct napi_struct *napi, int budget)
1769{
1770 unsigned int intr_params;
1771 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1772 int work_done = process_responses(rspq, budget);
1773
1774 if (likely(work_done < budget)) {
1775 napi_complete(napi);
1776 intr_params = rspq->next_intr_params;
1777 rspq->next_intr_params = rspq->intr_params;
1778 } else
1779 intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
1780
1781 if (unlikely(work_done == 0))
1782 rspq->unhandled_irqs++;
1783
1784 t4_write_reg(rspq->adapter,
1785 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1786 CIDXINC(work_done) |
1787 INGRESSQID((u32)rspq->cntxt_id) |
1788 SEINTARM(intr_params));
1789 return work_done;
1790}
1791
1792/*
1793 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1794 * (i.e., response queue serviced by NAPI polling).
1795 */
1796irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1797{
1798 struct sge_rspq *rspq = cookie;
1799
1800 napi_schedule(&rspq->napi);
1801 return IRQ_HANDLED;
1802}
1803
1804/*
1805 * Process the indirect interrupt entries in the interrupt queue and kick off
1806 * NAPI for each queue that has generated an entry.
1807 */
1808static unsigned int process_intrq(struct adapter *adapter)
1809{
1810 struct sge *s = &adapter->sge;
1811 struct sge_rspq *intrq = &s->intrq;
1812 unsigned int work_done;
1813
1814 spin_lock(&adapter->sge.intrq_lock);
1815 for (work_done = 0; ; work_done++) {
1816 const struct rsp_ctrl *rc;
1817 unsigned int qid, iq_idx;
1818 struct sge_rspq *rspq;
1819
1820 /*
1821 * Grab the next response from the interrupt queue and bail
1822 * out if it's not a new response.
1823 */
1824 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1825 if (!is_new_response(rc, intrq))
1826 break;
1827
1828 /*
1829 * If the response isn't a forwarded interrupt message issue a
1830 * error and go on to the next response message. This should
1831 * never happen ...
1832 */
1833 rmb();
1834 if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
1835 dev_err(adapter->pdev_dev,
1836 "Unexpected INTRQ response type %d\n",
1837 RSPD_TYPE(rc->type_gen));
1838 continue;
1839 }
1840
1841 /*
1842 * Extract the Queue ID from the interrupt message and perform
1843 * sanity checking to make sure it really refers to one of our
1844 * Ingress Queues which is active and matches the queue's ID.
1845 * None of these error conditions should ever happen so we may
1846 * want to either make them fatal and/or conditionalized under
1847 * DEBUG.
1848 */
1849 qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
1850 iq_idx = IQ_IDX(s, qid);
1851 if (unlikely(iq_idx >= MAX_INGQ)) {
1852 dev_err(adapter->pdev_dev,
1853 "Ingress QID %d out of range\n", qid);
1854 continue;
1855 }
1856 rspq = s->ingr_map[iq_idx];
1857 if (unlikely(rspq == NULL)) {
1858 dev_err(adapter->pdev_dev,
1859 "Ingress QID %d RSPQ=NULL\n", qid);
1860 continue;
1861 }
1862 if (unlikely(rspq->abs_id != qid)) {
1863 dev_err(adapter->pdev_dev,
1864 "Ingress QID %d refers to RSPQ %d\n",
1865 qid, rspq->abs_id);
1866 continue;
1867 }
1868
1869 /*
1870 * Schedule NAPI processing on the indicated Response Queue
1871 * and move on to the next entry in the Forwarded Interrupt
1872 * Queue.
1873 */
1874 napi_schedule(&rspq->napi);
1875 rspq_next(intrq);
1876 }
1877
1878 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1879 CIDXINC(work_done) |
1880 INGRESSQID(intrq->cntxt_id) |
1881 SEINTARM(intrq->intr_params));
1882
1883 spin_unlock(&adapter->sge.intrq_lock);
1884
1885 return work_done;
1886}
1887
1888/*
1889 * The MSI interrupt handler handles data events from SGE response queues as
1890 * well as error and other async events as they all use the same MSI vector.
1891 */
1892irqreturn_t t4vf_intr_msi(int irq, void *cookie)
1893{
1894 struct adapter *adapter = cookie;
1895
1896 process_intrq(adapter);
1897 return IRQ_HANDLED;
1898}
1899
1900/**
1901 * t4vf_intr_handler - select the top-level interrupt handler
1902 * @adapter: the adapter
1903 *
1904 * Selects the top-level interrupt handler based on the type of interrupts
1905 * (MSI-X or MSI).
1906 */
1907irq_handler_t t4vf_intr_handler(struct adapter *adapter)
1908{
1909 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
1910 if (adapter->flags & USING_MSIX)
1911 return t4vf_sge_intr_msix;
1912 else
1913 return t4vf_intr_msi;
1914}
1915
1916/**
1917 * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
1918 * @data: the adapter
1919 *
1920 * Runs periodically from a timer to perform maintenance of SGE RX queues.
1921 *
1922 * a) Replenishes RX queues that have run out due to memory shortage.
1923 * Normally new RX buffers are added when existing ones are consumed but
1924 * when out of memory a queue can become empty. We schedule NAPI to do
1925 * the actual refill.
1926 */
1927static void sge_rx_timer_cb(unsigned long data)
1928{
1929 struct adapter *adapter = (struct adapter *)data;
1930 struct sge *s = &adapter->sge;
1931 unsigned int i;
1932
1933 /*
1934 * Scan the "Starving Free Lists" flag array looking for any Free
1935 * Lists in need of more free buffers. If we find one and it's not
1936 * being actively polled, then bump its "starving" counter and attempt
1937 * to refill it. If we're successful in adding enough buffers to push
1938 * the Free List over the starving threshold, then we can clear its
1939 * "starving" status.
1940 */
1941 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
1942 unsigned long m;
1943
1944 for (m = s->starving_fl[i]; m; m &= m - 1) {
1945 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
1946 struct sge_fl *fl = s->egr_map[id];
1947
1948 clear_bit(id, s->starving_fl);
1949 smp_mb__after_clear_bit();
1950
1951 /*
1952 * Since we are accessing fl without a lock there's a
1953 * small probability of a false positive where we
1954 * schedule napi but the FL is no longer starving.
1955 * No biggie.
1956 */
1957 if (fl_starving(fl)) {
1958 struct sge_eth_rxq *rxq;
1959
1960 rxq = container_of(fl, struct sge_eth_rxq, fl);
1961 if (napi_reschedule(&rxq->rspq.napi))
1962 fl->starving++;
1963 else
1964 set_bit(id, s->starving_fl);
1965 }
1966 }
1967 }
1968
1969 /*
1970 * Reschedule the next scan for starving Free Lists ...
1971 */
1972 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
1973}
1974
1975/**
1976 * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
1977 * @data: the adapter
1978 *
1979 * Runs periodically from a timer to perform maintenance of SGE TX queues.
1980 *
1981 * b) Reclaims completed Tx packets for the Ethernet queues. Normally
1982 * packets are cleaned up by new Tx packets, this timer cleans up packets
1983 * when no new packets are being submitted. This is essential for pktgen,
1984 * at least.
1985 */
1986static void sge_tx_timer_cb(unsigned long data)
1987{
1988 struct adapter *adapter = (struct adapter *)data;
1989 struct sge *s = &adapter->sge;
1990 unsigned int i, budget;
1991
1992 budget = MAX_TIMER_TX_RECLAIM;
1993 i = s->ethtxq_rover;
1994 do {
1995 struct sge_eth_txq *txq = &s->ethtxq[i];
1996
1997 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
1998 int avail = reclaimable(&txq->q);
1999
2000 if (avail > budget)
2001 avail = budget;
2002
2003 free_tx_desc(adapter, &txq->q, avail, true);
2004 txq->q.in_use -= avail;
2005 __netif_tx_unlock(txq->txq);
2006
2007 budget -= avail;
2008 if (!budget)
2009 break;
2010 }
2011
2012 i++;
2013 if (i >= s->ethqsets)
2014 i = 0;
2015 } while (i != s->ethtxq_rover);
2016 s->ethtxq_rover = i;
2017
2018 /*
2019 * If we found too many reclaimable packets schedule a timer in the
2020 * near future to continue where we left off. Otherwise the next timer
2021 * will be at its normal interval.
2022 */
2023 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2024}
2025
/**
 *	t4vf_sge_alloc_rxq - allocate an SGE RX Queue
 *	@adapter: the adapter
 *	@rspq: pointer to the new rxq's Response Queue to be filled in
 *	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
 *	@dev: the network device associated with the new rspq
 *	@intr_dest: MSI-X vector index (overridden in MSI mode)
 *	@fl: pointer to the new rxq's Free List to be filled in (may be NULL
 *	    for a response queue without an associated Free List)
 *	@hnd: the interrupt handler to invoke for the rspq
 *
 *	Returns 0 on success or a negative errno; on failure any partially
 *	allocated DMA rings are freed before returning.
 */
int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
		       bool iqasynch, struct net_device *dev,
		       int intr_dest,
		       struct sge_fl *fl, rspq_handler_t hnd)
{
	struct port_info *pi = netdev_priv(dev);
	struct fw_iq_cmd cmd, rpl;
	int ret, iqandst, flsz = 0;

	/*
	 * If we're using MSI interrupts and we're not initializing the
	 * Forwarded Interrupt Queue itself, then set up this queue for
	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
	 * the Forwarded Interrupt Queue must be set up before any other
	 * ingress queue ...
	 */
	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
		iqandst = SGE_INTRDST_IQ;
		intr_dest = adapter->sge.intrq.abs_id;
	} else
		iqandst = SGE_INTRDST_PCI;

	/*
	 * Allocate the hardware ring for the Response Queue.  The size needs
	 * to be a multiple of 16 which includes the mandatory status entry
	 * (regardless of whether the Status Page capabilities are enabled or
	 * not).
	 */
	rspq->size = roundup(rspq->size, 16);
	rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
				0, &rspq->phys_addr, NULL, 0);
	if (!rspq->desc)
		return -ENOMEM;

	/*
	 * Fill in the Ingress Queue Command.  Note: Ideally this code would
	 * be in t4vf_hw.c but there are so many parameters and dependencies
	 * on our Linux SGE state that we would end up having to pass tons of
	 * parameters.  We'll have to think about how this might be migrated
	 * into OS-independent common code ...
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
				    FW_CMD_REQUEST |
				    FW_CMD_WRITE |
				    FW_CMD_EXEC);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
					 FW_IQ_CMD_IQSTART(1) |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
			    FW_IQ_CMD_IQASYNCH(iqasynch) |
			    FW_IQ_CMD_VIID(pi->viid) |
			    FW_IQ_CMD_IQANDST(iqandst) |
			    FW_IQ_CMD_IQANUS(1) |
			    FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
			    FW_IQ_CMD_IQANDSTINDEX(intr_dest));
	cmd.iqdroprss_to_iqesize =
		cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
			    FW_IQ_CMD_IQGTSMODE |
			    FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
			    FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
	cmd.iqsize = cpu_to_be16(rspq->size);
	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);

	if (fl) {
		/*
		 * Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit.
		 */
		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
				      sizeof(__be64), sizeof(struct rx_sw_desc),
				      &fl->addr, &fl->sdesc, STAT_LEN);
		if (!fl->desc) {
			ret = -ENOMEM;
			goto err;
		}

		/*
		 * Calculate the size of the hardware free list ring plus
		 * status page (which the SGE will place at the end of the
		 * free list ring) in Egress Queue Units.
		 */
		flsz = (fl->size / FL_PER_EQ_UNIT +
			STAT_LEN / EQ_UNIT);

		/*
		 * Fill in all the relevant firmware Ingress Queue Command
		 * fields for the free list.
		 */
		cmd.iqns_to_fl0congen =
			cpu_to_be32(
				FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
				FW_IQ_CMD_FL0PACKEN |
				FW_IQ_CMD_FL0PADEN);
		cmd.fl0dcaen_to_fl0cidxfthresh =
			cpu_to_be16(
				FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
				FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
		cmd.fl0size = cpu_to_be16(flsz);
		cmd.fl0addr = cpu_to_be64(fl->addr);
	}

	/*
	 * Issue the firmware Ingress Queue Command and extract the results if
	 * it completes successfully.
	 */
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret)
		goto err;

	/*
	 * The command succeeded: initialize the software state of the
	 * response queue from the firmware reply.
	 */
	netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
	rspq->cur_desc = rspq->desc;
	rspq->cidx = 0;
	rspq->gen = 1;
	rspq->next_intr_params = rspq->intr_params;
	rspq->cntxt_id = be16_to_cpu(rpl.iqid);
	rspq->abs_id = be16_to_cpu(rpl.physiqid);
	rspq->size--;			/* subtract status entry */
	rspq->adapter = adapter;
	rspq->netdev = dev;
	rspq->handler = hnd;

	/* set offset to -1 to distinguish ingress queues without FL */
	rspq->offset = fl ? 0 : -1;

	if (fl) {
		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
		fl->avail = 0;
		fl->pend_cred = 0;
		fl->pidx = 0;
		fl->cidx = 0;
		fl->alloc_failed = 0;
		fl->large_alloc_failed = 0;
		fl->starving = 0;
		/* prime the free list with buffers up to its capacity */
		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
	}

	return 0;

err:
	/*
	 * An error occurred.  Clean up our partial allocation state and
	 * return the error.  (Note that rspq->size has not yet been
	 * decremented on this path, so it still covers the full ring.)
	 */
	if (rspq->desc) {
		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
				  rspq->desc, rspq->phys_addr);
		rspq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
2198
/**
 *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
 *	@adapter: the adapter
 *	@txq: pointer to the new txq to be filled in
 *	@dev: the network device associated with the new txq
 *	@devq: the network TX queue associated with the new txq
 *	@iqid: the relative ingress queue ID to which events relating to
 *		the new txq should be directed
 *
 *	Returns 0 on success or a negative errno; on failure the partially
 *	allocated descriptor rings are freed before returning.
 */
int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
			   struct net_device *dev, struct netdev_queue *devq,
			   unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_eth_cmd cmd, rpl;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * Calculate the size of the hardware TX Queue (including the
	 * status page on the end) in units of TX Descriptors.
	 */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	/*
	 * Allocate the hardware ring for the TX ring (with space for its
	 * status page) along with the associated software descriptor ring.
	 */
	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc),
				 sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
	if (!txq->q.desc)
		return -ENOMEM;

	/*
	 * Fill in the Egress Queue Command.  Note: As with the direct use of
	 * the firmware Ingress Queue Command above in our RXQ allocation
	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
	 * have to see if there's some reasonable way to parameterize it
	 * into the common code ...
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST |
				    FW_CMD_WRITE |
				    FW_CMD_EXEC);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
					 FW_EQ_ETH_CMD_EQSTART |
					 FW_LEN16(cmd));
	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
	cmd.fetchszm_to_iqid =
		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
			    FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
			    FW_EQ_ETH_CMD_IQID(iqid));
	cmd.dcaen_to_eqsize =
		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
			    FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
			    FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
			    FW_EQ_ETH_CMD_EQSIZE(nentries));
	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);

	/*
	 * Issue the firmware Egress Queue Command and extract the results if
	 * it completes successfully.
	 */
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret) {
		/*
		 * The firmware Egress Queue Command failed for some reason.
		 * Free up our partial allocation state and return the error.
		 */
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adapter->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	/* Initialize the software state of the TX queue from the reply. */
	txq->q.in_use = 0;
	txq->q.cidx = 0;
	txq->q.pidx = 0;
	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
	txq->q.abs_id =
		FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
	txq->txq = devq;
	txq->tso = 0;
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->q.stops = 0;
	txq->q.restarts = 0;
	txq->mapping_err = 0;
	return 0;
}
2294
2295/*
2296 * Free the DMA map resources associated with a TX queue.
2297 */
2298static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2299{
2300 dma_free_coherent(adapter->pdev_dev,
2301 tq->size * sizeof(*tq->desc) + STAT_LEN,
2302 tq->desc, tq->phys_addr);
2303 tq->cntxt_id = 0;
2304 tq->sdesc = NULL;
2305 tq->desc = NULL;
2306}
2307
/*
 * Free the resources associated with a response queue (possibly including a
 * free list).  Tells the firmware to free the ingress queue context first,
 * then releases the host-side DMA rings and software state.
 */
static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
			 struct sge_fl *fl)
{
	/* 0xffff tells the firmware "no free list attached" */
	unsigned int flid = fl ? fl->cntxt_id : 0xffff;

	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
		     rspq->cntxt_id, flid, 0xffff);
	/* +1 restores the status entry subtracted at allocation time */
	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
			  rspq->desc, rspq->phys_addr);
	netif_napi_del(&rspq->napi);
	rspq->netdev = NULL;
	rspq->cntxt_id = 0;
	rspq->abs_id = 0;
	rspq->desc = NULL;

	if (fl) {
		/* give back any RX buffers still held by the free list */
		free_rx_bufs(adapter, fl, fl->avail);
		dma_free_coherent(adapter->pdev_dev,
				  fl->size * sizeof(*fl->desc) + STAT_LEN,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
2338
/**
 *	t4vf_free_sge_resources - free SGE resources
 *	@adapter: the adapter
 *
 *	Frees resources used by the SGE queue sets: every Ethernet RX/TX
 *	queue pair, the firmware event queue and (in MSI mode) the forwarded
 *	interrupt queue.  Only queues whose descriptor ring was actually
 *	allocated are touched.
 */
void t4vf_free_sge_resources(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = s->ethrxq;
	struct sge_eth_txq *txq = s->ethtxq;
	struct sge_rspq *evtq = &s->fw_evtq;
	struct sge_rspq *intrq = &s->intrq;
	int qs;

	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
		if (rxq->rspq.desc)
			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
		if (txq->q.desc) {
			/* free the firmware context before reclaiming the
			 * in-flight descriptors and the host rings
			 */
			t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
			free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
			kfree(txq->q.sdesc);
			free_txq(adapter, &txq->q);
		}
	}
	if (evtq->desc)
		free_rspq_fl(adapter, evtq, NULL);
	if (intrq->desc)
		free_rspq_fl(adapter, intrq, NULL);
}
2369
2370/**
2371 * t4vf_sge_start - enable SGE operation
2372 * @adapter: the adapter
2373 *
2374 * Start tasklets and timers associated with the DMA engine.
2375 */
2376void t4vf_sge_start(struct adapter *adapter)
2377{
2378 adapter->sge.ethtxq_rover = 0;
2379 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2380 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2381}
2382
2383/**
2384 * t4vf_sge_stop - disable SGE operation
2385 * @adapter: the adapter
2386 *
2387 * Stop tasklets and timers associated with the DMA engine. Note that
2388 * this is effective only if measures have been taken to disable any HW
2389 * events that may restart them.
2390 */
2391void t4vf_sge_stop(struct adapter *adapter)
2392{
2393 struct sge *s = &adapter->sge;
2394
2395 if (s->rx_timer.function)
2396 del_timer_sync(&s->rx_timer);
2397 if (s->tx_timer.function)
2398 del_timer_sync(&s->tx_timer);
2399}
2400
/**
 *	t4vf_sge_init - initialize SGE
 *	@adapter: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 *
 *	Returns 0 on success, -EINVAL if the PF-provided SGE parameters are
 *	outside what this driver can handle.
 */
int t4vf_sge_init(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 fl0 = sge_params->sge_fl_buffer_size[0];
	u32 fl1 = sge_params->sge_fl_buffer_size[1];
	struct sge *s = &adapter->sge;

	/*
	 * Start by vetting the basic SGE parameters which have been set up by
	 * the Physical Function Driver.  Ideally we should be able to deal
	 * with _any_ configuration.  Practice is different ...
	 * We require FL buffer 0 to be exactly one page and, if a second
	 * buffer size is configured, it must be larger than the first.
	 */
	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
			fl0, fl1);
		return -EINVAL;
	}
	if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Now translate the adapter parameters into our internal forms.
	 * NOTE(review): FL_PG_ORDER, STAT_LEN, PKTSHIFT and FL_ALIGN are
	 * used as lvalues here, so they are presumably macros aliasing
	 * fields of the SGE state defined earlier in this file.
	 */
	if (fl1)
		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
			 SGE_INGPADBOUNDARY_SHIFT);

	/*
	 * Set up tasklet timers.
	 */
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);

	/*
	 * Initialize Forwarded Interrupt Queue lock.
	 */
	spin_lock_init(&s->intrq_lock);

	return 0;
}
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
new file mode 100644
index 000000000000..5c7bde7f9bae
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -0,0 +1,273 @@
1/*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#ifndef __T4VF_COMMON_H__
37#define __T4VF_COMMON_H__
38
39#include "../cxgb4/t4fw_api.h"
40
/*
 * The "len16" field of a Firmware Command Structure: the structure's size
 * expressed in 16-byte units.
 */
#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
45
/*
 * Per-VF statistics, as maintained by the firmware/hardware for each
 * Virtual Interface.
 */
struct t4vf_port_stats {
	/*
	 * TX statistics.
	 */
	u64 tx_bcast_bytes;		/* broadcast */
	u64 tx_bcast_frames;
	u64 tx_mcast_bytes;		/* multicast */
	u64 tx_mcast_frames;
	u64 tx_ucast_bytes;		/* unicast */
	u64 tx_ucast_frames;
	u64 tx_drop_frames;		/* TX dropped frames */
	u64 tx_offload_bytes;		/* offload */
	u64 tx_offload_frames;

	/*
	 * RX statistics.
	 */
	u64 rx_bcast_bytes;		/* broadcast */
	u64 rx_bcast_frames;
	u64 rx_mcast_bytes;		/* multicast */
	u64 rx_mcast_frames;
	u64 rx_ucast_bytes;		/* unicast */
	u64 rx_ucast_frames;

	u64 rx_err_frames;		/* RX error frames */
};
75
/*
 * Per-"port" (Virtual Interface) link configuration ...
 */
struct link_config {
	unsigned int   supported;        /* link capabilities */
	unsigned int   advertising;      /* advertised capabilities */
	unsigned short requested_speed;  /* speed user has requested */
	unsigned short speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
};

/*
 * Flow-control bit flags used in the requested_fc/fc fields above.
 */
enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};
95
/*
 * General device parameters ...
 */
struct dev_params {
	u32 fwrev;			/* firmware version */
	u32 tprev;			/* TP Microcode Version */
};
103
/*
 * Scatter Gather Engine parameters.  These are almost all determined by the
 * Physical Function Driver.  We just need to grab them to see within which
 * environment we're playing ...
 */
struct sge_params {
	u32 sge_control;		/* padding, boundaries, lengths, etc. */
	u32 sge_host_page_size;		/* SGE host page size */
	u32 sge_queues_per_page;	/* SGE queues/page */
	u32 sge_user_mode_limits;	/* limits for BAR2 user mode accesses */
	u32 sge_fl_buffer_size[16];	/* free list buffer sizes */
	u32 sge_ingress_rx_threshold;	/* RX counter interrupt threshold[4] */
	u32 sge_timer_value_0_and_1;	/* interrupt coalescing timer values */
	u32 sge_timer_value_2_and_3;
	u32 sge_timer_value_4_and_5;
};
120
/*
 * Vital Product Data parameters.
 */
struct vpd_params {
	u32 cclk;			/* Core Clock (KHz) */
};
127
/*
 * Global Receive Side Scaling (RSS) parameters in host-native format.
 *
 * NOTE(review): the 1-bit fields below are declared as signed "int"
 * bitfields; on common ABIs a signed 1-bit field holds 0/-1 rather than
 * 0/1.  The code presumably only tests them for zero/non-zero — confirm
 * before comparing any of them against 1.
 */
struct rss_params {
	unsigned int mode;		/* RSS mode */
	union {
		struct {
			int synmapen:1;		/* SYN Map Enable */
			int syn4tupenipv6:1;	/* enable hashing 4-tuple IPv6 SYNs */
			int syn2tupenipv6:1;	/* enable hashing 2-tuple IPv6 SYNs */
			int syn4tupenipv4:1;	/* enable hashing 4-tuple IPv4 SYNs */
			int syn2tupenipv4:1;	/* enable hashing 2-tuple IPv4 SYNs */
			int ofdmapen:1;		/* Offload Map Enable */
			int tnlmapen:1;		/* Tunnel Map Enable */
			int tnlalllookup:1;	/* Tunnel All Lookup */
			int hashtoeplitz:1;	/* use Toeplitz hash */
		} basicvirtual;
	} u;
};
147
/*
 * Virtual Interface RSS Configuration in host-native format.
 */
union rss_vi_config {
	struct {
		u16 defaultq;		/* Ingress Queue ID for !tnlalllookup */
		int ip6fourtupen:1;	/* hash 4-tuple IPv6 ingress packets */
		int ip6twotupen:1;	/* hash 2-tuple IPv6 ingress packets */
		int ip4fourtupen:1;	/* hash 4-tuple IPv4 ingress packets */
		int ip4twotupen:1;	/* hash 2-tuple IPv4 ingress packets */
		/* NOTE(review): unlike its siblings, udpen has no ":1" bit
		 * width — verify whether a full int is intended here.
		 */
		int udpen;		/* hash 4-tuple UDP ingress packets */
	} basicvirtual;
};
161
/*
 * Maximum resources provisioned for a PCI VF.
 */
struct vf_resources {
	unsigned int nvi;		/* N virtual interfaces */
	unsigned int neq;		/* N egress Qs */
	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
	unsigned int niq;		/* N ingress Qs */
	unsigned int tc;		/* PCI-E traffic class */
	unsigned int pmask;		/* port access rights mask */
	unsigned int nexactf;		/* N exact MPS filters */
	unsigned int r_caps;		/* read capabilities */
	unsigned int wx_caps;		/* write/execute capabilities */
};
177
/*
 * Per-"adapter" (Virtual Function) parameters: the aggregation of all the
 * parameter groups read from the firmware/PF at initialization time.
 */
struct adapter_params {
	struct dev_params dev;		/* general device parameters */
	struct sge_params sge;		/* Scatter Gather Engine */
	struct vpd_params vpd;		/* Vital Product Data */
	struct rss_params rss;		/* Receive Side Scaling */
	struct vf_resources vfres;	/* Virtual Function Resource limits */
	u8 nports;			/* # of Ethernet "ports" */
};
189
190#include "adapter.h"
191
192#ifndef PCI_VENDOR_ID_CHELSIO
193# define PCI_VENDOR_ID_CHELSIO 0x1425
194#endif
195
/* Iterate "iter" over all Ethernet port indices of an adapter. */
#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; iter++)
198
199static inline bool is_10g_port(const struct link_config *lc)
200{
201 return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
202}
203
204static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
205{
206 return adapter->params.vpd.cclk / 1000;
207}
208
209static inline unsigned int us_to_core_ticks(const struct adapter *adapter,
210 unsigned int us)
211{
212 return (us * adapter->params.vpd.cclk) / 1000;
213}
214
215static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
216 unsigned int ticks)
217{
218 return (ticks * 1000) / adapter->params.vpd.cclk;
219}
220
int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);

/*
 * Issue a mailbox command and sleep while waiting for the reply (the
 * final "true" selects the sleeping variant of t4vf_wr_mbox_core()).
 */
static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
			       int size, void *rpl)
{
	return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
}

/*
 * Non-sleeping variant of t4vf_wr_mbox() for use in atomic context.
 */
static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
				  int size, void *rpl)
{
	return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
234
235int __devinit t4vf_wait_dev_ready(struct adapter *);
236int __devinit t4vf_port_init(struct adapter *, int);
237
238int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
239int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
240
241int t4vf_get_sge_params(struct adapter *);
242int t4vf_get_vpd_params(struct adapter *);
243int t4vf_get_dev_params(struct adapter *);
244int t4vf_get_rss_glb_config(struct adapter *);
245int t4vf_get_vfres(struct adapter *);
246
247int t4vf_read_rss_vi_config(struct adapter *, unsigned int,
248 union rss_vi_config *);
249int t4vf_write_rss_vi_config(struct adapter *, unsigned int,
250 union rss_vi_config *);
251int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
252 const u16 *, int);
253
254int t4vf_alloc_vi(struct adapter *, int);
255int t4vf_free_vi(struct adapter *, int);
256int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
257int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
258
259int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
260 bool);
261int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
262 const u8 **, u16 *, u64 *, bool);
263int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
264int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
265int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
266
267int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
268 unsigned int);
269int t4vf_eth_eq_free(struct adapter *, unsigned int);
270
271int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
272
273#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/cxgb4vf/t4vf_defs.h b/drivers/net/cxgb4vf/t4vf_defs.h
new file mode 100644
index 000000000000..c7b127d93767
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_defs.h
@@ -0,0 +1,121 @@
1/*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#ifndef __T4VF_DEFS_H__
37#define __T4VF_DEFS_H__
38
39#include "../cxgb4/t4_regs.h"
40
/*
 * The VF Register Map.
 *
 * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
 * bus module (PL) and CPU Interface Module (CIM) components are mapped via
 * the Slice to Module Map Table (see below) in the Physical Function Register
 * Map.  The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
 * and Offset registers in the PF Register Map.  The MBDATA base address is
 * quite constrained as it determines the Mailbox Data addresses for both PFs
 * and VFs, and therefore must fit in both the VF and PF Register Maps without
 * overlapping other registers.
 */
#define T4VF_SGE_BASE_ADDR	0x0000
#define T4VF_MPS_BASE_ADDR	0x0100
#define T4VF_PL_BASE_ADDR	0x0200
#define T4VF_MBDATA_BASE_ADDR	0x0240
#define T4VF_CIM_BASE_ADDR	0x0300

#define T4VF_REGMAP_START	0x0000
#define T4VF_REGMAP_SIZE	0x0400
61
/*
 * There's no hardware limitation which requires that the addresses of the
 * Mailbox Data in the fixed CIM PF map and the programmable VF map must
 * match.  However, it's a useful convention, and this build-time check
 * enforces it ...
 */
#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
#endif
70
/*
 * Virtual Function "Slice to Module Map Table" definitions.
 *
 * This table allows us to map subsets of the various module register sets
 * into the T4VF Register Map.  Each table entry identifies the index of the
 * module whose registers are being mapped, the offset within the module's
 * register set that the mapping should start at, the limit of the mapping,
 * and the offset within the T4VF Register Map to which the module's registers
 * are being mapped.  All addresses and quantities are in terms of 32-bit
 * words.  The "limit" value is also in terms of 32-bit words and is equal to
 * the last address mapped in the T4VF Register Map (i.e. it's a "<="
 * relation rather than a "<").
 */
#define T4VF_MOD_MAP(module, index, first, last) \
	T4VF_MOD_MAP_##module##_INDEX = (index), \
	T4VF_MOD_MAP_##module##_FIRST = (first), \
	T4VF_MOD_MAP_##module##_LAST = (last), \
	T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
	T4VF_MOD_MAP_##module##_BASE = \
		(T4VF_##module##_BASE_ADDR/4 + (first)/4), \
	T4VF_MOD_MAP_##module##_LIMIT = \
		(T4VF_##module##_BASE_ADDR/4 + (last)/4),
93
/* Per-module register offsets (byte addresses) used as map-table bounds. */
#define SGE_VF_KDOORBELL 0x0
#define SGE_VF_GTS 0x4
#define MPS_VF_CTL 0x0
#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
#define PL_VF_WHOAMI 0x0
#define CIM_VF_EXT_MAILBOX_CTRL 0x0
#define CIM_VF_EXT_MAILBOX_STATUS 0x4
101
/* Instantiate the Slice to Module Map Table entries for each module. */
enum {
	T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
	T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
	T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI)
	T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
};
108
/*
 * There isn't a Slice to Module Map Table entry for the Mailbox Data
 * registers, but it's convenient to use similar names as above.  There are 8
 * little-endian 64-bit Mailbox Data registers.  Note that the "instances"
 * value below is in terms of 32-bit words (8 x 64 bits == 16 x 32 bits)
 * which matches the "word" addressing space we use above for the Slice to
 * Module Map Space.
 */
#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16

#define T4VF_MBDATA_FIRST	0
#define T4VF_MBDATA_LAST	((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
120
#endif /* __T4VF_DEFS_H__ */
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
new file mode 100644
index 000000000000..ea1c123f0cb4
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -0,0 +1,1333 @@
1/*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/version.h>
37#include <linux/pci.h>
38
39#include "t4vf_common.h"
40#include "t4vf_defs.h"
41
42#include "../cxgb4/t4_regs.h"
43#include "../cxgb4/t4fw_api.h"
44
45/*
46 * Wait for the device to become ready (signified by our "who am I" register
47 * returning a value other than all 1's). Return an error if it doesn't
48 * become ready ...
49 */
50int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
51{
52 const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
53 const u32 notready1 = 0xffffffff;
54 const u32 notready2 = 0xeeeeeeee;
55 u32 val;
56
57 val = t4_read_reg(adapter, whoami);
58 if (val != notready1 && val != notready2)
59 return 0;
60 msleep(500);
61 val = t4_read_reg(adapter, whoami);
62 if (val != notready1 && val != notready2)
63 return 0;
64 else
65 return -EIO;
66}
67
68/*
69 * Get the reply to a mailbox command and store it in @rpl in big-endian order
70 * (since the firmware data structures are specified in a big-endian layout).
71 */
72static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
73 u32 mbox_data)
74{
75 for ( ; size; size -= 8, mbox_data += 8)
76 *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
77}
78
79/*
80 * Dump contents of mailbox with a leading tag.
81 */
82static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
83{
84 dev_err(adapter->pdev_dev,
85 "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
86 (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
87 (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
88 (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
89 (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
90 (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
91 (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
92 (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
93 (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
94}
95
96/**
97 * t4vf_wr_mbox_core - send a command to FW through the mailbox
98 * @adapter: the adapter
99 * @cmd: the command to write
100 * @size: command length in bytes
101 * @rpl: where to optionally store the reply
102 * @sleep_ok: if true we may sleep while awaiting command completion
103 *
104 * Sends the given command to FW through the mailbox and waits for the
105 * FW to execute the command. If @rpl is not %NULL it is used to store
106 * the FW's reply to the command. The command and its optional reply
107 * are of the same length. FW can take up to 500 ms to respond.
108 * @sleep_ok determines whether we may sleep while awaiting the response.
109 * If sleeping is allowed we use progressive backoff otherwise we spin.
110 *
111 * The return value is 0 on success or a negative errno on failure. A
112 * failure can happen either because we are not able to execute the
113 * command or FW executes it but signals an error. In the latter case
114 * the return value is the error code indicated by FW (negated).
115 */
116int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
117 void *rpl, bool sleep_ok)
118{
119 static int delay[] = {
120 1, 1, 3, 5, 10, 10, 20, 50, 100
121 };
122
123 u32 v;
124 int i, ms, delay_idx;
125 const __be64 *p;
126 u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
127 u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
128
129 /*
130 * Commands must be multiples of 16 bytes in length and may not be
131 * larger than the size of the Mailbox Data register array.
132 */
133 if ((size % 16) != 0 ||
134 size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
135 return -EINVAL;
136
137 /*
138 * Loop trying to get ownership of the mailbox. Return an error
139 * if we can't gain ownership.
140 */
141 v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
142 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
143 v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
144 if (v != MBOX_OWNER_DRV)
145 return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
146
147 /*
148 * Write the command array into the Mailbox Data register array and
149 * transfer ownership of the mailbox to the firmware.
150 */
151 for (i = 0, p = cmd; i < size; i += 8)
152 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
153 t4_write_reg(adapter, mbox_ctl,
154 MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
155 t4_read_reg(adapter, mbox_ctl); /* flush write */
156
157 /*
158 * Spin waiting for firmware to acknowledge processing our command.
159 */
160 delay_idx = 0;
161 ms = delay[0];
162
163 for (i = 0; i < 500; i += ms) {
164 if (sleep_ok) {
165 ms = delay[delay_idx];
166 if (delay_idx < ARRAY_SIZE(delay) - 1)
167 delay_idx++;
168 msleep(ms);
169 } else
170 mdelay(ms);
171
172 /*
173 * If we're the owner, see if this is the reply we wanted.
174 */
175 v = t4_read_reg(adapter, mbox_ctl);
176 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
177 /*
178 * If the Message Valid bit isn't on, revoke ownership
179 * of the mailbox and continue waiting for our reply.
180 */
181 if ((v & MBMSGVALID) == 0) {
182 t4_write_reg(adapter, mbox_ctl,
183 MBOWNER(MBOX_OWNER_NONE));
184 continue;
185 }
186
187 /*
188 * We now have our reply. Extract the command return
189 * value, copy the reply back to our caller's buffer
190 * (if specified) and revoke ownership of the mailbox.
191 * We return the (negated) firmware command return
192 * code (this depends on FW_SUCCESS == 0).
193 */
194
195 /* return value in low-order little-endian word */
196 v = t4_read_reg(adapter, mbox_data);
197 if (FW_CMD_RETVAL_GET(v))
198 dump_mbox(adapter, "FW Error", mbox_data);
199
200 if (rpl) {
201 /* request bit in high-order BE word */
202 WARN_ON((be32_to_cpu(*(const u32 *)cmd)
203 & FW_CMD_REQUEST) == 0);
204 get_mbox_rpl(adapter, rpl, size, mbox_data);
205 WARN_ON((be32_to_cpu(*(u32 *)rpl)
206 & FW_CMD_REQUEST) != 0);
207 }
208 t4_write_reg(adapter, mbox_ctl,
209 MBOWNER(MBOX_OWNER_NONE));
210 return -FW_CMD_RETVAL_GET(v);
211 }
212 }
213
214 /*
215 * We timed out. Return the error ...
216 */
217 dump_mbox(adapter, "FW Timeout", mbox_data);
218 return -ETIMEDOUT;
219}
220
221/**
222 * hash_mac_addr - return the hash value of a MAC address
223 * @addr: the 48-bit Ethernet MAC address
224 *
225 * Hashes a MAC address according to the hash function used by hardware
226 * inexact (hash) address matching.
227 */
228static int hash_mac_addr(const u8 *addr)
229{
230 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
231 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
232 a ^= b;
233 a ^= (a >> 12);
234 a ^= (a >> 6);
235 return a & 0x3f;
236}
237
238/**
239 * init_link_config - initialize a link's SW state
240 * @lc: structure holding the link state
241 * @caps: link capabilities
242 *
243 * Initializes the SW state maintained for each link, including the link's
244 * capabilities and default speed/flow-control/autonegotiation settings.
245 */
246static void __devinit init_link_config(struct link_config *lc,
247 unsigned int caps)
248{
249 lc->supported = caps;
250 lc->requested_speed = 0;
251 lc->speed = 0;
252 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
253 if (lc->supported & SUPPORTED_Autoneg) {
254 lc->advertising = lc->supported;
255 lc->autoneg = AUTONEG_ENABLE;
256 lc->requested_fc |= PAUSE_AUTONEG;
257 } else {
258 lc->advertising = 0;
259 lc->autoneg = AUTONEG_DISABLE;
260 }
261}
262
263/**
264 * t4vf_port_init - initialize port hardware/software state
265 * @adapter: the adapter
266 * @pidx: the adapter port index
267 */
268int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
269{
270 struct port_info *pi = adap2pinfo(adapter, pidx);
271 struct fw_vi_cmd vi_cmd, vi_rpl;
272 struct fw_port_cmd port_cmd, port_rpl;
273 int v;
274 u32 word;
275
276 /*
277 * Execute a VI Read command to get our Virtual Interface information
278 * like MAC address, etc.
279 */
280 memset(&vi_cmd, 0, sizeof(vi_cmd));
281 vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
282 FW_CMD_REQUEST |
283 FW_CMD_READ);
284 vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
285 vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
286 v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
287 if (v)
288 return v;
289
290 BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
291 pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
292 t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
293
294 /*
295 * If we don't have read access to our port information, we're done
296 * now. Otherwise, execute a PORT Read command to get it ...
297 */
298 if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
299 return 0;
300
301 memset(&port_cmd, 0, sizeof(port_cmd));
302 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
303 FW_CMD_REQUEST |
304 FW_CMD_READ |
305 FW_PORT_CMD_PORTID(pi->port_id));
306 port_cmd.action_to_len16 =
307 cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
308 FW_LEN16(port_cmd));
309 v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
310 if (v)
311 return v;
312
313 v = 0;
314 word = be16_to_cpu(port_rpl.u.info.pcap);
315 if (word & FW_PORT_CAP_SPEED_100M)
316 v |= SUPPORTED_100baseT_Full;
317 if (word & FW_PORT_CAP_SPEED_1G)
318 v |= SUPPORTED_1000baseT_Full;
319 if (word & FW_PORT_CAP_SPEED_10G)
320 v |= SUPPORTED_10000baseT_Full;
321 if (word & FW_PORT_CAP_ANEG)
322 v |= SUPPORTED_Autoneg;
323 init_link_config(&pi->link_cfg, v);
324
325 return 0;
326}
327
328/**
329 * t4vf_query_params - query FW or device parameters
330 * @adapter: the adapter
331 * @nparams: the number of parameters
332 * @params: the parameter names
333 * @vals: the parameter values
334 *
335 * Reads the values of firmware or device parameters. Up to 7 parameters
336 * can be queried at once.
337 */
338int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
339 const u32 *params, u32 *vals)
340{
341 int i, ret;
342 struct fw_params_cmd cmd, rpl;
343 struct fw_params_param *p;
344 size_t len16;
345
346 if (nparams > 7)
347 return -EINVAL;
348
349 memset(&cmd, 0, sizeof(cmd));
350 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
351 FW_CMD_REQUEST |
352 FW_CMD_READ);
353 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
354 param[nparams].mnem), 16);
355 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
356 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
357 p->mnem = htonl(*params++);
358
359 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
360 if (ret == 0)
361 for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
362 *vals++ = be32_to_cpu(p->val);
363 return ret;
364}
365
366/**
367 * t4vf_set_params - sets FW or device parameters
368 * @adapter: the adapter
369 * @nparams: the number of parameters
370 * @params: the parameter names
371 * @vals: the parameter values
372 *
373 * Sets the values of firmware or device parameters. Up to 7 parameters
374 * can be specified at once.
375 */
376int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
377 const u32 *params, const u32 *vals)
378{
379 int i;
380 struct fw_params_cmd cmd;
381 struct fw_params_param *p;
382 size_t len16;
383
384 if (nparams > 7)
385 return -EINVAL;
386
387 memset(&cmd, 0, sizeof(cmd));
388 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
389 FW_CMD_REQUEST |
390 FW_CMD_WRITE);
391 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
392 param[nparams]), 16);
393 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
394 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
395 p->mnem = cpu_to_be32(*params++);
396 p->val = cpu_to_be32(*vals++);
397 }
398
399 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
400}
401
402/**
403 * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
404 * @adapter: the adapter
405 *
406 * Retrieves various core SGE parameters in the form of hardware SGE
407 * register values. The caller is responsible for decoding these as
408 * needed. The SGE parameters are stored in @adapter->params.sge.
409 */
410int t4vf_get_sge_params(struct adapter *adapter)
411{
412 struct sge_params *sge_params = &adapter->params.sge;
413 u32 params[7], vals[7];
414 int v;
415
416 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
417 FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
418 params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
419 FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
420 params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
421 FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
422 params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
423 FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
424 params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
425 FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
426 params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
427 FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
428 params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
429 FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
430 v = t4vf_query_params(adapter, 7, params, vals);
431 if (v)
432 return v;
433 sge_params->sge_control = vals[0];
434 sge_params->sge_host_page_size = vals[1];
435 sge_params->sge_fl_buffer_size[0] = vals[2];
436 sge_params->sge_fl_buffer_size[1] = vals[3];
437 sge_params->sge_timer_value_0_and_1 = vals[4];
438 sge_params->sge_timer_value_2_and_3 = vals[5];
439 sge_params->sge_timer_value_4_and_5 = vals[6];
440
441 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
442 FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
443 v = t4vf_query_params(adapter, 1, params, vals);
444 if (v)
445 return v;
446 sge_params->sge_ingress_rx_threshold = vals[0];
447
448 return 0;
449}
450
451/**
452 * t4vf_get_vpd_params - retrieve device VPD paremeters
453 * @adapter: the adapter
454 *
455 * Retrives various device Vital Product Data parameters. The parameters
456 * are stored in @adapter->params.vpd.
457 */
458int t4vf_get_vpd_params(struct adapter *adapter)
459{
460 struct vpd_params *vpd_params = &adapter->params.vpd;
461 u32 params[7], vals[7];
462 int v;
463
464 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
465 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
466 v = t4vf_query_params(adapter, 1, params, vals);
467 if (v)
468 return v;
469 vpd_params->cclk = vals[0];
470
471 return 0;
472}
473
474/**
475 * t4vf_get_dev_params - retrieve device paremeters
476 * @adapter: the adapter
477 *
478 * Retrives various device parameters. The parameters are stored in
479 * @adapter->params.dev.
480 */
481int t4vf_get_dev_params(struct adapter *adapter)
482{
483 struct dev_params *dev_params = &adapter->params.dev;
484 u32 params[7], vals[7];
485 int v;
486
487 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
488 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
489 params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
490 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
491 v = t4vf_query_params(adapter, 2, params, vals);
492 if (v)
493 return v;
494 dev_params->fwrev = vals[0];
495 dev_params->tprev = vals[1];
496
497 return 0;
498}
499
500/**
501 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
502 * @adapter: the adapter
503 *
504 * Retrieves global RSS mode and parameters with which we have to live
505 * and stores them in the @adapter's RSS parameters.
506 */
507int t4vf_get_rss_glb_config(struct adapter *adapter)
508{
509 struct rss_params *rss = &adapter->params.rss;
510 struct fw_rss_glb_config_cmd cmd, rpl;
511 int v;
512
513 /*
514 * Execute an RSS Global Configuration read command to retrieve
515 * our RSS configuration.
516 */
517 memset(&cmd, 0, sizeof(cmd));
518 cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
519 FW_CMD_REQUEST |
520 FW_CMD_READ);
521 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
522 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
523 if (v)
524 return v;
525
526 /*
527 * Transate the big-endian RSS Global Configuration into our
528 * cpu-endian format based on the RSS mode. We also do first level
529 * filtering at this point to weed out modes which don't support
530 * VF Drivers ...
531 */
532 rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
533 be32_to_cpu(rpl.u.manual.mode_pkd));
534 switch (rss->mode) {
535 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
536 u32 word = be32_to_cpu(
537 rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
538
539 rss->u.basicvirtual.synmapen =
540 ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
541 rss->u.basicvirtual.syn4tupenipv6 =
542 ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
543 rss->u.basicvirtual.syn2tupenipv6 =
544 ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
545 rss->u.basicvirtual.syn4tupenipv4 =
546 ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
547 rss->u.basicvirtual.syn2tupenipv4 =
548 ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
549
550 rss->u.basicvirtual.ofdmapen =
551 ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
552
553 rss->u.basicvirtual.tnlmapen =
554 ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
555 rss->u.basicvirtual.tnlalllookup =
556 ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
557
558 rss->u.basicvirtual.hashtoeplitz =
559 ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
560
561 /* we need at least Tunnel Map Enable to be set */
562 if (!rss->u.basicvirtual.tnlmapen)
563 return -EINVAL;
564 break;
565 }
566
567 default:
568 /* all unknown/unsupported RSS modes result in an error */
569 return -EINVAL;
570 }
571
572 return 0;
573}
574
575/**
576 * t4vf_get_vfres - retrieve VF resource limits
577 * @adapter: the adapter
578 *
579 * Retrieves configured resource limits and capabilities for a virtual
580 * function. The results are stored in @adapter->vfres.
581 */
582int t4vf_get_vfres(struct adapter *adapter)
583{
584 struct vf_resources *vfres = &adapter->params.vfres;
585 struct fw_pfvf_cmd cmd, rpl;
586 int v;
587 u32 word;
588
589 /*
590 * Execute PFVF Read command to get VF resource limits; bail out early
591 * with error on command failure.
592 */
593 memset(&cmd, 0, sizeof(cmd));
594 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
595 FW_CMD_REQUEST |
596 FW_CMD_READ);
597 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
598 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
599 if (v)
600 return v;
601
602 /*
603 * Extract VF resource limits and return success.
604 */
605 word = be32_to_cpu(rpl.niqflint_niq);
606 vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
607 vfres->niq = FW_PFVF_CMD_NIQ_GET(word);
608
609 word = be32_to_cpu(rpl.type_to_neq);
610 vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
611 vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);
612
613 word = be32_to_cpu(rpl.tc_to_nexactf);
614 vfres->tc = FW_PFVF_CMD_TC_GET(word);
615 vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
616 vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);
617
618 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
619 vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
620 vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
621 vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);
622
623 return 0;
624}
625
626/**
627 * t4vf_read_rss_vi_config - read a VI's RSS configuration
628 * @adapter: the adapter
629 * @viid: Virtual Interface ID
630 * @config: pointer to host-native VI RSS Configuration buffer
631 *
632 * Reads the Virtual Interface's RSS configuration information and
633 * translates it into CPU-native format.
634 */
635int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
636 union rss_vi_config *config)
637{
638 struct fw_rss_vi_config_cmd cmd, rpl;
639 int v;
640
641 memset(&cmd, 0, sizeof(cmd));
642 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
643 FW_CMD_REQUEST |
644 FW_CMD_READ |
645 FW_RSS_VI_CONFIG_CMD_VIID(viid));
646 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
647 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
648 if (v)
649 return v;
650
651 switch (adapter->params.rss.mode) {
652 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
653 u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
654
655 config->basicvirtual.ip6fourtupen =
656 ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
657 config->basicvirtual.ip6twotupen =
658 ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
659 config->basicvirtual.ip4fourtupen =
660 ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
661 config->basicvirtual.ip4twotupen =
662 ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
663 config->basicvirtual.udpen =
664 ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
665 config->basicvirtual.defaultq =
666 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
667 break;
668 }
669
670 default:
671 return -EINVAL;
672 }
673
674 return 0;
675}
676
677/**
678 * t4vf_write_rss_vi_config - write a VI's RSS configuration
679 * @adapter: the adapter
680 * @viid: Virtual Interface ID
681 * @config: pointer to host-native VI RSS Configuration buffer
682 *
683 * Write the Virtual Interface's RSS configuration information
684 * (translating it into firmware-native format before writing).
685 */
686int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
687 union rss_vi_config *config)
688{
689 struct fw_rss_vi_config_cmd cmd, rpl;
690
691 memset(&cmd, 0, sizeof(cmd));
692 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
693 FW_CMD_REQUEST |
694 FW_CMD_WRITE |
695 FW_RSS_VI_CONFIG_CMD_VIID(viid));
696 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
697 switch (adapter->params.rss.mode) {
698 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
699 u32 word = 0;
700
701 if (config->basicvirtual.ip6fourtupen)
702 word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
703 if (config->basicvirtual.ip6twotupen)
704 word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
705 if (config->basicvirtual.ip4fourtupen)
706 word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
707 if (config->basicvirtual.ip4twotupen)
708 word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
709 if (config->basicvirtual.udpen)
710 word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
711 word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
712 config->basicvirtual.defaultq);
713 cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
714 break;
715 }
716
717 default:
718 return -EINVAL;
719 }
720
721 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
722}
723
724/**
725 * t4vf_config_rss_range - configure a portion of the RSS mapping table
726 * @adapter: the adapter
727 * @viid: Virtual Interface of RSS Table Slice
728 * @start: starting entry in the table to write
729 * @n: how many table entries to write
730 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
731 * @nrspq: number of values in @rspq
732 *
733 * Programs the selected part of the VI's RSS mapping table with the
734 * provided values. If @nrspq < @n the supplied values are used repeatedly
735 * until the full table range is populated.
736 *
737 * The caller must ensure the values in @rspq are in the range 0..1023.
738 */
739int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
740 int start, int n, const u16 *rspq, int nrspq)
741{
742 const u16 *rsp = rspq;
743 const u16 *rsp_end = rspq+nrspq;
744 struct fw_rss_ind_tbl_cmd cmd;
745
746 /*
747 * Initialize firmware command template to write the RSS table.
748 */
749 memset(&cmd, 0, sizeof(cmd));
750 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
751 FW_CMD_REQUEST |
752 FW_CMD_WRITE |
753 FW_RSS_IND_TBL_CMD_VIID(viid));
754 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
755
756 /*
757 * Each firmware RSS command can accommodate up to 32 RSS Ingress
758 * Queue Identifiers. These Ingress Queue IDs are packed three to
759 * a 32-bit word as 10-bit values with the upper remaining 2 bits
760 * reserved.
761 */
762 while (n > 0) {
763 __be32 *qp = &cmd.iq0_to_iq2;
764 int nq = min(n, 32);
765 int ret;
766
767 /*
768 * Set up the firmware RSS command header to send the next
769 * "nq" Ingress Queue IDs to the firmware.
770 */
771 cmd.niqid = cpu_to_be16(nq);
772 cmd.startidx = cpu_to_be16(start);
773
774 /*
775 * "nq" more done for the start of the next loop.
776 */
777 start += nq;
778 n -= nq;
779
780 /*
781 * While there are still Ingress Queue IDs to stuff into the
782 * current firmware RSS command, retrieve them from the
783 * Ingress Queue ID array and insert them into the command.
784 */
785 while (nq > 0) {
786 /*
787 * Grab up to the next 3 Ingress Queue IDs (wrapping
788 * around the Ingress Queue ID array if necessary) and
789 * insert them into the firmware RSS command at the
790 * current 3-tuple position within the commad.
791 */
792 u16 qbuf[3];
793 u16 *qbp = qbuf;
794 int nqbuf = min(3, nq);
795
796 nq -= nqbuf;
797 qbuf[0] = qbuf[1] = qbuf[2] = 0;
798 while (nqbuf) {
799 nqbuf--;
800 *qbp++ = *rsp++;
801 if (rsp >= rsp_end)
802 rsp = rspq;
803 }
804 *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
805 FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
806 FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
807 }
808
809 /*
810 * Send this portion of the RRS table update to the firmware;
811 * bail out on any errors.
812 */
813 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
814 if (ret)
815 return ret;
816 }
817 return 0;
818}
819
820/**
821 * t4vf_alloc_vi - allocate a virtual interface on a port
822 * @adapter: the adapter
823 * @port_id: physical port associated with the VI
824 *
825 * Allocate a new Virtual Interface and bind it to the indicated
826 * physical port. Return the new Virtual Interface Identifier on
827 * success, or a [negative] error number on failure.
828 */
829int t4vf_alloc_vi(struct adapter *adapter, int port_id)
830{
831 struct fw_vi_cmd cmd, rpl;
832 int v;
833
834 /*
835 * Execute a VI command to allocate Virtual Interface and return its
836 * VIID.
837 */
838 memset(&cmd, 0, sizeof(cmd));
839 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
840 FW_CMD_REQUEST |
841 FW_CMD_WRITE |
842 FW_CMD_EXEC);
843 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
844 FW_VI_CMD_ALLOC);
845 cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
846 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
847 if (v)
848 return v;
849
850 return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
851}
852
853/**
854 * t4vf_free_vi -- free a virtual interface
855 * @adapter: the adapter
856 * @viid: the virtual interface identifier
857 *
858 * Free a previously allocated Virtual Interface. Return an error on
859 * failure.
860 */
861int t4vf_free_vi(struct adapter *adapter, int viid)
862{
863 struct fw_vi_cmd cmd;
864
865 /*
866 * Execute a VI command to free the Virtual Interface.
867 */
868 memset(&cmd, 0, sizeof(cmd));
869 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
870 FW_CMD_REQUEST |
871 FW_CMD_EXEC);
872 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
873 FW_VI_CMD_FREE);
874 cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
875 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
876}
877
878/**
879 * t4vf_enable_vi - enable/disable a virtual interface
880 * @adapter: the adapter
881 * @viid: the Virtual Interface ID
882 * @rx_en: 1=enable Rx, 0=disable Rx
883 * @tx_en: 1=enable Tx, 0=disable Tx
884 *
885 * Enables/disables a virtual interface.
886 */
887int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
888 bool rx_en, bool tx_en)
889{
890 struct fw_vi_enable_cmd cmd;
891
892 memset(&cmd, 0, sizeof(cmd));
893 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
894 FW_CMD_REQUEST |
895 FW_CMD_EXEC |
896 FW_VI_ENABLE_CMD_VIID(viid));
897 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
898 FW_VI_ENABLE_CMD_EEN(tx_en) |
899 FW_LEN16(cmd));
900 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
901}
902
903/**
904 * t4vf_identify_port - identify a VI's port by blinking its LED
905 * @adapter: the adapter
906 * @viid: the Virtual Interface ID
907 * @nblinks: how many times to blink LED at 2.5 Hz
908 *
909 * Identifies a VI's port by blinking its LED.
910 */
911int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
912 unsigned int nblinks)
913{
914 struct fw_vi_enable_cmd cmd;
915
916 memset(&cmd, 0, sizeof(cmd));
917 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
918 FW_CMD_REQUEST |
919 FW_CMD_EXEC |
920 FW_VI_ENABLE_CMD_VIID(viid));
921 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
922 FW_LEN16(cmd));
923 cmd.blinkdur = cpu_to_be16(nblinks);
924 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
925}
926
927/**
928 * t4vf_set_rxmode - set Rx properties of a virtual interface
929 * @adapter: the adapter
930 * @viid: the VI id
931 * @mtu: the new MTU or -1 for no change
932 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
933 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
934 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
935 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
936 * -1 no change
937 *
938 * Sets Rx properties of a virtual interface.
939 */
940int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
941 int mtu, int promisc, int all_multi, int bcast, int vlanex,
942 bool sleep_ok)
943{
944 struct fw_vi_rxmode_cmd cmd;
945
946 /* convert to FW values */
947 if (mtu < 0)
948 mtu = FW_VI_RXMODE_CMD_MTU_MASK;
949 if (promisc < 0)
950 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
951 if (all_multi < 0)
952 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
953 if (bcast < 0)
954 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
955 if (vlanex < 0)
956 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
957
958 memset(&cmd, 0, sizeof(cmd));
959 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
960 FW_CMD_REQUEST |
961 FW_CMD_WRITE |
962 FW_VI_RXMODE_CMD_VIID(viid));
963 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
964 cmd.mtu_to_vlanexen =
965 cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
966 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
967 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
968 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
969 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
970 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
971}
972
973/**
974 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
975 * @adapter: the adapter
976 * @viid: the Virtual Interface Identifier
977 * @free: if true any existing filters for this VI id are first removed
978 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
979 * @addr: the MAC address(es)
980 * @idx: where to store the index of each allocated filter
981 * @hash: pointer to hash address filter bitmap
982 * @sleep_ok: call is allowed to sleep
983 *
984 * Allocates an exact-match filter for each of the supplied addresses and
985 * sets it to the corresponding address. If @idx is not %NULL it should
986 * have at least @naddr entries, each of which will be set to the index of
987 * the filter allocated for the corresponding MAC address. If a filter
988 * could not be allocated for an address its index is set to 0xffff.
989 * If @hash is not %NULL addresses that fail to allocate an exact filter
990 * are hashed and update the hash filter bitmap pointed at by @hash.
991 *
992 * Returns a negative error number or the number of filters allocated.
993 */
994int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
995 unsigned int naddr, const u8 **addr, u16 *idx,
996 u64 *hash, bool sleep_ok)
997{
998 int i, ret;
999 struct fw_vi_mac_cmd cmd, rpl;
1000 struct fw_vi_mac_exact *p;
1001 size_t len16;
1002
1003 if (naddr > ARRAY_SIZE(cmd.u.exact))
1004 return -EINVAL;
1005 len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1006 u.exact[naddr]), 16);
1007
1008 memset(&cmd, 0, sizeof(cmd));
1009 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
1010 FW_CMD_REQUEST |
1011 FW_CMD_WRITE |
1012 (free ? FW_CMD_EXEC : 0) |
1013 FW_VI_MAC_CMD_VIID(viid));
1014 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
1015 FW_CMD_LEN16(len16));
1016
1017 for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
1018 p->valid_to_idx =
1019 cpu_to_be16(FW_VI_MAC_CMD_VALID |
1020 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
1021 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
1022 }
1023
1024 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
1025 if (ret)
1026 return ret;
1027
1028 for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
1029 u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
1030
1031 if (idx)
1032 idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
1033 ? 0xffff
1034 : index);
1035 if (index < FW_CLS_TCAM_NUM_ENTRIES)
1036 ret++;
1037 else if (hash)
1038 *hash |= (1 << hash_mac_addr(addr[i]));
1039 }
1040 return ret;
1041}
1042
1043/**
1044 * t4vf_change_mac - modifies the exact-match filter for a MAC address
1045 * @adapter: the adapter
1046 * @viid: the Virtual Interface ID
1047 * @idx: index of existing filter for old value of MAC address, or -1
1048 * @addr: the new MAC address value
1049 * @persist: if idx < 0, the new MAC allocation should be persistent
1050 *
1051 * Modifies an exact-match filter and sets it to the new MAC address.
1052 * Note that in general it is not possible to modify the value of a given
1053 * filter so the generic way to modify an address filter is to free the
1054 * one being used by the old address value and allocate a new filter for
1055 * the new address value. @idx can be -1 if the address is a new
1056 * addition.
1057 *
1058 * Returns a negative error number or the index of the filter with the new
1059 * MAC value.
1060 */
1061int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1062 int idx, const u8 *addr, bool persist)
1063{
1064 int ret;
1065 struct fw_vi_mac_cmd cmd, rpl;
1066 struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1067 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1068 u.exact[1]), 16);
1069
1070 /*
1071 * If this is a new allocation, determine whether it should be
1072 * persistent (across a "freemacs" operation) or not.
1073 */
1074 if (idx < 0)
1075 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
1076
1077 memset(&cmd, 0, sizeof(cmd));
1078 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
1079 FW_CMD_REQUEST |
1080 FW_CMD_WRITE |
1081 FW_VI_MAC_CMD_VIID(viid));
1082 cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
1083 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
1084 FW_VI_MAC_CMD_IDX(idx));
1085 memcpy(p->macaddr, addr, sizeof(p->macaddr));
1086
1087 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1088 if (ret == 0) {
1089 p = &rpl.u.exact[0];
1090 ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
1091 if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
1092 ret = -ENOMEM;
1093 }
1094 return ret;
1095}
1096
1097/**
1098 * t4vf_set_addr_hash - program the MAC inexact-match hash filter
1099 * @adapter: the adapter
1100 * @viid: the Virtual Interface Identifier
1101 * @ucast: whether the hash filter should also match unicast addresses
1102 * @vec: the value to be written to the hash filter
1103 * @sleep_ok: call is allowed to sleep
1104 *
1105 * Sets the 64-bit inexact-match hash filter for a virtual interface.
1106 */
1107int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
1108 bool ucast, u64 vec, bool sleep_ok)
1109{
1110 struct fw_vi_mac_cmd cmd;
1111 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1112 u.exact[0]), 16);
1113
1114 memset(&cmd, 0, sizeof(cmd));
1115 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
1116 FW_CMD_REQUEST |
1117 FW_CMD_WRITE |
1118 FW_VI_ENABLE_CMD_VIID(viid));
1119 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN |
1120 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
1121 FW_CMD_LEN16(len16));
1122 cmd.u.hash.hashvec = cpu_to_be64(vec);
1123 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1124}
1125
1126/**
1127 * t4vf_get_port_stats - collect "port" statistics
1128 * @adapter: the adapter
1129 * @pidx: the port index
1130 * @s: the stats structure to fill
1131 *
1132 * Collect statistics for the "port"'s Virtual Interface.
1133 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_stats_vf fwstats;
	/* Number of 64-bit counters still to fetch from the firmware. */
	unsigned int rem = VI_VF_NUM_STATS;
	/* Overlay cursor: fwstats is filled as a flat array of __be64. */
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands. We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
	while (rem) {
		/* Index of the first counter in this chunk. */
		unsigned int ix = VI_VF_NUM_STATS - rem;
		/* At most 6 counters fit in one stats mailbox reply. */
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID(pi->viid) |
					     FW_CMD_REQUEST |
					     FW_CMD_READ);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
		/* Ask for nstats counters starting at counter index ix. */
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
				    FW_VI_STATS_CMD_NSTATS(nstats));
		/* Non-sleeping mailbox variant; bail out on any error. */
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		/* Counters arrive packed starting at stat0 in the reply. */
		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);

	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}
1199
1200/**
1201 * t4vf_iq_free - free an ingress queue and its free lists
1202 * @adapter: the adapter
1203 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
1204 * @iqid: ingress queue ID
1205 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
1206 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
1207 *
1208 * Frees an ingress queue and its associated free lists, if any.
1209 */
1210int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
1211 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
1212{
1213 struct fw_iq_cmd cmd;
1214
1215 memset(&cmd, 0, sizeof(cmd));
1216 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
1217 FW_CMD_REQUEST |
1218 FW_CMD_EXEC);
1219 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
1220 FW_LEN16(cmd));
1221 cmd.type_to_iqandstindex =
1222 cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));
1223
1224 cmd.iqid = cpu_to_be16(iqid);
1225 cmd.fl0id = cpu_to_be16(fl0id);
1226 cmd.fl1id = cpu_to_be16(fl1id);
1227 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1228}
1229
1230/**
1231 * t4vf_eth_eq_free - free an Ethernet egress queue
1232 * @adapter: the adapter
1233 * @eqid: egress queue ID
1234 *
1235 * Frees an Ethernet egress queue.
1236 */
1237int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
1238{
1239 struct fw_eq_eth_cmd cmd;
1240
1241 memset(&cmd, 0, sizeof(cmd));
1242 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
1243 FW_CMD_REQUEST |
1244 FW_CMD_EXEC);
1245 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
1246 FW_LEN16(cmd));
1247 cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
1248 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1249}
1250
1251/**
1252 * t4vf_handle_fw_rpl - process a firmware reply message
1253 * @adapter: the adapter
1254 * @rpl: start of the firmware message
1255 *
1256 * Processes a firmware message, such as link state change messages.
1257 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	/* All firmware messages start with a common header carrying the
	 * command opcode in the high word.
	 */
	struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd = (void *)rpl;
		u32 word;
		int action, port_id, link_ok, speed, fc, pidx;

		/*
		 * Extract various fields from port status change message.
		 */
		action = FW_PORT_CMD_ACTION_GET(
			be32_to_cpu(port_cmd->action_to_len16));
		/* Only GET_PORT_INFO notifications are expected here. */
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_GET(
			be32_to_cpu(port_cmd->op_to_portid));

		/* Decode link status, pause settings and link speed. */
		word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
		link_ok = (word & FW_PORT_CMD_LSTATUS) != 0;
		speed = 0;
		fc = 0;
		if (word & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (word & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		/*
		 * Scan all of our "ports" (Virtual Interfaces) looking for
		 * those bound to the physical port which has changed. If
		 * our recorded state doesn't match the current state,
		 * signal that change to the OS code.
		 */
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);
			struct link_config *lc;

			if (pi->port_id != port_id)
				continue;

			lc = &pi->link_cfg;
			if (link_ok != lc->link_ok || speed != lc->speed ||
			    fc != lc->fc) {
				/* something changed */
				lc->link_ok = link_ok;
				lc->speed = speed;
				lc->fc = fc;
				t4vf_os_link_changed(adapter, pidx, link_ok);
			}
		}
		break;
	}

	default:
		/* Log and ignore opcodes we don't know how to handle. */
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	/* Always returns 0; unknown messages are logged, not fatal. */
	return 0;
}
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 08e82b1a0b33..d0824e322068 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -298,6 +298,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
298#define EMAC_CTRL_EWCTL (0x4) 298#define EMAC_CTRL_EWCTL (0x4)
299#define EMAC_CTRL_EWINTTCNT (0x8) 299#define EMAC_CTRL_EWINTTCNT (0x8)
300 300
301/* EMAC DM644x control module masks */
302#define EMAC_DM644X_EWINTCNT_MASK 0x1FFFF
303#define EMAC_DM644X_INTMIN_INTVL 0x1
304#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
305
301/* EMAC MDIO related */ 306/* EMAC MDIO related */
302/* Mask & Control defines */ 307/* Mask & Control defines */
303#define MDIO_CONTROL_CLKDIV (0xFF) 308#define MDIO_CONTROL_CLKDIV (0xFF)
@@ -318,8 +323,20 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
318#define MDIO_CONTROL (0x04) 323#define MDIO_CONTROL (0x04)
319 324
320/* EMAC DM646X control module registers */ 325/* EMAC DM646X control module registers */
321#define EMAC_DM646X_CMRXINTEN (0x14) 326#define EMAC_DM646X_CMINTCTRL 0x0C
322#define EMAC_DM646X_CMTXINTEN (0x18) 327#define EMAC_DM646X_CMRXINTEN 0x14
328#define EMAC_DM646X_CMTXINTEN 0x18
329#define EMAC_DM646X_CMRXINTMAX 0x70
330#define EMAC_DM646X_CMTXINTMAX 0x74
331
332/* EMAC DM646X control module masks */
333#define EMAC_DM646X_INTPACEEN (0x3 << 16)
334#define EMAC_DM646X_INTPRESCALE_MASK (0x7FF << 0)
335#define EMAC_DM646X_CMINTMAX_CNT 63
336#define EMAC_DM646X_CMINTMIN_CNT 2
337#define EMAC_DM646X_CMINTMAX_INTVL (1000 / EMAC_DM646X_CMINTMIN_CNT)
338#define EMAC_DM646X_CMINTMIN_INTVL ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1)
339
323 340
324/* EMAC EOI codes for C0 */ 341/* EMAC EOI codes for C0 */
325#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) 342#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
@@ -468,9 +485,10 @@ struct emac_priv {
468 u32 duplex; /* Link duplex: 0=Half, 1=Full */ 485 u32 duplex; /* Link duplex: 0=Half, 1=Full */
469 u32 rx_buf_size; 486 u32 rx_buf_size;
470 u32 isr_count; 487 u32 isr_count;
488 u32 coal_intvl;
489 u32 bus_freq_mhz;
471 u8 rmii_en; 490 u8 rmii_en;
472 u8 version; 491 u8 version;
473 struct net_device_stats net_dev_stats;
474 u32 mac_hash1; 492 u32 mac_hash1;
475 u32 mac_hash2; 493 u32 mac_hash2;
476 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; 494 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
@@ -546,9 +564,11 @@ static void emac_dump_regs(struct emac_priv *priv)
546 564
547 /* Print important registers in EMAC */ 565 /* Print important registers in EMAC */
548 dev_info(emac_dev, "EMAC Basic registers\n"); 566 dev_info(emac_dev, "EMAC Basic registers\n");
549 dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n", 567 if (priv->version == EMAC_VERSION_1) {
550 emac_ctrl_read(EMAC_CTRL_EWCTL), 568 dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
551 emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); 569 emac_ctrl_read(EMAC_CTRL_EWCTL),
570 emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
571 }
552 dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n", 572 dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
553 emac_read(EMAC_TXIDVER), 573 emac_read(EMAC_TXIDVER),
554 ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"), 574 ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
@@ -692,6 +712,103 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
692} 712}
693 713
694/** 714/**
715 * emac_get_coalesce : Get interrupt coalesce settings for this device
716 * @ndev : The DaVinci EMAC network adapter
717 * @coal : ethtool coalesce settings structure
718 *
719 * Fetch the current interrupt coalesce settings
720 *
721 */
722static int emac_get_coalesce(struct net_device *ndev,
723 struct ethtool_coalesce *coal)
724{
725 struct emac_priv *priv = netdev_priv(ndev);
726
727 coal->rx_coalesce_usecs = priv->coal_intvl;
728 return 0;
729
730}
731
732/**
733 * emac_set_coalesce : Set interrupt coalesce settings for this device
734 * @ndev : The DaVinci EMAC network adapter
735 * @coal : ethtool coalesce settings structure
736 *
737 * Set interrupt coalesce parameters
738 *
739 */
740static int emac_set_coalesce(struct net_device *ndev,
741 struct ethtool_coalesce *coal)
742{
743 struct emac_priv *priv = netdev_priv(ndev);
744 u32 int_ctrl, num_interrupts = 0;
745 u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
746
747 if (!coal->rx_coalesce_usecs)
748 return -EINVAL;
749
750 coal_intvl = coal->rx_coalesce_usecs;
751
752 switch (priv->version) {
753 case EMAC_VERSION_2:
754 int_ctrl = emac_ctrl_read(EMAC_DM646X_CMINTCTRL);
755 prescale = priv->bus_freq_mhz * 4;
756
757 if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL)
758 coal_intvl = EMAC_DM646X_CMINTMIN_INTVL;
759
760 if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) {
761 /*
762 * Interrupt pacer works with 4us Pulse, we can
763 * throttle further by dilating the 4us pulse.
764 */
765 addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale;
766
767 if (addnl_dvdr > 1) {
768 prescale *= addnl_dvdr;
769 if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL
770 * addnl_dvdr))
771 coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL
772 * addnl_dvdr);
773 } else {
774 addnl_dvdr = 1;
775 coal_intvl = EMAC_DM646X_CMINTMAX_INTVL;
776 }
777 }
778
779 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
780
781 int_ctrl |= EMAC_DM646X_INTPACEEN;
782 int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK);
783 int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK);
784 emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl);
785
786 emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts);
787 emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts);
788
789 break;
790 default:
791 int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT);
792 int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK);
793 prescale = coal_intvl * priv->bus_freq_mhz;
794 if (prescale > EMAC_DM644X_EWINTCNT_MASK) {
795 prescale = EMAC_DM644X_EWINTCNT_MASK;
796 coal_intvl = prescale / priv->bus_freq_mhz;
797 }
798 emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale));
799
800 break;
801 }
802
803 printk(KERN_INFO"Set coalesce to %d usecs.\n", coal_intvl);
804 priv->coal_intvl = coal_intvl;
805
806 return 0;
807
808}
809
810
811/**
695 * ethtool_ops: DaVinci EMAC Ethtool structure 812 * ethtool_ops: DaVinci EMAC Ethtool structure
696 * 813 *
697 * Ethtool support for EMAC adapter 814 * Ethtool support for EMAC adapter
@@ -702,6 +819,8 @@ static const struct ethtool_ops ethtool_ops = {
702 .get_settings = emac_get_settings, 819 .get_settings = emac_get_settings,
703 .set_settings = emac_set_settings, 820 .set_settings = emac_set_settings,
704 .get_link = ethtool_op_get_link, 821 .get_link = ethtool_op_get_link,
822 .get_coalesce = emac_get_coalesce,
823 .set_coalesce = emac_set_coalesce,
705}; 824};
706 825
707/** 826/**
@@ -1180,16 +1299,17 @@ static int emac_net_tx_complete(struct emac_priv *priv,
1180 void **net_data_tokens, 1299 void **net_data_tokens,
1181 int num_tokens, u32 ch) 1300 int num_tokens, u32 ch)
1182{ 1301{
1302 struct net_device *ndev = priv->ndev;
1183 u32 cnt; 1303 u32 cnt;
1184 1304
1185 if (unlikely(num_tokens && netif_queue_stopped(priv->ndev))) 1305 if (unlikely(num_tokens && netif_queue_stopped(ndev)))
1186 netif_start_queue(priv->ndev); 1306 netif_start_queue(ndev);
1187 for (cnt = 0; cnt < num_tokens; cnt++) { 1307 for (cnt = 0; cnt < num_tokens; cnt++) {
1188 struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt]; 1308 struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
1189 if (skb == NULL) 1309 if (skb == NULL)
1190 continue; 1310 continue;
1191 priv->net_dev_stats.tx_packets++; 1311 ndev->stats.tx_packets++;
1192 priv->net_dev_stats.tx_bytes += skb->len; 1312 ndev->stats.tx_bytes += skb->len;
1193 dev_kfree_skb_any(skb); 1313 dev_kfree_skb_any(skb);
1194 } 1314 }
1195 return 0; 1315 return 0;
@@ -1476,7 +1596,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1476 " err. Out of TX BD's"); 1596 " err. Out of TX BD's");
1477 netif_stop_queue(priv->ndev); 1597 netif_stop_queue(priv->ndev);
1478 } 1598 }
1479 priv->net_dev_stats.tx_dropped++; 1599 ndev->stats.tx_dropped++;
1480 return NETDEV_TX_BUSY; 1600 return NETDEV_TX_BUSY;
1481 } 1601 }
1482 1602
@@ -1501,7 +1621,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
1501 if (netif_msg_tx_err(priv)) 1621 if (netif_msg_tx_err(priv))
1502 dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); 1622 dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
1503 1623
1504 priv->net_dev_stats.tx_errors++; 1624 ndev->stats.tx_errors++;
1505 emac_int_disable(priv); 1625 emac_int_disable(priv);
1506 emac_stop_txch(priv, EMAC_DEF_TX_CH); 1626 emac_stop_txch(priv, EMAC_DEF_TX_CH);
1507 emac_cleanup_txch(priv, EMAC_DEF_TX_CH); 1627 emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
@@ -1926,14 +2046,14 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
1926static int emac_net_rx_cb(struct emac_priv *priv, 2046static int emac_net_rx_cb(struct emac_priv *priv,
1927 struct emac_netpktobj *net_pkt_list) 2047 struct emac_netpktobj *net_pkt_list)
1928{ 2048{
1929 struct sk_buff *p_skb; 2049 struct net_device *ndev = priv->ndev;
1930 p_skb = (struct sk_buff *)net_pkt_list->pkt_token; 2050 struct sk_buff *p_skb = net_pkt_list->pkt_token;
1931 /* set length of packet */ 2051 /* set length of packet */
1932 skb_put(p_skb, net_pkt_list->pkt_length); 2052 skb_put(p_skb, net_pkt_list->pkt_length);
1933 p_skb->protocol = eth_type_trans(p_skb, priv->ndev); 2053 p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
1934 netif_receive_skb(p_skb); 2054 netif_receive_skb(p_skb);
1935 priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length; 2055 ndev->stats.rx_bytes += net_pkt_list->pkt_length;
1936 priv->net_dev_stats.rx_packets++; 2056 ndev->stats.rx_packets++;
1937 return 0; 2057 return 0;
1938} 2058}
1939 2059
@@ -2148,7 +2268,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
2148 struct net_device *ndev = priv->ndev; 2268 struct net_device *ndev = priv->ndev;
2149 struct device *emac_dev = &ndev->dev; 2269 struct device *emac_dev = &ndev->dev;
2150 u32 status = 0; 2270 u32 status = 0;
2151 u32 num_pkts = 0; 2271 u32 num_tx_pkts = 0, num_rx_pkts = 0;
2152 2272
2153 /* Check interrupt vectors and call packet processing */ 2273 /* Check interrupt vectors and call packet processing */
2154 status = emac_read(EMAC_MACINVECTOR); 2274 status = emac_read(EMAC_MACINVECTOR);
@@ -2159,27 +2279,19 @@ static int emac_poll(struct napi_struct *napi, int budget)
2159 mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC; 2279 mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
2160 2280
2161 if (status & mask) { 2281 if (status & mask) {
2162 num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH, 2282 num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
2163 EMAC_DEF_TX_MAX_SERVICE); 2283 EMAC_DEF_TX_MAX_SERVICE);
2164 } /* TX processing */ 2284 } /* TX processing */
2165 2285
2166 if (num_pkts)
2167 return budget;
2168
2169 mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC; 2286 mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
2170 2287
2171 if (priv->version == EMAC_VERSION_2) 2288 if (priv->version == EMAC_VERSION_2)
2172 mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC; 2289 mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
2173 2290
2174 if (status & mask) { 2291 if (status & mask) {
2175 num_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget); 2292 num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
2176 } /* RX processing */ 2293 } /* RX processing */
2177 2294
2178 if (num_pkts < budget) {
2179 napi_complete(napi);
2180 emac_int_enable(priv);
2181 }
2182
2183 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT; 2295 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
2184 if (priv->version == EMAC_VERSION_2) 2296 if (priv->version == EMAC_VERSION_2)
2185 mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT; 2297 mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
@@ -2210,9 +2322,12 @@ static int emac_poll(struct napi_struct *napi, int budget)
2210 dev_err(emac_dev, "RX Host error %s on ch=%d\n", 2322 dev_err(emac_dev, "RX Host error %s on ch=%d\n",
2211 &emac_rxhost_errcodes[cause][0], ch); 2323 &emac_rxhost_errcodes[cause][0], ch);
2212 } 2324 }
2213 } /* Host error processing */ 2325 } else if (num_rx_pkts < budget) {
2326 napi_complete(napi);
2327 emac_int_enable(priv);
2328 }
2214 2329
2215 return num_pkts; 2330 return num_rx_pkts;
2216} 2331}
2217 2332
2218#ifdef CONFIG_NET_POLL_CONTROLLER 2333#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2437,6 +2552,14 @@ static int emac_dev_open(struct net_device *ndev)
2437 /* Start/Enable EMAC hardware */ 2552 /* Start/Enable EMAC hardware */
2438 emac_hw_enable(priv); 2553 emac_hw_enable(priv);
2439 2554
2555 /* Enable Interrupt pacing if configured */
2556 if (priv->coal_intvl != 0) {
2557 struct ethtool_coalesce coal;
2558
2559 coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
2560 emac_set_coalesce(ndev, &coal);
2561 }
2562
2440 /* find the first phy */ 2563 /* find the first phy */
2441 priv->phydev = NULL; 2564 priv->phydev = NULL;
2442 if (priv->phy_mask) { 2565 if (priv->phy_mask) {
@@ -2570,39 +2693,39 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
2570 else 2693 else
2571 stats_clear_mask = 0; 2694 stats_clear_mask = 0;
2572 2695
2573 priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES); 2696 ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
2574 emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask); 2697 emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
2575 2698
2576 priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) + 2699 ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) +
2577 emac_read(EMAC_TXSINGLECOLL) + 2700 emac_read(EMAC_TXSINGLECOLL) +
2578 emac_read(EMAC_TXMULTICOLL)); 2701 emac_read(EMAC_TXMULTICOLL));
2579 emac_write(EMAC_TXCOLLISION, stats_clear_mask); 2702 emac_write(EMAC_TXCOLLISION, stats_clear_mask);
2580 emac_write(EMAC_TXSINGLECOLL, stats_clear_mask); 2703 emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
2581 emac_write(EMAC_TXMULTICOLL, stats_clear_mask); 2704 emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
2582 2705
2583 priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + 2706 ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
2584 emac_read(EMAC_RXJABBER) + 2707 emac_read(EMAC_RXJABBER) +
2585 emac_read(EMAC_RXUNDERSIZED)); 2708 emac_read(EMAC_RXUNDERSIZED));
2586 emac_write(EMAC_RXOVERSIZED, stats_clear_mask); 2709 emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
2587 emac_write(EMAC_RXJABBER, stats_clear_mask); 2710 emac_write(EMAC_RXJABBER, stats_clear_mask);
2588 emac_write(EMAC_RXUNDERSIZED, stats_clear_mask); 2711 emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
2589 2712
2590 priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + 2713 ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
2591 emac_read(EMAC_RXMOFOVERRUNS)); 2714 emac_read(EMAC_RXMOFOVERRUNS));
2592 emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask); 2715 emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
2593 emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask); 2716 emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
2594 2717
2595 priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); 2718 ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
2596 emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask); 2719 emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
2597 2720
2598 priv->net_dev_stats.tx_carrier_errors += 2721 ndev->stats.tx_carrier_errors +=
2599 emac_read(EMAC_TXCARRIERSENSE); 2722 emac_read(EMAC_TXCARRIERSENSE);
2600 emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); 2723 emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
2601 2724
2602 priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); 2725 ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
2603 emac_write(EMAC_TXUNDERRUN, stats_clear_mask); 2726 emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
2604 2727
2605 return &priv->net_dev_stats; 2728 return &ndev->stats;
2606} 2729}
2607 2730
2608static const struct net_device_ops emac_netdev_ops = { 2731static const struct net_device_ops emac_netdev_ops = {
@@ -2677,6 +2800,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2677 priv->int_enable = pdata->interrupt_enable; 2800 priv->int_enable = pdata->interrupt_enable;
2678 priv->int_disable = pdata->interrupt_disable; 2801 priv->int_disable = pdata->interrupt_disable;
2679 2802
2803 priv->coal_intvl = 0;
2804 priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
2805
2680 emac_dev = &ndev->dev; 2806 emac_dev = &ndev->dev;
2681 /* Get EMAC platform data */ 2807 /* Get EMAC platform data */
2682 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2808 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index bf66e9b3b19e..44c0694c1f4e 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -2061,18 +2061,35 @@ static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2061 2061
2062static int __init depca_module_init (void) 2062static int __init depca_module_init (void)
2063{ 2063{
2064 int err = 0; 2064 int err = 0;
2065 2065
2066#ifdef CONFIG_MCA 2066#ifdef CONFIG_MCA
2067 err = mca_register_driver (&depca_mca_driver); 2067 err = mca_register_driver(&depca_mca_driver);
2068 if (err)
2069 goto err;
2068#endif 2070#endif
2069#ifdef CONFIG_EISA 2071#ifdef CONFIG_EISA
2070 err |= eisa_driver_register (&depca_eisa_driver); 2072 err = eisa_driver_register(&depca_eisa_driver);
2073 if (err)
2074 goto err_mca;
2071#endif 2075#endif
2072 err |= platform_driver_register (&depca_isa_driver); 2076 err = platform_driver_register(&depca_isa_driver);
2073 depca_platform_probe (); 2077 if (err)
2078 goto err_eisa;
2074 2079
2075 return err; 2080 depca_platform_probe();
2081 return 0;
2082
2083err_eisa:
2084#ifdef CONFIG_EISA
2085 eisa_driver_unregister(&depca_eisa_driver);
2086err_mca:
2087#endif
2088#ifdef CONFIG_MCA
2089 mca_unregister_driver(&depca_mca_driver);
2090err:
2091#endif
2092 return err;
2076} 2093}
2077 2094
2078static void __exit depca_module_exit (void) 2095static void __exit depca_module_exit (void)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index abcc838e18af..4fd6b2b4554b 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -961,7 +961,7 @@ struct dm9000_rxhdr {
961 u8 RxPktReady; 961 u8 RxPktReady;
962 u8 RxStatus; 962 u8 RxStatus;
963 __le16 RxLen; 963 __le16 RxLen;
964} __attribute__((__packed__)); 964} __packed;
965 965
966/* 966/*
967 * Received a packet and pass to upper layer 967 * Received a packet and pass to upper layer
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 8b0f50bbf3e5..7c075756611a 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -797,7 +797,7 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
797 if (!phydev) 797 if (!phydev)
798 return -ENODEV; 798 return -ENODEV;
799 799
800 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 800 return phy_mii_ioctl(phydev, rq, cmd);
801} 801}
802 802
803static void dnet_get_drvinfo(struct net_device *dev, 803static void dnet_get_drvinfo(struct net_device *dev,
@@ -854,7 +854,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
854 dev = alloc_etherdev(sizeof(*bp)); 854 dev = alloc_etherdev(sizeof(*bp));
855 if (!dev) { 855 if (!dev) {
856 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n"); 856 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
857 goto err_out; 857 goto err_out_release_mem;
858 } 858 }
859 859
860 /* TODO: Actually, we have some interesting features... */ 860 /* TODO: Actually, we have some interesting features... */
@@ -911,7 +911,8 @@ static int __devinit dnet_probe(struct platform_device *pdev)
911 if (err) 911 if (err)
912 dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); 912 dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
913 913
914 if (dnet_mii_init(bp) != 0) 914 err = dnet_mii_init(bp);
915 if (err)
915 goto err_out_unregister_netdev; 916 goto err_out_unregister_netdev;
916 917
917 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", 918 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
@@ -936,6 +937,8 @@ err_out_iounmap:
936 iounmap(bp->regs); 937 iounmap(bp->regs);
937err_out_free_dev: 938err_out_free_dev:
938 free_netdev(dev); 939 free_netdev(dev);
940err_out_release_mem:
941 release_mem_region(mem_base, mem_size);
939err_out: 942err_out:
940 return err; 943 return err;
941} 944}
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 40b62b406b08..99288b95aead 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -86,12 +86,12 @@ struct e1000_adapter;
86/* TX/RX descriptor defines */ 86/* TX/RX descriptor defines */
87#define E1000_DEFAULT_TXD 256 87#define E1000_DEFAULT_TXD 256
88#define E1000_MAX_TXD 256 88#define E1000_MAX_TXD 256
89#define E1000_MIN_TXD 80 89#define E1000_MIN_TXD 48
90#define E1000_MAX_82544_TXD 4096 90#define E1000_MAX_82544_TXD 4096
91 91
92#define E1000_DEFAULT_RXD 256 92#define E1000_DEFAULT_RXD 256
93#define E1000_MAX_RXD 256 93#define E1000_MAX_RXD 256
94#define E1000_MIN_RXD 80 94#define E1000_MIN_RXD 48
95#define E1000_MAX_82544_RXD 4096 95#define E1000_MAX_82544_RXD 4096
96 96
97#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ 97#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
@@ -324,18 +324,20 @@ enum e1000_state_t {
324extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); 324extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
325#define e_dbg(format, arg...) \ 325#define e_dbg(format, arg...) \
326 netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) 326 netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
327#define e_err(format, arg...) \ 327#define e_err(msglvl, format, arg...) \
328 netdev_err(adapter->netdev, format, ## arg) 328 netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
329#define e_info(format, arg...) \ 329#define e_info(msglvl, format, arg...) \
330 netdev_info(adapter->netdev, format, ## arg) 330 netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
331#define e_warn(format, arg...) \ 331#define e_warn(msglvl, format, arg...) \
332 netdev_warn(adapter->netdev, format, ## arg) 332 netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
333#define e_notice(format, arg...) \ 333#define e_notice(msglvl, format, arg...) \
334 netdev_notice(adapter->netdev, format, ## arg) 334 netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
335#define e_dev_info(format, arg...) \ 335#define e_dev_info(format, arg...) \
336 dev_info(&adapter->pdev->dev, format, ## arg) 336 dev_info(&adapter->pdev->dev, format, ## arg)
337#define e_dev_warn(format, arg...) \ 337#define e_dev_warn(format, arg...) \
338 dev_warn(&adapter->pdev->dev, format, ## arg) 338 dev_warn(&adapter->pdev->dev, format, ## arg)
339#define e_dev_err(format, arg...) \
340 dev_err(&adapter->pdev->dev, format, ## arg)
339 341
340extern char e1000_driver_name[]; 342extern char e1000_driver_name[];
341extern const char e1000_driver_version[]; 343extern const char e1000_driver_version[];
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d5ff029aa7b2..f4d0922ec65b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
346 346
347 netdev->features &= ~NETIF_F_TSO6; 347 netdev->features &= ~NETIF_F_TSO6;
348 348
349 e_info("TSO is %s\n", data ? "Enabled" : "Disabled"); 349 e_info(probe, "TSO is %s\n", data ? "Enabled" : "Disabled");
350 adapter->tso_force = true; 350 adapter->tso_force = true;
351 return 0; 351 return 0;
352} 352}
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
714 writel(write & test[i], address); 714 writel(write & test[i], address);
715 read = readl(address); 715 read = readl(address);
716 if (read != (write & test[i] & mask)) { 716 if (read != (write & test[i] & mask)) {
717 e_info("pattern test reg %04X failed: " 717 e_err(drv, "pattern test reg %04X failed: "
718 "got 0x%08X expected 0x%08X\n", 718 "got 0x%08X expected 0x%08X\n",
719 reg, read, (write & test[i] & mask)); 719 reg, read, (write & test[i] & mask));
720 *data = reg; 720 *data = reg;
721 return true; 721 return true;
722 } 722 }
@@ -734,7 +734,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
734 writel(write & mask, address); 734 writel(write & mask, address);
735 read = readl(address); 735 read = readl(address);
736 if ((read & mask) != (write & mask)) { 736 if ((read & mask) != (write & mask)) {
737 e_err("set/check reg %04X test failed: " 737 e_err(drv, "set/check reg %04X test failed: "
738 "got 0x%08X expected 0x%08X\n", 738 "got 0x%08X expected 0x%08X\n",
739 reg, (read & mask), (write & mask)); 739 reg, (read & mask), (write & mask));
740 *data = reg; 740 *data = reg;
@@ -779,7 +779,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
779 ew32(STATUS, toggle); 779 ew32(STATUS, toggle);
780 after = er32(STATUS) & toggle; 780 after = er32(STATUS) & toggle;
781 if (value != after) { 781 if (value != after) {
782 e_err("failed STATUS register test got: " 782 e_err(drv, "failed STATUS register test got: "
783 "0x%08X expected: 0x%08X\n", after, value); 783 "0x%08X expected: 0x%08X\n", after, value);
784 *data = 1; 784 *data = 1;
785 return 1; 785 return 1;
@@ -894,7 +894,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
894 *data = 1; 894 *data = 1;
895 return -1; 895 return -1;
896 } 896 }
897 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); 897 e_info(hw, "testing %s interrupt\n", (shared_int ?
898 "shared" : "unshared"));
898 899
899 /* Disable all the interrupts */ 900 /* Disable all the interrupts */
900 ew32(IMC, 0xFFFFFFFF); 901 ew32(IMC, 0xFFFFFFFF);
@@ -1561,7 +1562,7 @@ static void e1000_diag_test(struct net_device *netdev,
1561 u8 forced_speed_duplex = hw->forced_speed_duplex; 1562 u8 forced_speed_duplex = hw->forced_speed_duplex;
1562 u8 autoneg = hw->autoneg; 1563 u8 autoneg = hw->autoneg;
1563 1564
1564 e_info("offline testing starting\n"); 1565 e_info(hw, "offline testing starting\n");
1565 1566
1566 /* Link test performed before hardware reset so autoneg doesn't 1567 /* Link test performed before hardware reset so autoneg doesn't
1567 * interfere with test result */ 1568 * interfere with test result */
@@ -1601,7 +1602,7 @@ static void e1000_diag_test(struct net_device *netdev,
1601 if (if_running) 1602 if (if_running)
1602 dev_open(netdev); 1603 dev_open(netdev);
1603 } else { 1604 } else {
1604 e_info("online testing starting\n"); 1605 e_info(hw, "online testing starting\n");
1605 /* Online tests */ 1606 /* Online tests */
1606 if (e1000_link_test(adapter, &data[4])) 1607 if (e1000_link_test(adapter, &data[4]))
1607 eth_test->flags |= ETH_TEST_FL_FAILED; 1608 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1694,8 +1695,8 @@ static void e1000_get_wol(struct net_device *netdev,
1694 wol->supported &= ~WAKE_UCAST; 1695 wol->supported &= ~WAKE_UCAST;
1695 1696
1696 if (adapter->wol & E1000_WUFC_EX) 1697 if (adapter->wol & E1000_WUFC_EX)
1697 e_err("Interface does not support " 1698 e_err(drv, "Interface does not support directed "
1698 "directed (unicast) frame wake-up packets\n"); 1699 "(unicast) frame wake-up packets\n");
1699 break; 1700 break;
1700 default: 1701 default:
1701 break; 1702 break;
@@ -1726,8 +1727,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1726 switch (hw->device_id) { 1727 switch (hw->device_id) {
1727 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1728 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1728 if (wol->wolopts & WAKE_UCAST) { 1729 if (wol->wolopts & WAKE_UCAST) {
1729 e_err("Interface does not support " 1730 e_err(drv, "Interface does not support directed "
1730 "directed (unicast) frame wake-up packets\n"); 1731 "(unicast) frame wake-up packets\n");
1731 return -EOPNOTSUPP; 1732 return -EOPNOTSUPP;
1732 } 1733 }
1733 break; 1734 break;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 68a80893dce1..02833af8a0b1 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -275,7 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
275 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 275 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
276 netdev); 276 netdev);
277 if (err) { 277 if (err) {
278 e_err("Unable to allocate interrupt Error: %d\n", err); 278 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
279 } 279 }
280 280
281 return err; 281 return err;
@@ -657,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
657 ew32(WUC, 0); 657 ew32(WUC, 0);
658 658
659 if (e1000_init_hw(hw)) 659 if (e1000_init_hw(hw))
660 e_err("Hardware Error\n"); 660 e_dev_err("Hardware Error\n");
661 e1000_update_mng_vlan(adapter); 661 e1000_update_mng_vlan(adapter);
662 662
663 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ 663 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -925,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
925 925
926 /* initialize eeprom parameters */ 926 /* initialize eeprom parameters */
927 if (e1000_init_eeprom_params(hw)) { 927 if (e1000_init_eeprom_params(hw)) {
928 e_err("EEPROM initialization failed\n"); 928 e_err(probe, "EEPROM initialization failed\n");
929 goto err_eeprom; 929 goto err_eeprom;
930 } 930 }
931 931
@@ -936,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
936 936
937 /* make sure the EEPROM is good */ 937 /* make sure the EEPROM is good */
938 if (e1000_validate_eeprom_checksum(hw) < 0) { 938 if (e1000_validate_eeprom_checksum(hw) < 0) {
939 e_err("The EEPROM Checksum Is Not Valid\n"); 939 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
940 e1000_dump_eeprom(adapter); 940 e1000_dump_eeprom(adapter);
941 /* 941 /*
942 * set MAC address to all zeroes to invalidate and temporary 942 * set MAC address to all zeroes to invalidate and temporary
@@ -950,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
950 } else { 950 } else {
951 /* copy the MAC address out of the EEPROM */ 951 /* copy the MAC address out of the EEPROM */
952 if (e1000_read_mac_addr(hw)) 952 if (e1000_read_mac_addr(hw))
953 e_err("EEPROM Read Error\n"); 953 e_err(probe, "EEPROM Read Error\n");
954 } 954 }
 955 /* don't block initialization here due to bad MAC address */ 955 /* don't block initialization here due to bad MAC address */
956 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); 956 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
957 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); 957 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
958 958
959 if (!is_valid_ether_addr(netdev->perm_addr)) 959 if (!is_valid_ether_addr(netdev->perm_addr))
960 e_err("Invalid MAC Address\n"); 960 e_err(probe, "Invalid MAC Address\n");
961 961
962 e1000_get_bus_info(hw); 962 e1000_get_bus_info(hw);
963 963
@@ -1047,7 +1047,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1047 goto err_register; 1047 goto err_register;
1048 1048
1049 /* print bus type/speed/width info */ 1049 /* print bus type/speed/width info */
1050 e_info("(PCI%s:%dMHz:%d-bit) %pM\n", 1050 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1051 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), 1051 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1052 ((hw->bus_speed == e1000_bus_speed_133) ? 133 : 1052 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1053 (hw->bus_speed == e1000_bus_speed_120) ? 120 : 1053 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
@@ -1059,7 +1059,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1059 /* carrier off reporting is important to ethtool even BEFORE open */ 1059 /* carrier off reporting is important to ethtool even BEFORE open */
1060 netif_carrier_off(netdev); 1060 netif_carrier_off(netdev);
1061 1061
1062 e_info("Intel(R) PRO/1000 Network Connection\n"); 1062 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1063 1063
1064 cards_found++; 1064 cards_found++;
1065 return 0; 1065 return 0;
@@ -1159,7 +1159,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1159 /* identify the MAC */ 1159 /* identify the MAC */
1160 1160
1161 if (e1000_set_mac_type(hw)) { 1161 if (e1000_set_mac_type(hw)) {
1162 e_err("Unknown MAC Type\n"); 1162 e_err(probe, "Unknown MAC Type\n");
1163 return -EIO; 1163 return -EIO;
1164 } 1164 }
1165 1165
@@ -1192,7 +1192,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1192 adapter->num_rx_queues = 1; 1192 adapter->num_rx_queues = 1;
1193 1193
1194 if (e1000_alloc_queues(adapter)) { 1194 if (e1000_alloc_queues(adapter)) {
1195 e_err("Unable to allocate memory for queues\n"); 1195 e_err(probe, "Unable to allocate memory for queues\n");
1196 return -ENOMEM; 1196 return -ENOMEM;
1197 } 1197 }
1198 1198
@@ -1386,7 +1386,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1386 size = sizeof(struct e1000_buffer) * txdr->count; 1386 size = sizeof(struct e1000_buffer) * txdr->count;
1387 txdr->buffer_info = vmalloc(size); 1387 txdr->buffer_info = vmalloc(size);
1388 if (!txdr->buffer_info) { 1388 if (!txdr->buffer_info) {
1389 e_err("Unable to allocate memory for the Tx descriptor ring\n"); 1389 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1390 "ring\n");
1390 return -ENOMEM; 1391 return -ENOMEM;
1391 } 1392 }
1392 memset(txdr->buffer_info, 0, size); 1393 memset(txdr->buffer_info, 0, size);
@@ -1401,7 +1402,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1401 if (!txdr->desc) { 1402 if (!txdr->desc) {
1402setup_tx_desc_die: 1403setup_tx_desc_die:
1403 vfree(txdr->buffer_info); 1404 vfree(txdr->buffer_info);
1404 e_err("Unable to allocate memory for the Tx descriptor ring\n"); 1405 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1406 "ring\n");
1405 return -ENOMEM; 1407 return -ENOMEM;
1406 } 1408 }
1407 1409
@@ -1409,7 +1411,7 @@ setup_tx_desc_die:
1409 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1411 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1410 void *olddesc = txdr->desc; 1412 void *olddesc = txdr->desc;
1411 dma_addr_t olddma = txdr->dma; 1413 dma_addr_t olddma = txdr->dma;
1412 e_err("txdr align check failed: %u bytes at %p\n", 1414 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1413 txdr->size, txdr->desc); 1415 txdr->size, txdr->desc);
1414 /* Try again, without freeing the previous */ 1416 /* Try again, without freeing the previous */
1415 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, 1417 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
@@ -1427,7 +1429,7 @@ setup_tx_desc_die:
1427 txdr->dma); 1429 txdr->dma);
1428 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1430 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1429 olddma); 1431 olddma);
1430 e_err("Unable to allocate aligned memory " 1432 e_err(probe, "Unable to allocate aligned memory "
1431 "for the transmit descriptor ring\n"); 1433 "for the transmit descriptor ring\n");
1432 vfree(txdr->buffer_info); 1434 vfree(txdr->buffer_info);
1433 return -ENOMEM; 1435 return -ENOMEM;
@@ -1460,7 +1462,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1460 for (i = 0; i < adapter->num_tx_queues; i++) { 1462 for (i = 0; i < adapter->num_tx_queues; i++) {
1461 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1463 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1462 if (err) { 1464 if (err) {
1463 e_err("Allocation for Tx Queue %u failed\n", i); 1465 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1464 for (i-- ; i >= 0; i--) 1466 for (i-- ; i >= 0; i--)
1465 e1000_free_tx_resources(adapter, 1467 e1000_free_tx_resources(adapter,
1466 &adapter->tx_ring[i]); 1468 &adapter->tx_ring[i]);
@@ -1580,7 +1582,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1580 size = sizeof(struct e1000_buffer) * rxdr->count; 1582 size = sizeof(struct e1000_buffer) * rxdr->count;
1581 rxdr->buffer_info = vmalloc(size); 1583 rxdr->buffer_info = vmalloc(size);
1582 if (!rxdr->buffer_info) { 1584 if (!rxdr->buffer_info) {
1583 e_err("Unable to allocate memory for the Rx descriptor ring\n"); 1585 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1586 "ring\n");
1584 return -ENOMEM; 1587 return -ENOMEM;
1585 } 1588 }
1586 memset(rxdr->buffer_info, 0, size); 1589 memset(rxdr->buffer_info, 0, size);
@@ -1596,7 +1599,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1596 GFP_KERNEL); 1599 GFP_KERNEL);
1597 1600
1598 if (!rxdr->desc) { 1601 if (!rxdr->desc) {
1599 e_err("Unable to allocate memory for the Rx descriptor ring\n"); 1602 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1603 "ring\n");
1600setup_rx_desc_die: 1604setup_rx_desc_die:
1601 vfree(rxdr->buffer_info); 1605 vfree(rxdr->buffer_info);
1602 return -ENOMEM; 1606 return -ENOMEM;
@@ -1606,7 +1610,7 @@ setup_rx_desc_die:
1606 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1610 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1607 void *olddesc = rxdr->desc; 1611 void *olddesc = rxdr->desc;
1608 dma_addr_t olddma = rxdr->dma; 1612 dma_addr_t olddma = rxdr->dma;
1609 e_err("rxdr align check failed: %u bytes at %p\n", 1613 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1610 rxdr->size, rxdr->desc); 1614 rxdr->size, rxdr->desc);
1611 /* Try again, without freeing the previous */ 1615 /* Try again, without freeing the previous */
1612 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, 1616 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
@@ -1615,8 +1619,8 @@ setup_rx_desc_die:
1615 if (!rxdr->desc) { 1619 if (!rxdr->desc) {
1616 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1620 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1617 olddma); 1621 olddma);
1618 e_err("Unable to allocate memory for the Rx descriptor " 1622 e_err(probe, "Unable to allocate memory for the Rx "
1619 "ring\n"); 1623 "descriptor ring\n");
1620 goto setup_rx_desc_die; 1624 goto setup_rx_desc_die;
1621 } 1625 }
1622 1626
@@ -1626,8 +1630,8 @@ setup_rx_desc_die:
1626 rxdr->dma); 1630 rxdr->dma);
1627 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1631 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1628 olddma); 1632 olddma);
1629 e_err("Unable to allocate aligned memory for the Rx " 1633 e_err(probe, "Unable to allocate aligned memory for "
1630 "descriptor ring\n"); 1634 "the Rx descriptor ring\n");
1631 goto setup_rx_desc_die; 1635 goto setup_rx_desc_die;
1632 } else { 1636 } else {
1633 /* Free old allocation, new allocation was successful */ 1637 /* Free old allocation, new allocation was successful */
@@ -1659,7 +1663,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1659 for (i = 0; i < adapter->num_rx_queues; i++) { 1663 for (i = 0; i < adapter->num_rx_queues; i++) {
1660 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1664 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1661 if (err) { 1665 if (err) {
1662 e_err("Allocation for Rx Queue %u failed\n", i); 1666 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1663 for (i-- ; i >= 0; i--) 1667 for (i-- ; i >= 0; i--)
1664 e1000_free_rx_resources(adapter, 1668 e1000_free_rx_resources(adapter,
1665 &adapter->rx_ring[i]); 1669 &adapter->rx_ring[i]);
@@ -2110,7 +2114,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2110 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2114 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2111 2115
2112 if (!mcarray) { 2116 if (!mcarray) {
2113 e_err("memory allocation failed\n"); 2117 e_err(probe, "memory allocation failed\n");
2114 return; 2118 return;
2115 } 2119 }
2116 2120
@@ -2648,7 +2652,8 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
2648 break; 2652 break;
2649 default: 2653 default:
2650 if (unlikely(net_ratelimit())) 2654 if (unlikely(net_ratelimit()))
2651 e_warn("checksum_partial proto=%x!\n", skb->protocol); 2655 e_warn(drv, "checksum_partial proto=%x!\n",
2656 skb->protocol);
2652 break; 2657 break;
2653 } 2658 }
2654 2659
@@ -2992,7 +2997,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
2992 /* fall through */ 2997 /* fall through */
2993 pull_size = min((unsigned int)4, skb->data_len); 2998 pull_size = min((unsigned int)4, skb->data_len);
2994 if (!__pskb_pull_tail(skb, pull_size)) { 2999 if (!__pskb_pull_tail(skb, pull_size)) {
2995 e_err("__pskb_pull_tail failed.\n"); 3000 e_err(drv, "__pskb_pull_tail "
3001 "failed.\n");
2996 dev_kfree_skb_any(skb); 3002 dev_kfree_skb_any(skb);
2997 return NETDEV_TX_OK; 3003 return NETDEV_TX_OK;
2998 } 3004 }
@@ -3140,7 +3146,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3140 3146
3141 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3147 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3142 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3148 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3143 e_err("Invalid MTU setting\n"); 3149 e_err(probe, "Invalid MTU setting\n");
3144 return -EINVAL; 3150 return -EINVAL;
3145 } 3151 }
3146 3152
@@ -3148,7 +3154,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3148 switch (hw->mac_type) { 3154 switch (hw->mac_type) {
3149 case e1000_undefined ... e1000_82542_rev2_1: 3155 case e1000_undefined ... e1000_82542_rev2_1:
3150 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3156 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3151 e_err("Jumbo Frames not supported.\n"); 3157 e_err(probe, "Jumbo Frames not supported.\n");
3152 return -EINVAL; 3158 return -EINVAL;
3153 } 3159 }
3154 break; 3160 break;
@@ -3500,7 +3506,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3500 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3506 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3501 3507
3502 /* detected Tx unit hang */ 3508 /* detected Tx unit hang */
3503 e_err("Detected Tx Unit Hang\n" 3509 e_err(drv, "Detected Tx Unit Hang\n"
3504 " Tx Queue <%lu>\n" 3510 " Tx Queue <%lu>\n"
3505 " TDH <%x>\n" 3511 " TDH <%x>\n"
3506 " TDT <%x>\n" 3512 " TDT <%x>\n"
@@ -3749,7 +3755,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3749 3755
3750 /* eth type trans needs skb->data to point to something */ 3756 /* eth type trans needs skb->data to point to something */
3751 if (!pskb_may_pull(skb, ETH_HLEN)) { 3757 if (!pskb_may_pull(skb, ETH_HLEN)) {
3752 e_err("pskb_may_pull failed.\n"); 3758 e_err(drv, "pskb_may_pull failed.\n");
3753 dev_kfree_skb(skb); 3759 dev_kfree_skb(skb);
3754 goto next_desc; 3760 goto next_desc;
3755 } 3761 }
@@ -3874,7 +3880,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3874 3880
3875 if (adapter->discarding) { 3881 if (adapter->discarding) {
3876 /* All receives must fit into a single buffer */ 3882 /* All receives must fit into a single buffer */
3877 e_info("Receive packet consumed multiple buffers\n"); 3883 e_dbg("Receive packet consumed multiple buffers\n");
3878 /* recycle */ 3884 /* recycle */
3879 buffer_info->skb = skb; 3885 buffer_info->skb = skb;
3880 if (status & E1000_RXD_STAT_EOP) 3886 if (status & E1000_RXD_STAT_EOP)
@@ -3986,8 +3992,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3986 /* Fix for errata 23, can't cross 64kB boundary */ 3992 /* Fix for errata 23, can't cross 64kB boundary */
3987 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 3993 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
3988 struct sk_buff *oldskb = skb; 3994 struct sk_buff *oldskb = skb;
3989 e_err("skb align check failed: %u bytes at %p\n", 3995 e_err(rx_err, "skb align check failed: %u bytes at "
3990 bufsz, skb->data); 3996 "%p\n", bufsz, skb->data);
3991 /* Try again, without freeing the previous */ 3997 /* Try again, without freeing the previous */
3992 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 3998 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3993 /* Failed allocation, critical failure */ 3999 /* Failed allocation, critical failure */
@@ -4095,8 +4101,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4095 /* Fix for errata 23, can't cross 64kB boundary */ 4101 /* Fix for errata 23, can't cross 64kB boundary */
4096 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4102 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4097 struct sk_buff *oldskb = skb; 4103 struct sk_buff *oldskb = skb;
4098 e_err("skb align check failed: %u bytes at %p\n", 4104 e_err(rx_err, "skb align check failed: %u bytes at "
4099 bufsz, skb->data); 4105 "%p\n", bufsz, skb->data);
4100 /* Try again, without freeing the previous */ 4106 /* Try again, without freeing the previous */
4101 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4107 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4102 /* Failed allocation, critical failure */ 4108 /* Failed allocation, critical failure */
@@ -4141,8 +4147,8 @@ map_skb:
4141 if (!e1000_check_64k_bound(adapter, 4147 if (!e1000_check_64k_bound(adapter,
4142 (void *)(unsigned long)buffer_info->dma, 4148 (void *)(unsigned long)buffer_info->dma,
4143 adapter->rx_buffer_len)) { 4149 adapter->rx_buffer_len)) {
4144 e_err("dma align check failed: %u bytes at %p\n", 4150 e_err(rx_err, "dma align check failed: %u bytes at "
4145 adapter->rx_buffer_len, 4151 "%p\n", adapter->rx_buffer_len,
4146 (void *)(unsigned long)buffer_info->dma); 4152 (void *)(unsigned long)buffer_info->dma);
4147 dev_kfree_skb(skb); 4153 dev_kfree_skb(skb);
4148 buffer_info->skb = NULL; 4154 buffer_info->skb = NULL;
@@ -4355,7 +4361,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
4355 int ret_val = pci_set_mwi(adapter->pdev); 4361 int ret_val = pci_set_mwi(adapter->pdev);
4356 4362
4357 if (ret_val) 4363 if (ret_val)
4358 e_err("Error in setting MWI\n"); 4364 e_err(probe, "Error in setting MWI\n");
4359} 4365}
4360 4366
4361void e1000_pci_clear_mwi(struct e1000_hw *hw) 4367void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4486,7 +4492,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4486 /* Fiber NICs only allow 1000 gbps Full duplex */ 4492 /* Fiber NICs only allow 1000 gbps Full duplex */
4487 if ((hw->media_type == e1000_media_type_fiber) && 4493 if ((hw->media_type == e1000_media_type_fiber) &&
4488 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4494 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4489 e_err("Unsupported Speed/Duplex configuration\n"); 4495 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4490 return -EINVAL; 4496 return -EINVAL;
4491 } 4497 }
4492 4498
@@ -4509,7 +4515,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4509 break; 4515 break;
4510 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 4516 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4511 default: 4517 default:
4512 e_err("Unsupported Speed/Duplex configuration\n"); 4518 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4513 return -EINVAL; 4519 return -EINVAL;
4514 } 4520 }
4515 return 0; 4521 return 0;
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index f654db9121de..a4a0d2b6eb1c 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 4dc02c71ffd6..307a72f483ee 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -359,6 +359,7 @@
359#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 359#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
360#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 360#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
361#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 361#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
362#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
362#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 363#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
363#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 364#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
364#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 365#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
@@ -714,6 +715,7 @@
714#define BME1000_E_PHY_ID_R2 0x01410CB1 715#define BME1000_E_PHY_ID_R2 0x01410CB1
715#define I82577_E_PHY_ID 0x01540050 716#define I82577_E_PHY_ID 0x01540050
716#define I82578_E_PHY_ID 0x004DD040 717#define I82578_E_PHY_ID 0x004DD040
718#define I82579_E_PHY_ID 0x01540090
717 719
718/* M88E1000 Specific Registers */ 720/* M88E1000 Specific Registers */
719#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 721#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c0b3db40bd73..f9a31c82f871 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -164,6 +164,7 @@ enum e1000_boards {
164 board_ich9lan, 164 board_ich9lan,
165 board_ich10lan, 165 board_ich10lan,
166 board_pchlan, 166 board_pchlan,
167 board_pch2lan,
167}; 168};
168 169
169struct e1000_queue_stats { 170struct e1000_queue_stats {
@@ -347,6 +348,7 @@ struct e1000_adapter {
347 u32 test_icr; 348 u32 test_icr;
348 349
349 u32 msg_enable; 350 u32 msg_enable;
351 unsigned int num_vectors;
350 struct msix_entry *msix_entries; 352 struct msix_entry *msix_entries;
351 int int_mode; 353 int int_mode;
352 u32 eiac_mask; 354 u32 eiac_mask;
@@ -421,6 +423,8 @@ struct e1000_info {
421#define FLAG2_HAS_PHY_WAKEUP (1 << 1) 423#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
422#define FLAG2_IS_DISCARDING (1 << 2) 424#define FLAG2_IS_DISCARDING (1 << 2)
423#define FLAG2_DISABLE_ASPM_L1 (1 << 3) 425#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
426#define FLAG2_HAS_PHY_STATS (1 << 4)
427#define FLAG2_HAS_EEE (1 << 5)
424 428
425#define E1000_RX_DESC_PS(R, i) \ 429#define E1000_RX_DESC_PS(R, i) \
426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 430 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -458,7 +462,6 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
458extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 462extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
459extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 463extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
460extern void e1000e_update_stats(struct e1000_adapter *adapter); 464extern void e1000e_update_stats(struct e1000_adapter *adapter);
461extern bool e1000e_has_link(struct e1000_adapter *adapter);
462extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 465extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
463extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 466extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
464extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); 467extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
@@ -476,6 +479,7 @@ extern struct e1000_info e1000_ich8_info;
476extern struct e1000_info e1000_ich9_info; 479extern struct e1000_info e1000_ich9_info;
477extern struct e1000_info e1000_ich10_info; 480extern struct e1000_info e1000_ich10_info;
478extern struct e1000_info e1000_pch_info; 481extern struct e1000_info e1000_pch_info;
482extern struct e1000_info e1000_pch2_info;
479extern struct e1000_info e1000_es2_info; 483extern struct e1000_info e1000_es2_info;
480 484
481extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); 485extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -494,6 +498,8 @@ extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
494extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); 498extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
495extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); 499extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
496extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); 500extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
501extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
502extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
497 503
498extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); 504extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
499extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); 505extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 38d79a669059..45aebb4a6fe1 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 2c521218102b..6355a1b779d3 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -118,7 +118,6 @@ static int e1000_get_settings(struct net_device *netdev,
118{ 118{
119 struct e1000_adapter *adapter = netdev_priv(netdev); 119 struct e1000_adapter *adapter = netdev_priv(netdev);
120 struct e1000_hw *hw = &adapter->hw; 120 struct e1000_hw *hw = &adapter->hw;
121 u32 status;
122 121
123 if (hw->phy.media_type == e1000_media_type_copper) { 122 if (hw->phy.media_type == e1000_media_type_copper) {
124 123
@@ -156,22 +155,29 @@ static int e1000_get_settings(struct net_device *netdev,
156 ecmd->transceiver = XCVR_EXTERNAL; 155 ecmd->transceiver = XCVR_EXTERNAL;
157 } 156 }
158 157
159 status = er32(STATUS); 158 ecmd->speed = -1;
160 if (status & E1000_STATUS_LU) { 159 ecmd->duplex = -1;
161 if (status & E1000_STATUS_SPEED_1000)
162 ecmd->speed = 1000;
163 else if (status & E1000_STATUS_SPEED_100)
164 ecmd->speed = 100;
165 else
166 ecmd->speed = 10;
167 160
168 if (status & E1000_STATUS_FD) 161 if (netif_running(netdev)) {
169 ecmd->duplex = DUPLEX_FULL; 162 if (netif_carrier_ok(netdev)) {
170 else 163 ecmd->speed = adapter->link_speed;
171 ecmd->duplex = DUPLEX_HALF; 164 ecmd->duplex = adapter->link_duplex - 1;
165 }
172 } else { 166 } else {
173 ecmd->speed = -1; 167 u32 status = er32(STATUS);
174 ecmd->duplex = -1; 168 if (status & E1000_STATUS_LU) {
169 if (status & E1000_STATUS_SPEED_1000)
170 ecmd->speed = 1000;
171 else if (status & E1000_STATUS_SPEED_100)
172 ecmd->speed = 100;
173 else
174 ecmd->speed = 10;
175
176 if (status & E1000_STATUS_FD)
177 ecmd->duplex = DUPLEX_FULL;
178 else
179 ecmd->duplex = DUPLEX_HALF;
180 }
175 } 181 }
176 182
177 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || 183 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
@@ -179,7 +185,7 @@ static int e1000_get_settings(struct net_device *netdev,
179 185
180 /* MDI-X => 2; MDI =>1; Invalid =>0 */ 186 /* MDI-X => 2; MDI =>1; Invalid =>0 */
181 if ((hw->phy.media_type == e1000_media_type_copper) && 187 if ((hw->phy.media_type == e1000_media_type_copper) &&
182 !hw->mac.get_link_status) 188 netif_carrier_ok(netdev))
183 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : 189 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
184 ETH_TP_MDI; 190 ETH_TP_MDI;
185 else 191 else
@@ -191,19 +197,15 @@ static int e1000_get_settings(struct net_device *netdev,
191static u32 e1000_get_link(struct net_device *netdev) 197static u32 e1000_get_link(struct net_device *netdev)
192{ 198{
193 struct e1000_adapter *adapter = netdev_priv(netdev); 199 struct e1000_adapter *adapter = netdev_priv(netdev);
194 struct e1000_mac_info *mac = &adapter->hw.mac; 200 struct e1000_hw *hw = &adapter->hw;
195 201
196 /* 202 /*
197 * If the link is not reported up to netdev, interrupts are disabled, 203 * Avoid touching hardware registers when possible, otherwise
198 * and so the physical link state may have changed since we last 204 * link negotiation can get messed up when user-level scripts
199 * looked. Set get_link_status to make sure that the true link 205 * are rapidly polling the driver to see if link is up.
200 * state is interrogated, rather than pulling a cached and possibly
201 * stale link state from the driver.
202 */ 206 */
203 if (!netif_carrier_ok(netdev)) 207 return netif_running(netdev) ? netif_carrier_ok(netdev) :
204 mac->get_link_status = 1; 208 !!(er32(STATUS) & E1000_STATUS_LU);
205
206 return e1000e_has_link(adapter);
207} 209}
208 210
209static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 211static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
@@ -880,6 +882,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
880 switch (mac->type) { 882 switch (mac->type) {
881 case e1000_ich10lan: 883 case e1000_ich10lan:
882 case e1000_pchlan: 884 case e1000_pchlan:
885 case e1000_pch2lan:
883 mask |= (1 << 18); 886 mask |= (1 << 18);
884 break; 887 break;
885 default: 888 default:
@@ -1263,33 +1266,36 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1263 1266
1264 hw->mac.autoneg = 0; 1267 hw->mac.autoneg = 0;
1265 1268
1266 /* Workaround: K1 must be disabled for stable 1Gbps operation */ 1269 if (hw->phy.type == e1000_phy_ife) {
1267 if (hw->mac.type == e1000_pchlan)
1268 e1000_configure_k1_ich8lan(hw, false);
1269
1270 if (hw->phy.type == e1000_phy_m88) {
1271 /* Auto-MDI/MDIX Off */
1272 e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1273 /* reset to update Auto-MDI/MDIX */
1274 e1e_wphy(hw, PHY_CONTROL, 0x9140);
1275 /* autoneg off */
1276 e1e_wphy(hw, PHY_CONTROL, 0x8140);
1277 } else if (hw->phy.type == e1000_phy_gg82563)
1278 e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
1279
1280 ctrl_reg = er32(CTRL);
1281
1282 switch (hw->phy.type) {
1283 case e1000_phy_ife:
1284 /* force 100, set loopback */ 1270 /* force 100, set loopback */
1285 e1e_wphy(hw, PHY_CONTROL, 0x6100); 1271 e1e_wphy(hw, PHY_CONTROL, 0x6100);
1286 1272
1287 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1273 /* Now set up the MAC to the same speed/duplex as the PHY. */
1274 ctrl_reg = er32(CTRL);
1288 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1275 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1289 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1276 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1290 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1277 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1291 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1278 E1000_CTRL_SPD_100 |/* Force Speed to 100 */
1292 E1000_CTRL_FD); /* Force Duplex to FULL */ 1279 E1000_CTRL_FD); /* Force Duplex to FULL */
1280
1281 ew32(CTRL, ctrl_reg);
1282 udelay(500);
1283
1284 return 0;
1285 }
1286
1287 /* Specific PHY configuration for loopback */
1288 switch (hw->phy.type) {
1289 case e1000_phy_m88:
1290 /* Auto-MDI/MDIX Off */
1291 e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1292 /* reset to update Auto-MDI/MDIX */
1293 e1e_wphy(hw, PHY_CONTROL, 0x9140);
1294 /* autoneg off */
1295 e1e_wphy(hw, PHY_CONTROL, 0x8140);
1296 break;
1297 case e1000_phy_gg82563:
1298 e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
1293 break; 1299 break;
1294 case e1000_phy_bm: 1300 case e1000_phy_bm:
1295 /* Set Default MAC Interface speed to 1GB */ 1301 /* Set Default MAC Interface speed to 1GB */
@@ -1312,23 +1318,41 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1312 /* Set Early Link Enable */ 1318 /* Set Early Link Enable */
1313 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); 1319 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1314 e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); 1320 e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
1315 /* fall through */ 1321 break;
1322 case e1000_phy_82577:
1323 case e1000_phy_82578:
1324 /* Workaround: K1 must be disabled for stable 1Gbps operation */
1325 e1000_configure_k1_ich8lan(hw, false);
1326 break;
1327 case e1000_phy_82579:
1328 /* Disable PHY energy detect power down */
1329 e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
1330 e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
1331 /* Disable full chip energy detect */
1332 e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
1333 e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
1334 /* Enable loopback on the PHY */
1335#define I82577_PHY_LBK_CTRL 19
1336 e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
1337 break;
1316 default: 1338 default:
1317 /* force 1000, set loopback */ 1339 break;
1318 e1e_wphy(hw, PHY_CONTROL, 0x4140); 1340 }
1319 mdelay(250);
1320 1341
1321 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1342 /* force 1000, set loopback */
1322 ctrl_reg = er32(CTRL); 1343 e1e_wphy(hw, PHY_CONTROL, 0x4140);
1323 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1344 mdelay(250);
1324 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1325 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1326 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1327 E1000_CTRL_FD); /* Force Duplex to FULL */
1328 1345
1329 if (adapter->flags & FLAG_IS_ICH) 1346 /* Now set up the MAC to the same speed/duplex as the PHY. */
1330 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ 1347 ctrl_reg = er32(CTRL);
1331 } 1348 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1349 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1350 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1351 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1352 E1000_CTRL_FD); /* Force Duplex to FULL */
1353
1354 if (adapter->flags & FLAG_IS_ICH)
1355 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
1332 1356
1333 if (hw->phy.media_type == e1000_media_type_copper && 1357 if (hw->phy.media_type == e1000_media_type_copper &&
1334 hw->phy.type == e1000_phy_m88) { 1358 hw->phy.type == e1000_phy_m88) {
@@ -1868,6 +1892,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1868 1892
1869 if ((hw->phy.type == e1000_phy_ife) || 1893 if ((hw->phy.type == e1000_phy_ife) ||
1870 (hw->mac.type == e1000_pchlan) || 1894 (hw->mac.type == e1000_pchlan) ||
1895 (hw->mac.type == e1000_pch2lan) ||
1871 (hw->mac.type == e1000_82583) || 1896 (hw->mac.type == e1000_82583) ||
1872 (hw->mac.type == e1000_82574)) { 1897 (hw->mac.type == e1000_82574)) {
1873 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); 1898 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
@@ -2026,7 +2051,6 @@ static const struct ethtool_ops e1000_ethtool_ops = {
2026 .get_coalesce = e1000_get_coalesce, 2051 .get_coalesce = e1000_get_coalesce,
2027 .set_coalesce = e1000_set_coalesce, 2052 .set_coalesce = e1000_set_coalesce,
2028 .get_flags = ethtool_op_get_flags, 2053 .get_flags = ethtool_op_get_flags,
2029 .set_flags = ethtool_op_set_flags,
2030}; 2054};
2031 2055
2032void e1000e_set_ethtool_ops(struct net_device *netdev) 2056void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 5d1220d188d4..66ed08f726fb 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -217,7 +217,10 @@ enum e1e_registers {
217 E1000_SWSM = 0x05B50, /* SW Semaphore */ 217 E1000_SWSM = 0x05B50, /* SW Semaphore */
218 E1000_FWSM = 0x05B54, /* FW Semaphore */ 218 E1000_FWSM = 0x05B54, /* FW Semaphore */
219 E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ 219 E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
220 E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */ 220 E1000_FFLT_DBG = 0x05F04, /* Debug Register */
221 E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
222#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
223#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
221 E1000_HICR = 0x08F00, /* Host Interface Control */ 224 E1000_HICR = 0x08F00, /* Host Interface Control */
222}; 225};
223 226
@@ -303,13 +306,14 @@ enum e1e_registers {
303#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 306#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
304#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 307#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
305#define E1000_KMRNCTRLSTA_REN 0x00200000 308#define E1000_KMRNCTRLSTA_REN 0x00200000
309#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
306#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ 310#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
307#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ 311#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
308#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ 312#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
309#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ 313#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
310#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 314#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
311#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E 315#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
312#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400 316#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
313 317
314#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 318#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
315#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ 319#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
@@ -387,6 +391,8 @@ enum e1e_registers {
387#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB 391#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
388#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF 392#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
389#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 393#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
394#define E1000_DEV_ID_PCH2_LV_LM 0x1502
395#define E1000_DEV_ID_PCH2_LV_V 0x1503
390 396
391#define E1000_REVISION_4 4 397#define E1000_REVISION_4 4
392 398
@@ -406,6 +412,7 @@ enum e1000_mac_type {
406 e1000_ich9lan, 412 e1000_ich9lan,
407 e1000_ich10lan, 413 e1000_ich10lan,
408 e1000_pchlan, 414 e1000_pchlan,
415 e1000_pch2lan,
409}; 416};
410 417
411enum e1000_media_type { 418enum e1000_media_type {
@@ -442,6 +449,7 @@ enum e1000_phy_type {
442 e1000_phy_bm, 449 e1000_phy_bm,
443 e1000_phy_82578, 450 e1000_phy_82578,
444 e1000_phy_82577, 451 e1000_phy_82577,
452 e1000_phy_82579,
445}; 453};
446 454
447enum e1000_bus_width { 455enum e1000_bus_width {
@@ -929,6 +937,7 @@ struct e1000_dev_spec_ich8lan {
929 bool kmrn_lock_loss_workaround_enabled; 937 bool kmrn_lock_loss_workaround_enabled;
930 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; 938 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
931 bool nvm_k1_enabled; 939 bool nvm_k1_enabled;
940 bool eee_disable;
932}; 941};
933 942
934struct e1000_hw { 943struct e1000_hw {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index b2507d93de99..63930d12711c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,8 @@
52 * 82577LC Gigabit Network Connection 52 * 82577LC Gigabit Network Connection
53 * 82578DM Gigabit Network Connection 53 * 82578DM Gigabit Network Connection
54 * 82578DC Gigabit Network Connection 54 * 82578DC Gigabit Network Connection
55 * 82579LM Gigabit Network Connection
56 * 82579V Gigabit Network Connection
55 */ 57 */
56 58
57#include "e1000.h" 59#include "e1000.h"
@@ -126,6 +128,13 @@
126#define HV_SMB_ADDR_PEC_EN 0x0200 128#define HV_SMB_ADDR_PEC_EN 0x0200
127#define HV_SMB_ADDR_VALID 0x0080 129#define HV_SMB_ADDR_VALID 0x0080
128 130
131/* PHY Power Management Control */
132#define HV_PM_CTRL PHY_REG(770, 17)
133
134/* PHY Low Power Idle Control */
135#define I82579_LPI_CTRL PHY_REG(772, 20)
136#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
137
129/* Strapping Option Register - RO */ 138/* Strapping Option Register - RO */
130#define E1000_STRAP 0x0000C 139#define E1000_STRAP 0x0000C
131#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 140#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
@@ -226,6 +235,8 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
226static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); 235static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
227static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); 236static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
228static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 237static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
238static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
239static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
229 240
230static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 241static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
231{ 242{
@@ -277,13 +288,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
277 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 288 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
278 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 289 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
279 290
291 /*
292 * The MAC-PHY interconnect may still be in SMBus mode
293 * after Sx->S0. If the manageability engine (ME) is
294 * disabled, then toggle the LANPHYPC Value bit to force
295 * the interconnect to PCIe mode.
296 */
280 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 297 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
281 /*
282 * The MAC-PHY interconnect may still be in SMBus mode
283 * after Sx->S0. Toggle the LANPHYPC Value bit to force
284 * the interconnect to PCIe mode, but only if there is no
285 * firmware present otherwise firmware will have done it.
286 */
287 ctrl = er32(CTRL); 298 ctrl = er32(CTRL);
288 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 299 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
289 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 300 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -324,6 +335,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
324 335
325 switch (phy->type) { 336 switch (phy->type) {
326 case e1000_phy_82577: 337 case e1000_phy_82577:
338 case e1000_phy_82579:
327 phy->ops.check_polarity = e1000_check_polarity_82577; 339 phy->ops.check_polarity = e1000_check_polarity_82577;
328 phy->ops.force_speed_duplex = 340 phy->ops.force_speed_duplex =
329 e1000_phy_force_speed_duplex_82577; 341 e1000_phy_force_speed_duplex_82577;
@@ -515,6 +527,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
515 case e1000_ich8lan: 527 case e1000_ich8lan:
516 case e1000_ich9lan: 528 case e1000_ich9lan:
517 case e1000_ich10lan: 529 case e1000_ich10lan:
530 /* check management mode */
531 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
518 /* ID LED init */ 532 /* ID LED init */
519 mac->ops.id_led_init = e1000e_id_led_init; 533 mac->ops.id_led_init = e1000e_id_led_init;
520 /* setup LED */ 534 /* setup LED */
@@ -526,6 +540,9 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
526 mac->ops.led_off = e1000_led_off_ich8lan; 540 mac->ops.led_off = e1000_led_off_ich8lan;
527 break; 541 break;
528 case e1000_pchlan: 542 case e1000_pchlan:
543 case e1000_pch2lan:
544 /* check management mode */
545 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
529 /* ID LED init */ 546 /* ID LED init */
530 mac->ops.id_led_init = e1000_id_led_init_pchlan; 547 mac->ops.id_led_init = e1000_id_led_init_pchlan;
531 /* setup LED */ 548 /* setup LED */
@@ -544,10 +561,47 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
544 if (mac->type == e1000_ich8lan) 561 if (mac->type == e1000_ich8lan)
545 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 562 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
546 563
564 /* Disable PHY configuration by hardware, config by software */
565 if (mac->type == e1000_pch2lan) {
566 u32 extcnf_ctrl = er32(EXTCNF_CTRL);
567
568 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
569 ew32(EXTCNF_CTRL, extcnf_ctrl);
570 }
571
547 return 0; 572 return 0;
548} 573}
549 574
550/** 575/**
576 * e1000_set_eee_pchlan - Enable/disable EEE support
577 * @hw: pointer to the HW structure
578 *
579 * Enable/disable EEE based on setting in dev_spec structure. The bits in
580 * the LPI Control register will remain set only if/when link is up.
581 **/
582static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
583{
584 s32 ret_val = 0;
585 u16 phy_reg;
586
587 if (hw->phy.type != e1000_phy_82579)
588 goto out;
589
590 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
591 if (ret_val)
592 goto out;
593
594 if (hw->dev_spec.ich8lan.eee_disable)
595 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
596 else
597 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
598
599 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
600out:
601 return ret_val;
602}
603
604/**
551 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 605 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
552 * @hw: pointer to the HW structure 606 * @hw: pointer to the HW structure
553 * 607 *
@@ -604,6 +658,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
604 */ 658 */
605 e1000e_check_downshift(hw); 659 e1000e_check_downshift(hw);
606 660
661 /* Enable/Disable EEE after link up */
662 ret_val = e1000_set_eee_pchlan(hw);
663 if (ret_val)
664 goto out;
665
607 /* 666 /*
608 * If we are forcing speed/duplex, then we simply return since 667 * If we are forcing speed/duplex, then we simply return since
609 * we have already determined whether we have link or not. 668 * we have already determined whether we have link or not.
@@ -647,10 +706,19 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
647 if (rc) 706 if (rc)
648 return rc; 707 return rc;
649 708
650 if (hw->mac.type == e1000_pchlan) 709 switch (hw->mac.type) {
651 rc = e1000_init_phy_params_pchlan(hw); 710 case e1000_ich8lan:
652 else 711 case e1000_ich9lan:
712 case e1000_ich10lan:
653 rc = e1000_init_phy_params_ich8lan(hw); 713 rc = e1000_init_phy_params_ich8lan(hw);
714 break;
715 case e1000_pchlan:
716 case e1000_pch2lan:
717 rc = e1000_init_phy_params_pchlan(hw);
718 break;
719 default:
720 break;
721 }
654 if (rc) 722 if (rc)
655 return rc; 723 return rc;
656 724
@@ -663,6 +731,10 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
663 (adapter->hw.phy.type == e1000_phy_igp_3)) 731 (adapter->hw.phy.type == e1000_phy_igp_3))
664 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; 732 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
665 733
734 /* Disable EEE by default until IEEE802.3az spec is finalized */
735 if (adapter->flags2 & FLAG2_HAS_EEE)
736 adapter->hw.dev_spec.ich8lan.eee_disable = true;
737
666 return 0; 738 return 0;
667} 739}
668 740
@@ -774,7 +846,7 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
774 * e1000_check_mng_mode_ich8lan - Checks management mode 846 * e1000_check_mng_mode_ich8lan - Checks management mode
775 * @hw: pointer to the HW structure 847 * @hw: pointer to the HW structure
776 * 848 *
777 * This checks if the adapter has manageability enabled. 849 * This checks if the adapter has any manageability enabled.
778 * This is a function pointer entry point only called by read/write 850 * This is a function pointer entry point only called by read/write
779 * routines for the PHY and NVM parts. 851 * routines for the PHY and NVM parts.
780 **/ 852 **/
@@ -783,9 +855,26 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
783 u32 fwsm; 855 u32 fwsm;
784 856
785 fwsm = er32(FWSM); 857 fwsm = er32(FWSM);
858 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
859 ((fwsm & E1000_FWSM_MODE_MASK) ==
860 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
861}
862
863/**
864 * e1000_check_mng_mode_pchlan - Checks management mode
865 * @hw: pointer to the HW structure
866 *
867 * This checks if the adapter has iAMT enabled.
868 * This is a function pointer entry point only called by read/write
869 * routines for the PHY and NVM parts.
870 **/
871static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
872{
873 u32 fwsm;
786 874
787 return (fwsm & E1000_FWSM_MODE_MASK) == 875 fwsm = er32(FWSM);
788 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); 876 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
877 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
789} 878}
790 879
791/** 880/**
@@ -820,14 +909,6 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
820 s32 ret_val = 0; 909 s32 ret_val = 0;
821 u16 word_addr, reg_data, reg_addr, phy_page = 0; 910 u16 word_addr, reg_data, reg_addr, phy_page = 0;
822 911
823 if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
824 !(hw->mac.type == e1000_pchlan))
825 return ret_val;
826
827 ret_val = hw->phy.ops.acquire(hw);
828 if (ret_val)
829 return ret_val;
830
831 /* 912 /*
832 * Initialize the PHY from the NVM on ICH platforms. This 913 * Initialize the PHY from the NVM on ICH platforms. This
833 * is needed due to an issue where the NVM configuration is 914 * is needed due to an issue where the NVM configuration is
@@ -835,12 +916,27 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
835 * Therefore, after each PHY reset, we will load the 916 * Therefore, after each PHY reset, we will load the
836 * configuration data out of the NVM manually. 917 * configuration data out of the NVM manually.
837 */ 918 */
838 if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) || 919 switch (hw->mac.type) {
839 (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) || 920 case e1000_ich8lan:
840 (hw->mac.type == e1000_pchlan)) 921 if (phy->type != e1000_phy_igp_3)
922 return ret_val;
923
924 if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
925 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
926 break;
927 }
928 /* Fall-thru */
929 case e1000_pchlan:
930 case e1000_pch2lan:
841 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 931 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
842 else 932 break;
843 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 933 default:
934 return ret_val;
935 }
936
937 ret_val = hw->phy.ops.acquire(hw);
938 if (ret_val)
939 return ret_val;
844 940
845 data = er32(FEXTNVM); 941 data = er32(FEXTNVM);
846 if (!(data & sw_cfg_mask)) 942 if (!(data & sw_cfg_mask))
@@ -851,8 +947,10 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
851 * extended configuration before SW configuration 947 * extended configuration before SW configuration
852 */ 948 */
853 data = er32(EXTCNF_CTRL); 949 data = er32(EXTCNF_CTRL);
854 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 950 if (!(hw->mac.type == e1000_pch2lan)) {
855 goto out; 951 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
952 goto out;
953 }
856 954
857 cnf_size = er32(EXTCNF_SIZE); 955 cnf_size = er32(EXTCNF_SIZE);
858 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 956 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -864,7 +962,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
864 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
865 963
866 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 964 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
867 (hw->mac.type == e1000_pchlan)) { 965 ((hw->mac.type == e1000_pchlan) ||
966 (hw->mac.type == e1000_pch2lan))) {
868 /* 967 /*
869 * HW configures the SMBus address and LEDs when the 968 * HW configures the SMBus address and LEDs when the
870 * OEM and LCD Write Enable bits are set in the NVM. 969 * OEM and LCD Write Enable bits are set in the NVM.
@@ -1071,16 +1170,18 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1071 u32 mac_reg; 1170 u32 mac_reg;
1072 u16 oem_reg; 1171 u16 oem_reg;
1073 1172
1074 if (hw->mac.type != e1000_pchlan) 1173 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1075 return ret_val; 1174 return ret_val;
1076 1175
1077 ret_val = hw->phy.ops.acquire(hw); 1176 ret_val = hw->phy.ops.acquire(hw);
1078 if (ret_val) 1177 if (ret_val)
1079 return ret_val; 1178 return ret_val;
1080 1179
1081 mac_reg = er32(EXTCNF_CTRL); 1180 if (!(hw->mac.type == e1000_pch2lan)) {
1082 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) 1181 mac_reg = er32(EXTCNF_CTRL);
1083 goto out; 1182 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1183 goto out;
1184 }
1084 1185
1085 mac_reg = er32(FEXTNVM); 1186 mac_reg = er32(FEXTNVM);
1086 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) 1187 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
@@ -1221,6 +1322,243 @@ out:
1221} 1322}
1222 1323
1223/** 1324/**
1325 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1326 * @hw: pointer to the HW structure
1327 **/
1328void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1329{
1330 u32 mac_reg;
1331 u16 i;
1332
1333 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1334 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1335 mac_reg = er32(RAL(i));
1336 e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
1337 e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
1338 mac_reg = er32(RAH(i));
1339 e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
1340 e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
1341 }
1342}
1343
/*
 * e1000_calc_rx_da_crc - CRC-32 of a 6-byte destination address
 *
 * Bit-reflected IEEE 802.3 CRC-32: polynomial 0xEDB88320 (reversed
 * 0x04C11DB7), initial value all-ones, final one's complement.
 * Processes exactly ETH_ALEN (6) bytes.
 */
static u32 e1000_calc_rx_da_crc(u8 mac[])
{
	u32 crc = 0xFFFFFFFF;
	u32 byte, bit;

	for (byte = 0; byte < 6; byte++) {
		crc ^= mac[byte];
		for (bit = 0; bit < 8; bit++) {
			if (crc & 1)
				crc = (crc >> 1) ^ 0xEDB88320;
			else
				crc >>= 1;
		}
	}
	return ~crc;
}
1359
1360/**
1361 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1362 * with 82579 PHY
1363 * @hw: pointer to the HW structure
1364 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1365 **/
1366s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1367{
1368 s32 ret_val = 0;
1369 u16 phy_reg, data;
1370 u32 mac_reg;
1371 u16 i;
1372
1373 if (hw->mac.type != e1000_pch2lan)
1374 goto out;
1375
1376 /* disable Rx path while enabling/disabling workaround */
1377 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1378 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1379 if (ret_val)
1380 goto out;
1381
1382 if (enable) {
1383 /*
1384 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1385 * SHRAL/H) and initial CRC values to the MAC
1386 */
1387 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1388 u8 mac_addr[ETH_ALEN] = {0};
1389 u32 addr_high, addr_low;
1390
1391 addr_high = er32(RAH(i));
1392 if (!(addr_high & E1000_RAH_AV))
1393 continue;
1394 addr_low = er32(RAL(i));
1395 mac_addr[0] = (addr_low & 0xFF);
1396 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1397 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1398 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1399 mac_addr[4] = (addr_high & 0xFF);
1400 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1401
1402 ew32(PCH_RAICC(i),
1403 e1000_calc_rx_da_crc(mac_addr));
1404 }
1405
1406 /* Write Rx addresses to the PHY */
1407 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1408
1409 /* Enable jumbo frame workaround in the MAC */
1410 mac_reg = er32(FFLT_DBG);
1411 mac_reg &= ~(1 << 14);
1412 mac_reg |= (7 << 15);
1413 ew32(FFLT_DBG, mac_reg);
1414
1415 mac_reg = er32(RCTL);
1416 mac_reg |= E1000_RCTL_SECRC;
1417 ew32(RCTL, mac_reg);
1418
1419 ret_val = e1000e_read_kmrn_reg(hw,
1420 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1421 &data);
1422 if (ret_val)
1423 goto out;
1424 ret_val = e1000e_write_kmrn_reg(hw,
1425 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1426 data | (1 << 0));
1427 if (ret_val)
1428 goto out;
1429 ret_val = e1000e_read_kmrn_reg(hw,
1430 E1000_KMRNCTRLSTA_HD_CTRL,
1431 &data);
1432 if (ret_val)
1433 goto out;
1434 data &= ~(0xF << 8);
1435 data |= (0xB << 8);
1436 ret_val = e1000e_write_kmrn_reg(hw,
1437 E1000_KMRNCTRLSTA_HD_CTRL,
1438 data);
1439 if (ret_val)
1440 goto out;
1441
1442 /* Enable jumbo frame workaround in the PHY */
1443 e1e_rphy(hw, PHY_REG(769, 20), &data);
1444 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1445 if (ret_val)
1446 goto out;
1447 e1e_rphy(hw, PHY_REG(769, 23), &data);
1448 data &= ~(0x7F << 5);
1449 data |= (0x37 << 5);
1450 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1451 if (ret_val)
1452 goto out;
1453 e1e_rphy(hw, PHY_REG(769, 16), &data);
1454 data &= ~(1 << 13);
1455 data |= (1 << 12);
1456 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1457 if (ret_val)
1458 goto out;
1459 e1e_rphy(hw, PHY_REG(776, 20), &data);
1460 data &= ~(0x3FF << 2);
1461 data |= (0x1A << 2);
1462 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1463 if (ret_val)
1464 goto out;
1465 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
1466 if (ret_val)
1467 goto out;
1468 e1e_rphy(hw, HV_PM_CTRL, &data);
1469 ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
1470 if (ret_val)
1471 goto out;
1472 } else {
1473 /* Write MAC register values back to h/w defaults */
1474 mac_reg = er32(FFLT_DBG);
1475 mac_reg &= ~(0xF << 14);
1476 ew32(FFLT_DBG, mac_reg);
1477
1478 mac_reg = er32(RCTL);
1479 mac_reg &= ~E1000_RCTL_SECRC;
1480 ew32(FFLT_DBG, mac_reg);
1481
1482 ret_val = e1000e_read_kmrn_reg(hw,
1483 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1484 &data);
1485 if (ret_val)
1486 goto out;
1487 ret_val = e1000e_write_kmrn_reg(hw,
1488 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1489 data & ~(1 << 0));
1490 if (ret_val)
1491 goto out;
1492 ret_val = e1000e_read_kmrn_reg(hw,
1493 E1000_KMRNCTRLSTA_HD_CTRL,
1494 &data);
1495 if (ret_val)
1496 goto out;
1497 data &= ~(0xF << 8);
1498 data |= (0xB << 8);
1499 ret_val = e1000e_write_kmrn_reg(hw,
1500 E1000_KMRNCTRLSTA_HD_CTRL,
1501 data);
1502 if (ret_val)
1503 goto out;
1504
1505 /* Write PHY register values back to h/w defaults */
1506 e1e_rphy(hw, PHY_REG(769, 20), &data);
1507 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1508 if (ret_val)
1509 goto out;
1510 e1e_rphy(hw, PHY_REG(769, 23), &data);
1511 data &= ~(0x7F << 5);
1512 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1513 if (ret_val)
1514 goto out;
1515 e1e_rphy(hw, PHY_REG(769, 16), &data);
1516 data &= ~(1 << 12);
1517 data |= (1 << 13);
1518 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1519 if (ret_val)
1520 goto out;
1521 e1e_rphy(hw, PHY_REG(776, 20), &data);
1522 data &= ~(0x3FF << 2);
1523 data |= (0x8 << 2);
1524 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1525 if (ret_val)
1526 goto out;
1527 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
1528 if (ret_val)
1529 goto out;
1530 e1e_rphy(hw, HV_PM_CTRL, &data);
1531 ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
1532 if (ret_val)
1533 goto out;
1534 }
1535
1536 /* re-enable Rx path after enabling/disabling workaround */
1537 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1538
1539out:
1540 return ret_val;
1541}
1542
1543/**
1544 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1545 * done after every PHY reset.
1546 **/
1547static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1548{
1549 s32 ret_val = 0;
1550
1551 if (hw->mac.type != e1000_pch2lan)
1552 goto out;
1553
1554 /* Set MDIO slow mode before any other MDIO access */
1555 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1556
1557out:
1558 return ret_val;
1559}
1560
1561/**
1224 * e1000_lan_init_done_ich8lan - Check for PHY config completion 1562 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1225 * @hw: pointer to the HW structure 1563 * @hw: pointer to the HW structure
1226 * 1564 *
@@ -1271,12 +1609,17 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1271 if (ret_val) 1609 if (ret_val)
1272 goto out; 1610 goto out;
1273 break; 1611 break;
1612 case e1000_pch2lan:
1613 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1614 if (ret_val)
1615 goto out;
1616 break;
1274 default: 1617 default:
1275 break; 1618 break;
1276 } 1619 }
1277 1620
1278 /* Dummy read to clear the phy wakeup bit after lcd reset */ 1621 /* Dummy read to clear the phy wakeup bit after lcd reset */
1279 if (hw->mac.type == e1000_pchlan) 1622 if (hw->mac.type >= e1000_pchlan)
1280 e1e_rphy(hw, BM_WUC, &reg); 1623 e1e_rphy(hw, BM_WUC, &reg);
1281 1624
1282 /* Configure the LCD with the extended configuration region in NVM */ 1625 /* Configure the LCD with the extended configuration region in NVM */
@@ -2800,6 +3143,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2800 3143
2801 ew32(FCTTV, hw->fc.pause_time); 3144 ew32(FCTTV, hw->fc.pause_time);
2802 if ((hw->phy.type == e1000_phy_82578) || 3145 if ((hw->phy.type == e1000_phy_82578) ||
3146 (hw->phy.type == e1000_phy_82579) ||
2803 (hw->phy.type == e1000_phy_82577)) { 3147 (hw->phy.type == e1000_phy_82577)) {
2804 ew32(FCRTV_PCH, hw->fc.refresh_time); 3148 ew32(FCRTV_PCH, hw->fc.refresh_time);
2805 3149
@@ -2863,6 +3207,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
2863 return ret_val; 3207 return ret_val;
2864 break; 3208 break;
2865 case e1000_phy_82577: 3209 case e1000_phy_82577:
3210 case e1000_phy_82579:
2866 ret_val = e1000_copper_link_setup_82577(hw); 3211 ret_val = e1000_copper_link_setup_82577(hw);
2867 if (ret_val) 3212 if (ret_val)
2868 return ret_val; 3213 return ret_val;
@@ -3116,21 +3461,12 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3116{ 3461{
3117 u32 phy_ctrl; 3462 u32 phy_ctrl;
3118 3463
3119 switch (hw->mac.type) { 3464 phy_ctrl = er32(PHY_CTRL);
3120 case e1000_ich8lan: 3465 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3121 case e1000_ich9lan: 3466 ew32(PHY_CTRL, phy_ctrl);
3122 case e1000_ich10lan:
3123 case e1000_pchlan:
3124 phy_ctrl = er32(PHY_CTRL);
3125 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
3126 E1000_PHY_CTRL_GBE_DISABLE;
3127 ew32(PHY_CTRL, phy_ctrl);
3128 3467
3129 if (hw->mac.type == e1000_pchlan) 3468 if (hw->mac.type >= e1000_pchlan)
3130 e1000_phy_hw_reset_ich8lan(hw); 3469 e1000_phy_hw_reset_ich8lan(hw);
3131 default:
3132 break;
3133 }
3134} 3470}
3135 3471
3136/** 3472/**
@@ -3370,6 +3706,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3370 3706
3371 /* Clear PHY statistics registers */ 3707 /* Clear PHY statistics registers */
3372 if ((hw->phy.type == e1000_phy_82578) || 3708 if ((hw->phy.type == e1000_phy_82578) ||
3709 (hw->phy.type == e1000_phy_82579) ||
3373 (hw->phy.type == e1000_phy_82577)) { 3710 (hw->phy.type == e1000_phy_82577)) {
3374 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); 3711 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
3375 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); 3712 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
@@ -3390,7 +3727,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3390 3727
3391static struct e1000_mac_operations ich8_mac_ops = { 3728static struct e1000_mac_operations ich8_mac_ops = {
3392 .id_led_init = e1000e_id_led_init, 3729 .id_led_init = e1000e_id_led_init,
3393 .check_mng_mode = e1000_check_mng_mode_ich8lan, 3730 /* check_mng_mode dependent on mac type */
3394 .check_for_link = e1000_check_for_copper_link_ich8lan, 3731 .check_for_link = e1000_check_for_copper_link_ich8lan,
3395 /* cleanup_led dependent on mac type */ 3732 /* cleanup_led dependent on mac type */
3396 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 3733 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
@@ -3497,6 +3834,7 @@ struct e1000_info e1000_pch_info = {
3497 | FLAG_HAS_JUMBO_FRAMES 3834 | FLAG_HAS_JUMBO_FRAMES
3498 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ 3835 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
3499 | FLAG_APME_IN_WUC, 3836 | FLAG_APME_IN_WUC,
3837 .flags2 = FLAG2_HAS_PHY_STATS,
3500 .pba = 26, 3838 .pba = 26,
3501 .max_hw_frame_size = 4096, 3839 .max_hw_frame_size = 4096,
3502 .get_variants = e1000_get_variants_ich8lan, 3840 .get_variants = e1000_get_variants_ich8lan,
@@ -3504,3 +3842,23 @@ struct e1000_info e1000_pch_info = {
3504 .phy_ops = &ich8_phy_ops, 3842 .phy_ops = &ich8_phy_ops,
3505 .nvm_ops = &ich8_nvm_ops, 3843 .nvm_ops = &ich8_nvm_ops,
3506}; 3844};
3845
/*
 * Board configuration for pch2lan (82579-class) devices: jumbo frames
 * up to DEFAULT_JUMBO, WoL, AMT, on-board flash, PHY-maintained
 * half-duplex statistics (FLAG2_HAS_PHY_STATS) and Energy Efficient
 * Ethernet (FLAG2_HAS_EEE).
 */
struct e1000_info e1000_pch2_info = {
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 18,	/* packet buffer allocation, in KB — confirm units */
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a968e3a416ac..df4a27922931 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57a7e41da69e..36d31a416320 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,9 @@
52 52
53#include "e1000.h" 53#include "e1000.h"
54 54
55#define DRV_VERSION "1.0.2-k4" 55#define DRV_EXTRAVERSION "-k2"
56
57#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
56char e1000e_driver_name[] = "e1000e"; 58char e1000e_driver_name[] = "e1000e";
57const char e1000e_driver_version[] = DRV_VERSION; 59const char e1000e_driver_version[] = DRV_VERSION;
58 60
@@ -67,6 +69,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
67 [board_ich9lan] = &e1000_ich9_info, 69 [board_ich9lan] = &e1000_ich9_info,
68 [board_ich10lan] = &e1000_ich10_info, 70 [board_ich10lan] = &e1000_ich10_info,
69 [board_pchlan] = &e1000_pch_info, 71 [board_pchlan] = &e1000_pch_info,
72 [board_pch2lan] = &e1000_pch2_info,
70}; 73};
71 74
72struct e1000_reg_info { 75struct e1000_reg_info {
@@ -221,10 +224,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
221 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 224 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
222 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", 225 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
223 0, tx_ring->next_to_use, tx_ring->next_to_clean, 226 0, tx_ring->next_to_use, tx_ring->next_to_clean,
224 (u64)buffer_info->dma, 227 (unsigned long long)buffer_info->dma,
225 buffer_info->length, 228 buffer_info->length,
226 buffer_info->next_to_watch, 229 buffer_info->next_to_watch,
227 (u64)buffer_info->time_stamp); 230 (unsigned long long)buffer_info->time_stamp);
228 231
229 /* Print TX Rings */ 232 /* Print TX Rings */
230 if (!netif_msg_tx_done(adapter)) 233 if (!netif_msg_tx_done(adapter))
@@ -276,9 +279,11 @@ static void e1000e_dump(struct e1000_adapter *adapter)
276 "%04X %3X %016llX %p", 279 "%04X %3X %016llX %p",
277 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : 280 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
278 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, 281 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
279 le64_to_cpu(u0->a), le64_to_cpu(u0->b), 282 (unsigned long long)le64_to_cpu(u0->a),
280 (u64)buffer_info->dma, buffer_info->length, 283 (unsigned long long)le64_to_cpu(u0->b),
281 buffer_info->next_to_watch, (u64)buffer_info->time_stamp, 284 (unsigned long long)buffer_info->dma,
285 buffer_info->length, buffer_info->next_to_watch,
286 (unsigned long long)buffer_info->time_stamp,
282 buffer_info->skb); 287 buffer_info->skb);
283 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 288 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
284 printk(KERN_CONT " NTC/U\n"); 289 printk(KERN_CONT " NTC/U\n");
@@ -353,19 +358,19 @@ rx_ring_summary:
353 printk(KERN_INFO "RWB[0x%03X] %016llX " 358 printk(KERN_INFO "RWB[0x%03X] %016llX "
354 "%016llX %016llX %016llX " 359 "%016llX %016llX %016llX "
355 "---------------- %p", i, 360 "---------------- %p", i,
356 le64_to_cpu(u1->a), 361 (unsigned long long)le64_to_cpu(u1->a),
357 le64_to_cpu(u1->b), 362 (unsigned long long)le64_to_cpu(u1->b),
358 le64_to_cpu(u1->c), 363 (unsigned long long)le64_to_cpu(u1->c),
359 le64_to_cpu(u1->d), 364 (unsigned long long)le64_to_cpu(u1->d),
360 buffer_info->skb); 365 buffer_info->skb);
361 } else { 366 } else {
362 printk(KERN_INFO "R [0x%03X] %016llX " 367 printk(KERN_INFO "R [0x%03X] %016llX "
363 "%016llX %016llX %016llX %016llX %p", i, 368 "%016llX %016llX %016llX %016llX %p", i,
364 le64_to_cpu(u1->a), 369 (unsigned long long)le64_to_cpu(u1->a),
365 le64_to_cpu(u1->b), 370 (unsigned long long)le64_to_cpu(u1->b),
366 le64_to_cpu(u1->c), 371 (unsigned long long)le64_to_cpu(u1->c),
367 le64_to_cpu(u1->d), 372 (unsigned long long)le64_to_cpu(u1->d),
368 (u64)buffer_info->dma, 373 (unsigned long long)buffer_info->dma,
369 buffer_info->skb); 374 buffer_info->skb);
370 375
371 if (netif_msg_pktdata(adapter)) 376 if (netif_msg_pktdata(adapter))
@@ -402,9 +407,11 @@ rx_ring_summary:
402 buffer_info = &rx_ring->buffer_info[i]; 407 buffer_info = &rx_ring->buffer_info[i];
403 u0 = (struct my_u0 *)rx_desc; 408 u0 = (struct my_u0 *)rx_desc;
404 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " 409 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
405 "%016llX %p", 410 "%016llX %p", i,
406 i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), 411 (unsigned long long)le64_to_cpu(u0->a),
407 (u64)buffer_info->dma, buffer_info->skb); 412 (unsigned long long)le64_to_cpu(u0->b),
413 (unsigned long long)buffer_info->dma,
414 buffer_info->skb);
408 if (i == rx_ring->next_to_use) 415 if (i == rx_ring->next_to_use)
409 printk(KERN_CONT " NTU\n"); 416 printk(KERN_CONT " NTU\n");
410 else if (i == rx_ring->next_to_clean) 417 else if (i == rx_ring->next_to_clean)
@@ -1778,25 +1785,25 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1778void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) 1785void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1779{ 1786{
1780 int err; 1787 int err;
1781 int numvecs, i; 1788 int i;
1782
1783 1789
1784 switch (adapter->int_mode) { 1790 switch (adapter->int_mode) {
1785 case E1000E_INT_MODE_MSIX: 1791 case E1000E_INT_MODE_MSIX:
1786 if (adapter->flags & FLAG_HAS_MSIX) { 1792 if (adapter->flags & FLAG_HAS_MSIX) {
1787 numvecs = 3; /* RxQ0, TxQ0 and other */ 1793 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
1788 adapter->msix_entries = kcalloc(numvecs, 1794 adapter->msix_entries = kcalloc(adapter->num_vectors,
1789 sizeof(struct msix_entry), 1795 sizeof(struct msix_entry),
1790 GFP_KERNEL); 1796 GFP_KERNEL);
1791 if (adapter->msix_entries) { 1797 if (adapter->msix_entries) {
1792 for (i = 0; i < numvecs; i++) 1798 for (i = 0; i < adapter->num_vectors; i++)
1793 adapter->msix_entries[i].entry = i; 1799 adapter->msix_entries[i].entry = i;
1794 1800
1795 err = pci_enable_msix(adapter->pdev, 1801 err = pci_enable_msix(adapter->pdev,
1796 adapter->msix_entries, 1802 adapter->msix_entries,
1797 numvecs); 1803 adapter->num_vectors);
1798 if (err == 0) 1804 if (err == 0) {
1799 return; 1805 return;
1806 }
1800 } 1807 }
1801 /* MSI-X failed, so fall through and try MSI */ 1808 /* MSI-X failed, so fall through and try MSI */
1802 e_err("Failed to initialize MSI-X interrupts. " 1809 e_err("Failed to initialize MSI-X interrupts. "
@@ -1818,6 +1825,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1818 /* Don't do anything; this is the system default */ 1825 /* Don't do anything; this is the system default */
1819 break; 1826 break;
1820 } 1827 }
1828
1829 /* store the number of vectors being used */
1830 adapter->num_vectors = 1;
1821} 1831}
1822 1832
1823/** 1833/**
@@ -1939,7 +1949,14 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
1939 if (adapter->msix_entries) 1949 if (adapter->msix_entries)
1940 ew32(EIAC_82574, 0); 1950 ew32(EIAC_82574, 0);
1941 e1e_flush(); 1951 e1e_flush();
1942 synchronize_irq(adapter->pdev->irq); 1952
1953 if (adapter->msix_entries) {
1954 int i;
1955 for (i = 0; i < adapter->num_vectors; i++)
1956 synchronize_irq(adapter->msix_entries[i].vector);
1957 } else {
1958 synchronize_irq(adapter->pdev->irq);
1959 }
1943} 1960}
1944 1961
1945/** 1962/**
@@ -2723,6 +2740,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2723 e1e_wphy(hw, 22, phy_data); 2740 e1e_wphy(hw, 22, phy_data);
2724 } 2741 }
2725 2742
2743 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2744 if (hw->mac.type == e1000_pch2lan) {
2745 s32 ret_val;
2746
2747 if (rctl & E1000_RCTL_LPE)
2748 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2749 else
2750 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2751 }
2752
2726 /* Setup buffer sizes */ 2753 /* Setup buffer sizes */
2727 rctl &= ~E1000_RCTL_SZ_4096; 2754 rctl &= ~E1000_RCTL_SZ_4096;
2728 rctl |= E1000_RCTL_BSEX; 2755 rctl |= E1000_RCTL_BSEX;
@@ -2759,7 +2786,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2759 * per packet. 2786 * per packet.
2760 */ 2787 */
2761 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2788 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2762 if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && 2789 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
2763 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 2790 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2764 adapter->rx_ps_pages = pages; 2791 adapter->rx_ps_pages = pages;
2765 else 2792 else
@@ -2901,10 +2928,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2901 * dropped transactions. 2928 * dropped transactions.
2902 */ 2929 */
2903 pm_qos_update_request( 2930 pm_qos_update_request(
2904 adapter->netdev->pm_qos_req, 55); 2931 &adapter->netdev->pm_qos_req, 55);
2905 } else { 2932 } else {
2906 pm_qos_update_request( 2933 pm_qos_update_request(
2907 adapter->netdev->pm_qos_req, 2934 &adapter->netdev->pm_qos_req,
2908 PM_QOS_DEFAULT_VALUE); 2935 PM_QOS_DEFAULT_VALUE);
2909 } 2936 }
2910 } 2937 }
@@ -3118,7 +3145,27 @@ void e1000e_reset(struct e1000_adapter *adapter)
3118 * with ERT support assuming ERT set to E1000_ERT_2048), or 3145 * with ERT support assuming ERT set to E1000_ERT_2048), or
3119 * - the full Rx FIFO size minus one full frame 3146 * - the full Rx FIFO size minus one full frame
3120 */ 3147 */
3121 if (hw->mac.type == e1000_pchlan) { 3148 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3149 fc->pause_time = 0xFFFF;
3150 else
3151 fc->pause_time = E1000_FC_PAUSE_TIME;
3152 fc->send_xon = 1;
3153 fc->current_mode = fc->requested_mode;
3154
3155 switch (hw->mac.type) {
3156 default:
3157 if ((adapter->flags & FLAG_HAS_ERT) &&
3158 (adapter->netdev->mtu > ETH_DATA_LEN))
3159 hwm = min(((pba << 10) * 9 / 10),
3160 ((pba << 10) - (E1000_ERT_2048 << 3)));
3161 else
3162 hwm = min(((pba << 10) * 9 / 10),
3163 ((pba << 10) - adapter->max_frame_size));
3164
3165 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3166 fc->low_water = fc->high_water - 8;
3167 break;
3168 case e1000_pchlan:
3122 /* 3169 /*
3123 * Workaround PCH LOM adapter hangs with certain network 3170 * Workaround PCH LOM adapter hangs with certain network
3124 * loads. If hangs persist, try disabling Tx flow control. 3171 * loads. If hangs persist, try disabling Tx flow control.
@@ -3131,26 +3178,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
3131 fc->low_water = 0x3000; 3178 fc->low_water = 0x3000;
3132 } 3179 }
3133 fc->refresh_time = 0x1000; 3180 fc->refresh_time = 0x1000;
3134 } else { 3181 break;
3135 if ((adapter->flags & FLAG_HAS_ERT) && 3182 case e1000_pch2lan:
3136 (adapter->netdev->mtu > ETH_DATA_LEN)) 3183 fc->high_water = 0x05C20;
3137 hwm = min(((pba << 10) * 9 / 10), 3184 fc->low_water = 0x05048;
3138 ((pba << 10) - (E1000_ERT_2048 << 3))); 3185 fc->pause_time = 0x0650;
3139 else 3186 fc->refresh_time = 0x0400;
3140 hwm = min(((pba << 10) * 9 / 10), 3187 break;
3141 ((pba << 10) - adapter->max_frame_size));
3142
3143 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3144 fc->low_water = fc->high_water - 8;
3145 } 3188 }
3146 3189
3147 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3148 fc->pause_time = 0xFFFF;
3149 else
3150 fc->pause_time = E1000_FC_PAUSE_TIME;
3151 fc->send_xon = 1;
3152 fc->current_mode = fc->requested_mode;
3153
3154 /* Allow time for pending master requests to run */ 3190 /* Allow time for pending master requests to run */
3155 mac->ops.reset_hw(hw); 3191 mac->ops.reset_hw(hw);
3156 3192
@@ -3162,8 +3198,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
3162 e1000_get_hw_control(adapter); 3198 e1000_get_hw_control(adapter);
3163 3199
3164 ew32(WUC, 0); 3200 ew32(WUC, 0);
3165 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
3166 e1e_wphy(&adapter->hw, BM_WUC, 0);
3167 3201
3168 if (mac->ops.init_hw(hw)) 3202 if (mac->ops.init_hw(hw))
3169 e_err("Hardware Error\n"); 3203 e_err("Hardware Error\n");
@@ -3194,12 +3228,6 @@ int e1000e_up(struct e1000_adapter *adapter)
3194{ 3228{
3195 struct e1000_hw *hw = &adapter->hw; 3229 struct e1000_hw *hw = &adapter->hw;
3196 3230
3197 /* DMA latency requirement to workaround early-receive/jumbo issue */
3198 if (adapter->flags & FLAG_HAS_ERT)
3199 adapter->netdev->pm_qos_req =
3200 pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
3201 PM_QOS_DEFAULT_VALUE);
3202
3203 /* hardware has been reset, we need to reload some things */ 3231 /* hardware has been reset, we need to reload some things */
3204 e1000_configure(adapter); 3232 e1000_configure(adapter);
3205 3233
@@ -3263,12 +3291,6 @@ void e1000e_down(struct e1000_adapter *adapter)
3263 e1000_clean_tx_ring(adapter); 3291 e1000_clean_tx_ring(adapter);
3264 e1000_clean_rx_ring(adapter); 3292 e1000_clean_rx_ring(adapter);
3265 3293
3266 if (adapter->flags & FLAG_HAS_ERT) {
3267 pm_qos_remove_request(
3268 adapter->netdev->pm_qos_req);
3269 adapter->netdev->pm_qos_req = NULL;
3270 }
3271
3272 /* 3294 /*
3273 * TODO: for power management, we could drop the link and 3295 * TODO: for power management, we could drop the link and
3274 * pci_disable_device here. 3296 * pci_disable_device here.
@@ -3419,13 +3441,18 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
3419 3441
3420 /* disable SERR in case the MSI write causes a master abort */ 3442 /* disable SERR in case the MSI write causes a master abort */
3421 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 3443 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3422 pci_write_config_word(adapter->pdev, PCI_COMMAND, 3444 if (pci_cmd & PCI_COMMAND_SERR)
3423 pci_cmd & ~PCI_COMMAND_SERR); 3445 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3446 pci_cmd & ~PCI_COMMAND_SERR);
3424 3447
3425 err = e1000_test_msi_interrupt(adapter); 3448 err = e1000_test_msi_interrupt(adapter);
3426 3449
3427 /* restore previous setting of command word */ 3450 /* re-enable SERR */
3428 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); 3451 if (pci_cmd & PCI_COMMAND_SERR) {
3452 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3453 pci_cmd |= PCI_COMMAND_SERR;
3454 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3455 }
3429 3456
3430 /* success ! */ 3457 /* success ! */
3431 if (!err) 3458 if (!err)
@@ -3498,6 +3525,12 @@ static int e1000_open(struct net_device *netdev)
3498 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 3525 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3499 e1000_update_mng_vlan(adapter); 3526 e1000_update_mng_vlan(adapter);
3500 3527
3528 /* DMA latency requirement to workaround early-receive/jumbo issue */
3529 if (adapter->flags & FLAG_HAS_ERT)
3530 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3531 PM_QOS_CPU_DMA_LATENCY,
3532 PM_QOS_DEFAULT_VALUE);
3533
3501 /* 3534 /*
3502 * before we allocate an interrupt, we must be ready to handle it. 3535 * before we allocate an interrupt, we must be ready to handle it.
3503 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 3536 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -3602,6 +3635,9 @@ static int e1000_close(struct net_device *netdev)
3602 if (adapter->flags & FLAG_HAS_AMT) 3635 if (adapter->flags & FLAG_HAS_AMT)
3603 e1000_release_hw_control(adapter); 3636 e1000_release_hw_control(adapter);
3604 3637
3638 if (adapter->flags & FLAG_HAS_ERT)
3639 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
3640
3605 pm_runtime_put_sync(&pdev->dev); 3641 pm_runtime_put_sync(&pdev->dev);
3606 3642
3607 return 0; 3643 return 0;
@@ -3672,6 +3708,110 @@ static void e1000_update_phy_info(unsigned long data)
3672} 3708}
3673 3709
3674/** 3710/**
3711 * e1000e_update_phy_stats - Update the PHY statistics counters
3712 * @adapter: board private structure
3713 **/
3714static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
3715{
3716 struct e1000_hw *hw = &adapter->hw;
3717 s32 ret_val;
3718 u16 phy_data;
3719
3720 ret_val = hw->phy.ops.acquire(hw);
3721 if (ret_val)
3722 return;
3723
3724 hw->phy.addr = 1;
3725
3726#define HV_PHY_STATS_PAGE 778
3727 /*
3728 * A page set is expensive so check if already on desired page.
3729 * If not, set to the page with the PHY status registers.
3730 */
3731 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
3732 &phy_data);
3733 if (ret_val)
3734 goto release;
3735 if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
3736 ret_val = e1000e_write_phy_reg_mdic(hw,
3737 IGP01E1000_PHY_PAGE_SELECT,
3738 (HV_PHY_STATS_PAGE <<
3739 IGP_PAGE_SHIFT));
3740 if (ret_val)
3741 goto release;
3742 }
3743
3744 /* Read/clear the upper 16-bit registers and read/accumulate lower */
3745
3746 /* Single Collision Count */
3747 e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
3748 &phy_data);
3749 ret_val = e1000e_read_phy_reg_mdic(hw,
3750 HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
3751 &phy_data);
3752 if (!ret_val)
3753 adapter->stats.scc += phy_data;
3754
3755 /* Excessive Collision Count */
3756 e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
3757 &phy_data);
3758 ret_val = e1000e_read_phy_reg_mdic(hw,
3759 HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
3760 &phy_data);
3761 if (!ret_val)
3762 adapter->stats.ecol += phy_data;
3763
3764 /* Multiple Collision Count */
3765 e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
3766 &phy_data);
3767 ret_val = e1000e_read_phy_reg_mdic(hw,
3768 HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
3769 &phy_data);
3770 if (!ret_val)
3771 adapter->stats.mcc += phy_data;
3772
3773 /* Late Collision Count */
3774 e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
3775 &phy_data);
3776 ret_val = e1000e_read_phy_reg_mdic(hw,
3777 HV_LATECOL_LOWER &
3778 MAX_PHY_REG_ADDRESS,
3779 &phy_data);
3780 if (!ret_val)
3781 adapter->stats.latecol += phy_data;
3782
3783 /* Collision Count - also used for adaptive IFS */
3784 e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
3785 &phy_data);
3786 ret_val = e1000e_read_phy_reg_mdic(hw,
3787 HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
3788 &phy_data);
3789 if (!ret_val)
3790 hw->mac.collision_delta = phy_data;
3791
3792 /* Defer Count */
3793 e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
3794 &phy_data);
3795 ret_val = e1000e_read_phy_reg_mdic(hw,
3796 HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
3797 &phy_data);
3798 if (!ret_val)
3799 adapter->stats.dc += phy_data;
3800
3801 /* Transmit with no CRS */
3802 e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
3803 &phy_data);
3804 ret_val = e1000e_read_phy_reg_mdic(hw,
3805 HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
3806 &phy_data);
3807 if (!ret_val)
3808 adapter->stats.tncrs += phy_data;
3809
3810release:
3811 hw->phy.ops.release(hw);
3812}
3813
3814/**
3675 * e1000e_update_stats - Update the board statistics counters 3815 * e1000e_update_stats - Update the board statistics counters
3676 * @adapter: board private structure 3816 * @adapter: board private structure
3677 **/ 3817 **/
@@ -3680,7 +3820,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3680 struct net_device *netdev = adapter->netdev; 3820 struct net_device *netdev = adapter->netdev;
3681 struct e1000_hw *hw = &adapter->hw; 3821 struct e1000_hw *hw = &adapter->hw;
3682 struct pci_dev *pdev = adapter->pdev; 3822 struct pci_dev *pdev = adapter->pdev;
3683 u16 phy_data;
3684 3823
3685 /* 3824 /*
3686 * Prevent stats update while adapter is being reset, or if the pci 3825 * Prevent stats update while adapter is being reset, or if the pci
@@ -3700,34 +3839,27 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3700 adapter->stats.roc += er32(ROC); 3839 adapter->stats.roc += er32(ROC);
3701 3840
3702 adapter->stats.mpc += er32(MPC); 3841 adapter->stats.mpc += er32(MPC);
3703 if ((hw->phy.type == e1000_phy_82578) || 3842
3704 (hw->phy.type == e1000_phy_82577)) { 3843 /* Half-duplex statistics */
3705 e1e_rphy(hw, HV_SCC_UPPER, &phy_data); 3844 if (adapter->link_duplex == HALF_DUPLEX) {
3706 if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data)) 3845 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
3707 adapter->stats.scc += phy_data; 3846 e1000e_update_phy_stats(adapter);
3708 3847 } else {
3709 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); 3848 adapter->stats.scc += er32(SCC);
3710 if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data)) 3849 adapter->stats.ecol += er32(ECOL);
3711 adapter->stats.ecol += phy_data; 3850 adapter->stats.mcc += er32(MCC);
3712 3851 adapter->stats.latecol += er32(LATECOL);
3713 e1e_rphy(hw, HV_MCC_UPPER, &phy_data); 3852 adapter->stats.dc += er32(DC);
3714 if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data)) 3853
3715 adapter->stats.mcc += phy_data; 3854 hw->mac.collision_delta = er32(COLC);
3716 3855
3717 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); 3856 if ((hw->mac.type != e1000_82574) &&
3718 if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data)) 3857 (hw->mac.type != e1000_82583))
3719 adapter->stats.latecol += phy_data; 3858 adapter->stats.tncrs += er32(TNCRS);
3720 3859 }
3721 e1e_rphy(hw, HV_DC_UPPER, &phy_data); 3860 adapter->stats.colc += hw->mac.collision_delta;
3722 if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
3723 adapter->stats.dc += phy_data;
3724 } else {
3725 adapter->stats.scc += er32(SCC);
3726 adapter->stats.ecol += er32(ECOL);
3727 adapter->stats.mcc += er32(MCC);
3728 adapter->stats.latecol += er32(LATECOL);
3729 adapter->stats.dc += er32(DC);
3730 } 3861 }
3862
3731 adapter->stats.xonrxc += er32(XONRXC); 3863 adapter->stats.xonrxc += er32(XONRXC);
3732 adapter->stats.xontxc += er32(XONTXC); 3864 adapter->stats.xontxc += er32(XONTXC);
3733 adapter->stats.xoffrxc += er32(XOFFRXC); 3865 adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3745,28 +3877,9 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3745 3877
3746 hw->mac.tx_packet_delta = er32(TPT); 3878 hw->mac.tx_packet_delta = er32(TPT);
3747 adapter->stats.tpt += hw->mac.tx_packet_delta; 3879 adapter->stats.tpt += hw->mac.tx_packet_delta;
3748 if ((hw->phy.type == e1000_phy_82578) ||
3749 (hw->phy.type == e1000_phy_82577)) {
3750 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3751 if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
3752 hw->mac.collision_delta = phy_data;
3753 } else {
3754 hw->mac.collision_delta = er32(COLC);
3755 }
3756 adapter->stats.colc += hw->mac.collision_delta;
3757 3880
3758 adapter->stats.algnerrc += er32(ALGNERRC); 3881 adapter->stats.algnerrc += er32(ALGNERRC);
3759 adapter->stats.rxerrc += er32(RXERRC); 3882 adapter->stats.rxerrc += er32(RXERRC);
3760 if ((hw->phy.type == e1000_phy_82578) ||
3761 (hw->phy.type == e1000_phy_82577)) {
3762 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3763 if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
3764 adapter->stats.tncrs += phy_data;
3765 } else {
3766 if ((hw->mac.type != e1000_82574) &&
3767 (hw->mac.type != e1000_82583))
3768 adapter->stats.tncrs += er32(TNCRS);
3769 }
3770 adapter->stats.cexterr += er32(CEXTERR); 3883 adapter->stats.cexterr += er32(CEXTERR);
3771 adapter->stats.tsctc += er32(TSCTC); 3884 adapter->stats.tsctc += er32(TSCTC);
3772 adapter->stats.tsctfc += er32(TSCTFC); 3885 adapter->stats.tsctfc += er32(TSCTFC);
@@ -3865,7 +3978,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
3865 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); 3978 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3866} 3979}
3867 3980
3868bool e1000e_has_link(struct e1000_adapter *adapter) 3981static bool e1000e_has_link(struct e1000_adapter *adapter)
3869{ 3982{
3870 struct e1000_hw *hw = &adapter->hw; 3983 struct e1000_hw *hw = &adapter->hw;
3871 bool link_active = 0; 3984 bool link_active = 0;
@@ -4841,14 +4954,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
4841 int retval = 0; 4954 int retval = 0;
4842 4955
4843 /* copy MAC RARs to PHY RARs */ 4956 /* copy MAC RARs to PHY RARs */
4844 for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) { 4957 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
4845 mac_reg = er32(RAL(i));
4846 e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
4847 e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
4848 mac_reg = er32(RAH(i));
4849 e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
4850 e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
4851 }
4852 4958
4853 /* copy MAC MTA to PHY MTA */ 4959 /* copy MAC MTA to PHY MTA */
4854 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 4960 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
@@ -5551,8 +5657,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5551 if (err) 5657 if (err)
5552 goto err_sw_init; 5658 goto err_sw_init;
5553 5659
5554 err = -EIO;
5555
5556 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 5660 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5557 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 5661 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
5558 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 5662 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -5899,6 +6003,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5899 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, 6003 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
5900 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, 6004 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
5901 6005
6006 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6007 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6008
5902 { } /* terminate list */ 6009 { } /* terminate list */
5903}; 6010};
5904MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 6011MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
@@ -5935,7 +6042,7 @@ static int __init e1000_init_module(void)
5935 int ret; 6042 int ret;
5936 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 6043 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
5937 e1000e_driver_version); 6044 e1000e_driver_version);
5938 pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n"); 6045 pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
5939 ret = pci_register_driver(&e1000_driver); 6046 ret = pci_register_driver(&e1000_driver);
5940 6047
5941 return ret; 6048 return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a150e48a117f..34aeec13bb16 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b4ac82d51b20..3d3dc0c82355 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -2319,6 +2319,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
2319 case I82577_E_PHY_ID: 2319 case I82577_E_PHY_ID:
2320 phy_type = e1000_phy_82577; 2320 phy_type = e1000_phy_82577;
2321 break; 2321 break;
2322 case I82579_E_PHY_ID:
2323 phy_type = e1000_phy_82579;
2324 break;
2322 default: 2325 default:
2323 phy_type = e1000_phy_unknown; 2326 phy_type = e1000_phy_unknown;
2324 break; 2327 break;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 8b92acb448c2..3beba70b7dea 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -335,7 +335,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
335 335
336 memset(stats, 0, sizeof(*stats)); 336 memset(stats, 0, sizeof(*stats));
337 337
338 cb2 = (void *)get_zeroed_page(GFP_ATOMIC); 338 cb2 = (void *)get_zeroed_page(GFP_KERNEL);
339 if (!cb2) { 339 if (!cb2) {
340 ehea_error("no mem for cb2"); 340 ehea_error("no mem for cb2");
341 goto out; 341 goto out;
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 882c50c9c34f..f608a6c54af5 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -126,7 +126,7 @@ struct ehea_swqe {
126 u8 immediate_data[SWQE2_MAX_IMM]; 126 u8 immediate_data[SWQE2_MAX_IMM];
127 /* 0xd0 */ 127 /* 0xd0 */
128 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; 128 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
129 } immdata_desc __attribute__ ((packed)); 129 } immdata_desc __packed;
130 130
131 /* Send WQE Format 3 */ 131 /* Send WQE Format 3 */
132 struct { 132 struct {
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
index 1eb289f773bf..d6dd1b4edf6e 100644
--- a/drivers/net/enic/cq_desc.h
+++ b/drivers/net/enic/cq_desc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 337d1943af46..c2c0680a1146 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -73,7 +73,16 @@ struct cq_enet_rq_desc {
73#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) 73#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
74#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) 74#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
75 75
76#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4 76#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12
77#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \
78 ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1)
79#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12)
80#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3
81#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \
82 ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1)
83#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13
84
85#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8
77#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ 86#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
78 ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) 87 ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
79#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 88#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
@@ -96,7 +105,7 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
96 u8 *type, u8 *color, u16 *q_number, u16 *completed_index, 105 u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
97 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, 106 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
98 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, 107 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
99 u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof, 108 u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
100 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, 109 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
101 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, 110 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
102 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) 111 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
@@ -136,7 +145,10 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
136 *vlan_stripped = (bytes_written_flags & 145 *vlan_stripped = (bytes_written_flags &
137 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; 146 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
138 147
139 *vlan = le16_to_cpu(desc->vlan); 148 /*
149 * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
150 */
151 *vlan_tci = le16_to_cpu(desc->vlan);
140 152
141 if (*fcoe) { 153 if (*fcoe) {
142 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & 154 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 45e86d1e5b1b..f239aa8c6f4c 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -20,8 +20,6 @@
20#ifndef _ENIC_H_ 20#ifndef _ENIC_H_
21#define _ENIC_H_ 21#define _ENIC_H_
22 22
23#include <linux/inet_lro.h>
24
25#include "vnic_enet.h" 23#include "vnic_enet.h"
26#include "vnic_dev.h" 24#include "vnic_dev.h"
27#include "vnic_wq.h" 25#include "vnic_wq.h"
@@ -34,12 +32,8 @@
34 32
35#define DRV_NAME "enic" 33#define DRV_NAME "enic"
36#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
37#define DRV_VERSION "1.3.1.1-pp" 35#define DRV_VERSION "1.4.1.1"
38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
39#define PFX DRV_NAME ": "
40
41#define ENIC_LRO_MAX_DESC 8
42#define ENIC_LRO_MAX_AGGR 64
43 37
44#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
45 39
@@ -116,6 +110,8 @@ struct enic {
116 spinlock_t wq_lock[ENIC_WQ_MAX]; 110 spinlock_t wq_lock[ENIC_WQ_MAX];
117 unsigned int wq_count; 111 unsigned int wq_count;
118 struct vlan_group *vlan_group; 112 struct vlan_group *vlan_group;
113 u16 loop_enable;
114 u16 loop_tag;
119 115
120 /* receive queue cache line section */ 116 /* receive queue cache line section */
121 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; 117 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
@@ -124,8 +120,6 @@ struct enic {
124 u64 rq_truncated_pkts; 120 u64 rq_truncated_pkts;
125 u64 rq_bad_fcs; 121 u64 rq_bad_fcs;
126 struct napi_struct napi; 122 struct napi_struct napi;
127 struct net_lro_mgr lro_mgr;
128 struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
129 123
130 /* interrupt resource cache line section */ 124 /* interrupt resource cache line section */
131 ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; 125 ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
@@ -137,4 +131,9 @@ struct enic {
137 unsigned int cq_count; 131 unsigned int cq_count;
138}; 132};
139 133
134static inline struct device *enic_get_dev(struct enic *enic)
135{
136 return &(enic->pdev->dev);
137}
138
140#endif /* _ENIC_H_ */ 139#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index bc7d6b96de3d..77a7f87d498e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -29,12 +29,12 @@
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
32#include <linux/if_link.h>
33#include <linux/ethtool.h> 32#include <linux/ethtool.h>
34#include <linux/in.h> 33#include <linux/in.h>
35#include <linux/ip.h> 34#include <linux/ip.h>
36#include <linux/ipv6.h> 35#include <linux/ipv6.h>
37#include <linux/tcp.h> 36#include <linux/tcp.h>
37#include <linux/rtnetlink.h>
38#include <net/ip6_checksum.h> 38#include <net/ip6_checksum.h>
39 39
40#include "cq_enet_desc.h" 40#include "cq_enet_desc.h"
@@ -145,15 +145,25 @@ static int enic_get_settings(struct net_device *netdev,
145 return 0; 145 return 0;
146} 146}
147 147
148static int enic_dev_fw_info(struct enic *enic,
149 struct vnic_devcmd_fw_info **fw_info)
150{
151 int err;
152
153 spin_lock(&enic->devcmd_lock);
154 err = vnic_dev_fw_info(enic->vdev, fw_info);
155 spin_unlock(&enic->devcmd_lock);
156
157 return err;
158}
159
148static void enic_get_drvinfo(struct net_device *netdev, 160static void enic_get_drvinfo(struct net_device *netdev,
149 struct ethtool_drvinfo *drvinfo) 161 struct ethtool_drvinfo *drvinfo)
150{ 162{
151 struct enic *enic = netdev_priv(netdev); 163 struct enic *enic = netdev_priv(netdev);
152 struct vnic_devcmd_fw_info *fw_info; 164 struct vnic_devcmd_fw_info *fw_info;
153 165
154 spin_lock(&enic->devcmd_lock); 166 enic_dev_fw_info(enic, &fw_info);
155 vnic_dev_fw_info(enic->vdev, &fw_info);
156 spin_unlock(&enic->devcmd_lock);
157 167
158 strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); 168 strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
159 strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); 169 strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -191,6 +201,17 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
191 } 201 }
192} 202}
193 203
204static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
205{
206 int err;
207
208 spin_lock(&enic->devcmd_lock);
209 err = vnic_dev_stats_dump(enic->vdev, vstats);
210 spin_unlock(&enic->devcmd_lock);
211
212 return err;
213}
214
194static void enic_get_ethtool_stats(struct net_device *netdev, 215static void enic_get_ethtool_stats(struct net_device *netdev,
195 struct ethtool_stats *stats, u64 *data) 216 struct ethtool_stats *stats, u64 *data)
196{ 217{
@@ -198,9 +219,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
198 struct vnic_stats *vstats; 219 struct vnic_stats *vstats;
199 unsigned int i; 220 unsigned int i;
200 221
201 spin_lock(&enic->devcmd_lock); 222 enic_dev_stats_dump(enic, &vstats);
202 vnic_dev_stats_dump(enic->vdev, &vstats);
203 spin_unlock(&enic->devcmd_lock);
204 223
205 for (i = 0; i < enic_n_tx_stats; i++) 224 for (i = 0; i < enic_n_tx_stats; i++)
206 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; 225 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
@@ -346,7 +365,6 @@ static const struct ethtool_ops enic_ethtool_ops = {
346 .get_coalesce = enic_get_coalesce, 365 .get_coalesce = enic_get_coalesce,
347 .set_coalesce = enic_set_coalesce, 366 .set_coalesce = enic_set_coalesce,
348 .get_flags = ethtool_op_get_flags, 367 .get_flags = ethtool_op_get_flags,
349 .set_flags = ethtool_op_set_flags,
350}; 368};
351 369
352static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) 370static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -399,54 +417,55 @@ static void enic_log_q_error(struct enic *enic)
399 for (i = 0; i < enic->wq_count; i++) { 417 for (i = 0; i < enic->wq_count; i++) {
400 error_status = vnic_wq_error_status(&enic->wq[i]); 418 error_status = vnic_wq_error_status(&enic->wq[i]);
401 if (error_status) 419 if (error_status)
402 printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n", 420 netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
403 enic->netdev->name, i, error_status); 421 i, error_status);
404 } 422 }
405 423
406 for (i = 0; i < enic->rq_count; i++) { 424 for (i = 0; i < enic->rq_count; i++) {
407 error_status = vnic_rq_error_status(&enic->rq[i]); 425 error_status = vnic_rq_error_status(&enic->rq[i]);
408 if (error_status) 426 if (error_status)
409 printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n", 427 netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
410 enic->netdev->name, i, error_status); 428 i, error_status);
411 } 429 }
412} 430}
413 431
414static void enic_link_check(struct enic *enic) 432static void enic_msglvl_check(struct enic *enic)
415{ 433{
416 int link_status = vnic_dev_link_status(enic->vdev); 434 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
417 int carrier_ok = netif_carrier_ok(enic->netdev);
418 435
419 if (link_status && !carrier_ok) { 436 if (msg_enable != enic->msg_enable) {
420 printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name); 437 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
421 netif_carrier_on(enic->netdev); 438 enic->msg_enable, msg_enable);
422 } else if (!link_status && carrier_ok) { 439 enic->msg_enable = msg_enable;
423 printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
424 netif_carrier_off(enic->netdev);
425 } 440 }
426} 441}
427 442
428static void enic_mtu_check(struct enic *enic) 443static void enic_mtu_check(struct enic *enic)
429{ 444{
430 u32 mtu = vnic_dev_mtu(enic->vdev); 445 u32 mtu = vnic_dev_mtu(enic->vdev);
446 struct net_device *netdev = enic->netdev;
431 447
432 if (mtu && mtu != enic->port_mtu) { 448 if (mtu && mtu != enic->port_mtu) {
433 enic->port_mtu = mtu; 449 enic->port_mtu = mtu;
434 if (mtu < enic->netdev->mtu) 450 if (mtu < netdev->mtu)
435 printk(KERN_WARNING PFX 451 netdev_warn(netdev,
436 "%s: interface MTU (%d) set higher " 452 "interface MTU (%d) set higher "
437 "than switch port MTU (%d)\n", 453 "than switch port MTU (%d)\n",
438 enic->netdev->name, enic->netdev->mtu, mtu); 454 netdev->mtu, mtu);
439 } 455 }
440} 456}
441 457
442static void enic_msglvl_check(struct enic *enic) 458static void enic_link_check(struct enic *enic)
443{ 459{
444 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); 460 int link_status = vnic_dev_link_status(enic->vdev);
461 int carrier_ok = netif_carrier_ok(enic->netdev);
445 462
446 if (msg_enable != enic->msg_enable) { 463 if (link_status && !carrier_ok) {
447 printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n", 464 netdev_info(enic->netdev, "Link UP\n");
448 enic->netdev->name, enic->msg_enable, msg_enable); 465 netif_carrier_on(enic->netdev);
449 enic->msg_enable = msg_enable; 466 } else if (!link_status && carrier_ok) {
467 netdev_info(enic->netdev, "Link DOWN\n");
468 netif_carrier_off(enic->netdev);
450 } 469 }
451} 470}
452 471
@@ -574,7 +593,7 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
574 593
575static inline void enic_queue_wq_skb_cont(struct enic *enic, 594static inline void enic_queue_wq_skb_cont(struct enic *enic,
576 struct vnic_wq *wq, struct sk_buff *skb, 595 struct vnic_wq *wq, struct sk_buff *skb,
577 unsigned int len_left) 596 unsigned int len_left, int loopback)
578{ 597{
579 skb_frag_t *frag; 598 skb_frag_t *frag;
580 599
@@ -586,13 +605,14 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic,
586 frag->page_offset, frag->size, 605 frag->page_offset, frag->size,
587 PCI_DMA_TODEVICE), 606 PCI_DMA_TODEVICE),
588 frag->size, 607 frag->size,
589 (len_left == 0)); /* EOP? */ 608 (len_left == 0), /* EOP? */
609 loopback);
590 } 610 }
591} 611}
592 612
593static inline void enic_queue_wq_skb_vlan(struct enic *enic, 613static inline void enic_queue_wq_skb_vlan(struct enic *enic,
594 struct vnic_wq *wq, struct sk_buff *skb, 614 struct vnic_wq *wq, struct sk_buff *skb,
595 int vlan_tag_insert, unsigned int vlan_tag) 615 int vlan_tag_insert, unsigned int vlan_tag, int loopback)
596{ 616{
597 unsigned int head_len = skb_headlen(skb); 617 unsigned int head_len = skb_headlen(skb);
598 unsigned int len_left = skb->len - head_len; 618 unsigned int len_left = skb->len - head_len;
@@ -608,15 +628,15 @@ static inline void enic_queue_wq_skb_vlan(struct enic *enic,
608 head_len, PCI_DMA_TODEVICE), 628 head_len, PCI_DMA_TODEVICE),
609 head_len, 629 head_len,
610 vlan_tag_insert, vlan_tag, 630 vlan_tag_insert, vlan_tag,
611 eop); 631 eop, loopback);
612 632
613 if (!eop) 633 if (!eop)
614 enic_queue_wq_skb_cont(enic, wq, skb, len_left); 634 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
615} 635}
616 636
617static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, 637static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
618 struct vnic_wq *wq, struct sk_buff *skb, 638 struct vnic_wq *wq, struct sk_buff *skb,
619 int vlan_tag_insert, unsigned int vlan_tag) 639 int vlan_tag_insert, unsigned int vlan_tag, int loopback)
620{ 640{
621 unsigned int head_len = skb_headlen(skb); 641 unsigned int head_len = skb_headlen(skb);
622 unsigned int len_left = skb->len - head_len; 642 unsigned int len_left = skb->len - head_len;
@@ -636,15 +656,15 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
636 csum_offset, 656 csum_offset,
637 hdr_len, 657 hdr_len,
638 vlan_tag_insert, vlan_tag, 658 vlan_tag_insert, vlan_tag,
639 eop); 659 eop, loopback);
640 660
641 if (!eop) 661 if (!eop)
642 enic_queue_wq_skb_cont(enic, wq, skb, len_left); 662 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
643} 663}
644 664
645static inline void enic_queue_wq_skb_tso(struct enic *enic, 665static inline void enic_queue_wq_skb_tso(struct enic *enic,
646 struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, 666 struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
647 int vlan_tag_insert, unsigned int vlan_tag) 667 int vlan_tag_insert, unsigned int vlan_tag, int loopback)
648{ 668{
649 unsigned int frag_len_left = skb_headlen(skb); 669 unsigned int frag_len_left = skb_headlen(skb);
650 unsigned int len_left = skb->len - frag_len_left; 670 unsigned int len_left = skb->len - frag_len_left;
@@ -681,7 +701,7 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
681 len, 701 len,
682 mss, hdr_len, 702 mss, hdr_len,
683 vlan_tag_insert, vlan_tag, 703 vlan_tag_insert, vlan_tag,
684 eop && (len == frag_len_left)); 704 eop && (len == frag_len_left), loopback);
685 frag_len_left -= len; 705 frag_len_left -= len;
686 offset += len; 706 offset += len;
687 } 707 }
@@ -707,7 +727,8 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
707 dma_addr, 727 dma_addr,
708 len, 728 len,
709 (len_left == 0) && 729 (len_left == 0) &&
710 (len == frag_len_left)); /* EOP? */ 730 (len == frag_len_left), /* EOP? */
731 loopback);
711 frag_len_left -= len; 732 frag_len_left -= len;
712 offset += len; 733 offset += len;
713 } 734 }
@@ -720,22 +741,26 @@ static inline void enic_queue_wq_skb(struct enic *enic,
720 unsigned int mss = skb_shinfo(skb)->gso_size; 741 unsigned int mss = skb_shinfo(skb)->gso_size;
721 unsigned int vlan_tag = 0; 742 unsigned int vlan_tag = 0;
722 int vlan_tag_insert = 0; 743 int vlan_tag_insert = 0;
744 int loopback = 0;
723 745
724 if (enic->vlan_group && vlan_tx_tag_present(skb)) { 746 if (enic->vlan_group && vlan_tx_tag_present(skb)) {
725 /* VLAN tag from trunking driver */ 747 /* VLAN tag from trunking driver */
726 vlan_tag_insert = 1; 748 vlan_tag_insert = 1;
727 vlan_tag = vlan_tx_tag_get(skb); 749 vlan_tag = vlan_tx_tag_get(skb);
750 } else if (enic->loop_enable) {
751 vlan_tag = enic->loop_tag;
752 loopback = 1;
728 } 753 }
729 754
730 if (mss) 755 if (mss)
731 enic_queue_wq_skb_tso(enic, wq, skb, mss, 756 enic_queue_wq_skb_tso(enic, wq, skb, mss,
732 vlan_tag_insert, vlan_tag); 757 vlan_tag_insert, vlan_tag, loopback);
733 else if (skb->ip_summed == CHECKSUM_PARTIAL) 758 else if (skb->ip_summed == CHECKSUM_PARTIAL)
734 enic_queue_wq_skb_csum_l4(enic, wq, skb, 759 enic_queue_wq_skb_csum_l4(enic, wq, skb,
735 vlan_tag_insert, vlan_tag); 760 vlan_tag_insert, vlan_tag, loopback);
736 else 761 else
737 enic_queue_wq_skb_vlan(enic, wq, skb, 762 enic_queue_wq_skb_vlan(enic, wq, skb,
738 vlan_tag_insert, vlan_tag); 763 vlan_tag_insert, vlan_tag, loopback);
739} 764}
740 765
741/* netif_tx_lock held, process context with BHs disabled, or BH */ 766/* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -769,8 +794,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
769 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { 794 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
770 netif_stop_queue(netdev); 795 netif_stop_queue(netdev);
771 /* This is a hard error, log it */ 796 /* This is a hard error, log it */
772 printk(KERN_ERR PFX "%s: BUG! Tx ring full when " 797 netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
773 "queue awake!\n", netdev->name);
774 spin_unlock_irqrestore(&enic->wq_lock[0], flags); 798 spin_unlock_irqrestore(&enic->wq_lock[0], flags);
775 return NETDEV_TX_BUSY; 799 return NETDEV_TX_BUSY;
776 } 800 }
@@ -792,9 +816,7 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
792 struct net_device_stats *net_stats = &netdev->stats; 816 struct net_device_stats *net_stats = &netdev->stats;
793 struct vnic_stats *stats; 817 struct vnic_stats *stats;
794 818
795 spin_lock(&enic->devcmd_lock); 819 enic_dev_stats_dump(enic, &stats);
796 vnic_dev_stats_dump(enic->vdev, &stats);
797 spin_unlock(&enic->devcmd_lock);
798 820
799 net_stats->tx_packets = stats->tx.tx_frames_ok; 821 net_stats->tx_packets = stats->tx.tx_frames_ok;
800 net_stats->tx_bytes = stats->tx.tx_bytes_ok; 822 net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -812,9 +834,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
812 return net_stats; 834 return net_stats;
813} 835}
814 836
815static void enic_reset_mcaddrs(struct enic *enic) 837static void enic_reset_multicast_list(struct enic *enic)
816{ 838{
817 enic->mc_count = 0; 839 enic->mc_count = 0;
840 enic->flags = 0;
818} 841}
819 842
820static int enic_set_mac_addr(struct net_device *netdev, char *addr) 843static int enic_set_mac_addr(struct net_device *netdev, char *addr)
@@ -891,6 +914,41 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
891 return -EOPNOTSUPP; 914 return -EOPNOTSUPP;
892} 915}
893 916
917static int enic_dev_packet_filter(struct enic *enic, int directed,
918 int multicast, int broadcast, int promisc, int allmulti)
919{
920 int err;
921
922 spin_lock(&enic->devcmd_lock);
923 err = vnic_dev_packet_filter(enic->vdev, directed,
924 multicast, broadcast, promisc, allmulti);
925 spin_unlock(&enic->devcmd_lock);
926
927 return err;
928}
929
930static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
931{
932 int err;
933
934 spin_lock(&enic->devcmd_lock);
935 err = vnic_dev_add_addr(enic->vdev, addr);
936 spin_unlock(&enic->devcmd_lock);
937
938 return err;
939}
940
941static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
942{
943 int err;
944
945 spin_lock(&enic->devcmd_lock);
946 err = vnic_dev_del_addr(enic->vdev, addr);
947 spin_unlock(&enic->devcmd_lock);
948
949 return err;
950}
951
894/* netif_tx_lock held, BHs disabled */ 952/* netif_tx_lock held, BHs disabled */
895static void enic_set_multicast_list(struct net_device *netdev) 953static void enic_set_multicast_list(struct net_device *netdev)
896{ 954{
@@ -910,11 +968,9 @@ static void enic_set_multicast_list(struct net_device *netdev)
910 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) 968 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
911 mc_count = ENIC_MULTICAST_PERFECT_FILTERS; 969 mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
912 970
913 spin_lock(&enic->devcmd_lock);
914
915 if (enic->flags != flags) { 971 if (enic->flags != flags) {
916 enic->flags = flags; 972 enic->flags = flags;
917 vnic_dev_packet_filter(enic->vdev, directed, 973 enic_dev_packet_filter(enic, directed,
918 multicast, broadcast, promisc, allmulti); 974 multicast, broadcast, promisc, allmulti);
919 } 975 }
920 976
@@ -937,7 +993,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
937 mc_addr[j]) == 0) 993 mc_addr[j]) == 0)
938 break; 994 break;
939 if (j == mc_count) 995 if (j == mc_count)
940 enic_del_multicast_addr(enic, enic->mc_addr[i]); 996 enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
941 } 997 }
942 998
943 for (i = 0; i < mc_count; i++) { 999 for (i = 0; i < mc_count; i++) {
@@ -946,7 +1002,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
946 enic->mc_addr[j]) == 0) 1002 enic->mc_addr[j]) == 0)
947 break; 1003 break;
948 if (j == enic->mc_count) 1004 if (j == enic->mc_count)
949 enic_add_multicast_addr(enic, mc_addr[i]); 1005 enic_dev_add_multicast_addr(enic, mc_addr[i]);
950 } 1006 }
951 1007
952 /* Save the list to compare against next time 1008 /* Save the list to compare against next time
@@ -956,8 +1012,6 @@ static void enic_set_multicast_list(struct net_device *netdev)
956 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN); 1012 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
957 1013
958 enic->mc_count = mc_count; 1014 enic->mc_count = mc_count;
959
960 spin_unlock(&enic->devcmd_lock);
961} 1015}
962 1016
963/* rtnl lock is held */ 1017/* rtnl lock is held */
@@ -1226,7 +1280,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
1226 struct enic *enic = vnic_dev_priv(rq->vdev); 1280 struct enic *enic = vnic_dev_priv(rq->vdev);
1227 struct net_device *netdev = enic->netdev; 1281 struct net_device *netdev = enic->netdev;
1228 struct sk_buff *skb; 1282 struct sk_buff *skb;
1229 unsigned int len = netdev->mtu + ETH_HLEN; 1283 unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
1230 unsigned int os_buf_index = 0; 1284 unsigned int os_buf_index = 0;
1231 dma_addr_t dma_addr; 1285 dma_addr_t dma_addr;
1232 1286
@@ -1263,12 +1317,24 @@ static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
1263 return 0; 1317 return 0;
1264} 1318}
1265 1319
1320static int enic_dev_hw_version(struct enic *enic,
1321 enum vnic_dev_hw_version *hw_ver)
1322{
1323 int err;
1324
1325 spin_lock(&enic->devcmd_lock);
1326 err = vnic_dev_hw_version(enic->vdev, hw_ver);
1327 spin_unlock(&enic->devcmd_lock);
1328
1329 return err;
1330}
1331
1266static int enic_set_rq_alloc_buf(struct enic *enic) 1332static int enic_set_rq_alloc_buf(struct enic *enic)
1267{ 1333{
1268 enum vnic_dev_hw_version hw_ver; 1334 enum vnic_dev_hw_version hw_ver;
1269 int err; 1335 int err;
1270 1336
1271 err = vnic_dev_hw_version(enic->vdev, &hw_ver); 1337 err = enic_dev_hw_version(enic, &hw_ver);
1272 if (err) 1338 if (err)
1273 return err; 1339 return err;
1274 1340
@@ -1287,51 +1353,6 @@ static int enic_set_rq_alloc_buf(struct enic *enic)
1287 return 0; 1353 return 0;
1288} 1354}
1289 1355
1290static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
1291 void **tcph, u64 *hdr_flags, void *priv)
1292{
1293 struct cq_enet_rq_desc *cq_desc = priv;
1294 unsigned int ip_len;
1295 struct iphdr *iph;
1296
1297 u8 type, color, eop, sop, ingress_port, vlan_stripped;
1298 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
1299 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
1300 u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
1301 u8 packet_error;
1302 u16 q_number, completed_index, bytes_written, vlan, checksum;
1303 u32 rss_hash;
1304
1305 cq_enet_rq_desc_dec(cq_desc,
1306 &type, &color, &q_number, &completed_index,
1307 &ingress_port, &fcoe, &eop, &sop, &rss_type,
1308 &csum_not_calc, &rss_hash, &bytes_written,
1309 &packet_error, &vlan_stripped, &vlan, &checksum,
1310 &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
1311 &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
1312 &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
1313 &fcs_ok);
1314
1315 if (!(ipv4 && tcp && !ipv4_fragment))
1316 return -1;
1317
1318 skb_reset_network_header(skb);
1319 iph = ip_hdr(skb);
1320
1321 ip_len = ip_hdrlen(skb);
1322 skb_set_transport_header(skb, ip_len);
1323
1324 /* check if ip header and tcp header are complete */
1325 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
1326 return -1;
1327
1328 *hdr_flags = LRO_IPV4 | LRO_TCP;
1329 *tcph = tcp_hdr(skb);
1330 *iphdr = iph;
1331
1332 return 0;
1333}
1334
1335static void enic_rq_indicate_buf(struct vnic_rq *rq, 1356static void enic_rq_indicate_buf(struct vnic_rq *rq,
1336 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 1357 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1337 int skipped, void *opaque) 1358 int skipped, void *opaque)
@@ -1345,7 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1345 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; 1366 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
1346 u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; 1367 u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
1347 u8 packet_error; 1368 u8 packet_error;
1348 u16 q_number, completed_index, bytes_written, vlan, checksum; 1369 u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
1349 u32 rss_hash; 1370 u32 rss_hash;
1350 1371
1351 if (skipped) 1372 if (skipped)
@@ -1360,7 +1381,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1360 &type, &color, &q_number, &completed_index, 1381 &type, &color, &q_number, &completed_index,
1361 &ingress_port, &fcoe, &eop, &sop, &rss_type, 1382 &ingress_port, &fcoe, &eop, &sop, &rss_type,
1362 &csum_not_calc, &rss_hash, &bytes_written, 1383 &csum_not_calc, &rss_hash, &bytes_written,
1363 &packet_error, &vlan_stripped, &vlan, &checksum, 1384 &packet_error, &vlan_stripped, &vlan_tci, &checksum,
1364 &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, 1385 &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
1365 &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, 1386 &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
1366 &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, 1387 &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
@@ -1395,20 +1416,20 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1395 1416
1396 skb->dev = netdev; 1417 skb->dev = netdev;
1397 1418
1398 if (enic->vlan_group && vlan_stripped) { 1419 if (enic->vlan_group && vlan_stripped &&
1420 (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {
1399 1421
1400 if ((netdev->features & NETIF_F_LRO) && ipv4) 1422 if (netdev->features & NETIF_F_GRO)
1401 lro_vlan_hwaccel_receive_skb(&enic->lro_mgr, 1423 vlan_gro_receive(&enic->napi, enic->vlan_group,
1402 skb, enic->vlan_group, 1424 vlan_tci, skb);
1403 vlan, cq_desc);
1404 else 1425 else
1405 vlan_hwaccel_receive_skb(skb, 1426 vlan_hwaccel_receive_skb(skb,
1406 enic->vlan_group, vlan); 1427 enic->vlan_group, vlan_tci);
1407 1428
1408 } else { 1429 } else {
1409 1430
1410 if ((netdev->features & NETIF_F_LRO) && ipv4) 1431 if (netdev->features & NETIF_F_GRO)
1411 lro_receive_skb(&enic->lro_mgr, skb, cq_desc); 1432 napi_gro_receive(&enic->napi, skb);
1412 else 1433 else
1413 netif_receive_skb(skb); 1434 netif_receive_skb(skb);
1414 1435
@@ -1438,7 +1459,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1438static int enic_poll(struct napi_struct *napi, int budget) 1459static int enic_poll(struct napi_struct *napi, int budget)
1439{ 1460{
1440 struct enic *enic = container_of(napi, struct enic, napi); 1461 struct enic *enic = container_of(napi, struct enic, napi);
1441 struct net_device *netdev = enic->netdev;
1442 unsigned int rq_work_to_do = budget; 1462 unsigned int rq_work_to_do = budget;
1443 unsigned int wq_work_to_do = -1; /* no limit */ 1463 unsigned int wq_work_to_do = -1; /* no limit */
1444 unsigned int work_done, rq_work_done, wq_work_done; 1464 unsigned int work_done, rq_work_done, wq_work_done;
@@ -1478,12 +1498,9 @@ static int enic_poll(struct napi_struct *napi, int budget)
1478 if (rq_work_done < rq_work_to_do) { 1498 if (rq_work_done < rq_work_to_do) {
1479 1499
1480 /* Some work done, but not enough to stay in polling, 1500 /* Some work done, but not enough to stay in polling,
1481 * flush all LROs and exit polling 1501 * exit polling
1482 */ 1502 */
1483 1503
1484 if (netdev->features & NETIF_F_LRO)
1485 lro_flush_all(&enic->lro_mgr);
1486
1487 napi_complete(napi); 1504 napi_complete(napi);
1488 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); 1505 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
1489 } 1506 }
@@ -1494,7 +1511,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
1494static int enic_poll_msix(struct napi_struct *napi, int budget) 1511static int enic_poll_msix(struct napi_struct *napi, int budget)
1495{ 1512{
1496 struct enic *enic = container_of(napi, struct enic, napi); 1513 struct enic *enic = container_of(napi, struct enic, napi);
1497 struct net_device *netdev = enic->netdev;
1498 unsigned int work_to_do = budget; 1514 unsigned int work_to_do = budget;
1499 unsigned int work_done; 1515 unsigned int work_done;
1500 int err; 1516 int err;
@@ -1528,12 +1544,9 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1528 if (work_done < work_to_do) { 1544 if (work_done < work_to_do) {
1529 1545
1530 /* Some work done, but not enough to stay in polling, 1546 /* Some work done, but not enough to stay in polling,
1531 * flush all LROs and exit polling 1547 * exit polling
1532 */ 1548 */
1533 1549
1534 if (netdev->features & NETIF_F_LRO)
1535 lro_flush_all(&enic->lro_mgr);
1536
1537 napi_complete(napi); 1550 napi_complete(napi);
1538 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1551 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1539 } 1552 }
@@ -1655,7 +1668,7 @@ static void enic_synchronize_irqs(struct enic *enic)
1655 } 1668 }
1656} 1669}
1657 1670
1658static int enic_notify_set(struct enic *enic) 1671static int enic_dev_notify_set(struct enic *enic)
1659{ 1672{
1660 int err; 1673 int err;
1661 1674
@@ -1676,6 +1689,39 @@ static int enic_notify_set(struct enic *enic)
1676 return err; 1689 return err;
1677} 1690}
1678 1691
1692static int enic_dev_notify_unset(struct enic *enic)
1693{
1694 int err;
1695
1696 spin_lock(&enic->devcmd_lock);
1697 err = vnic_dev_notify_unset(enic->vdev);
1698 spin_unlock(&enic->devcmd_lock);
1699
1700 return err;
1701}
1702
1703static int enic_dev_enable(struct enic *enic)
1704{
1705 int err;
1706
1707 spin_lock(&enic->devcmd_lock);
1708 err = vnic_dev_enable(enic->vdev);
1709 spin_unlock(&enic->devcmd_lock);
1710
1711 return err;
1712}
1713
1714static int enic_dev_disable(struct enic *enic)
1715{
1716 int err;
1717
1718 spin_lock(&enic->devcmd_lock);
1719 err = vnic_dev_disable(enic->vdev);
1720 spin_unlock(&enic->devcmd_lock);
1721
1722 return err;
1723}
1724
1679static void enic_notify_timer_start(struct enic *enic) 1725static void enic_notify_timer_start(struct enic *enic)
1680{ 1726{
1681 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1727 switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1697,16 +1743,14 @@ static int enic_open(struct net_device *netdev)
1697 1743
1698 err = enic_request_intr(enic); 1744 err = enic_request_intr(enic);
1699 if (err) { 1745 if (err) {
1700 printk(KERN_ERR PFX "%s: Unable to request irq.\n", 1746 netdev_err(netdev, "Unable to request irq.\n");
1701 netdev->name);
1702 return err; 1747 return err;
1703 } 1748 }
1704 1749
1705 err = enic_notify_set(enic); 1750 err = enic_dev_notify_set(enic);
1706 if (err) { 1751 if (err) {
1707 printk(KERN_ERR PFX 1752 netdev_err(netdev,
1708 "%s: Failed to alloc notify buffer, aborting.\n", 1753 "Failed to alloc notify buffer, aborting.\n");
1709 netdev->name);
1710 goto err_out_free_intr; 1754 goto err_out_free_intr;
1711 } 1755 }
1712 1756
@@ -1714,9 +1758,7 @@ static int enic_open(struct net_device *netdev)
1714 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); 1758 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
1715 /* Need at least one buffer on ring to get going */ 1759 /* Need at least one buffer on ring to get going */
1716 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1760 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1717 printk(KERN_ERR PFX 1761 netdev_err(netdev, "Unable to alloc receive buffers\n");
1718 "%s: Unable to alloc receive buffers.\n",
1719 netdev->name);
1720 err = -ENOMEM; 1762 err = -ENOMEM;
1721 goto err_out_notify_unset; 1763 goto err_out_notify_unset;
1722 } 1764 }
@@ -1732,9 +1774,7 @@ static int enic_open(struct net_device *netdev)
1732 1774
1733 netif_wake_queue(netdev); 1775 netif_wake_queue(netdev);
1734 napi_enable(&enic->napi); 1776 napi_enable(&enic->napi);
1735 spin_lock(&enic->devcmd_lock); 1777 enic_dev_enable(enic);
1736 vnic_dev_enable(enic->vdev);
1737 spin_unlock(&enic->devcmd_lock);
1738 1778
1739 for (i = 0; i < enic->intr_count; i++) 1779 for (i = 0; i < enic->intr_count; i++)
1740 vnic_intr_unmask(&enic->intr[i]); 1780 vnic_intr_unmask(&enic->intr[i]);
@@ -1744,9 +1784,7 @@ static int enic_open(struct net_device *netdev)
1744 return 0; 1784 return 0;
1745 1785
1746err_out_notify_unset: 1786err_out_notify_unset:
1747 spin_lock(&enic->devcmd_lock); 1787 enic_dev_notify_unset(enic);
1748 vnic_dev_notify_unset(enic->vdev);
1749 spin_unlock(&enic->devcmd_lock);
1750err_out_free_intr: 1788err_out_free_intr:
1751 enic_free_intr(enic); 1789 enic_free_intr(enic);
1752 1790
@@ -1760,20 +1798,19 @@ static int enic_stop(struct net_device *netdev)
1760 unsigned int i; 1798 unsigned int i;
1761 int err; 1799 int err;
1762 1800
1763 for (i = 0; i < enic->intr_count; i++) 1801 for (i = 0; i < enic->intr_count; i++) {
1764 vnic_intr_mask(&enic->intr[i]); 1802 vnic_intr_mask(&enic->intr[i]);
1803 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1804 }
1765 1805
1766 enic_synchronize_irqs(enic); 1806 enic_synchronize_irqs(enic);
1767 1807
1768 del_timer_sync(&enic->notify_timer); 1808 del_timer_sync(&enic->notify_timer);
1769 1809
1770 spin_lock(&enic->devcmd_lock); 1810 enic_dev_disable(enic);
1771 vnic_dev_disable(enic->vdev);
1772 spin_unlock(&enic->devcmd_lock);
1773 napi_disable(&enic->napi); 1811 napi_disable(&enic->napi);
1774 netif_carrier_off(netdev); 1812 netif_carrier_off(netdev);
1775 netif_tx_disable(netdev); 1813 netif_tx_disable(netdev);
1776
1777 enic_dev_del_station_addr(enic); 1814 enic_dev_del_station_addr(enic);
1778 1815
1779 for (i = 0; i < enic->wq_count; i++) { 1816 for (i = 0; i < enic->wq_count; i++) {
@@ -1787,9 +1824,7 @@ static int enic_stop(struct net_device *netdev)
1787 return err; 1824 return err;
1788 } 1825 }
1789 1826
1790 spin_lock(&enic->devcmd_lock); 1827 enic_dev_notify_unset(enic);
1791 vnic_dev_notify_unset(enic->vdev);
1792 spin_unlock(&enic->devcmd_lock);
1793 enic_free_intr(enic); 1828 enic_free_intr(enic);
1794 1829
1795 for (i = 0; i < enic->wq_count; i++) 1830 for (i = 0; i < enic->wq_count; i++)
@@ -1818,10 +1853,9 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1818 netdev->mtu = new_mtu; 1853 netdev->mtu = new_mtu;
1819 1854
1820 if (netdev->mtu > enic->port_mtu) 1855 if (netdev->mtu > enic->port_mtu)
1821 printk(KERN_WARNING PFX 1856 netdev_warn(netdev,
1822 "%s: interface MTU (%d) set higher " 1857 "interface MTU (%d) set higher than port MTU (%d)\n",
1823 "than port MTU (%d)\n", 1858 netdev->mtu, enic->port_mtu);
1824 netdev->name, netdev->mtu, enic->port_mtu);
1825 1859
1826 if (running) 1860 if (running)
1827 enic_open(netdev); 1861 enic_open(netdev);
@@ -1894,21 +1928,21 @@ static int enic_dev_open(struct enic *enic)
1894 err = enic_dev_wait(enic->vdev, vnic_dev_open, 1928 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1895 vnic_dev_open_done, 0); 1929 vnic_dev_open_done, 0);
1896 if (err) 1930 if (err)
1897 printk(KERN_ERR PFX 1931 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1898 "vNIC device open failed, err %d.\n", err); 1932 err);
1899 1933
1900 return err; 1934 return err;
1901} 1935}
1902 1936
1903static int enic_dev_soft_reset(struct enic *enic) 1937static int enic_dev_hang_reset(struct enic *enic)
1904{ 1938{
1905 int err; 1939 int err;
1906 1940
1907 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, 1941 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1908 vnic_dev_soft_reset_done, 0); 1942 vnic_dev_hang_reset_done, 0);
1909 if (err) 1943 if (err)
1910 printk(KERN_ERR PFX 1944 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1911 "vNIC soft reset failed, err %d.\n", err); 1945 err);
1912 1946
1913 return err; 1947 return err;
1914} 1948}
@@ -1922,15 +1956,43 @@ static int enic_set_niccfg(struct enic *enic)
1922 const u8 rss_enable = 0; 1956 const u8 rss_enable = 0;
1923 const u8 tso_ipid_split_en = 0; 1957 const u8 tso_ipid_split_en = 0;
1924 const u8 ig_vlan_strip_en = 1; 1958 const u8 ig_vlan_strip_en = 1;
1959 int err;
1925 1960
1926 /* Enable VLAN tag stripping. RSS not enabled (yet). 1961 /* Enable VLAN tag stripping. RSS not enabled (yet).
1927 */ 1962 */
1928 1963
1929 return enic_set_nic_cfg(enic, 1964 spin_lock(&enic->devcmd_lock);
1965 err = enic_set_nic_cfg(enic,
1930 rss_default_cpu, rss_hash_type, 1966 rss_default_cpu, rss_hash_type,
1931 rss_hash_bits, rss_base_cpu, 1967 rss_hash_bits, rss_base_cpu,
1932 rss_enable, tso_ipid_split_en, 1968 rss_enable, tso_ipid_split_en,
1933 ig_vlan_strip_en); 1969 ig_vlan_strip_en);
1970 spin_unlock(&enic->devcmd_lock);
1971
1972 return err;
1973}
1974
1975static int enic_dev_hang_notify(struct enic *enic)
1976{
1977 int err;
1978
1979 spin_lock(&enic->devcmd_lock);
1980 err = vnic_dev_hang_notify(enic->vdev);
1981 spin_unlock(&enic->devcmd_lock);
1982
1983 return err;
1984}
1985
1986int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
1987{
1988 int err;
1989
1990 spin_lock(&enic->devcmd_lock);
1991 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
1992 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
1993 spin_unlock(&enic->devcmd_lock);
1994
1995 return err;
1934} 1996}
1935 1997
1936static void enic_reset(struct work_struct *work) 1998static void enic_reset(struct work_struct *work)
@@ -1942,16 +2004,13 @@ static void enic_reset(struct work_struct *work)
1942 2004
1943 rtnl_lock(); 2005 rtnl_lock();
1944 2006
1945 spin_lock(&enic->devcmd_lock); 2007 enic_dev_hang_notify(enic);
1946 vnic_dev_hang_notify(enic->vdev);
1947 spin_unlock(&enic->devcmd_lock);
1948
1949 enic_stop(enic->netdev); 2008 enic_stop(enic->netdev);
1950 enic_dev_soft_reset(enic); 2009 enic_dev_hang_reset(enic);
1951 vnic_dev_init(enic->vdev, 0); 2010 enic_reset_multicast_list(enic);
1952 enic_reset_mcaddrs(enic);
1953 enic_init_vnic_resources(enic); 2011 enic_init_vnic_resources(enic);
1954 enic_set_niccfg(enic); 2012 enic_set_niccfg(enic);
2013 enic_dev_set_ig_vlan_rewrite_mode(enic);
1955 enic_open(enic->netdev); 2014 enic_open(enic->netdev);
1956 2015
1957 rtnl_unlock(); 2016 rtnl_unlock();
@@ -2087,8 +2146,8 @@ static const struct net_device_ops enic_netdev_ops = {
2087 .ndo_start_xmit = enic_hard_start_xmit, 2146 .ndo_start_xmit = enic_hard_start_xmit,
2088 .ndo_get_stats = enic_get_stats, 2147 .ndo_get_stats = enic_get_stats,
2089 .ndo_validate_addr = eth_validate_addr, 2148 .ndo_validate_addr = eth_validate_addr,
2090 .ndo_set_multicast_list = enic_set_multicast_list,
2091 .ndo_set_mac_address = enic_set_mac_address, 2149 .ndo_set_mac_address = enic_set_mac_address,
2150 .ndo_set_multicast_list = enic_set_multicast_list,
2092 .ndo_change_mtu = enic_change_mtu, 2151 .ndo_change_mtu = enic_change_mtu,
2093 .ndo_vlan_rx_register = enic_vlan_rx_register, 2152 .ndo_vlan_rx_register = enic_vlan_rx_register,
2094 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 2153 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2106,8 +2165,20 @@ void enic_dev_deinit(struct enic *enic)
2106 enic_clear_intr_mode(enic); 2165 enic_clear_intr_mode(enic);
2107} 2166}
2108 2167
2168static int enic_dev_stats_clear(struct enic *enic)
2169{
2170 int err;
2171
2172 spin_lock(&enic->devcmd_lock);
2173 err = vnic_dev_stats_clear(enic->vdev);
2174 spin_unlock(&enic->devcmd_lock);
2175
2176 return err;
2177}
2178
2109int enic_dev_init(struct enic *enic) 2179int enic_dev_init(struct enic *enic)
2110{ 2180{
2181 struct device *dev = enic_get_dev(enic);
2111 struct net_device *netdev = enic->netdev; 2182 struct net_device *netdev = enic->netdev;
2112 int err; 2183 int err;
2113 2184
@@ -2116,8 +2187,7 @@ int enic_dev_init(struct enic *enic)
2116 2187
2117 err = enic_get_vnic_config(enic); 2188 err = enic_get_vnic_config(enic);
2118 if (err) { 2189 if (err) {
2119 printk(KERN_ERR PFX 2190 dev_err(dev, "Get vNIC configuration failed, aborting\n");
2120 "Get vNIC configuration failed, aborting.\n");
2121 return err; 2191 return err;
2122 } 2192 }
2123 2193
@@ -2132,9 +2202,8 @@ int enic_dev_init(struct enic *enic)
2132 2202
2133 err = enic_set_intr_mode(enic); 2203 err = enic_set_intr_mode(enic);
2134 if (err) { 2204 if (err) {
2135 printk(KERN_ERR PFX 2205 dev_err(dev, "Failed to set intr mode based on resource "
2136 "Failed to set intr mode based on resource " 2206 "counts and system capabilities, aborting\n");
2137 "counts and system capabilities, aborting.\n");
2138 return err; 2207 return err;
2139 } 2208 }
2140 2209
@@ -2143,24 +2212,32 @@ int enic_dev_init(struct enic *enic)
2143 2212
2144 err = enic_alloc_vnic_resources(enic); 2213 err = enic_alloc_vnic_resources(enic);
2145 if (err) { 2214 if (err) {
2146 printk(KERN_ERR PFX 2215 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
2147 "Failed to alloc vNIC resources, aborting.\n");
2148 goto err_out_free_vnic_resources; 2216 goto err_out_free_vnic_resources;
2149 } 2217 }
2150 2218
2151 enic_init_vnic_resources(enic); 2219 enic_init_vnic_resources(enic);
2152 2220
2221 /* Clear LIF stats
2222 */
2223 enic_dev_stats_clear(enic);
2224
2153 err = enic_set_rq_alloc_buf(enic); 2225 err = enic_set_rq_alloc_buf(enic);
2154 if (err) { 2226 if (err) {
2155 printk(KERN_ERR PFX 2227 dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
2156 "Failed to set RQ buffer allocator, aborting.\n");
2157 goto err_out_free_vnic_resources; 2228 goto err_out_free_vnic_resources;
2158 } 2229 }
2159 2230
2160 err = enic_set_niccfg(enic); 2231 err = enic_set_niccfg(enic);
2161 if (err) { 2232 if (err) {
2162 printk(KERN_ERR PFX 2233 dev_err(dev, "Failed to config nic, aborting\n");
2163 "Failed to config nic, aborting.\n"); 2234 goto err_out_free_vnic_resources;
2235 }
2236
2237 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2238 if (err) {
2239 netdev_err(netdev,
2240 "Failed to set ingress vlan rewrite mode, aborting.\n");
2164 goto err_out_free_vnic_resources; 2241 goto err_out_free_vnic_resources;
2165 } 2242 }
2166 2243
@@ -2194,6 +2271,7 @@ static void enic_iounmap(struct enic *enic)
2194static int __devinit enic_probe(struct pci_dev *pdev, 2271static int __devinit enic_probe(struct pci_dev *pdev,
2195 const struct pci_device_id *ent) 2272 const struct pci_device_id *ent)
2196{ 2273{
2274 struct device *dev = &pdev->dev;
2197 struct net_device *netdev; 2275 struct net_device *netdev;
2198 struct enic *enic; 2276 struct enic *enic;
2199 int using_dac = 0; 2277 int using_dac = 0;
@@ -2206,7 +2284,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2206 2284
2207 netdev = alloc_etherdev(sizeof(struct enic)); 2285 netdev = alloc_etherdev(sizeof(struct enic));
2208 if (!netdev) { 2286 if (!netdev) {
2209 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 2287 pr_err("Etherdev alloc failed, aborting\n");
2210 return -ENOMEM; 2288 return -ENOMEM;
2211 } 2289 }
2212 2290
@@ -2221,17 +2299,15 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2221 /* Setup PCI resources 2299 /* Setup PCI resources
2222 */ 2300 */
2223 2301
2224 err = pci_enable_device(pdev); 2302 err = pci_enable_device_mem(pdev);
2225 if (err) { 2303 if (err) {
2226 printk(KERN_ERR PFX 2304 dev_err(dev, "Cannot enable PCI device, aborting\n");
2227 "Cannot enable PCI device, aborting.\n");
2228 goto err_out_free_netdev; 2305 goto err_out_free_netdev;
2229 } 2306 }
2230 2307
2231 err = pci_request_regions(pdev, DRV_NAME); 2308 err = pci_request_regions(pdev, DRV_NAME);
2232 if (err) { 2309 if (err) {
2233 printk(KERN_ERR PFX 2310 dev_err(dev, "Cannot request PCI regions, aborting\n");
2234 "Cannot request PCI regions, aborting.\n");
2235 goto err_out_disable_device; 2311 goto err_out_disable_device;
2236 } 2312 }
2237 2313
@@ -2246,23 +2322,20 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2246 if (err) { 2322 if (err) {
2247 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2323 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2248 if (err) { 2324 if (err) {
2249 printk(KERN_ERR PFX 2325 dev_err(dev, "No usable DMA configuration, aborting\n");
2250 "No usable DMA configuration, aborting.\n");
2251 goto err_out_release_regions; 2326 goto err_out_release_regions;
2252 } 2327 }
2253 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2328 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2254 if (err) { 2329 if (err) {
2255 printk(KERN_ERR PFX 2330 dev_err(dev, "Unable to obtain %u-bit DMA "
2256 "Unable to obtain 32-bit DMA " 2331 "for consistent allocations, aborting\n", 32);
2257 "for consistent allocations, aborting.\n");
2258 goto err_out_release_regions; 2332 goto err_out_release_regions;
2259 } 2333 }
2260 } else { 2334 } else {
2261 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); 2335 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2262 if (err) { 2336 if (err) {
2263 printk(KERN_ERR PFX 2337 dev_err(dev, "Unable to obtain %u-bit DMA "
2264 "Unable to obtain 40-bit DMA " 2338 "for consistent allocations, aborting\n", 40);
2265 "for consistent allocations, aborting.\n");
2266 goto err_out_release_regions; 2339 goto err_out_release_regions;
2267 } 2340 }
2268 using_dac = 1; 2341 using_dac = 1;
@@ -2277,8 +2350,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2277 enic->bar[i].len = pci_resource_len(pdev, i); 2350 enic->bar[i].len = pci_resource_len(pdev, i);
2278 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); 2351 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2279 if (!enic->bar[i].vaddr) { 2352 if (!enic->bar[i].vaddr) {
2280 printk(KERN_ERR PFX 2353 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
2281 "Cannot memory-map BAR %d, aborting.\n", i);
2282 err = -ENODEV; 2354 err = -ENODEV;
2283 goto err_out_iounmap; 2355 goto err_out_iounmap;
2284 } 2356 }
@@ -2291,8 +2363,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2291 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, 2363 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2292 ARRAY_SIZE(enic->bar)); 2364 ARRAY_SIZE(enic->bar));
2293 if (!enic->vdev) { 2365 if (!enic->vdev) {
2294 printk(KERN_ERR PFX 2366 dev_err(dev, "vNIC registration failed, aborting\n");
2295 "vNIC registration failed, aborting.\n");
2296 err = -ENODEV; 2367 err = -ENODEV;
2297 goto err_out_iounmap; 2368 goto err_out_iounmap;
2298 } 2369 }
@@ -2302,8 +2373,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2302 2373
2303 err = enic_dev_open(enic); 2374 err = enic_dev_open(enic);
2304 if (err) { 2375 if (err) {
2305 printk(KERN_ERR PFX 2376 dev_err(dev, "vNIC dev open failed, aborting\n");
2306 "vNIC dev open failed, aborting.\n");
2307 goto err_out_vnic_unregister; 2377 goto err_out_vnic_unregister;
2308 } 2378 }
2309 2379
@@ -2317,23 +2387,31 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2317 2387
2318 netif_carrier_off(netdev); 2388 netif_carrier_off(netdev);
2319 2389
2390 /* Do not call dev_init for a dynamic vnic.
2391 * For a dynamic vnic, init_prov_info will be
2392 * called later by an upper layer.
2393 */
2394
2320 if (!enic_is_dynamic(enic)) { 2395 if (!enic_is_dynamic(enic)) {
2321 err = vnic_dev_init(enic->vdev, 0); 2396 err = vnic_dev_init(enic->vdev, 0);
2322 if (err) { 2397 if (err) {
2323 printk(KERN_ERR PFX 2398 dev_err(dev, "vNIC dev init failed, aborting\n");
2324 "vNIC dev init failed, aborting.\n");
2325 goto err_out_dev_close; 2399 goto err_out_dev_close;
2326 } 2400 }
2327 } 2401 }
2328 2402
2403 /* Setup devcmd lock
2404 */
2405
2406 spin_lock_init(&enic->devcmd_lock);
2407
2329 err = enic_dev_init(enic); 2408 err = enic_dev_init(enic);
2330 if (err) { 2409 if (err) {
2331 printk(KERN_ERR PFX 2410 dev_err(dev, "Device initialization failed, aborting\n");
2332 "Device initialization failed, aborting.\n");
2333 goto err_out_dev_close; 2411 goto err_out_dev_close;
2334 } 2412 }
2335 2413
2336 /* Setup notification timer, HW reset task, and locks 2414 /* Setup notification timer, HW reset task, and wq locks
2337 */ 2415 */
2338 2416
2339 init_timer(&enic->notify_timer); 2417 init_timer(&enic->notify_timer);
@@ -2345,8 +2423,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2345 for (i = 0; i < enic->wq_count; i++) 2423 for (i = 0; i < enic->wq_count; i++)
2346 spin_lock_init(&enic->wq_lock[i]); 2424 spin_lock_init(&enic->wq_lock[i]);
2347 2425
2348 spin_lock_init(&enic->devcmd_lock);
2349
2350 /* Register net device 2426 /* Register net device
2351 */ 2427 */
2352 2428
@@ -2355,8 +2431,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2355 2431
2356 err = enic_set_mac_addr(netdev, enic->mac_addr); 2432 err = enic_set_mac_addr(netdev, enic->mac_addr);
2357 if (err) { 2433 if (err) {
2358 printk(KERN_ERR PFX 2434 dev_err(dev, "Invalid MAC address, aborting\n");
2359 "Invalid MAC address, aborting.\n");
2360 goto err_out_dev_deinit; 2435 goto err_out_dev_deinit;
2361 } 2436 }
2362 2437
@@ -2372,31 +2447,27 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2372 netdev->ethtool_ops = &enic_ethtool_ops; 2447 netdev->ethtool_ops = &enic_ethtool_ops;
2373 2448
2374 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2449 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2450 if (ENIC_SETTING(enic, LOOP)) {
2451 netdev->features &= ~NETIF_F_HW_VLAN_TX;
2452 enic->loop_enable = 1;
2453 enic->loop_tag = enic->config.loop_tag;
2454 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2455 }
2375 if (ENIC_SETTING(enic, TXCSUM)) 2456 if (ENIC_SETTING(enic, TXCSUM))
2376 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2457 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2377 if (ENIC_SETTING(enic, TSO)) 2458 if (ENIC_SETTING(enic, TSO))
2378 netdev->features |= NETIF_F_TSO | 2459 netdev->features |= NETIF_F_TSO |
2379 NETIF_F_TSO6 | NETIF_F_TSO_ECN; 2460 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2380 if (ENIC_SETTING(enic, LRO)) 2461 if (ENIC_SETTING(enic, LRO))
2381 netdev->features |= NETIF_F_LRO; 2462 netdev->features |= NETIF_F_GRO;
2382 if (using_dac) 2463 if (using_dac)
2383 netdev->features |= NETIF_F_HIGHDMA; 2464 netdev->features |= NETIF_F_HIGHDMA;
2384 2465
2385 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); 2466 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
2386 2467
2387 enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
2388 enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
2389 enic->lro_mgr.lro_arr = enic->lro_desc;
2390 enic->lro_mgr.get_skb_header = enic_get_skb_header;
2391 enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
2392 enic->lro_mgr.dev = netdev;
2393 enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
2394 enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
2395
2396 err = register_netdev(netdev); 2468 err = register_netdev(netdev);
2397 if (err) { 2469 if (err) {
2398 printk(KERN_ERR PFX 2470 dev_err(dev, "Cannot register net device, aborting\n");
2399 "Cannot register net device, aborting.\n");
2400 goto err_out_dev_deinit; 2471 goto err_out_dev_deinit;
2401 } 2472 }
2402 2473
@@ -2450,7 +2521,7 @@ static struct pci_driver enic_driver = {
2450 2521
2451static int __init enic_init_module(void) 2522static int __init enic_init_module(void)
2452{ 2523{
2453 printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); 2524 pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
2454 2525
2455 return pci_register_driver(&enic_driver); 2526 return pci_register_driver(&enic_driver);
2456} 2527}
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 9b18840cba96..29ede8a17a2c 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -46,7 +46,8 @@ int enic_get_vnic_config(struct enic *enic)
46 46
47 err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr); 47 err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
48 if (err) { 48 if (err) {
49 printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err); 49 dev_err(enic_get_dev(enic),
50 "Error getting MAC addr, %d\n", err);
50 return err; 51 return err;
51 } 52 }
52 53
@@ -56,7 +57,7 @@ int enic_get_vnic_config(struct enic *enic)
56 offsetof(struct vnic_enet_config, m), \ 57 offsetof(struct vnic_enet_config, m), \
57 sizeof(c->m), &c->m); \ 58 sizeof(c->m), &c->m); \
58 if (err) { \ 59 if (err) { \
59 printk(KERN_ERR PFX \ 60 dev_err(enic_get_dev(enic), \
60 "Error getting %s, %d\n", #m, err); \ 61 "Error getting %s, %d\n", #m, err); \
61 return err; \ 62 return err; \
62 } \ 63 } \
@@ -69,6 +70,7 @@ int enic_get_vnic_config(struct enic *enic)
69 GET_CONFIG(intr_timer_type); 70 GET_CONFIG(intr_timer_type);
70 GET_CONFIG(intr_mode); 71 GET_CONFIG(intr_mode);
71 GET_CONFIG(intr_timer_usec); 72 GET_CONFIG(intr_timer_usec);
73 GET_CONFIG(loop_tag);
72 74
73 c->wq_desc_count = 75 c->wq_desc_count =
74 min_t(u32, ENIC_MAX_WQ_DESCS, 76 min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -92,10 +94,10 @@ int enic_get_vnic_config(struct enic *enic)
92 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX), 94 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
93 c->intr_timer_usec); 95 c->intr_timer_usec);
94 96
95 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n", 97 dev_info(enic_get_dev(enic), "vNIC MAC addr %pM wq/rq %d/%d\n",
96 enic->mac_addr, c->wq_desc_count, c->rq_desc_count); 98 enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
97 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d " 99 dev_info(enic_get_dev(enic), "vNIC mtu %d csum tx/rx %d/%d "
98 "intr timer %d usec\n", 100 "tso/lro %d/%d intr timer %d usec\n",
99 c->mtu, ENIC_SETTING(enic, TXCSUM), 101 c->mtu, ENIC_SETTING(enic, TXCSUM),
100 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO), 102 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
101 ENIC_SETTING(enic, LRO), c->intr_timer_usec); 103 ENIC_SETTING(enic, LRO), c->intr_timer_usec);
@@ -103,17 +105,7 @@ int enic_get_vnic_config(struct enic *enic)
103 return 0; 105 return 0;
104} 106}
105 107
106void enic_add_multicast_addr(struct enic *enic, u8 *addr) 108int enic_add_vlan(struct enic *enic, u16 vlanid)
107{
108 vnic_dev_add_addr(enic->vdev, addr);
109}
110
111void enic_del_multicast_addr(struct enic *enic, u8 *addr)
112{
113 vnic_dev_del_addr(enic->vdev, addr);
114}
115
116void enic_add_vlan(struct enic *enic, u16 vlanid)
117{ 109{
118 u64 a0 = vlanid, a1 = 0; 110 u64 a0 = vlanid, a1 = 0;
119 int wait = 1000; 111 int wait = 1000;
@@ -121,10 +113,12 @@ void enic_add_vlan(struct enic *enic, u16 vlanid)
121 113
122 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); 114 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
123 if (err) 115 if (err)
124 printk(KERN_ERR PFX "Can't add vlan id, %d\n", err); 116 dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);
117
118 return err;
125} 119}
126 120
127void enic_del_vlan(struct enic *enic, u16 vlanid) 121int enic_del_vlan(struct enic *enic, u16 vlanid)
128{ 122{
129 u64 a0 = vlanid, a1 = 0; 123 u64 a0 = vlanid, a1 = 0;
130 int wait = 1000; 124 int wait = 1000;
@@ -132,7 +126,9 @@ void enic_del_vlan(struct enic *enic, u16 vlanid)
132 126
133 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); 127 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
134 if (err) 128 if (err)
135 printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err); 129 dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);
130
131 return err;
136} 132}
137 133
138int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, 134int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
@@ -198,8 +194,8 @@ void enic_get_res_counts(struct enic *enic)
198 vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL), 194 vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL),
199 ENIC_INTR_MAX); 195 ENIC_INTR_MAX);
200 196
201 printk(KERN_INFO PFX "vNIC resources avail: " 197 dev_info(enic_get_dev(enic),
202 "wq %d rq %d cq %d intr %d\n", 198 "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
203 enic->wq_count, enic->rq_count, 199 enic->wq_count, enic->rq_count,
204 enic->cq_count, enic->intr_count); 200 enic->cq_count, enic->intr_count);
205} 201}
@@ -304,11 +300,6 @@ void enic_init_vnic_resources(struct enic *enic)
304 enic->config.intr_timer_type, 300 enic->config.intr_timer_type,
305 mask_on_assertion); 301 mask_on_assertion);
306 } 302 }
307
308 /* Clear LIF stats
309 */
310
311 vnic_dev_stats_clear(enic->vdev);
312} 303}
313 304
314int enic_alloc_vnic_resources(struct enic *enic) 305int enic_alloc_vnic_resources(struct enic *enic)
@@ -319,15 +310,14 @@ int enic_alloc_vnic_resources(struct enic *enic)
319 310
320 intr_mode = vnic_dev_get_intr_mode(enic->vdev); 311 intr_mode = vnic_dev_get_intr_mode(enic->vdev);
321 312
322 printk(KERN_INFO PFX "vNIC resources used: " 313 dev_info(enic_get_dev(enic), "vNIC resources used: "
323 "wq %d rq %d cq %d intr %d intr mode %s\n", 314 "wq %d rq %d cq %d intr %d intr mode %s\n",
324 enic->wq_count, enic->rq_count, 315 enic->wq_count, enic->rq_count,
325 enic->cq_count, enic->intr_count, 316 enic->cq_count, enic->intr_count,
326 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : 317 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
327 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : 318 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
328 intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" : 319 intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
329 "unknown" 320 "unknown");
330 );
331 321
332 /* Allocate queue resources 322 /* Allocate queue resources
333 */ 323 */
@@ -373,7 +363,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
373 enic->legacy_pba = vnic_dev_get_res(enic->vdev, 363 enic->legacy_pba = vnic_dev_get_res(enic->vdev,
374 RES_TYPE_INTR_PBA_LEGACY, 0); 364 RES_TYPE_INTR_PBA_LEGACY, 0);
375 if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { 365 if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
376 printk(KERN_ERR PFX "Failed to hook legacy pba resource\n"); 366 dev_err(enic_get_dev(enic),
367 "Failed to hook legacy pba resource\n");
377 err = -ENODEV; 368 err = -ENODEV;
378 goto err_out_cleanup; 369 goto err_out_cleanup;
379 } 370 }
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 494664f7fccc..83bd172c356c 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -43,7 +43,7 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
43 void *os_buf, dma_addr_t dma_addr, unsigned int len, 43 void *os_buf, dma_addr_t dma_addr, unsigned int len,
44 unsigned int mss_or_csum_offset, unsigned int hdr_len, 44 unsigned int mss_or_csum_offset, unsigned int hdr_len,
45 int vlan_tag_insert, unsigned int vlan_tag, 45 int vlan_tag_insert, unsigned int vlan_tag,
46 int offload_mode, int cq_entry, int sop, int eop) 46 int offload_mode, int cq_entry, int sop, int eop, int loopback)
47{ 47{
48 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); 48 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
49 49
@@ -56,61 +56,62 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
56 0, /* fcoe_encap */ 56 0, /* fcoe_encap */
57 (u8)vlan_tag_insert, 57 (u8)vlan_tag_insert,
58 (u16)vlan_tag, 58 (u16)vlan_tag,
59 0 /* loopback */); 59 (u8)loopback);
60 60
61 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); 61 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
62} 62}
63 63
64static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, 64static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
65 void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop) 65 void *os_buf, dma_addr_t dma_addr, unsigned int len,
66 int eop, int loopback)
66{ 67{
67 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 68 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
68 0, 0, 0, 0, 0, 69 0, 0, 0, 0, 0,
69 eop, 0 /* !SOP */, eop); 70 eop, 0 /* !SOP */, eop, loopback);
70} 71}
71 72
72static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, 73static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
73 dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, 74 dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
74 unsigned int vlan_tag, int eop) 75 unsigned int vlan_tag, int eop, int loopback)
75{ 76{
76 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 77 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
77 0, 0, vlan_tag_insert, vlan_tag, 78 0, 0, vlan_tag_insert, vlan_tag,
78 WQ_ENET_OFFLOAD_MODE_CSUM, 79 WQ_ENET_OFFLOAD_MODE_CSUM,
79 eop, 1 /* SOP */, eop); 80 eop, 1 /* SOP */, eop, loopback);
80} 81}
81 82
82static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, 83static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
83 void *os_buf, dma_addr_t dma_addr, unsigned int len, 84 void *os_buf, dma_addr_t dma_addr, unsigned int len,
84 int ip_csum, int tcpudp_csum, int vlan_tag_insert, 85 int ip_csum, int tcpudp_csum, int vlan_tag_insert,
85 unsigned int vlan_tag, int eop) 86 unsigned int vlan_tag, int eop, int loopback)
86{ 87{
87 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 88 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
88 (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0), 89 (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
89 0, vlan_tag_insert, vlan_tag, 90 0, vlan_tag_insert, vlan_tag,
90 WQ_ENET_OFFLOAD_MODE_CSUM, 91 WQ_ENET_OFFLOAD_MODE_CSUM,
91 eop, 1 /* SOP */, eop); 92 eop, 1 /* SOP */, eop, loopback);
92} 93}
93 94
94static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, 95static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
95 void *os_buf, dma_addr_t dma_addr, unsigned int len, 96 void *os_buf, dma_addr_t dma_addr, unsigned int len,
96 unsigned int csum_offset, unsigned int hdr_len, 97 unsigned int csum_offset, unsigned int hdr_len,
97 int vlan_tag_insert, unsigned int vlan_tag, int eop) 98 int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
98{ 99{
99 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 100 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
100 csum_offset, hdr_len, vlan_tag_insert, vlan_tag, 101 csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
101 WQ_ENET_OFFLOAD_MODE_CSUM_L4, 102 WQ_ENET_OFFLOAD_MODE_CSUM_L4,
102 eop, 1 /* SOP */, eop); 103 eop, 1 /* SOP */, eop, loopback);
103} 104}
104 105
105static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, 106static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
106 void *os_buf, dma_addr_t dma_addr, unsigned int len, 107 void *os_buf, dma_addr_t dma_addr, unsigned int len,
107 unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, 108 unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
108 unsigned int vlan_tag, int eop) 109 unsigned int vlan_tag, int eop, int loopback)
109{ 110{
110 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 111 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
111 mss, hdr_len, vlan_tag_insert, vlan_tag, 112 mss, hdr_len, vlan_tag_insert, vlan_tag,
112 WQ_ENET_OFFLOAD_MODE_TSO, 113 WQ_ENET_OFFLOAD_MODE_TSO,
113 eop, 1 /* SOP */, eop); 114 eop, 1 /* SOP */, eop, loopback);
114} 115}
115 116
116static inline void enic_queue_rq_desc(struct vnic_rq *rq, 117static inline void enic_queue_rq_desc(struct vnic_rq *rq,
@@ -131,10 +132,8 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
131struct enic; 132struct enic;
132 133
133int enic_get_vnic_config(struct enic *); 134int enic_get_vnic_config(struct enic *);
134void enic_add_multicast_addr(struct enic *enic, u8 *addr); 135int enic_add_vlan(struct enic *enic, u16 vlanid);
135void enic_del_multicast_addr(struct enic *enic, u8 *addr); 136int enic_del_vlan(struct enic *enic, u16 vlanid);
136void enic_add_vlan(struct enic *enic, u16 vlanid);
137void enic_del_vlan(struct enic *enic, u16 vlanid);
138int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, 137int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
139 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, 138 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
140 u8 ig_vlan_strip_en); 139 u8 ig_vlan_strip_en);
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
index a06e649010ce..e6dd30988d6f 100644
--- a/drivers/net/enic/rq_enet_desc.h
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
index 020ae6c3f3d9..b86d6ef8dad3 100644
--- a/drivers/net/enic/vnic_cq.c
+++ b/drivers/net/enic/vnic_cq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -42,7 +42,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
42 42
43 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); 43 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
44 if (!cq->ctrl) { 44 if (!cq->ctrl) {
45 printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index); 45 pr_err("Failed to hook CQ[%d] resource\n", index);
46 return -EINVAL; 46 return -EINVAL;
47 } 47 }
48 48
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
index 114763cbc2f8..552d3daf2508 100644
--- a/drivers/net/enic/vnic_cq.h
+++ b/drivers/net/enic/vnic_cq.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index e0d33281ec98..6a5b578a69e1 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -23,21 +23,23 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/if_ether.h> 25#include <linux/if_ether.h>
26#include <linux/slab.h>
27 26
28#include "vnic_resource.h" 27#include "vnic_resource.h"
29#include "vnic_devcmd.h" 28#include "vnic_devcmd.h"
30#include "vnic_dev.h" 29#include "vnic_dev.h"
31#include "vnic_stats.h" 30#include "vnic_stats.h"
32 31
32enum vnic_proxy_type {
33 PROXY_NONE,
34 PROXY_BY_BDF,
35};
36
33struct vnic_res { 37struct vnic_res {
34 void __iomem *vaddr; 38 void __iomem *vaddr;
35 dma_addr_t bus_addr; 39 dma_addr_t bus_addr;
36 unsigned int count; 40 unsigned int count;
37}; 41};
38 42
39#define VNIC_DEV_CAP_INIT 0x0001
40
41struct vnic_dev { 43struct vnic_dev {
42 void *priv; 44 void *priv;
43 struct pci_dev *pdev; 45 struct pci_dev *pdev;
@@ -48,13 +50,14 @@ struct vnic_dev {
48 struct vnic_devcmd_notify notify_copy; 50 struct vnic_devcmd_notify notify_copy;
49 dma_addr_t notify_pa; 51 dma_addr_t notify_pa;
50 u32 notify_sz; 52 u32 notify_sz;
51 u32 *linkstatus;
52 dma_addr_t linkstatus_pa; 53 dma_addr_t linkstatus_pa;
53 struct vnic_stats *stats; 54 struct vnic_stats *stats;
54 dma_addr_t stats_pa; 55 dma_addr_t stats_pa;
55 struct vnic_devcmd_fw_info *fw_info; 56 struct vnic_devcmd_fw_info *fw_info;
56 dma_addr_t fw_info_pa; 57 dma_addr_t fw_info_pa;
57 u32 cap_flags; 58 enum vnic_proxy_type proxy;
59 u32 proxy_index;
60 u64 args[VNIC_DEVCMD_NARGS];
58}; 61};
59 62
60#define VNIC_MAX_RES_HDR_SIZE \ 63#define VNIC_MAX_RES_HDR_SIZE \
@@ -78,19 +81,19 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
78 return -EINVAL; 81 return -EINVAL;
79 82
80 if (bar->len < VNIC_MAX_RES_HDR_SIZE) { 83 if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
81 printk(KERN_ERR "vNIC BAR0 res hdr length error\n"); 84 pr_err("vNIC BAR0 res hdr length error\n");
82 return -EINVAL; 85 return -EINVAL;
83 } 86 }
84 87
85 rh = bar->vaddr; 88 rh = bar->vaddr;
86 if (!rh) { 89 if (!rh) {
87 printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n"); 90 pr_err("vNIC BAR0 res hdr not mem-mapped\n");
88 return -EINVAL; 91 return -EINVAL;
89 } 92 }
90 93
91 if (ioread32(&rh->magic) != VNIC_RES_MAGIC || 94 if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
92 ioread32(&rh->version) != VNIC_RES_VERSION) { 95 ioread32(&rh->version) != VNIC_RES_VERSION) {
93 printk(KERN_ERR "vNIC BAR0 res magic/version error " 96 pr_err("vNIC BAR0 res magic/version error "
94 "exp (%lx/%lx) curr (%x/%x)\n", 97 "exp (%lx/%lx) curr (%x/%x)\n",
95 VNIC_RES_MAGIC, VNIC_RES_VERSION, 98 VNIC_RES_MAGIC, VNIC_RES_VERSION,
96 ioread32(&rh->magic), ioread32(&rh->version)); 99 ioread32(&rh->magic), ioread32(&rh->version));
@@ -122,7 +125,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
122 /* each count is stride bytes long */ 125 /* each count is stride bytes long */
123 len = count * VNIC_RES_STRIDE; 126 len = count * VNIC_RES_STRIDE;
124 if (len + bar_offset > bar[bar_num].len) { 127 if (len + bar_offset > bar[bar_num].len) {
125 printk(KERN_ERR "vNIC BAR0 resource %d " 128 pr_err("vNIC BAR0 resource %d "
126 "out-of-bounds, offset 0x%x + " 129 "out-of-bounds, offset 0x%x + "
127 "size 0x%x > bar len 0x%lx\n", 130 "size 0x%x > bar len 0x%lx\n",
128 type, bar_offset, 131 type, bar_offset,
@@ -229,8 +232,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
229 &ring->base_addr_unaligned); 232 &ring->base_addr_unaligned);
230 233
231 if (!ring->descs_unaligned) { 234 if (!ring->descs_unaligned) {
232 printk(KERN_ERR 235 pr_err("Failed to allocate ring (size=%d), aborting\n",
233 "Failed to allocate ring (size=%d), aborting\n",
234 (int)ring->size); 236 (int)ring->size);
235 return -ENOMEM; 237 return -ENOMEM;
236 } 238 }
@@ -258,23 +260,28 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
258 } 260 }
259} 261}
260 262
261int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, 263static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
262 u64 *a0, u64 *a1, int wait) 264 int wait)
263{ 265{
264 struct vnic_devcmd __iomem *devcmd = vdev->devcmd; 266 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
267 unsigned int i;
265 int delay; 268 int delay;
266 u32 status; 269 u32 status;
267 int err; 270 int err;
268 271
269 status = ioread32(&devcmd->status); 272 status = ioread32(&devcmd->status);
273 if (status == 0xFFFFFFFF) {
274 /* PCI-e target device is gone */
275 return -ENODEV;
276 }
270 if (status & STAT_BUSY) { 277 if (status & STAT_BUSY) {
271 printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); 278 pr_err("Busy devcmd %d\n", _CMD_N(cmd));
272 return -EBUSY; 279 return -EBUSY;
273 } 280 }
274 281
275 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { 282 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
276 writeq(*a0, &devcmd->args[0]); 283 for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
277 writeq(*a1, &devcmd->args[1]); 284 writeq(vdev->args[i], &devcmd->args[i]);
278 wmb(); 285 wmb();
279 } 286 }
280 287
@@ -288,31 +295,110 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
288 udelay(100); 295 udelay(100);
289 296
290 status = ioread32(&devcmd->status); 297 status = ioread32(&devcmd->status);
298 if (status == 0xFFFFFFFF) {
299 /* PCI-e target device is gone */
300 return -ENODEV;
301 }
302
291 if (!(status & STAT_BUSY)) { 303 if (!(status & STAT_BUSY)) {
292 304
293 if (status & STAT_ERROR) { 305 if (status & STAT_ERROR) {
294 err = (int)readq(&devcmd->args[0]); 306 err = (int)readq(&devcmd->args[0]);
295 if (err != ERR_ECMDUNKNOWN || 307 if (err != ERR_ECMDUNKNOWN ||
296 cmd != CMD_CAPABILITY) 308 cmd != CMD_CAPABILITY)
297 printk(KERN_ERR "Error %d devcmd %d\n", 309 pr_err("Error %d devcmd %d\n",
298 err, _CMD_N(cmd)); 310 err, _CMD_N(cmd));
299 return err; 311 return err;
300 } 312 }
301 313
302 if (_CMD_DIR(cmd) & _CMD_DIR_READ) { 314 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
303 rmb(); 315 rmb();
304 *a0 = readq(&devcmd->args[0]); 316 for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
305 *a1 = readq(&devcmd->args[1]); 317 vdev->args[i] = readq(&devcmd->args[i]);
306 } 318 }
307 319
308 return 0; 320 return 0;
309 } 321 }
310 } 322 }
311 323
312 printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd)); 324 pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
313 return -ETIMEDOUT; 325 return -ETIMEDOUT;
314} 326}
315 327
328static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
329 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
330{
331 u32 status;
332 int err;
333
334 memset(vdev->args, 0, sizeof(vdev->args));
335
336 vdev->args[0] = vdev->proxy_index; /* bdf */
337 vdev->args[1] = cmd;
338 vdev->args[2] = *a0;
339 vdev->args[3] = *a1;
340
341 err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
342 if (err)
343 return err;
344
345 status = (u32)vdev->args[0];
346 if (status & STAT_ERROR) {
347 err = (int)vdev->args[1];
348 if (err != ERR_ECMDUNKNOWN ||
349 cmd != CMD_CAPABILITY)
350 pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
351 return err;
352 }
353
354 *a0 = vdev->args[1];
355 *a1 = vdev->args[2];
356
357 return 0;
358}
359
360static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
361 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
362{
363 int err;
364
365 vdev->args[0] = *a0;
366 vdev->args[1] = *a1;
367
368 err = _vnic_dev_cmd(vdev, cmd, wait);
369
370 *a0 = vdev->args[0];
371 *a1 = vdev->args[1];
372
373 return err;
374}
375
376void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
377{
378 vdev->proxy = PROXY_BY_BDF;
379 vdev->proxy_index = bdf;
380}
381
382void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
383{
384 vdev->proxy = PROXY_NONE;
385 vdev->proxy_index = 0;
386}
387
388int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
389 u64 *a0, u64 *a1, int wait)
390{
391 memset(vdev->args, 0, sizeof(vdev->args));
392
393 switch (vdev->proxy) {
394 case PROXY_BY_BDF:
395 return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
396 case PROXY_NONE:
397 default:
398 return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
399 }
400}
401
316static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) 402static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
317{ 403{
318 u64 a0 = (u32)cmd, a1 = 0; 404 u64 a0 = (u32)cmd, a1 = 0;
@@ -431,6 +517,19 @@ int vnic_dev_enable(struct vnic_dev *vdev)
431 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); 517 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
432} 518}
433 519
520int vnic_dev_enable_wait(struct vnic_dev *vdev)
521{
522 u64 a0 = 0, a1 = 0;
523 int wait = 1000;
524 int err;
525
526 err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
527 if (err == ERR_ECMDUNKNOWN)
528 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
529
530 return err;
531}
532
434int vnic_dev_disable(struct vnic_dev *vdev) 533int vnic_dev_disable(struct vnic_dev *vdev)
435{ 534{
436 u64 a0 = 0, a1 = 0; 535 u64 a0 = 0, a1 = 0;
@@ -486,6 +585,44 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
486 return 0; 585 return 0;
487} 586}
488 587
588int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
589{
590 u64 a0 = (u32)arg, a1 = 0;
591 int wait = 1000;
592 int err;
593
594 err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
595 if (err == ERR_ECMDUNKNOWN) {
596 err = vnic_dev_soft_reset(vdev, arg);
597 if (err)
598 return err;
599
600 return vnic_dev_init(vdev, 0);
601 }
602
603 return err;
604}
605
606int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
607{
608 u64 a0 = 0, a1 = 0;
609 int wait = 1000;
610 int err;
611
612 *done = 0;
613
614 err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
615 if (err) {
616 if (err == ERR_ECMDUNKNOWN)
617 return vnic_dev_soft_reset_done(vdev, done);
618 return err;
619 }
620
621 *done = (a0 == 0);
622
623 return 0;
624}
625
489int vnic_dev_hang_notify(struct vnic_dev *vdev) 626int vnic_dev_hang_notify(struct vnic_dev *vdev)
490{ 627{
491 u64 a0, a1; 628 u64 a0, a1;
@@ -512,7 +649,7 @@ int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
512 return 0; 649 return 0;
513} 650}
514 651
515void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, 652int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
516 int broadcast, int promisc, int allmulti) 653 int broadcast, int promisc, int allmulti)
517{ 654{
518 u64 a0, a1 = 0; 655 u64 a0, a1 = 0;
@@ -527,7 +664,29 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
527 664
528 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); 665 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
529 if (err) 666 if (err)
530 printk(KERN_ERR "Can't set packet filter\n"); 667 pr_err("Can't set packet filter\n");
668
669 return err;
670}
671
672int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
673 int multicast, int broadcast, int promisc, int allmulti)
674{
675 u64 a0, a1 = 0;
676 int wait = 1000;
677 int err;
678
679 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
680 (multicast ? CMD_PFILTER_MULTICAST : 0) |
681 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
682 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
683 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
684
685 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
686 if (err)
687 pr_err("Can't set packet filter\n");
688
689 return err;
531} 690}
532 691
533int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) 692int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
@@ -542,7 +701,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
542 701
543 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); 702 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
544 if (err) 703 if (err)
545 printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err); 704 pr_err("Can't add addr [%pM], %d\n", addr, err);
546 705
547 return err; 706 return err;
548} 707}
@@ -559,7 +718,21 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
559 718
560 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); 719 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
561 if (err) 720 if (err)
562 printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err); 721 pr_err("Can't del addr [%pM], %d\n", addr, err);
722
723 return err;
724}
725
726int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
727 u8 ig_vlan_rewrite_mode)
728{
729 u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
730 int wait = 1000;
731 int err;
732
733 err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
734 if (err == ERR_ECMDUNKNOWN)
735 return 0;
563 736
564 return err; 737 return err;
565} 738}
@@ -572,8 +745,7 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
572 745
573 err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait); 746 err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
574 if (err) 747 if (err)
575 printk(KERN_ERR "Failed to raise INTR[%d], err %d\n", 748 pr_err("Failed to raise INTR[%d], err %d\n", intr, err);
576 intr, err);
577 749
578 return err; 750 return err;
579} 751}
@@ -604,8 +776,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
604 dma_addr_t notify_pa; 776 dma_addr_t notify_pa;
605 777
606 if (vdev->notify || vdev->notify_pa) { 778 if (vdev->notify || vdev->notify_pa) {
607 printk(KERN_ERR "notify block %p still allocated", 779 pr_err("notify block %p still allocated", vdev->notify);
608 vdev->notify);
609 return -EINVAL; 780 return -EINVAL;
610 } 781 }
611 782
@@ -618,22 +789,25 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
618 return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); 789 return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
619} 790}
620 791
621void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) 792int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
622{ 793{
623 u64 a0, a1; 794 u64 a0, a1;
624 int wait = 1000; 795 int wait = 1000;
796 int err;
625 797
626 a0 = 0; /* paddr = 0 to unset notify buffer */ 798 a0 = 0; /* paddr = 0 to unset notify buffer */
627 a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ 799 a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
628 a1 += sizeof(struct vnic_devcmd_notify); 800 a1 += sizeof(struct vnic_devcmd_notify);
629 801
630 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); 802 err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
631 vdev->notify = NULL; 803 vdev->notify = NULL;
632 vdev->notify_pa = 0; 804 vdev->notify_pa = 0;
633 vdev->notify_sz = 0; 805 vdev->notify_sz = 0;
806
807 return err;
634} 808}
635 809
636void vnic_dev_notify_unset(struct vnic_dev *vdev) 810int vnic_dev_notify_unset(struct vnic_dev *vdev)
637{ 811{
638 if (vdev->notify) { 812 if (vdev->notify) {
639 pci_free_consistent(vdev->pdev, 813 pci_free_consistent(vdev->pdev,
@@ -642,7 +816,7 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
642 vdev->notify_pa); 816 vdev->notify_pa);
643 } 817 }
644 818
645 vnic_dev_notify_unsetcmd(vdev); 819 return vnic_dev_notify_unsetcmd(vdev);
646} 820}
647 821
648static int vnic_dev_notify_ready(struct vnic_dev *vdev) 822static int vnic_dev_notify_ready(struct vnic_dev *vdev)
@@ -672,13 +846,14 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
672 int wait = 1000; 846 int wait = 1000;
673 int r = 0; 847 int r = 0;
674 848
675 if (vdev->cap_flags & VNIC_DEV_CAP_INIT) 849 if (vnic_dev_capable(vdev, CMD_INIT))
676 r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); 850 r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
677 else { 851 else {
678 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); 852 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
679 if (a0 & CMD_INITF_DEFAULT_MAC) { 853 if (a0 & CMD_INITF_DEFAULT_MAC) {
680 // Emulate these for old CMD_INIT_v1 which 854 /* Emulate these for old CMD_INIT_v1 which
681 // didn't pass a0 so no CMD_INITF_*. 855 * didn't pass a0 so no CMD_INITF_*.
856 */
682 vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); 857 vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
683 vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); 858 vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
684 } 859 }
@@ -700,7 +875,7 @@ int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
700 875
701 *done = (a0 == 0); 876 *done = (a0 == 0);
702 877
703 *err = (a0 == 0) ? a1 : 0; 878 *err = (a0 == 0) ? (int)a1:0;
704 879
705 return 0; 880 return 0;
706} 881}
@@ -738,9 +913,6 @@ int vnic_dev_deinit(struct vnic_dev *vdev)
738 913
739int vnic_dev_link_status(struct vnic_dev *vdev) 914int vnic_dev_link_status(struct vnic_dev *vdev)
740{ 915{
741 if (vdev->linkstatus)
742 return *vdev->linkstatus;
743
744 if (!vnic_dev_notify_ready(vdev)) 916 if (!vnic_dev_notify_ready(vdev))
745 return 0; 917 return 0;
746 918
@@ -787,6 +959,14 @@ u32 vnic_dev_notify_status(struct vnic_dev *vdev)
787 return vdev->notify_copy.status; 959 return vdev->notify_copy.status;
788} 960}
789 961
962u32 vnic_dev_uif(struct vnic_dev *vdev)
963{
964 if (!vnic_dev_notify_ready(vdev))
965 return 0;
966
967 return vdev->notify_copy.uif;
968}
969
790void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 970void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
791 enum vnic_dev_intr_mode intr_mode) 971 enum vnic_dev_intr_mode intr_mode)
792{ 972{
@@ -807,14 +987,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
807 sizeof(struct vnic_devcmd_notify), 987 sizeof(struct vnic_devcmd_notify),
808 vdev->notify, 988 vdev->notify,
809 vdev->notify_pa); 989 vdev->notify_pa);
810 if (vdev->linkstatus)
811 pci_free_consistent(vdev->pdev,
812 sizeof(u32),
813 vdev->linkstatus,
814 vdev->linkstatus_pa);
815 if (vdev->stats) 990 if (vdev->stats)
816 pci_free_consistent(vdev->pdev, 991 pci_free_consistent(vdev->pdev,
817 sizeof(struct vnic_dev), 992 sizeof(struct vnic_stats),
818 vdev->stats, vdev->stats_pa); 993 vdev->stats, vdev->stats_pa);
819 if (vdev->fw_info) 994 if (vdev->fw_info)
820 pci_free_consistent(vdev->pdev, 995 pci_free_consistent(vdev->pdev,
@@ -844,11 +1019,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
844 if (!vdev->devcmd) 1019 if (!vdev->devcmd)
845 goto err_out; 1020 goto err_out;
846 1021
847 vdev->cap_flags = 0;
848
849 if (vnic_dev_capable(vdev, CMD_INIT))
850 vdev->cap_flags |= VNIC_DEV_CAP_INIT;
851
852 return vdev; 1022 return vdev;
853 1023
854err_out: 1024err_out:
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index caccce36957b..3a61873138b6 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -41,6 +41,9 @@ static inline void writeq(u64 val, void __iomem *reg)
41} 41}
42#endif 42#endif
43 43
44#undef pr_fmt
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46
44enum vnic_dev_hw_version { 47enum vnic_dev_hw_version {
45 VNIC_DEV_HW_VER_UNKNOWN, 48 VNIC_DEV_HW_VER_UNKNOWN,
46 VNIC_DEV_HW_VER_A1, 49 VNIC_DEV_HW_VER_A1,
@@ -92,6 +95,8 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
92 struct vnic_dev_ring *ring); 95 struct vnic_dev_ring *ring);
93int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, 96int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
94 u64 *a0, u64 *a1, int wait); 97 u64 *a0, u64 *a1, int wait);
98void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
99void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
95int vnic_dev_fw_info(struct vnic_dev *vdev, 100int vnic_dev_fw_info(struct vnic_dev *vdev,
96 struct vnic_devcmd_fw_info **fw_info); 101 struct vnic_devcmd_fw_info **fw_info);
97int vnic_dev_hw_version(struct vnic_dev *vdev, 102int vnic_dev_hw_version(struct vnic_dev *vdev,
@@ -101,8 +106,10 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
101int vnic_dev_stats_clear(struct vnic_dev *vdev); 106int vnic_dev_stats_clear(struct vnic_dev *vdev);
102int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); 107int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
103int vnic_dev_hang_notify(struct vnic_dev *vdev); 108int vnic_dev_hang_notify(struct vnic_dev *vdev);
104void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, 109int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
105 int broadcast, int promisc, int allmulti); 110 int broadcast, int promisc, int allmulti);
111int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
112 int multicast, int broadcast, int promisc, int allmulti);
106int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); 113int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
107int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); 114int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
108int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); 115int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
@@ -110,16 +117,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
110int vnic_dev_notify_setcmd(struct vnic_dev *vdev, 117int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
111 void *notify_addr, dma_addr_t notify_pa, u16 intr); 118 void *notify_addr, dma_addr_t notify_pa, u16 intr);
112int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); 119int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
113void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); 120int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
114void vnic_dev_notify_unset(struct vnic_dev *vdev); 121int vnic_dev_notify_unset(struct vnic_dev *vdev);
115int vnic_dev_link_status(struct vnic_dev *vdev); 122int vnic_dev_link_status(struct vnic_dev *vdev);
116u32 vnic_dev_port_speed(struct vnic_dev *vdev); 123u32 vnic_dev_port_speed(struct vnic_dev *vdev);
117u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); 124u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
118u32 vnic_dev_mtu(struct vnic_dev *vdev); 125u32 vnic_dev_mtu(struct vnic_dev *vdev);
119u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); 126u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
120u32 vnic_dev_notify_status(struct vnic_dev *vdev); 127u32 vnic_dev_notify_status(struct vnic_dev *vdev);
128u32 vnic_dev_uif(struct vnic_dev *vdev);
121int vnic_dev_close(struct vnic_dev *vdev); 129int vnic_dev_close(struct vnic_dev *vdev);
122int vnic_dev_enable(struct vnic_dev *vdev); 130int vnic_dev_enable(struct vnic_dev *vdev);
131int vnic_dev_enable_wait(struct vnic_dev *vdev);
123int vnic_dev_disable(struct vnic_dev *vdev); 132int vnic_dev_disable(struct vnic_dev *vdev);
124int vnic_dev_open(struct vnic_dev *vdev, int arg); 133int vnic_dev_open(struct vnic_dev *vdev, int arg);
125int vnic_dev_open_done(struct vnic_dev *vdev, int *done); 134int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
@@ -129,10 +138,14 @@ int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
129int vnic_dev_deinit(struct vnic_dev *vdev); 138int vnic_dev_deinit(struct vnic_dev *vdev);
130int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); 139int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
131int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); 140int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
141int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
142int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
132void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 143void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
133 enum vnic_dev_intr_mode intr_mode); 144 enum vnic_dev_intr_mode intr_mode);
134enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); 145enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
135void vnic_dev_unregister(struct vnic_dev *vdev); 146void vnic_dev_unregister(struct vnic_dev *vdev);
147int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
148 u8 ig_vlan_rewrite_mode);
136struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, 149struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
137 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, 150 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
138 unsigned int num_bars); 151 unsigned int num_bars);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index d78bbcc1fdf9..20661755df6b 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -98,6 +98,9 @@ enum vnic_devcmd_cmd {
98 /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ 98 /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
99 CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), 99 CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
100 100
101 /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
102 CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
103
101 /* hang detection notification */ 104 /* hang detection notification */
102 CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), 105 CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
103 106
@@ -171,6 +174,9 @@ enum vnic_devcmd_cmd {
171 /* enable virtual link */ 174 /* enable virtual link */
172 CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), 175 CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
173 176
177 /* enable virtual link, waiting variant. */
178 CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
179
174 /* disable virtual link */ 180 /* disable virtual link */
175 CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), 181 CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
176 182
@@ -211,6 +217,27 @@ enum vnic_devcmd_cmd {
211 * in: (u16)a0=interrupt number to assert 217 * in: (u16)a0=interrupt number to assert
212 */ 218 */
213 CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), 219 CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
220
221 /* initiate hangreset, like softreset after hang detected */
222 CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
223
224 /* hangreset status:
225 * out: a0=0 reset complete, a0=1 reset in progress */
226 CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
227
228 /*
229 * Set hw ingress packet vlan rewrite mode:
230 * in: (u32)a0=new vlan rewrite mode
231 * out: (u32)a0=old vlan rewrite mode */
232 CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
233
234 /*
235 * in: (u16)a0=bdf of target vnic
236 * (u32)a1=cmd to proxy
237 * a2-a15=args to cmd in a1
238 * out: (u32)a0=status of proxied cmd
239 * a1-a15=out args of proxied cmd */
240 CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
214}; 241};
215 242
216/* flags for CMD_OPEN */ 243/* flags for CMD_OPEN */
@@ -226,6 +253,12 @@ enum vnic_devcmd_cmd {
226#define CMD_PFILTER_PROMISCUOUS 0x08 253#define CMD_PFILTER_PROMISCUOUS 0x08
227#define CMD_PFILTER_ALL_MULTICAST 0x10 254#define CMD_PFILTER_ALL_MULTICAST 0x10
228 255
256/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
257#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
258#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
259#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2
260#define IG_VLAN_REWRITE_MODE_PASS_THRU 3
261
229enum vnic_devcmd_status { 262enum vnic_devcmd_status {
230 STAT_NONE = 0, 263 STAT_NONE = 0,
231 STAT_BUSY = 1 << 0, /* cmd in progress */ 264 STAT_BUSY = 1 << 0, /* cmd in progress */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 8eeb6758491b..3b3291248956 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -35,6 +35,7 @@ struct vnic_enet_config {
35 u8 intr_mode; 35 u8 intr_mode;
36 char devname[16]; 36 char devname[16];
37 u32 intr_timer_usec; 37 u32 intr_timer_usec;
38 u16 loop_tag;
38}; 39};
39 40
40#define VENETF_TSO 0x1 /* TSO enabled */ 41#define VENETF_TSO 0x1 /* TSO enabled */
@@ -48,5 +49,6 @@ struct vnic_enet_config {
48#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */ 49#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
49#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ 50#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
50#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */ 51#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
52#define VENETF_LOOP 0x800 /* Loopback enabled */
51 53
52#endif /* _VNIC_ENIC_H_ */ 54#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 3934309a9498..52ab61af2750 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -39,8 +39,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
39 39
40 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); 40 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
41 if (!intr->ctrl) { 41 if (!intr->ctrl) {
42 printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n", 42 pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
43 index);
44 return -EINVAL; 43 return -EINVAL;
45 } 44 }
46 45
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 2fe6c6339e3c..09dc0b73ff46 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -61,7 +61,11 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
61static inline void vnic_intr_mask(struct vnic_intr *intr) 61static inline void vnic_intr_mask(struct vnic_intr *intr)
62{ 62{
63 iowrite32(1, &intr->ctrl->mask); 63 iowrite32(1, &intr->ctrl->mask);
64 (void)ioread32(&intr->ctrl->mask); 64}
65
66static inline int vnic_intr_masked(struct vnic_intr *intr)
67{
68 return ioread32(&intr->ctrl->mask);
65} 69}
66 70
67static inline void vnic_intr_return_credits(struct vnic_intr *intr, 71static inline void vnic_intr_return_credits(struct vnic_intr *intr,
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index cf80ab46d582..995a50dd4c99 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index b61c22aec41a..810287beff14 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index cc580cfec41d..dbb2aca258b9 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -37,23 +37,23 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
37 vdev = rq->vdev; 37 vdev = rq->vdev;
38 38
39 for (i = 0; i < blks; i++) { 39 for (i = 0; i < blks; i++) {
40 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); 40 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
41 if (!rq->bufs[i]) { 41 if (!rq->bufs[i]) {
42 printk(KERN_ERR "Failed to alloc rq_bufs\n"); 42 pr_err("Failed to alloc rq_bufs\n");
43 return -ENOMEM; 43 return -ENOMEM;
44 } 44 }
45 } 45 }
46 46
47 for (i = 0; i < blks; i++) { 47 for (i = 0; i < blks; i++) {
48 buf = rq->bufs[i]; 48 buf = rq->bufs[i];
49 for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) { 49 for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
50 buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j; 50 buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
51 buf->desc = (u8 *)rq->ring.descs + 51 buf->desc = (u8 *)rq->ring.descs +
52 rq->ring.desc_size * buf->index; 52 rq->ring.desc_size * buf->index;
53 if (buf->index + 1 == count) { 53 if (buf->index + 1 == count) {
54 buf->next = rq->bufs[0]; 54 buf->next = rq->bufs[0];
55 break; 55 break;
56 } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) { 56 } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
57 buf->next = rq->bufs[i + 1]; 57 buf->next = rq->bufs[i + 1];
58 } else { 58 } else {
59 buf->next = buf + 1; 59 buf->next = buf + 1;
@@ -94,7 +94,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
94 94
95 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); 95 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
96 if (!rq->ctrl) { 96 if (!rq->ctrl) {
97 printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index); 97 pr_err("Failed to hook RQ[%d] resource\n", index);
98 return -EINVAL; 98 return -EINVAL;
99 } 99 }
100 100
@@ -119,10 +119,11 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
119 unsigned int error_interrupt_offset) 119 unsigned int error_interrupt_offset)
120{ 120{
121 u64 paddr; 121 u64 paddr;
122 unsigned int count = rq->ring.desc_count;
122 123
123 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; 124 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
124 writeq(paddr, &rq->ctrl->ring_base); 125 writeq(paddr, &rq->ctrl->ring_base);
125 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); 126 iowrite32(count, &rq->ctrl->ring_size);
126 iowrite32(cq_index, &rq->ctrl->cq_index); 127 iowrite32(cq_index, &rq->ctrl->cq_index);
127 iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); 128 iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
128 iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); 129 iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
@@ -132,8 +133,8 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
132 iowrite32(posted_index, &rq->ctrl->posted_index); 133 iowrite32(posted_index, &rq->ctrl->posted_index);
133 134
134 rq->to_use = rq->to_clean = 135 rq->to_use = rq->to_clean =
135 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] 136 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
136 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; 137 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
137} 138}
138 139
139void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, 140void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -145,6 +146,11 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
145 /* Use current fetch_index as the ring starting point */ 146 /* Use current fetch_index as the ring starting point */
146 fetch_index = ioread32(&rq->ctrl->fetch_index); 147 fetch_index = ioread32(&rq->ctrl->fetch_index);
147 148
149 if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
150 /* Hardware surprise removal: reset fetch_index */
151 fetch_index = 0;
152 }
153
148 vnic_rq_init_start(rq, cq_index, 154 vnic_rq_init_start(rq, cq_index,
149 fetch_index, fetch_index, 155 fetch_index, fetch_index,
150 error_interrupt_enable, 156 error_interrupt_enable,
@@ -174,7 +180,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
174 udelay(10); 180 udelay(10);
175 } 181 }
176 182
177 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); 183 pr_err("Failed to disable RQ[%d]\n", rq->index);
178 184
179 return -ETIMEDOUT; 185 return -ETIMEDOUT;
180} 186}
@@ -184,8 +190,7 @@ void vnic_rq_clean(struct vnic_rq *rq,
184{ 190{
185 struct vnic_rq_buf *buf; 191 struct vnic_rq_buf *buf;
186 u32 fetch_index; 192 u32 fetch_index;
187 193 unsigned int count = rq->ring.desc_count;
188 BUG_ON(ioread32(&rq->ctrl->enable));
189 194
190 buf = rq->to_clean; 195 buf = rq->to_clean;
191 196
@@ -199,9 +204,14 @@ void vnic_rq_clean(struct vnic_rq *rq,
199 204
200 /* Use current fetch_index as the ring starting point */ 205 /* Use current fetch_index as the ring starting point */
201 fetch_index = ioread32(&rq->ctrl->fetch_index); 206 fetch_index = ioread32(&rq->ctrl->fetch_index);
207
208 if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
209 /* Hardware surprise removal: reset fetch_index */
210 fetch_index = 0;
211 }
202 rq->to_use = rq->to_clean = 212 rq->to_use = rq->to_clean =
203 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] 213 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
204 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; 214 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
205 iowrite32(fetch_index, &rq->ctrl->posted_index); 215 iowrite32(fetch_index, &rq->ctrl->posted_index);
206 216
207 vnic_dev_clear_desc_ring(&rq->ring); 217 vnic_dev_clear_desc_ring(&rq->ring);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 35e736cc2d88..2dc48f91abf7 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008, 2009 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -52,12 +52,16 @@ struct vnic_rq_ctrl {
52 u32 pad10; 52 u32 pad10;
53}; 53};
54 54
55/* Break the vnic_rq_buf allocations into blocks of 64 entries */ 55/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
56#define VNIC_RQ_BUF_BLK_ENTRIES 64 56#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
57#define VNIC_RQ_BUF_BLK_SZ \ 57#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
58 (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)) 58#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
59 ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
60 VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
61#define VNIC_RQ_BUF_BLK_SZ(entries) \
62 (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
59#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ 63#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
60 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) 64 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
61#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) 65#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
62 66
63struct vnic_rq_buf { 67struct vnic_rq_buf {
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
index 5fbb3c923bcd..f62d18719629 100644
--- a/drivers/net/enic/vnic_rss.h
+++ b/drivers/net/enic/vnic_rss.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
index 9ff9614d89b1..77750ec93954 100644
--- a/drivers/net/enic/vnic_stats.h
+++ b/drivers/net/enic/vnic_stats.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index d769772998c6..197c9d24af82 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -25,9 +25,13 @@
25 25
26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type) 26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
27{ 27{
28 struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags); 28 struct vic_provinfo *vp;
29 29
30 if (!vp || !oui) 30 if (!oui)
31 return NULL;
32
33 vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
34 if (!vp)
31 return NULL; 35 return NULL;
32 36
33 memcpy(vp->oui, oui, sizeof(vp->oui)); 37 memcpy(vp->oui, oui, sizeof(vp->oui));
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 085c2a274cb1..7e46e5e8600f 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -44,7 +44,7 @@ struct vic_provinfo {
44 u16 length; 44 u16 length;
45 u8 value[0]; 45 u8 value[0];
46 } tlv[0]; 46 } tlv[0];
47} __attribute__ ((packed)); 47} __packed;
48 48
49#define VIC_PROVINFO_MAX_DATA 1385 49#define VIC_PROVINFO_MAX_DATA 1385
50#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ 50#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 1378afbdfe67..122e33bcc578 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -37,23 +37,23 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
37 vdev = wq->vdev; 37 vdev = wq->vdev;
38 38
39 for (i = 0; i < blks; i++) { 39 for (i = 0; i < blks; i++) {
40 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); 40 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
41 if (!wq->bufs[i]) { 41 if (!wq->bufs[i]) {
42 printk(KERN_ERR "Failed to alloc wq_bufs\n"); 42 pr_err("Failed to alloc wq_bufs\n");
43 return -ENOMEM; 43 return -ENOMEM;
44 } 44 }
45 } 45 }
46 46
47 for (i = 0; i < blks; i++) { 47 for (i = 0; i < blks; i++) {
48 buf = wq->bufs[i]; 48 buf = wq->bufs[i];
49 for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) { 49 for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
50 buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j; 50 buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
51 buf->desc = (u8 *)wq->ring.descs + 51 buf->desc = (u8 *)wq->ring.descs +
52 wq->ring.desc_size * buf->index; 52 wq->ring.desc_size * buf->index;
53 if (buf->index + 1 == count) { 53 if (buf->index + 1 == count) {
54 buf->next = wq->bufs[0]; 54 buf->next = wq->bufs[0];
55 break; 55 break;
56 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) { 56 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
57 buf->next = wq->bufs[i + 1]; 57 buf->next = wq->bufs[i + 1];
58 } else { 58 } else {
59 buf->next = buf + 1; 59 buf->next = buf + 1;
@@ -94,7 +94,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
94 94
95 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); 95 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
96 if (!wq->ctrl) { 96 if (!wq->ctrl) {
97 printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index); 97 pr_err("Failed to hook WQ[%d] resource\n", index);
98 return -EINVAL; 98 return -EINVAL;
99 } 99 }
100 100
@@ -119,10 +119,11 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
119 unsigned int error_interrupt_offset) 119 unsigned int error_interrupt_offset)
120{ 120{
121 u64 paddr; 121 u64 paddr;
122 unsigned int count = wq->ring.desc_count;
122 123
123 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; 124 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
124 writeq(paddr, &wq->ctrl->ring_base); 125 writeq(paddr, &wq->ctrl->ring_base);
125 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); 126 iowrite32(count, &wq->ctrl->ring_size);
126 iowrite32(fetch_index, &wq->ctrl->fetch_index); 127 iowrite32(fetch_index, &wq->ctrl->fetch_index);
127 iowrite32(posted_index, &wq->ctrl->posted_index); 128 iowrite32(posted_index, &wq->ctrl->posted_index);
128 iowrite32(cq_index, &wq->ctrl->cq_index); 129 iowrite32(cq_index, &wq->ctrl->cq_index);
@@ -131,8 +132,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
131 iowrite32(0, &wq->ctrl->error_status); 132 iowrite32(0, &wq->ctrl->error_status);
132 133
133 wq->to_use = wq->to_clean = 134 wq->to_use = wq->to_clean =
134 &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES] 135 &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
135 [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES]; 136 [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
136} 137}
137 138
138void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, 139void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
@@ -167,7 +168,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
167 udelay(10); 168 udelay(10);
168 } 169 }
169 170
170 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); 171 pr_err("Failed to disable WQ[%d]\n", wq->index);
171 172
172 return -ETIMEDOUT; 173 return -ETIMEDOUT;
173} 174}
@@ -177,8 +178,6 @@ void vnic_wq_clean(struct vnic_wq *wq,
177{ 178{
178 struct vnic_wq_buf *buf; 179 struct vnic_wq_buf *buf;
179 180
180 BUG_ON(ioread32(&wq->ctrl->enable));
181
182 buf = wq->to_clean; 181 buf = wq->to_clean;
183 182
184 while (vnic_wq_desc_used(wq) > 0) { 183 while (vnic_wq_desc_used(wq) > 0) {
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
index 9c34d41a887e..94ac4621acc5 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/enic/vnic_wq.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
@@ -60,12 +60,16 @@ struct vnic_wq_buf {
60 void *desc; 60 void *desc;
61}; 61};
62 62
63/* Break the vnic_wq_buf allocations into blocks of 64 entries */ 63/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
64#define VNIC_WQ_BUF_BLK_ENTRIES 64 64#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
65#define VNIC_WQ_BUF_BLK_SZ \ 65#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
66 (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf)) 66#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
67 ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
68 VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
69#define VNIC_WQ_BUF_BLK_SZ(entries) \
70 (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
67#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ 71#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
68 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES) 72 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
69#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) 73#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
70 74
71struct vnic_wq { 75struct vnic_wq {
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
index 483596c2d8bf..c7021e3a631f 100644
--- a/drivers/net/enic/wq_enet_desc.h
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 * 4 *
5 * This program is free software; you may redistribute it and/or modify 5 * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 4c274657283c..57c8ac0ef3f1 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -278,7 +278,6 @@ struct epic_private {
278 struct pci_dev *pci_dev; /* PCI bus location. */ 278 struct pci_dev *pci_dev; /* PCI bus location. */
279 int chip_id, chip_flags; 279 int chip_id, chip_flags;
280 280
281 struct net_device_stats stats;
282 struct timer_list timer; /* Media selection timer. */ 281 struct timer_list timer; /* Media selection timer. */
283 int tx_threshold; 282 int tx_threshold;
284 unsigned char mc_filter[8]; 283 unsigned char mc_filter[8];
@@ -770,7 +769,6 @@ static int epic_open(struct net_device *dev)
770static void epic_pause(struct net_device *dev) 769static void epic_pause(struct net_device *dev)
771{ 770{
772 long ioaddr = dev->base_addr; 771 long ioaddr = dev->base_addr;
773 struct epic_private *ep = netdev_priv(dev);
774 772
775 netif_stop_queue (dev); 773 netif_stop_queue (dev);
776 774
@@ -781,9 +779,9 @@ static void epic_pause(struct net_device *dev)
781 779
782 /* Update the error counts. */ 780 /* Update the error counts. */
783 if (inw(ioaddr + COMMAND) != 0xffff) { 781 if (inw(ioaddr + COMMAND) != 0xffff) {
784 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); 782 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
785 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); 783 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
786 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 784 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
787 } 785 }
788 786
789 /* Remove the packets on the Rx queue. */ 787 /* Remove the packets on the Rx queue. */
@@ -900,7 +898,7 @@ static void epic_tx_timeout(struct net_device *dev)
900 } 898 }
901 } 899 }
902 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ 900 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
903 ep->stats.tx_fifo_errors++; 901 dev->stats.tx_fifo_errors++;
904 outl(RestartTx, ioaddr + COMMAND); 902 outl(RestartTx, ioaddr + COMMAND);
905 } else { 903 } else {
906 epic_restart(dev); 904 epic_restart(dev);
@@ -908,7 +906,7 @@ static void epic_tx_timeout(struct net_device *dev)
908 } 906 }
909 907
910 dev->trans_start = jiffies; /* prevent tx timeout */ 908 dev->trans_start = jiffies; /* prevent tx timeout */
911 ep->stats.tx_errors++; 909 dev->stats.tx_errors++;
912 if (!ep->tx_full) 910 if (!ep->tx_full)
913 netif_wake_queue(dev); 911 netif_wake_queue(dev);
914} 912}
@@ -1016,7 +1014,7 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
1016static void epic_tx_error(struct net_device *dev, struct epic_private *ep, 1014static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1017 int status) 1015 int status)
1018{ 1016{
1019 struct net_device_stats *stats = &ep->stats; 1017 struct net_device_stats *stats = &dev->stats;
1020 1018
1021#ifndef final_version 1019#ifndef final_version
1022 /* There was an major error, log it. */ 1020 /* There was an major error, log it. */
@@ -1053,9 +1051,9 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
1053 break; /* It still hasn't been Txed */ 1051 break; /* It still hasn't been Txed */
1054 1052
1055 if (likely(txstatus & 0x0001)) { 1053 if (likely(txstatus & 0x0001)) {
1056 ep->stats.collisions += (txstatus >> 8) & 15; 1054 dev->stats.collisions += (txstatus >> 8) & 15;
1057 ep->stats.tx_packets++; 1055 dev->stats.tx_packets++;
1058 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len; 1056 dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1059 } else 1057 } else
1060 epic_tx_error(dev, ep, txstatus); 1058 epic_tx_error(dev, ep, txstatus);
1061 1059
@@ -1125,12 +1123,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1125 goto out; 1123 goto out;
1126 1124
1127 /* Always update the error counts to avoid overhead later. */ 1125 /* Always update the error counts to avoid overhead later. */
1128 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1126 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1129 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1127 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1130 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1128 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1131 1129
1132 if (status & TxUnderrun) { /* Tx FIFO underflow. */ 1130 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1133 ep->stats.tx_fifo_errors++; 1131 dev->stats.tx_fifo_errors++;
1134 outl(ep->tx_threshold += 128, ioaddr + TxThresh); 1132 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1135 /* Restart the transmit process. */ 1133 /* Restart the transmit process. */
1136 outl(RestartTx, ioaddr + COMMAND); 1134 outl(RestartTx, ioaddr + COMMAND);
@@ -1183,10 +1181,10 @@ static int epic_rx(struct net_device *dev, int budget)
1183 if (status & 0x2000) { 1181 if (status & 0x2000) {
1184 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " 1182 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1185 "multiple buffers, status %4.4x!\n", dev->name, status); 1183 "multiple buffers, status %4.4x!\n", dev->name, status);
1186 ep->stats.rx_length_errors++; 1184 dev->stats.rx_length_errors++;
1187 } else if (status & 0x0006) 1185 } else if (status & 0x0006)
1188 /* Rx Frame errors are counted in hardware. */ 1186 /* Rx Frame errors are counted in hardware. */
1189 ep->stats.rx_errors++; 1187 dev->stats.rx_errors++;
1190 } else { 1188 } else {
1191 /* Malloc up new buffer, compatible with net-2e. */ 1189 /* Malloc up new buffer, compatible with net-2e. */
1192 /* Omit the four octet CRC from the length. */ 1190 /* Omit the four octet CRC from the length. */
@@ -1223,8 +1221,8 @@ static int epic_rx(struct net_device *dev, int budget)
1223 } 1221 }
1224 skb->protocol = eth_type_trans(skb, dev); 1222 skb->protocol = eth_type_trans(skb, dev);
1225 netif_receive_skb(skb); 1223 netif_receive_skb(skb);
1226 ep->stats.rx_packets++; 1224 dev->stats.rx_packets++;
1227 ep->stats.rx_bytes += pkt_len; 1225 dev->stats.rx_bytes += pkt_len;
1228 } 1226 }
1229 work_done++; 1227 work_done++;
1230 entry = (++ep->cur_rx) % RX_RING_SIZE; 1228 entry = (++ep->cur_rx) % RX_RING_SIZE;
@@ -1259,7 +1257,7 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1259 if (status == EpicRemoved) 1257 if (status == EpicRemoved)
1260 return; 1258 return;
1261 if (status & RxOverflow) /* Missed a Rx frame. */ 1259 if (status & RxOverflow) /* Missed a Rx frame. */
1262 ep->stats.rx_errors++; 1260 dev->stats.rx_errors++;
1263 if (status & (RxOverflow | RxFull)) 1261 if (status & (RxOverflow | RxFull))
1264 outw(RxQueued, ioaddr + COMMAND); 1262 outw(RxQueued, ioaddr + COMMAND);
1265} 1263}
@@ -1357,17 +1355,16 @@ static int epic_close(struct net_device *dev)
1357 1355
1358static struct net_device_stats *epic_get_stats(struct net_device *dev) 1356static struct net_device_stats *epic_get_stats(struct net_device *dev)
1359{ 1357{
1360 struct epic_private *ep = netdev_priv(dev);
1361 long ioaddr = dev->base_addr; 1358 long ioaddr = dev->base_addr;
1362 1359
1363 if (netif_running(dev)) { 1360 if (netif_running(dev)) {
1364 /* Update the error counts. */ 1361 /* Update the error counts. */
1365 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1362 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1366 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1363 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1367 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1364 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1368 } 1365 }
1369 1366
1370 return &ep->stats; 1367 return &dev->stats;
1371} 1368}
1372 1369
1373/* Set or clear the multicast filter for this adaptor. 1370/* Set or clear the multicast filter for this adaptor.
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 874973f558e9..10e39f2b31c3 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1442,8 +1442,10 @@ int __init init_module(void)
1442 dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]); 1442 dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
1443 1443
1444 if(io[this_dev] == 0) { 1444 if(io[this_dev] == 0) {
1445 if(this_dev != 0) /* Only autoprobe 1st one */ 1445 if (this_dev != 0) { /* Only autoprobe 1st one */
1446 free_netdev(dev);
1446 break; 1447 break;
1448 }
1447 1449
1448 printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n"); 1450 printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
1449 } 1451 }
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6ed2df14ec84..6d653c459c1f 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -180,9 +180,9 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
180 * @dty_tx: last buffer actually sent 180 * @dty_tx: last buffer actually sent
181 * @num_rx: number of receive buffers 181 * @num_rx: number of receive buffers
182 * @cur_rx: current receive buffer 182 * @cur_rx: current receive buffer
183 * @vma: pointer to array of virtual memory addresses for buffers
183 * @netdev: pointer to network device structure 184 * @netdev: pointer to network device structure
184 * @napi: NAPI structure 185 * @napi: NAPI structure
185 * @stats: network device statistics
186 * @msg_enable: device state flags 186 * @msg_enable: device state flags
187 * @rx_lock: receive lock 187 * @rx_lock: receive lock
188 * @lock: device lock 188 * @lock: device lock
@@ -203,9 +203,10 @@ struct ethoc {
203 unsigned int num_rx; 203 unsigned int num_rx;
204 unsigned int cur_rx; 204 unsigned int cur_rx;
205 205
206 void** vma;
207
206 struct net_device *netdev; 208 struct net_device *netdev;
207 struct napi_struct napi; 209 struct napi_struct napi;
208 struct net_device_stats stats;
209 u32 msg_enable; 210 u32 msg_enable;
210 211
211 spinlock_t rx_lock; 212 spinlock_t rx_lock;
@@ -285,18 +286,22 @@ static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
285 ethoc_write(dev, MODER, mode); 286 ethoc_write(dev, MODER, mode);
286} 287}
287 288
288static int ethoc_init_ring(struct ethoc *dev) 289static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
289{ 290{
290 struct ethoc_bd bd; 291 struct ethoc_bd bd;
291 int i; 292 int i;
293 void* vma;
292 294
293 dev->cur_tx = 0; 295 dev->cur_tx = 0;
294 dev->dty_tx = 0; 296 dev->dty_tx = 0;
295 dev->cur_rx = 0; 297 dev->cur_rx = 0;
296 298
299 ethoc_write(dev, TX_BD_NUM, dev->num_tx);
300
297 /* setup transmission buffers */ 301 /* setup transmission buffers */
298 bd.addr = virt_to_phys(dev->membase); 302 bd.addr = mem_start;
299 bd.stat = TX_BD_IRQ | TX_BD_CRC; 303 bd.stat = TX_BD_IRQ | TX_BD_CRC;
304 vma = dev->membase;
300 305
301 for (i = 0; i < dev->num_tx; i++) { 306 for (i = 0; i < dev->num_tx; i++) {
302 if (i == dev->num_tx - 1) 307 if (i == dev->num_tx - 1)
@@ -304,6 +309,9 @@ static int ethoc_init_ring(struct ethoc *dev)
304 309
305 ethoc_write_bd(dev, i, &bd); 310 ethoc_write_bd(dev, i, &bd);
306 bd.addr += ETHOC_BUFSIZ; 311 bd.addr += ETHOC_BUFSIZ;
312
313 dev->vma[i] = vma;
314 vma += ETHOC_BUFSIZ;
307 } 315 }
308 316
309 bd.stat = RX_BD_EMPTY | RX_BD_IRQ; 317 bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
@@ -314,6 +322,9 @@ static int ethoc_init_ring(struct ethoc *dev)
314 322
315 ethoc_write_bd(dev, dev->num_tx + i, &bd); 323 ethoc_write_bd(dev, dev->num_tx + i, &bd);
316 bd.addr += ETHOC_BUFSIZ; 324 bd.addr += ETHOC_BUFSIZ;
325
326 dev->vma[dev->num_tx + i] = vma;
327 vma += ETHOC_BUFSIZ;
317 } 328 }
318 329
319 return 0; 330 return 0;
@@ -354,39 +365,39 @@ static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
354 365
355 if (bd->stat & RX_BD_TL) { 366 if (bd->stat & RX_BD_TL) {
356 dev_err(&netdev->dev, "RX: frame too long\n"); 367 dev_err(&netdev->dev, "RX: frame too long\n");
357 dev->stats.rx_length_errors++; 368 netdev->stats.rx_length_errors++;
358 ret++; 369 ret++;
359 } 370 }
360 371
361 if (bd->stat & RX_BD_SF) { 372 if (bd->stat & RX_BD_SF) {
362 dev_err(&netdev->dev, "RX: frame too short\n"); 373 dev_err(&netdev->dev, "RX: frame too short\n");
363 dev->stats.rx_length_errors++; 374 netdev->stats.rx_length_errors++;
364 ret++; 375 ret++;
365 } 376 }
366 377
367 if (bd->stat & RX_BD_DN) { 378 if (bd->stat & RX_BD_DN) {
368 dev_err(&netdev->dev, "RX: dribble nibble\n"); 379 dev_err(&netdev->dev, "RX: dribble nibble\n");
369 dev->stats.rx_frame_errors++; 380 netdev->stats.rx_frame_errors++;
370 } 381 }
371 382
372 if (bd->stat & RX_BD_CRC) { 383 if (bd->stat & RX_BD_CRC) {
373 dev_err(&netdev->dev, "RX: wrong CRC\n"); 384 dev_err(&netdev->dev, "RX: wrong CRC\n");
374 dev->stats.rx_crc_errors++; 385 netdev->stats.rx_crc_errors++;
375 ret++; 386 ret++;
376 } 387 }
377 388
378 if (bd->stat & RX_BD_OR) { 389 if (bd->stat & RX_BD_OR) {
379 dev_err(&netdev->dev, "RX: overrun\n"); 390 dev_err(&netdev->dev, "RX: overrun\n");
380 dev->stats.rx_over_errors++; 391 netdev->stats.rx_over_errors++;
381 ret++; 392 ret++;
382 } 393 }
383 394
384 if (bd->stat & RX_BD_MISS) 395 if (bd->stat & RX_BD_MISS)
385 dev->stats.rx_missed_errors++; 396 netdev->stats.rx_missed_errors++;
386 397
387 if (bd->stat & RX_BD_LC) { 398 if (bd->stat & RX_BD_LC) {
388 dev_err(&netdev->dev, "RX: late collision\n"); 399 dev_err(&netdev->dev, "RX: late collision\n");
389 dev->stats.collisions++; 400 netdev->stats.collisions++;
390 ret++; 401 ret++;
391 } 402 }
392 403
@@ -415,18 +426,18 @@ static int ethoc_rx(struct net_device *dev, int limit)
415 skb = netdev_alloc_skb_ip_align(dev, size); 426 skb = netdev_alloc_skb_ip_align(dev, size);
416 427
417 if (likely(skb)) { 428 if (likely(skb)) {
418 void *src = phys_to_virt(bd.addr); 429 void *src = priv->vma[entry];
419 memcpy_fromio(skb_put(skb, size), src, size); 430 memcpy_fromio(skb_put(skb, size), src, size);
420 skb->protocol = eth_type_trans(skb, dev); 431 skb->protocol = eth_type_trans(skb, dev);
421 priv->stats.rx_packets++; 432 dev->stats.rx_packets++;
422 priv->stats.rx_bytes += size; 433 dev->stats.rx_bytes += size;
423 netif_receive_skb(skb); 434 netif_receive_skb(skb);
424 } else { 435 } else {
425 if (net_ratelimit()) 436 if (net_ratelimit())
426 dev_warn(&dev->dev, "low on memory - " 437 dev_warn(&dev->dev, "low on memory - "
427 "packet dropped\n"); 438 "packet dropped\n");
428 439
429 priv->stats.rx_dropped++; 440 dev->stats.rx_dropped++;
430 break; 441 break;
431 } 442 }
432 } 443 }
@@ -447,30 +458,30 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
447 458
448 if (bd->stat & TX_BD_LC) { 459 if (bd->stat & TX_BD_LC) {
449 dev_err(&netdev->dev, "TX: late collision\n"); 460 dev_err(&netdev->dev, "TX: late collision\n");
450 dev->stats.tx_window_errors++; 461 netdev->stats.tx_window_errors++;
451 } 462 }
452 463
453 if (bd->stat & TX_BD_RL) { 464 if (bd->stat & TX_BD_RL) {
454 dev_err(&netdev->dev, "TX: retransmit limit\n"); 465 dev_err(&netdev->dev, "TX: retransmit limit\n");
455 dev->stats.tx_aborted_errors++; 466 netdev->stats.tx_aborted_errors++;
456 } 467 }
457 468
458 if (bd->stat & TX_BD_UR) { 469 if (bd->stat & TX_BD_UR) {
459 dev_err(&netdev->dev, "TX: underrun\n"); 470 dev_err(&netdev->dev, "TX: underrun\n");
460 dev->stats.tx_fifo_errors++; 471 netdev->stats.tx_fifo_errors++;
461 } 472 }
462 473
463 if (bd->stat & TX_BD_CS) { 474 if (bd->stat & TX_BD_CS) {
464 dev_err(&netdev->dev, "TX: carrier sense lost\n"); 475 dev_err(&netdev->dev, "TX: carrier sense lost\n");
465 dev->stats.tx_carrier_errors++; 476 netdev->stats.tx_carrier_errors++;
466 } 477 }
467 478
468 if (bd->stat & TX_BD_STATS) 479 if (bd->stat & TX_BD_STATS)
469 dev->stats.tx_errors++; 480 netdev->stats.tx_errors++;
470 481
471 dev->stats.collisions += (bd->stat >> 4) & 0xf; 482 netdev->stats.collisions += (bd->stat >> 4) & 0xf;
472 dev->stats.tx_bytes += bd->stat >> 16; 483 netdev->stats.tx_bytes += bd->stat >> 16;
473 dev->stats.tx_packets++; 484 netdev->stats.tx_packets++;
474 return 0; 485 return 0;
475} 486}
476 487
@@ -501,7 +512,7 @@ static void ethoc_tx(struct net_device *dev)
501 512
502static irqreturn_t ethoc_interrupt(int irq, void *dev_id) 513static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
503{ 514{
504 struct net_device *dev = (struct net_device *)dev_id; 515 struct net_device *dev = dev_id;
505 struct ethoc *priv = netdev_priv(dev); 516 struct ethoc *priv = netdev_priv(dev);
506 u32 pending; 517 u32 pending;
507 518
@@ -516,7 +527,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
516 527
517 if (pending & INT_MASK_BUSY) { 528 if (pending & INT_MASK_BUSY) {
518 dev_err(&dev->dev, "packet dropped\n"); 529 dev_err(&dev->dev, "packet dropped\n");
519 priv->stats.rx_dropped++; 530 dev->stats.rx_dropped++;
520 } 531 }
521 532
522 if (pending & INT_MASK_RX) { 533 if (pending & INT_MASK_RX) {
@@ -600,8 +611,11 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
600 611
601 while (time_before(jiffies, timeout)) { 612 while (time_before(jiffies, timeout)) {
602 u32 stat = ethoc_read(priv, MIISTATUS); 613 u32 stat = ethoc_read(priv, MIISTATUS);
603 if (!(stat & MIISTATUS_BUSY)) 614 if (!(stat & MIISTATUS_BUSY)) {
615 /* reset MII command register */
616 ethoc_write(priv, MIICOMMAND, 0);
604 return 0; 617 return 0;
618 }
605 619
606 schedule(); 620 schedule();
607 } 621 }
@@ -618,25 +632,16 @@ static void ethoc_mdio_poll(struct net_device *dev)
618{ 632{
619} 633}
620 634
621static int ethoc_mdio_probe(struct net_device *dev) 635static int __devinit ethoc_mdio_probe(struct net_device *dev)
622{ 636{
623 struct ethoc *priv = netdev_priv(dev); 637 struct ethoc *priv = netdev_priv(dev);
624 struct phy_device *phy; 638 struct phy_device *phy;
625 int i; 639 int err;
626 640
627 for (i = 0; i < PHY_MAX_ADDR; i++) { 641 if (priv->phy_id != -1) {
628 phy = priv->mdio->phy_map[i]; 642 phy = priv->mdio->phy_map[priv->phy_id];
629 if (phy) { 643 } else {
630 if (priv->phy_id != -1) { 644 phy = phy_find_first(priv->mdio);
631 /* attach to specified PHY */
632 if (priv->phy_id == phy->addr)
633 break;
634 } else {
635 /* autoselect PHY if none was specified */
636 if (phy->addr != 0)
637 break;
638 }
639 }
640 } 645 }
641 646
642 if (!phy) { 647 if (!phy) {
@@ -644,11 +649,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
644 return -ENXIO; 649 return -ENXIO;
645 } 650 }
646 651
647 phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0, 652 err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
648 PHY_INTERFACE_MODE_GMII); 653 PHY_INTERFACE_MODE_GMII);
649 if (IS_ERR(phy)) { 654 if (err) {
650 dev_err(&dev->dev, "could not attach to PHY\n"); 655 dev_err(&dev->dev, "could not attach to PHY\n");
651 return PTR_ERR(phy); 656 return err;
652 } 657 }
653 658
654 priv->phy = phy; 659 priv->phy = phy;
@@ -658,8 +663,6 @@ static int ethoc_mdio_probe(struct net_device *dev)
658static int ethoc_open(struct net_device *dev) 663static int ethoc_open(struct net_device *dev)
659{ 664{
660 struct ethoc *priv = netdev_priv(dev); 665 struct ethoc *priv = netdev_priv(dev);
661 unsigned int min_tx = 2;
662 unsigned int num_bd;
663 int ret; 666 int ret;
664 667
665 ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, 668 ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
@@ -667,14 +670,7 @@ static int ethoc_open(struct net_device *dev)
667 if (ret) 670 if (ret)
668 return ret; 671 return ret;
669 672
670 /* calculate the number of TX/RX buffers, maximum 128 supported */ 673 ethoc_init_ring(priv, dev->mem_start);
671 num_bd = min_t(unsigned int,
672 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
673 priv->num_tx = max(min_tx, num_bd / 4);
674 priv->num_rx = num_bd - priv->num_tx;
675 ethoc_write(priv, TX_BD_NUM, priv->num_tx);
676
677 ethoc_init_ring(priv);
678 ethoc_reset(priv); 674 ethoc_reset(priv);
679 675
680 if (netif_queue_stopped(dev)) { 676 if (netif_queue_stopped(dev)) {
@@ -734,7 +730,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
734 phy = priv->phy; 730 phy = priv->phy;
735 } 731 }
736 732
737 return phy_mii_ioctl(phy, mdio, cmd); 733 return phy_mii_ioctl(phy, ifr, cmd);
738} 734}
739 735
740static int ethoc_config(struct net_device *dev, struct ifmap *map) 736static int ethoc_config(struct net_device *dev, struct ifmap *map)
@@ -812,8 +808,7 @@ static void ethoc_tx_timeout(struct net_device *dev)
812 808
813static struct net_device_stats *ethoc_stats(struct net_device *dev) 809static struct net_device_stats *ethoc_stats(struct net_device *dev)
814{ 810{
815 struct ethoc *priv = netdev_priv(dev); 811 return &dev->stats;
816 return &priv->stats;
817} 812}
818 813
819static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) 814static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -824,7 +819,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
824 void *dest; 819 void *dest;
825 820
826 if (unlikely(skb->len > ETHOC_BUFSIZ)) { 821 if (unlikely(skb->len > ETHOC_BUFSIZ)) {
827 priv->stats.tx_errors++; 822 dev->stats.tx_errors++;
828 goto out; 823 goto out;
829 } 824 }
830 825
@@ -838,7 +833,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
838 else 833 else
839 bd.stat &= ~TX_BD_PAD; 834 bd.stat &= ~TX_BD_PAD;
840 835
841 dest = phys_to_virt(bd.addr); 836 dest = priv->vma[entry];
842 memcpy_toio(dest, skb->data, skb->len); 837 memcpy_toio(dest, skb->data, skb->len);
843 838
844 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); 839 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -876,7 +871,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
876 * ethoc_probe() - initialize OpenCores ethernet MAC 871 * ethoc_probe() - initialize OpenCores ethernet MAC
877 * pdev: platform device 872 * pdev: platform device
878 */ 873 */
879static int ethoc_probe(struct platform_device *pdev) 874static int __devinit ethoc_probe(struct platform_device *pdev)
880{ 875{
881 struct net_device *netdev = NULL; 876 struct net_device *netdev = NULL;
882 struct resource *res = NULL; 877 struct resource *res = NULL;
@@ -884,6 +879,7 @@ static int ethoc_probe(struct platform_device *pdev)
884 struct resource *mem = NULL; 879 struct resource *mem = NULL;
885 struct ethoc *priv = NULL; 880 struct ethoc *priv = NULL;
886 unsigned int phy; 881 unsigned int phy;
882 int num_bd;
887 int ret = 0; 883 int ret = 0;
888 884
889 /* allocate networking device */ 885 /* allocate networking device */
@@ -965,7 +961,7 @@ static int ethoc_probe(struct platform_device *pdev)
965 } 961 }
966 } else { 962 } else {
967 /* Allocate buffer memory */ 963 /* Allocate buffer memory */
968 priv->membase = dma_alloc_coherent(NULL, 964 priv->membase = dmam_alloc_coherent(&pdev->dev,
969 buffer_size, (void *)&netdev->mem_start, 965 buffer_size, (void *)&netdev->mem_start,
970 GFP_KERNEL); 966 GFP_KERNEL);
971 if (!priv->membase) { 967 if (!priv->membase) {
@@ -978,6 +974,18 @@ static int ethoc_probe(struct platform_device *pdev)
978 priv->dma_alloc = buffer_size; 974 priv->dma_alloc = buffer_size;
979 } 975 }
980 976
977 /* calculate the number of TX/RX buffers, maximum 128 supported */
978 num_bd = min_t(unsigned int,
979 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
980 priv->num_tx = max(2, num_bd / 4);
981 priv->num_rx = num_bd - priv->num_tx;
982
983 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
984 if (!priv->vma) {
985 ret = -ENOMEM;
986 goto error;
987 }
988
981 /* Allow the platform setup code to pass in a MAC address. */ 989 /* Allow the platform setup code to pass in a MAC address. */
982 if (pdev->dev.platform_data) { 990 if (pdev->dev.platform_data) {
983 struct ethoc_platform_data *pdata = 991 struct ethoc_platform_data *pdata =
@@ -1063,21 +1071,6 @@ free_mdio:
1063 kfree(priv->mdio->irq); 1071 kfree(priv->mdio->irq);
1064 mdiobus_free(priv->mdio); 1072 mdiobus_free(priv->mdio);
1065free: 1073free:
1066 if (priv) {
1067 if (priv->dma_alloc)
1068 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1069 netdev->mem_start);
1070 else if (priv->membase)
1071 devm_iounmap(&pdev->dev, priv->membase);
1072 if (priv->iobase)
1073 devm_iounmap(&pdev->dev, priv->iobase);
1074 }
1075 if (mem)
1076 devm_release_mem_region(&pdev->dev, mem->start,
1077 mem->end - mem->start + 1);
1078 if (mmio)
1079 devm_release_mem_region(&pdev->dev, mmio->start,
1080 mmio->end - mmio->start + 1);
1081 free_netdev(netdev); 1074 free_netdev(netdev);
1082out: 1075out:
1083 return ret; 1076 return ret;
@@ -1087,7 +1080,7 @@ out:
1087 * ethoc_remove() - shutdown OpenCores ethernet MAC 1080 * ethoc_remove() - shutdown OpenCores ethernet MAC
1088 * @pdev: platform device 1081 * @pdev: platform device
1089 */ 1082 */
1090static int ethoc_remove(struct platform_device *pdev) 1083static int __devexit ethoc_remove(struct platform_device *pdev)
1091{ 1084{
1092 struct net_device *netdev = platform_get_drvdata(pdev); 1085 struct net_device *netdev = platform_get_drvdata(pdev);
1093 struct ethoc *priv = netdev_priv(netdev); 1086 struct ethoc *priv = netdev_priv(netdev);
@@ -1104,17 +1097,6 @@ static int ethoc_remove(struct platform_device *pdev)
1104 kfree(priv->mdio->irq); 1097 kfree(priv->mdio->irq);
1105 mdiobus_free(priv->mdio); 1098 mdiobus_free(priv->mdio);
1106 } 1099 }
1107 if (priv->dma_alloc)
1108 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1109 netdev->mem_start);
1110 else {
1111 devm_iounmap(&pdev->dev, priv->membase);
1112 devm_release_mem_region(&pdev->dev, netdev->mem_start,
1113 netdev->mem_end - netdev->mem_start + 1);
1114 }
1115 devm_iounmap(&pdev->dev, priv->iobase);
1116 devm_release_mem_region(&pdev->dev, netdev->base_addr,
1117 priv->io_region_size);
1118 unregister_netdev(netdev); 1100 unregister_netdev(netdev);
1119 free_netdev(netdev); 1101 free_netdev(netdev);
1120 } 1102 }
@@ -1139,7 +1121,7 @@ static int ethoc_resume(struct platform_device *pdev)
1139 1121
1140static struct platform_driver ethoc_driver = { 1122static struct platform_driver ethoc_driver = {
1141 .probe = ethoc_probe, 1123 .probe = ethoc_probe,
1142 .remove = ethoc_remove, 1124 .remove = __devexit_p(ethoc_remove),
1143 .suspend = ethoc_suspend, 1125 .suspend = ethoc_suspend,
1144 .resume = ethoc_resume, 1126 .resume = ethoc_resume,
1145 .driver = { 1127 .driver = {
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 15f4f8d3d46d..d7e8f6b8f4cf 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -382,8 +382,6 @@ struct netdev_private {
382 382
383 spinlock_t lock; 383 spinlock_t lock;
384 384
385 struct net_device_stats stats;
386
387 /* Media monitoring timer. */ 385 /* Media monitoring timer. */
388 struct timer_list timer; 386 struct timer_list timer;
389 387
@@ -1234,7 +1232,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
1234 spin_unlock_irqrestore(&np->lock, flags); 1232 spin_unlock_irqrestore(&np->lock, flags);
1235 1233
1236 dev->trans_start = jiffies; /* prevent tx timeout */ 1234 dev->trans_start = jiffies; /* prevent tx timeout */
1237 np->stats.tx_errors++; 1235 dev->stats.tx_errors++;
1238 netif_wake_queue(dev); /* or .._start_.. ?? */ 1236 netif_wake_queue(dev); /* or .._start_.. ?? */
1239} 1237}
1240 1238
@@ -1479,10 +1477,11 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1479 1477
1480 if (intr_status & CNTOVF) { 1478 if (intr_status & CNTOVF) {
1481 /* missed pkts */ 1479 /* missed pkts */
1482 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1480 dev->stats.rx_missed_errors +=
1481 ioread32(ioaddr + TALLY) & 0x7fff;
1483 1482
1484 /* crc error */ 1483 /* crc error */
1485 np->stats.rx_crc_errors += 1484 dev->stats.rx_crc_errors +=
1486 (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1485 (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1487 } 1486 }
1488 1487
@@ -1513,30 +1512,30 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1513 1512
1514 if (!(np->crvalue & CR_W_ENH)) { 1513 if (!(np->crvalue & CR_W_ENH)) {
1515 if (tx_status & (CSL | LC | EC | UDF | HF)) { 1514 if (tx_status & (CSL | LC | EC | UDF | HF)) {
1516 np->stats.tx_errors++; 1515 dev->stats.tx_errors++;
1517 if (tx_status & EC) 1516 if (tx_status & EC)
1518 np->stats.tx_aborted_errors++; 1517 dev->stats.tx_aborted_errors++;
1519 if (tx_status & CSL) 1518 if (tx_status & CSL)
1520 np->stats.tx_carrier_errors++; 1519 dev->stats.tx_carrier_errors++;
1521 if (tx_status & LC) 1520 if (tx_status & LC)
1522 np->stats.tx_window_errors++; 1521 dev->stats.tx_window_errors++;
1523 if (tx_status & UDF) 1522 if (tx_status & UDF)
1524 np->stats.tx_fifo_errors++; 1523 dev->stats.tx_fifo_errors++;
1525 if ((tx_status & HF) && np->mii.full_duplex == 0) 1524 if ((tx_status & HF) && np->mii.full_duplex == 0)
1526 np->stats.tx_heartbeat_errors++; 1525 dev->stats.tx_heartbeat_errors++;
1527 1526
1528 } else { 1527 } else {
1529 np->stats.tx_bytes += 1528 dev->stats.tx_bytes +=
1530 ((tx_control & PKTSMask) >> PKTSShift); 1529 ((tx_control & PKTSMask) >> PKTSShift);
1531 1530
1532 np->stats.collisions += 1531 dev->stats.collisions +=
1533 ((tx_status & NCRMask) >> NCRShift); 1532 ((tx_status & NCRMask) >> NCRShift);
1534 np->stats.tx_packets++; 1533 dev->stats.tx_packets++;
1535 } 1534 }
1536 } else { 1535 } else {
1537 np->stats.tx_bytes += 1536 dev->stats.tx_bytes +=
1538 ((tx_control & PKTSMask) >> PKTSShift); 1537 ((tx_control & PKTSMask) >> PKTSShift);
1539 np->stats.tx_packets++; 1538 dev->stats.tx_packets++;
1540 } 1539 }
1541 1540
1542 /* Free the original skb. */ 1541 /* Free the original skb. */
@@ -1564,10 +1563,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1564 long data; 1563 long data;
1565 1564
1566 data = ioread32(ioaddr + TSR); 1565 data = ioread32(ioaddr + TSR);
1567 np->stats.tx_errors += (data & 0xff000000) >> 24; 1566 dev->stats.tx_errors += (data & 0xff000000) >> 24;
1568 np->stats.tx_aborted_errors += (data & 0xff000000) >> 24; 1567 dev->stats.tx_aborted_errors +=
1569 np->stats.tx_window_errors += (data & 0x00ff0000) >> 16; 1568 (data & 0xff000000) >> 24;
1570 np->stats.collisions += (data & 0x0000ffff); 1569 dev->stats.tx_window_errors +=
1570 (data & 0x00ff0000) >> 16;
1571 dev->stats.collisions += (data & 0x0000ffff);
1571 } 1572 }
1572 1573
1573 if (--boguscnt < 0) { 1574 if (--boguscnt < 0) {
@@ -1593,10 +1594,11 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1593 1594
1594 /* read the tally counters */ 1595 /* read the tally counters */
1595 /* missed pkts */ 1596 /* missed pkts */
1596 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1597 dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
1597 1598
1598 /* crc error */ 1599 /* crc error */
1599 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1600 dev->stats.rx_crc_errors +=
1601 (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1600 1602
1601 if (debug) 1603 if (debug)
1602 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", 1604 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
@@ -1635,13 +1637,13 @@ static int netdev_rx(struct net_device *dev)
1635 "%s: Receive error, Rx status %8.8x.\n", 1637 "%s: Receive error, Rx status %8.8x.\n",
1636 dev->name, rx_status); 1638 dev->name, rx_status);
1637 1639
1638 np->stats.rx_errors++; /* end of a packet. */ 1640 dev->stats.rx_errors++; /* end of a packet. */
1639 if (rx_status & (LONG | RUNT)) 1641 if (rx_status & (LONG | RUNT))
1640 np->stats.rx_length_errors++; 1642 dev->stats.rx_length_errors++;
1641 if (rx_status & RXER) 1643 if (rx_status & RXER)
1642 np->stats.rx_frame_errors++; 1644 dev->stats.rx_frame_errors++;
1643 if (rx_status & CRC) 1645 if (rx_status & CRC)
1644 np->stats.rx_crc_errors++; 1646 dev->stats.rx_crc_errors++;
1645 } else { 1647 } else {
1646 int need_to_reset = 0; 1648 int need_to_reset = 0;
1647 int desno = 0; 1649 int desno = 0;
@@ -1667,7 +1669,7 @@ static int netdev_rx(struct net_device *dev)
1667 if (need_to_reset == 0) { 1669 if (need_to_reset == 0) {
1668 int i; 1670 int i;
1669 1671
1670 np->stats.rx_length_errors++; 1672 dev->stats.rx_length_errors++;
1671 1673
1672 /* free all rx descriptors related this long pkt */ 1674 /* free all rx descriptors related this long pkt */
1673 for (i = 0; i < desno; ++i) { 1675 for (i = 0; i < desno; ++i) {
@@ -1733,8 +1735,8 @@ static int netdev_rx(struct net_device *dev)
1733 } 1735 }
1734 skb->protocol = eth_type_trans(skb, dev); 1736 skb->protocol = eth_type_trans(skb, dev);
1735 netif_rx(skb); 1737 netif_rx(skb);
1736 np->stats.rx_packets++; 1738 dev->stats.rx_packets++;
1737 np->stats.rx_bytes += pkt_len; 1739 dev->stats.rx_bytes += pkt_len;
1738 } 1740 }
1739 1741
1740 np->cur_rx = np->cur_rx->next_desc_logical; 1742 np->cur_rx = np->cur_rx->next_desc_logical;
@@ -1754,11 +1756,13 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1754 1756
1755 /* The chip only need report frame silently dropped. */ 1757 /* The chip only need report frame silently dropped. */
1756 if (netif_running(dev)) { 1758 if (netif_running(dev)) {
1757 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1759 dev->stats.rx_missed_errors +=
1758 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1760 ioread32(ioaddr + TALLY) & 0x7fff;
1761 dev->stats.rx_crc_errors +=
1762 (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1759 } 1763 }
1760 1764
1761 return &np->stats; 1765 return &dev->stats;
1762} 1766}
1763 1767
1764 1768
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index edfff92a6d8e..768b840aeb6b 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -118,6 +118,8 @@ static unsigned char fec_mac_default[] = {
118#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ 118#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
119#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ 119#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
120 120
121#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
122
121/* The FEC stores dest/src/type, data, and checksum for receive packets. 123/* The FEC stores dest/src/type, data, and checksum for receive packets.
122 */ 124 */
123#define PKT_MAXBUF_SIZE 1518 125#define PKT_MAXBUF_SIZE 1518
@@ -187,6 +189,7 @@ struct fec_enet_private {
187 int index; 189 int index;
188 int link; 190 int link;
189 int full_duplex; 191 int full_duplex;
192 struct completion mdio_done;
190}; 193};
191 194
192static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); 195static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
@@ -205,12 +208,12 @@ static void fec_stop(struct net_device *dev);
205#define FEC_MMFR_TA (2 << 16) 208#define FEC_MMFR_TA (2 << 16)
206#define FEC_MMFR_DATA(v) (v & 0xffff) 209#define FEC_MMFR_DATA(v) (v & 0xffff)
207 210
208#define FEC_MII_TIMEOUT 10000 211#define FEC_MII_TIMEOUT 1000 /* us */
209 212
210/* Transmitter timeout */ 213/* Transmitter timeout */
211#define TX_TIMEOUT (2 * HZ) 214#define TX_TIMEOUT (2 * HZ)
212 215
213static int 216static netdev_tx_t
214fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 217fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
215{ 218{
216 struct fec_enet_private *fep = netdev_priv(dev); 219 struct fec_enet_private *fep = netdev_priv(dev);
@@ -334,6 +337,11 @@ fec_enet_interrupt(int irq, void * dev_id)
334 ret = IRQ_HANDLED; 337 ret = IRQ_HANDLED;
335 fec_enet_tx(dev); 338 fec_enet_tx(dev);
336 } 339 }
340
341 if (int_events & FEC_ENET_MII) {
342 ret = IRQ_HANDLED;
343 complete(&fep->mdio_done);
344 }
337 } while (int_events); 345 } while (int_events);
338 346
339 return ret; 347 return ret;
@@ -608,18 +616,13 @@ spin_unlock:
608 phy_print_status(phy_dev); 616 phy_print_status(phy_dev);
609} 617}
610 618
611/*
612 * NOTE: a MII transaction is during around 25 us, so polling it...
613 */
614static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 619static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
615{ 620{
616 struct fec_enet_private *fep = bus->priv; 621 struct fec_enet_private *fep = bus->priv;
617 int timeout = FEC_MII_TIMEOUT; 622 unsigned long time_left;
618 623
619 fep->mii_timeout = 0; 624 fep->mii_timeout = 0;
620 625 init_completion(&fep->mdio_done);
621 /* clear MII end of transfer bit*/
622 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
623 626
624 /* start a read op */ 627 /* start a read op */
625 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 628 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -627,13 +630,12 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
627 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 630 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
628 631
629 /* wait for end of transfer */ 632 /* wait for end of transfer */
630 while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { 633 time_left = wait_for_completion_timeout(&fep->mdio_done,
631 cpu_relax(); 634 usecs_to_jiffies(FEC_MII_TIMEOUT));
632 if (timeout-- < 0) { 635 if (time_left == 0) {
633 fep->mii_timeout = 1; 636 fep->mii_timeout = 1;
634 printk(KERN_ERR "FEC: MDIO read timeout\n"); 637 printk(KERN_ERR "FEC: MDIO read timeout\n");
635 return -ETIMEDOUT; 638 return -ETIMEDOUT;
636 }
637 } 639 }
638 640
639 /* return value */ 641 /* return value */
@@ -644,12 +646,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
644 u16 value) 646 u16 value)
645{ 647{
646 struct fec_enet_private *fep = bus->priv; 648 struct fec_enet_private *fep = bus->priv;
647 int timeout = FEC_MII_TIMEOUT; 649 unsigned long time_left;
648 650
649 fep->mii_timeout = 0; 651 fep->mii_timeout = 0;
650 652 init_completion(&fep->mdio_done);
651 /* clear MII end of transfer bit*/
652 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
653 653
654 /* start a read op */ 654 /* start a read op */
655 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 655 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -658,13 +658,12 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
658 fep->hwp + FEC_MII_DATA); 658 fep->hwp + FEC_MII_DATA);
659 659
660 /* wait for end of transfer */ 660 /* wait for end of transfer */
661 while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { 661 time_left = wait_for_completion_timeout(&fep->mdio_done,
662 cpu_relax(); 662 usecs_to_jiffies(FEC_MII_TIMEOUT));
663 if (timeout-- < 0) { 663 if (time_left == 0) {
664 fep->mii_timeout = 1; 664 fep->mii_timeout = 1;
665 printk(KERN_ERR "FEC: MDIO write timeout\n"); 665 printk(KERN_ERR "FEC: MDIO write timeout\n");
666 return -ETIMEDOUT; 666 return -ETIMEDOUT;
667 }
668 } 667 }
669 668
670 return 0; 669 return 0;
@@ -679,30 +678,24 @@ static int fec_enet_mii_probe(struct net_device *dev)
679{ 678{
680 struct fec_enet_private *fep = netdev_priv(dev); 679 struct fec_enet_private *fep = netdev_priv(dev);
681 struct phy_device *phy_dev = NULL; 680 struct phy_device *phy_dev = NULL;
682 int phy_addr; 681 int ret;
683 682
684 fep->phy_dev = NULL; 683 fep->phy_dev = NULL;
685 684
686 /* find the first phy */ 685 /* find the first phy */
687 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 686 phy_dev = phy_find_first(fep->mii_bus);
688 if (fep->mii_bus->phy_map[phy_addr]) {
689 phy_dev = fep->mii_bus->phy_map[phy_addr];
690 break;
691 }
692 }
693
694 if (!phy_dev) { 687 if (!phy_dev) {
695 printk(KERN_ERR "%s: no PHY found\n", dev->name); 688 printk(KERN_ERR "%s: no PHY found\n", dev->name);
696 return -ENODEV; 689 return -ENODEV;
697 } 690 }
698 691
699 /* attach the mac to the phy */ 692 /* attach the mac to the phy */
700 phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), 693 ret = phy_connect_direct(dev, phy_dev,
701 &fec_enet_adjust_link, 0, 694 &fec_enet_adjust_link, 0,
702 PHY_INTERFACE_MODE_MII); 695 PHY_INTERFACE_MODE_MII);
703 if (IS_ERR(phy_dev)) { 696 if (ret) {
704 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 697 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
705 return PTR_ERR(phy_dev); 698 return ret;
706 } 699 }
707 700
708 /* mask with MAC supported features */ 701 /* mask with MAC supported features */
@@ -834,7 +827,7 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
834 if (!phydev) 827 if (!phydev)
835 return -ENODEV; 828 return -ENODEV;
836 829
837 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 830 return phy_mii_ioctl(phydev, rq, cmd);
838} 831}
839 832
840static void fec_enet_free_buffers(struct net_device *dev) 833static void fec_enet_free_buffers(struct net_device *dev)
@@ -1222,7 +1215,7 @@ fec_restart(struct net_device *dev, int duplex)
1222 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 1215 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1223 1216
1224 /* Enable interrupts we wish to service */ 1217 /* Enable interrupts we wish to service */
1225 writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK); 1218 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1226} 1219}
1227 1220
1228static void 1221static void
@@ -1241,11 +1234,8 @@ fec_stop(struct net_device *dev)
1241 /* Whack a reset. We should wait for this. */ 1234 /* Whack a reset. We should wait for this. */
1242 writel(1, fep->hwp + FEC_ECNTRL); 1235 writel(1, fep->hwp + FEC_ECNTRL);
1243 udelay(10); 1236 udelay(10);
1244
1245 /* Clear outstanding MII command interrupts. */
1246 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
1247
1248 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1237 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1238 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1249} 1239}
1250 1240
1251static int __devinit 1241static int __devinit
@@ -1365,10 +1355,11 @@ fec_drv_remove(struct platform_device *pdev)
1365 return 0; 1355 return 0;
1366} 1356}
1367 1357
1358#ifdef CONFIG_PM
1368static int 1359static int
1369fec_suspend(struct platform_device *dev, pm_message_t state) 1360fec_suspend(struct device *dev)
1370{ 1361{
1371 struct net_device *ndev = platform_get_drvdata(dev); 1362 struct net_device *ndev = dev_get_drvdata(dev);
1372 struct fec_enet_private *fep; 1363 struct fec_enet_private *fep;
1373 1364
1374 if (ndev) { 1365 if (ndev) {
@@ -1381,9 +1372,9 @@ fec_suspend(struct platform_device *dev, pm_message_t state)
1381} 1372}
1382 1373
1383static int 1374static int
1384fec_resume(struct platform_device *dev) 1375fec_resume(struct device *dev)
1385{ 1376{
1386 struct net_device *ndev = platform_get_drvdata(dev); 1377 struct net_device *ndev = dev_get_drvdata(dev);
1387 struct fec_enet_private *fep; 1378 struct fec_enet_private *fep;
1388 1379
1389 if (ndev) { 1380 if (ndev) {
@@ -1395,15 +1386,26 @@ fec_resume(struct platform_device *dev)
1395 return 0; 1386 return 0;
1396} 1387}
1397 1388
1389static const struct dev_pm_ops fec_pm_ops = {
1390 .suspend = fec_suspend,
1391 .resume = fec_resume,
1392 .freeze = fec_suspend,
1393 .thaw = fec_resume,
1394 .poweroff = fec_suspend,
1395 .restore = fec_resume,
1396};
1397#endif
1398
1398static struct platform_driver fec_driver = { 1399static struct platform_driver fec_driver = {
1399 .driver = { 1400 .driver = {
1400 .name = "fec", 1401 .name = "fec",
1401 .owner = THIS_MODULE, 1402 .owner = THIS_MODULE,
1403#ifdef CONFIG_PM
1404 .pm = &fec_pm_ops,
1405#endif
1402 }, 1406 },
1403 .probe = fec_probe, 1407 .probe = fec_probe,
1404 .remove = __devexit_p(fec_drv_remove), 1408 .remove = __devexit_p(fec_drv_remove),
1405 .suspend = fec_suspend,
1406 .resume = fec_resume,
1407}; 1409};
1408 1410
1409static int __init 1411static int __init
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 25e6cc6840b1..d1a5b17b2a95 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -826,7 +826,7 @@ static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
826 if (!priv->phydev) 826 if (!priv->phydev)
827 return -ENOTSUPP; 827 return -ENOTSUPP;
828 828
829 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 829 return phy_mii_ioctl(priv->phydev, rq, cmd);
830} 830}
831 831
832static const struct net_device_ops mpc52xx_fec_netdev_ops = { 832static const struct net_device_ops mpc52xx_fec_netdev_ops = {
@@ -875,17 +875,21 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
875 if (rv) { 875 if (rv) {
876 printk(KERN_ERR DRIVER_NAME ": " 876 printk(KERN_ERR DRIVER_NAME ": "
877 "Error while parsing device node resource\n" ); 877 "Error while parsing device node resource\n" );
878 return rv; 878 goto err_netdev;
879 } 879 }
880 if ((mem.end - mem.start + 1) < sizeof(struct mpc52xx_fec)) { 880 if ((mem.end - mem.start + 1) < sizeof(struct mpc52xx_fec)) {
881 printk(KERN_ERR DRIVER_NAME 881 printk(KERN_ERR DRIVER_NAME
882 " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n", 882 " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
883 (unsigned long)(mem.end - mem.start + 1), sizeof(struct mpc52xx_fec)); 883 (unsigned long)(mem.end - mem.start + 1), sizeof(struct mpc52xx_fec));
884 return -EINVAL; 884 rv = -EINVAL;
885 goto err_netdev;
885 } 886 }
886 887
887 if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), DRIVER_NAME)) 888 if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
888 return -EBUSY; 889 DRIVER_NAME)) {
890 rv = -EBUSY;
891 goto err_netdev;
892 }
889 893
890 /* Init ether ndev with what we have */ 894 /* Init ether ndev with what we have */
891 ndev->netdev_ops = &mpc52xx_fec_netdev_ops; 895 ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
@@ -901,7 +905,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
901 905
902 if (!priv->fec) { 906 if (!priv->fec) {
903 rv = -ENOMEM; 907 rv = -ENOMEM;
904 goto probe_error; 908 goto err_mem_region;
905 } 909 }
906 910
907 /* Bestcomm init */ 911 /* Bestcomm init */
@@ -914,7 +918,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
914 if (!priv->rx_dmatsk || !priv->tx_dmatsk) { 918 if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
915 printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" ); 919 printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" );
916 rv = -ENOMEM; 920 rv = -ENOMEM;
917 goto probe_error; 921 goto err_rx_tx_dmatsk;
918 } 922 }
919 923
920 /* Get the IRQ we need one by one */ 924 /* Get the IRQ we need one by one */
@@ -966,33 +970,25 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
966 970
967 rv = register_netdev(ndev); 971 rv = register_netdev(ndev);
968 if (rv < 0) 972 if (rv < 0)
969 goto probe_error; 973 goto err_node;
970 974
971 /* We're done ! */ 975 /* We're done ! */
972 dev_set_drvdata(&op->dev, ndev); 976 dev_set_drvdata(&op->dev, ndev);
973 977
974 return 0; 978 return 0;
975 979
976 980err_node:
977 /* Error handling - free everything that might be allocated */ 981 of_node_put(priv->phy_node);
978probe_error:
979
980 if (priv->phy_node)
981 of_node_put(priv->phy_node);
982 priv->phy_node = NULL;
983
984 irq_dispose_mapping(ndev->irq); 982 irq_dispose_mapping(ndev->irq);
985 983err_rx_tx_dmatsk:
986 if (priv->rx_dmatsk) 984 if (priv->rx_dmatsk)
987 bcom_fec_rx_release(priv->rx_dmatsk); 985 bcom_fec_rx_release(priv->rx_dmatsk);
988 if (priv->tx_dmatsk) 986 if (priv->tx_dmatsk)
989 bcom_fec_tx_release(priv->tx_dmatsk); 987 bcom_fec_tx_release(priv->tx_dmatsk);
990 988 iounmap(priv->fec);
991 if (priv->fec) 989err_mem_region:
992 iounmap(priv->fec);
993
994 release_mem_region(mem.start, sizeof(struct mpc52xx_fec)); 990 release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
995 991err_netdev:
996 free_netdev(ndev); 992 free_netdev(ndev);
997 993
998 return rv; 994 return rv;
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 006f64d9f96a..dbaf72cbb233 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -29,15 +29,14 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
29 int reg, u32 value) 29 int reg, u32 value)
30{ 30{
31 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 31 struct mpc52xx_fec_mdio_priv *priv = bus->priv;
32 struct mpc52xx_fec __iomem *fec; 32 struct mpc52xx_fec __iomem *fec = priv->regs;
33 int tries = 3; 33 int tries = 3;
34 34
35 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; 35 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
36 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; 36 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
37 37
38 fec = priv->regs;
39 out_be32(&fec->ievent, FEC_IEVENT_MII); 38 out_be32(&fec->ievent, FEC_IEVENT_MII);
40 out_be32(&priv->regs->mii_data, value); 39 out_be32(&fec->mii_data, value);
41 40
42 /* wait for it to finish, this takes about 23 us on lite5200b */ 41 /* wait for it to finish, this takes about 23 us on lite5200b */
43 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) 42 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
@@ -47,7 +46,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
47 return -ETIMEDOUT; 46 return -ETIMEDOUT;
48 47
49 return value & FEC_MII_DATA_OP_RD ? 48 return value & FEC_MII_DATA_OP_RD ?
50 in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0; 49 in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
51} 50}
52 51
53static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) 52static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -69,9 +68,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
69 struct device_node *np = of->dev.of_node; 68 struct device_node *np = of->dev.of_node;
70 struct mii_bus *bus; 69 struct mii_bus *bus;
71 struct mpc52xx_fec_mdio_priv *priv; 70 struct mpc52xx_fec_mdio_priv *priv;
72 struct resource res = {}; 71 struct resource res;
73 int err; 72 int err;
74 int i;
75 73
76 bus = mdiobus_alloc(); 74 bus = mdiobus_alloc();
77 if (bus == NULL) 75 if (bus == NULL)
@@ -93,7 +91,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
93 err = of_address_to_resource(np, 0, &res); 91 err = of_address_to_resource(np, 0, &res);
94 if (err) 92 if (err)
95 goto out_free; 93 goto out_free;
96 priv->regs = ioremap(res.start, res.end - res.start + 1); 94 priv->regs = ioremap(res.start, resource_size(&res));
97 if (priv->regs == NULL) { 95 if (priv->regs == NULL) {
98 err = -ENOMEM; 96 err = -ENOMEM;
99 goto out_free; 97 goto out_free;
@@ -118,10 +116,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
118 out_unmap: 116 out_unmap:
119 iounmap(priv->regs); 117 iounmap(priv->regs);
120 out_free: 118 out_free:
121 for (i=0; i<PHY_MAX_ADDR; i++)
122 if (bus->irq[i] != PHY_POLL)
123 irq_dispose_mapping(bus->irq[i]);
124 kfree(bus->irq);
125 kfree(priv); 119 kfree(priv);
126 mdiobus_free(bus); 120 mdiobus_free(bus);
127 121
@@ -133,23 +127,16 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of)
133 struct device *dev = &of->dev; 127 struct device *dev = &of->dev;
134 struct mii_bus *bus = dev_get_drvdata(dev); 128 struct mii_bus *bus = dev_get_drvdata(dev);
135 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 129 struct mpc52xx_fec_mdio_priv *priv = bus->priv;
136 int i;
137 130
138 mdiobus_unregister(bus); 131 mdiobus_unregister(bus);
139 dev_set_drvdata(dev, NULL); 132 dev_set_drvdata(dev, NULL);
140
141 iounmap(priv->regs); 133 iounmap(priv->regs);
142 for (i=0; i<PHY_MAX_ADDR; i++)
143 if (bus->irq[i] != PHY_POLL)
144 irq_dispose_mapping(bus->irq[i]);
145 kfree(priv); 134 kfree(priv);
146 kfree(bus->irq);
147 mdiobus_free(bus); 135 mdiobus_free(bus);
148 136
149 return 0; 137 return 0;
150} 138}
151 139
152
153static struct of_device_id mpc52xx_fec_mdio_match[] = { 140static struct of_device_id mpc52xx_fec_mdio_match[] = {
154 { .compatible = "fsl,mpc5200b-mdio", }, 141 { .compatible = "fsl,mpc5200b-mdio", },
155 { .compatible = "fsl,mpc5200-mdio", }, 142 { .compatible = "fsl,mpc5200-mdio", },
@@ -171,5 +158,4 @@ struct of_platform_driver mpc52xx_fec_mdio_driver = {
171/* let fec driver call it, since this has to be registered before it */ 158/* let fec driver call it, since this has to be registered before it */
172EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); 159EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
173 160
174
175MODULE_LICENSE("Dual BSD/GPL"); 161MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 268ea4d566d7..4da05b1b445c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -89,8 +89,10 @@
89#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */ 89#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
90#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */ 90#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
91#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */ 91#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
92#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */ 92#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */
93#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */ 93#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */
94#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */
95#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */
94#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */ 96#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
95#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */ 97#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
96#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */ 98#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
@@ -2468,7 +2470,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2468 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2470 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2469 2471
2470 while ((np->get_tx.ex != np->put_tx.ex) && 2472 while ((np->get_tx.ex != np->put_tx.ex) &&
2471 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && 2473 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2472 (tx_work < limit)) { 2474 (tx_work < limit)) {
2473 2475
2474 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2476 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
@@ -6067,111 +6069,111 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
6067 }, 6069 },
6068 { /* MCP55 Ethernet Controller */ 6070 { /* MCP55 Ethernet Controller */
6069 PCI_DEVICE(0x10DE, 0x0372), 6071 PCI_DEVICE(0x10DE, 0x0372),
6070 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6072 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6071 }, 6073 },
6072 { /* MCP55 Ethernet Controller */ 6074 { /* MCP55 Ethernet Controller */
6073 PCI_DEVICE(0x10DE, 0x0373), 6075 PCI_DEVICE(0x10DE, 0x0373),
6074 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6076 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6075 }, 6077 },
6076 { /* MCP61 Ethernet Controller */ 6078 { /* MCP61 Ethernet Controller */
6077 PCI_DEVICE(0x10DE, 0x03E5), 6079 PCI_DEVICE(0x10DE, 0x03E5),
6078 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6080 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6079 }, 6081 },
6080 { /* MCP61 Ethernet Controller */ 6082 { /* MCP61 Ethernet Controller */
6081 PCI_DEVICE(0x10DE, 0x03E6), 6083 PCI_DEVICE(0x10DE, 0x03E6),
6082 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6084 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6083 }, 6085 },
6084 { /* MCP61 Ethernet Controller */ 6086 { /* MCP61 Ethernet Controller */
6085 PCI_DEVICE(0x10DE, 0x03EE), 6087 PCI_DEVICE(0x10DE, 0x03EE),
6086 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6088 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6087 }, 6089 },
6088 { /* MCP61 Ethernet Controller */ 6090 { /* MCP61 Ethernet Controller */
6089 PCI_DEVICE(0x10DE, 0x03EF), 6091 PCI_DEVICE(0x10DE, 0x03EF),
6090 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6092 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6091 }, 6093 },
6092 { /* MCP65 Ethernet Controller */ 6094 { /* MCP65 Ethernet Controller */
6093 PCI_DEVICE(0x10DE, 0x0450), 6095 PCI_DEVICE(0x10DE, 0x0450),
6094 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6096 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6095 }, 6097 },
6096 { /* MCP65 Ethernet Controller */ 6098 { /* MCP65 Ethernet Controller */
6097 PCI_DEVICE(0x10DE, 0x0451), 6099 PCI_DEVICE(0x10DE, 0x0451),
6098 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6100 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6099 }, 6101 },
6100 { /* MCP65 Ethernet Controller */ 6102 { /* MCP65 Ethernet Controller */
6101 PCI_DEVICE(0x10DE, 0x0452), 6103 PCI_DEVICE(0x10DE, 0x0452),
6102 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6104 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6103 }, 6105 },
6104 { /* MCP65 Ethernet Controller */ 6106 { /* MCP65 Ethernet Controller */
6105 PCI_DEVICE(0x10DE, 0x0453), 6107 PCI_DEVICE(0x10DE, 0x0453),
6106 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6108 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6107 }, 6109 },
6108 { /* MCP67 Ethernet Controller */ 6110 { /* MCP67 Ethernet Controller */
6109 PCI_DEVICE(0x10DE, 0x054C), 6111 PCI_DEVICE(0x10DE, 0x054C),
6110 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6112 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6111 }, 6113 },
6112 { /* MCP67 Ethernet Controller */ 6114 { /* MCP67 Ethernet Controller */
6113 PCI_DEVICE(0x10DE, 0x054D), 6115 PCI_DEVICE(0x10DE, 0x054D),
6114 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6116 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6115 }, 6117 },
6116 { /* MCP67 Ethernet Controller */ 6118 { /* MCP67 Ethernet Controller */
6117 PCI_DEVICE(0x10DE, 0x054E), 6119 PCI_DEVICE(0x10DE, 0x054E),
6118 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6120 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6119 }, 6121 },
6120 { /* MCP67 Ethernet Controller */ 6122 { /* MCP67 Ethernet Controller */
6121 PCI_DEVICE(0x10DE, 0x054F), 6123 PCI_DEVICE(0x10DE, 0x054F),
6122 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6124 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6123 }, 6125 },
6124 { /* MCP73 Ethernet Controller */ 6126 { /* MCP73 Ethernet Controller */
6125 PCI_DEVICE(0x10DE, 0x07DC), 6127 PCI_DEVICE(0x10DE, 0x07DC),
6126 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6128 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6127 }, 6129 },
6128 { /* MCP73 Ethernet Controller */ 6130 { /* MCP73 Ethernet Controller */
6129 PCI_DEVICE(0x10DE, 0x07DD), 6131 PCI_DEVICE(0x10DE, 0x07DD),
6130 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6132 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6131 }, 6133 },
6132 { /* MCP73 Ethernet Controller */ 6134 { /* MCP73 Ethernet Controller */
6133 PCI_DEVICE(0x10DE, 0x07DE), 6135 PCI_DEVICE(0x10DE, 0x07DE),
6134 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6136 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6135 }, 6137 },
6136 { /* MCP73 Ethernet Controller */ 6138 { /* MCP73 Ethernet Controller */
6137 PCI_DEVICE(0x10DE, 0x07DF), 6139 PCI_DEVICE(0x10DE, 0x07DF),
6138 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6140 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6139 }, 6141 },
6140 { /* MCP77 Ethernet Controller */ 6142 { /* MCP77 Ethernet Controller */
6141 PCI_DEVICE(0x10DE, 0x0760), 6143 PCI_DEVICE(0x10DE, 0x0760),
6142 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6144 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6143 }, 6145 },
6144 { /* MCP77 Ethernet Controller */ 6146 { /* MCP77 Ethernet Controller */
6145 PCI_DEVICE(0x10DE, 0x0761), 6147 PCI_DEVICE(0x10DE, 0x0761),
6146 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6148 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6147 }, 6149 },
6148 { /* MCP77 Ethernet Controller */ 6150 { /* MCP77 Ethernet Controller */
6149 PCI_DEVICE(0x10DE, 0x0762), 6151 PCI_DEVICE(0x10DE, 0x0762),
6150 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6152 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6151 }, 6153 },
6152 { /* MCP77 Ethernet Controller */ 6154 { /* MCP77 Ethernet Controller */
6153 PCI_DEVICE(0x10DE, 0x0763), 6155 PCI_DEVICE(0x10DE, 0x0763),
6154 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6156 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6155 }, 6157 },
6156 { /* MCP79 Ethernet Controller */ 6158 { /* MCP79 Ethernet Controller */
6157 PCI_DEVICE(0x10DE, 0x0AB0), 6159 PCI_DEVICE(0x10DE, 0x0AB0),
6158 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6160 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6159 }, 6161 },
6160 { /* MCP79 Ethernet Controller */ 6162 { /* MCP79 Ethernet Controller */
6161 PCI_DEVICE(0x10DE, 0x0AB1), 6163 PCI_DEVICE(0x10DE, 0x0AB1),
6162 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6164 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6163 }, 6165 },
6164 { /* MCP79 Ethernet Controller */ 6166 { /* MCP79 Ethernet Controller */
6165 PCI_DEVICE(0x10DE, 0x0AB2), 6167 PCI_DEVICE(0x10DE, 0x0AB2),
6166 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6168 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6167 }, 6169 },
6168 { /* MCP79 Ethernet Controller */ 6170 { /* MCP79 Ethernet Controller */
6169 PCI_DEVICE(0x10DE, 0x0AB3), 6171 PCI_DEVICE(0x10DE, 0x0AB3),
6170 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6172 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6171 }, 6173 },
6172 { /* MCP89 Ethernet Controller */ 6174 { /* MCP89 Ethernet Controller */
6173 PCI_DEVICE(0x10DE, 0x0D7D), 6175 PCI_DEVICE(0x10DE, 0x0D7D),
6174 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, 6176 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6175 }, 6177 },
6176 {0,}, 6178 {0,},
6177}; 6179};
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 309a0eaddd81..f08cff9020bd 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -963,12 +963,11 @@ static const struct ethtool_ops fs_ethtool_ops = {
963static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 963static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
964{ 964{
965 struct fs_enet_private *fep = netdev_priv(dev); 965 struct fs_enet_private *fep = netdev_priv(dev);
966 struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
967 966
968 if (!netif_running(dev)) 967 if (!netif_running(dev))
969 return -EINVAL; 968 return -EINVAL;
970 969
971 return phy_mii_ioctl(fep->phydev, mii, cmd); 970 return phy_mii_ioctl(fep->phydev, rq, cmd);
972} 971}
973 972
974extern int fs_mii_connect(struct net_device *dev); 973extern int fs_mii_connect(struct net_device *dev);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index b4c41d72c423..f53f850b6418 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -35,6 +35,7 @@
35#include <linux/mii.h> 35#include <linux/mii.h>
36#include <linux/phy.h> 36#include <linux/phy.h>
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/of_address.h>
38#include <linux/of_mdio.h> 39#include <linux/of_mdio.h>
39#include <linux/of_platform.h> 40#include <linux/of_platform.h>
40 41
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 1f7d865cedb6..bd17a2a0139b 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -39,7 +39,7 @@ struct fsl_pq_mdio {
39 u8 reserved[28]; /* Space holder */ 39 u8 reserved[28]; /* Space holder */
40 u32 utbipar; /* TBI phy address reg (only on UCC) */ 40 u32 utbipar; /* TBI phy address reg (only on UCC) */
41 u8 res4[2728]; 41 u8 res4[2728];
42} __attribute__ ((packed)); 42} __packed;
43 43
44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); 44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 28b53d1cd4f1..a1b6301bc674 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -85,6 +85,7 @@
85#include <linux/net_tstamp.h> 85#include <linux/net_tstamp.h>
86 86
87#include <asm/io.h> 87#include <asm/io.h>
88#include <asm/reg.h>
88#include <asm/irq.h> 89#include <asm/irq.h>
89#include <asm/uaccess.h> 90#include <asm/uaccess.h>
90#include <linux/module.h> 91#include <linux/module.h>
@@ -685,8 +686,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
685 priv->rx_queue[i] = NULL; 686 priv->rx_queue[i] = NULL;
686 687
687 for (i = 0; i < priv->num_tx_queues; i++) { 688 for (i = 0; i < priv->num_tx_queues; i++) {
688 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( 689 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
689 sizeof (struct gfar_priv_tx_q), GFP_KERNEL); 690 GFP_KERNEL);
690 if (!priv->tx_queue[i]) { 691 if (!priv->tx_queue[i]) {
691 err = -ENOMEM; 692 err = -ENOMEM;
692 goto tx_alloc_failed; 693 goto tx_alloc_failed;
@@ -698,8 +699,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
698 } 699 }
699 700
700 for (i = 0; i < priv->num_rx_queues; i++) { 701 for (i = 0; i < priv->num_rx_queues; i++) {
701 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( 702 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
702 sizeof (struct gfar_priv_rx_q), GFP_KERNEL); 703 GFP_KERNEL);
703 if (!priv->rx_queue[i]) { 704 if (!priv->rx_queue[i]) {
704 err = -ENOMEM; 705 err = -ENOMEM;
705 goto rx_alloc_failed; 706 goto rx_alloc_failed;
@@ -846,7 +847,7 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
846 if (!priv->phydev) 847 if (!priv->phydev)
847 return -ENODEV; 848 return -ENODEV;
848 849
849 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 850 return phy_mii_ioctl(priv->phydev, rq, cmd);
850} 851}
851 852
852static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) 853static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
@@ -916,7 +917,7 @@ static void gfar_init_filer_table(struct gfar_private *priv)
916 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); 917 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
917 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); 918 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
918 919
919 /* cur_filer_idx indicated the fisrt non-masked rule */ 920 /* cur_filer_idx indicated the first non-masked rule */
920 priv->cur_filer_idx = rqfar; 921 priv->cur_filer_idx = rqfar;
921 922
922 /* Rest are masked rules */ 923 /* Rest are masked rules */
@@ -928,6 +929,34 @@ static void gfar_init_filer_table(struct gfar_private *priv)
928 } 929 }
929} 930}
930 931
932static void gfar_detect_errata(struct gfar_private *priv)
933{
934 struct device *dev = &priv->ofdev->dev;
935 unsigned int pvr = mfspr(SPRN_PVR);
936 unsigned int svr = mfspr(SPRN_SVR);
937 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
938 unsigned int rev = svr & 0xffff;
939
940 /* MPC8313 Rev 2.0 and higher; All MPC837x */
941 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
942 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
943 priv->errata |= GFAR_ERRATA_74;
944
945 /* MPC8313 and MPC837x all rev */
946 if ((pvr == 0x80850010 && mod == 0x80b0) ||
947 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
948 priv->errata |= GFAR_ERRATA_76;
949
950 /* MPC8313 and MPC837x all rev */
951 if ((pvr == 0x80850010 && mod == 0x80b0) ||
952 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
953 priv->errata |= GFAR_ERRATA_A002;
954
955 if (priv->errata)
956 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
957 priv->errata);
958}
959
931/* Set up the ethernet device structure, private data, 960/* Set up the ethernet device structure, private data,
932 * and anything else we need before we start */ 961 * and anything else we need before we start */
933static int gfar_probe(struct of_device *ofdev, 962static int gfar_probe(struct of_device *ofdev,
@@ -960,6 +989,8 @@ static int gfar_probe(struct of_device *ofdev,
960 dev_set_drvdata(&ofdev->dev, priv); 989 dev_set_drvdata(&ofdev->dev, priv);
961 regs = priv->gfargrp[0].regs; 990 regs = priv->gfargrp[0].regs;
962 991
992 gfar_detect_errata(priv);
993
963 /* Stop the DMA engine now, in case it was running before */ 994 /* Stop the DMA engine now, in case it was running before */
964 /* (The firmware could have used it, and left it running). */ 995 /* (The firmware could have used it, and left it running). */
965 gfar_halt(dev); 996 gfar_halt(dev);
@@ -974,7 +1005,10 @@ static int gfar_probe(struct of_device *ofdev,
974 gfar_write(&regs->maccfg1, tempval); 1005 gfar_write(&regs->maccfg1, tempval);
975 1006
976 /* Initialize MACCFG2. */ 1007 /* Initialize MACCFG2. */
977 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS); 1008 tempval = MACCFG2_INIT_SETTINGS;
1009 if (gfar_has_errata(priv, GFAR_ERRATA_74))
1010 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1011 gfar_write(&regs->maccfg2, tempval);
978 1012
979 /* Initialize ECNTRL */ 1013 /* Initialize ECNTRL */
980 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 1014 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
@@ -1541,6 +1575,29 @@ static void init_registers(struct net_device *dev)
1541 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); 1575 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1542} 1576}
1543 1577
1578static int __gfar_is_rx_idle(struct gfar_private *priv)
1579{
1580 u32 res;
1581
1582 /*
1583 * Normaly TSEC should not hang on GRS commands, so we should
1584 * actually wait for IEVENT_GRSC flag.
1585 */
1586 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1587 return 0;
1588
1589 /*
1590 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1591 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1592 * and the Rx can be safely reset.
1593 */
1594 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1595 res &= 0x7f807f80;
1596 if ((res & 0xffff) == (res >> 16))
1597 return 1;
1598
1599 return 0;
1600}
1544 1601
1545/* Halt the receive and transmit queues */ 1602/* Halt the receive and transmit queues */
1546static void gfar_halt_nodisable(struct net_device *dev) 1603static void gfar_halt_nodisable(struct net_device *dev)
@@ -1564,12 +1621,18 @@ static void gfar_halt_nodisable(struct net_device *dev)
1564 tempval = gfar_read(&regs->dmactrl); 1621 tempval = gfar_read(&regs->dmactrl);
1565 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1622 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1566 != (DMACTRL_GRS | DMACTRL_GTS)) { 1623 != (DMACTRL_GRS | DMACTRL_GTS)) {
1624 int ret;
1625
1567 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1626 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1568 gfar_write(&regs->dmactrl, tempval); 1627 gfar_write(&regs->dmactrl, tempval);
1569 1628
1570 spin_event_timeout(((gfar_read(&regs->ievent) & 1629 do {
1571 (IEVENT_GRSC | IEVENT_GTSC)) == 1630 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1572 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); 1631 (IEVENT_GRSC | IEVENT_GTSC)) ==
1632 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1633 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1634 ret = __gfar_is_rx_idle(priv);
1635 } while (!ret);
1573 } 1636 }
1574} 1637}
1575 1638
@@ -1987,6 +2050,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1987 unsigned int nr_frags, nr_txbds, length; 2050 unsigned int nr_frags, nr_txbds, length;
1988 union skb_shared_tx *shtx; 2051 union skb_shared_tx *shtx;
1989 2052
2053 /*
2054 * TOE=1 frames larger than 2500 bytes may see excess delays
2055 * before start of transmission.
2056 */
2057 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2058 skb->ip_summed == CHECKSUM_PARTIAL &&
2059 skb->len > 2500)) {
2060 int ret;
2061
2062 ret = skb_checksum_help(skb);
2063 if (ret)
2064 return ret;
2065 }
2066
1990 rq = skb->queue_mapping; 2067 rq = skb->queue_mapping;
1991 tx_queue = priv->tx_queue[rq]; 2068 tx_queue = priv->tx_queue[rq];
1992 txq = netdev_get_tx_queue(dev, rq); 2069 txq = netdev_get_tx_queue(dev, rq);
@@ -2300,7 +2377,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2300 * to allow huge frames, and to check the length */ 2377 * to allow huge frames, and to check the length */
2301 tempval = gfar_read(&regs->maccfg2); 2378 tempval = gfar_read(&regs->maccfg2);
2302 2379
2303 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 2380 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2381 gfar_has_errata(priv, GFAR_ERRATA_74))
2304 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2382 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2305 else 2383 else
2306 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2384 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2342,6 +2420,15 @@ static void gfar_timeout(struct net_device *dev)
2342 schedule_work(&priv->reset_task); 2420 schedule_work(&priv->reset_task);
2343} 2421}
2344 2422
2423static void gfar_align_skb(struct sk_buff *skb)
2424{
2425 /* We need the data buffer to be aligned properly. We will reserve
2426 * as many bytes as needed to align the data properly
2427 */
2428 skb_reserve(skb, RXBUF_ALIGNMENT -
2429 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2430}
2431
2345/* Interrupt Handler for Transmit complete */ 2432/* Interrupt Handler for Transmit complete */
2346static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2433static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2347{ 2434{
@@ -2426,9 +2513,10 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2426 */ 2513 */
2427 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && 2514 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2428 skb_recycle_check(skb, priv->rx_buffer_size + 2515 skb_recycle_check(skb, priv->rx_buffer_size +
2429 RXBUF_ALIGNMENT)) 2516 RXBUF_ALIGNMENT)) {
2517 gfar_align_skb(skb);
2430 __skb_queue_head(&priv->rx_recycle, skb); 2518 __skb_queue_head(&priv->rx_recycle, skb);
2431 else 2519 } else
2432 dev_kfree_skb_any(skb); 2520 dev_kfree_skb_any(skb);
2433 2521
2434 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2522 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2491,29 +2579,28 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2491 gfar_init_rxbdp(rx_queue, bdp, buf); 2579 gfar_init_rxbdp(rx_queue, bdp, buf);
2492} 2580}
2493 2581
2494 2582static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
2495struct sk_buff * gfar_new_skb(struct net_device *dev)
2496{ 2583{
2497 unsigned int alignamount;
2498 struct gfar_private *priv = netdev_priv(dev); 2584 struct gfar_private *priv = netdev_priv(dev);
2499 struct sk_buff *skb = NULL; 2585 struct sk_buff *skb = NULL;
2500 2586
2501 skb = __skb_dequeue(&priv->rx_recycle); 2587 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2502 if (!skb)
2503 skb = netdev_alloc_skb(dev,
2504 priv->rx_buffer_size + RXBUF_ALIGNMENT);
2505
2506 if (!skb) 2588 if (!skb)
2507 return NULL; 2589 return NULL;
2508 2590
2509 alignamount = RXBUF_ALIGNMENT - 2591 gfar_align_skb(skb);
2510 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
2511 2592
2512 /* We need the data buffer to be aligned properly. We will reserve 2593 return skb;
2513 * as many bytes as needed to align the data properly 2594}
2514 */ 2595
2515 skb_reserve(skb, alignamount); 2596struct sk_buff * gfar_new_skb(struct net_device *dev)
2516 GFAR_CB(skb)->alignamount = alignamount; 2597{
2598 struct gfar_private *priv = netdev_priv(dev);
2599 struct sk_buff *skb = NULL;
2600
2601 skb = __skb_dequeue(&priv->rx_recycle);
2602 if (!skb)
2603 skb = gfar_alloc_skb(dev);
2517 2604
2518 return skb; 2605 return skb;
2519} 2606}
@@ -2666,17 +2753,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2666 2753
2667 if (unlikely(!newskb)) 2754 if (unlikely(!newskb))
2668 newskb = skb; 2755 newskb = skb;
2669 else if (skb) { 2756 else if (skb)
2670 /*
2671 * We need to un-reserve() the skb to what it
2672 * was before gfar_new_skb() re-aligned
2673 * it to an RXBUF_ALIGNMENT boundary
2674 * before we put the skb back on the
2675 * recycle list.
2676 */
2677 skb_reserve(skb, -GFAR_CB(skb)->alignamount);
2678 __skb_queue_head(&priv->rx_recycle, skb); 2757 __skb_queue_head(&priv->rx_recycle, skb);
2679 }
2680 } else { 2758 } else {
2681 /* Increment the number of packets */ 2759 /* Increment the number of packets */
2682 rx_queue->stats.rx_packets++; 2760 rx_queue->stats.rx_packets++;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ac4a92e08c09..710810e2adb4 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1025,6 +1025,12 @@ struct gfar_priv_grp {
1025 char int_name_er[GFAR_INT_NAME_MAX]; 1025 char int_name_er[GFAR_INT_NAME_MAX];
1026}; 1026};
1027 1027
1028enum gfar_errata {
1029 GFAR_ERRATA_74 = 0x01,
1030 GFAR_ERRATA_76 = 0x02,
1031 GFAR_ERRATA_A002 = 0x04,
1032};
1033
1028/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1034/* Struct stolen almost completely (and shamelessly) from the FCC enet source
1029 * (Ok, that's not so true anymore, but there is a family resemblence) 1035 * (Ok, that's not so true anymore, but there is a family resemblence)
1030 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base 1036 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1049,6 +1055,7 @@ struct gfar_private {
1049 struct device_node *node; 1055 struct device_node *node;
1050 struct net_device *ndev; 1056 struct net_device *ndev;
1051 struct of_device *ofdev; 1057 struct of_device *ofdev;
1058 enum gfar_errata errata;
1052 1059
1053 struct gfar_priv_grp gfargrp[MAXGROUPS]; 1060 struct gfar_priv_grp gfargrp[MAXGROUPS];
1054 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; 1061 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
@@ -1111,6 +1118,12 @@ struct gfar_private {
1111extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; 1118extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1112extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; 1119extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1113 1120
1121static inline int gfar_has_errata(struct gfar_private *priv,
1122 enum gfar_errata err)
1123{
1124 return priv->errata & err;
1125}
1126
1114static inline u32 gfar_read(volatile unsigned __iomem *addr) 1127static inline u32 gfar_read(volatile unsigned __iomem *addr)
1115{ 1128{
1116 u32 val; 1129 u32 val;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a029d02c2b4..4d09eab3548e 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1555,7 +1555,6 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
1555 } 1555 }
1556 1556
1557 /* setup NAPI */ 1557 /* setup NAPI */
1558 memset(&greth->napi, 0, sizeof(greth->napi));
1559 netif_napi_add(dev, &greth->napi, greth_poll, 64); 1558 netif_napi_add(dev, &greth->napi, greth_poll, 64);
1560 1559
1561 return 0; 1560 return 0;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 61f2b1cfcd46..49aac7027fbb 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -492,7 +492,6 @@ struct hamachi_private {
492 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 492 struct sk_buff* tx_skbuff[TX_RING_SIZE];
493 dma_addr_t tx_ring_dma; 493 dma_addr_t tx_ring_dma;
494 dma_addr_t rx_ring_dma; 494 dma_addr_t rx_ring_dma;
495 struct net_device_stats stats;
496 struct timer_list timer; /* Media selection timer. */ 495 struct timer_list timer; /* Media selection timer. */
497 /* Frequently used and paired value: keep adjacent for cache effect. */ 496 /* Frequently used and paired value: keep adjacent for cache effect. */
498 spinlock_t lock; 497 spinlock_t lock;
@@ -1036,7 +1035,7 @@ static inline int hamachi_tx(struct net_device *dev)
1036 if (entry >= TX_RING_SIZE-1) 1035 if (entry >= TX_RING_SIZE-1)
1037 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= 1036 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
1038 cpu_to_le32(DescEndRing); 1037 cpu_to_le32(DescEndRing);
1039 hmp->stats.tx_packets++; 1038 dev->stats.tx_packets++;
1040 } 1039 }
1041 1040
1042 return 0; 1041 return 0;
@@ -1167,7 +1166,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
1167 1166
1168 /* Trigger an immediate transmit demand. */ 1167 /* Trigger an immediate transmit demand. */
1169 dev->trans_start = jiffies; /* prevent tx timeout */ 1168 dev->trans_start = jiffies; /* prevent tx timeout */
1170 hmp->stats.tx_errors++; 1169 dev->stats.tx_errors++;
1171 1170
1172 /* Restart the chip's Tx/Rx processes . */ 1171 /* Restart the chip's Tx/Rx processes . */
1173 writew(0x0002, ioaddr + TxCmd); /* STOP Tx */ 1172 writew(0x0002, ioaddr + TxCmd); /* STOP Tx */
@@ -1434,7 +1433,7 @@ static irqreturn_t hamachi_interrupt(int irq, void *dev_instance)
1434 if (entry >= TX_RING_SIZE-1) 1433 if (entry >= TX_RING_SIZE-1)
1435 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= 1434 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
1436 cpu_to_le32(DescEndRing); 1435 cpu_to_le32(DescEndRing);
1437 hmp->stats.tx_packets++; 1436 dev->stats.tx_packets++;
1438 } 1437 }
1439 if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){ 1438 if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
1440 /* The ring is no longer full */ 1439 /* The ring is no longer full */
@@ -1525,18 +1524,22 @@ static int hamachi_rx(struct net_device *dev)
1525 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000, 1524 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000,
1526 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff, 1525 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff,
1527 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length)); 1526 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length));
1528 hmp->stats.rx_length_errors++; 1527 dev->stats.rx_length_errors++;
1529 } /* else Omit for prototype errata??? */ 1528 } /* else Omit for prototype errata??? */
1530 if (frame_status & 0x00380000) { 1529 if (frame_status & 0x00380000) {
1531 /* There was an error. */ 1530 /* There was an error. */
1532 if (hamachi_debug > 2) 1531 if (hamachi_debug > 2)
1533 printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n", 1532 printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
1534 frame_status); 1533 frame_status);
1535 hmp->stats.rx_errors++; 1534 dev->stats.rx_errors++;
1536 if (frame_status & 0x00600000) hmp->stats.rx_length_errors++; 1535 if (frame_status & 0x00600000)
1537 if (frame_status & 0x00080000) hmp->stats.rx_frame_errors++; 1536 dev->stats.rx_length_errors++;
1538 if (frame_status & 0x00100000) hmp->stats.rx_crc_errors++; 1537 if (frame_status & 0x00080000)
1539 if (frame_status < 0) hmp->stats.rx_dropped++; 1538 dev->stats.rx_frame_errors++;
1539 if (frame_status & 0x00100000)
1540 dev->stats.rx_crc_errors++;
1541 if (frame_status < 0)
1542 dev->stats.rx_dropped++;
1540 } else { 1543 } else {
1541 struct sk_buff *skb; 1544 struct sk_buff *skb;
1542 /* Omit CRC */ 1545 /* Omit CRC */
@@ -1654,7 +1657,7 @@ static int hamachi_rx(struct net_device *dev)
1654#endif /* RX_CHECKSUM */ 1657#endif /* RX_CHECKSUM */
1655 1658
1656 netif_rx(skb); 1659 netif_rx(skb);
1657 hmp->stats.rx_packets++; 1660 dev->stats.rx_packets++;
1658 } 1661 }
1659 entry = (++hmp->cur_rx) % RX_RING_SIZE; 1662 entry = (++hmp->cur_rx) % RX_RING_SIZE;
1660 } 1663 }
@@ -1724,9 +1727,9 @@ static void hamachi_error(struct net_device *dev, int intr_status)
1724 dev->name, intr_status); 1727 dev->name, intr_status);
1725 /* Hmmmmm, it's not clear how to recover from PCI faults. */ 1728 /* Hmmmmm, it's not clear how to recover from PCI faults. */
1726 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) 1729 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1727 hmp->stats.tx_fifo_errors++; 1730 dev->stats.tx_fifo_errors++;
1728 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) 1731 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1729 hmp->stats.rx_fifo_errors++; 1732 dev->stats.rx_fifo_errors++;
1730} 1733}
1731 1734
1732static int hamachi_close(struct net_device *dev) 1735static int hamachi_close(struct net_device *dev)
@@ -1828,19 +1831,27 @@ static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
1828 so I think I'll comment it out here and see if better things 1831 so I think I'll comment it out here and see if better things
1829 happen. 1832 happen.
1830 */ 1833 */
1831 /* hmp->stats.tx_packets = readl(ioaddr + 0x000); */ 1834 /* dev->stats.tx_packets = readl(ioaddr + 0x000); */
1832 1835
1833 hmp->stats.rx_bytes = readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */ 1836 /* Total Uni+Brd+Multi */
1834 hmp->stats.tx_bytes = readl(ioaddr + 0x3B0); /* Total Uni+Brd+Multi */ 1837 dev->stats.rx_bytes = readl(ioaddr + 0x330);
1835 hmp->stats.multicast = readl(ioaddr + 0x320); /* Multicast Rx */ 1838 /* Total Uni+Brd+Multi */
1836 1839 dev->stats.tx_bytes = readl(ioaddr + 0x3B0);
1837 hmp->stats.rx_length_errors = readl(ioaddr + 0x368); /* Over+Undersized */ 1840 /* Multicast Rx */
1838 hmp->stats.rx_over_errors = readl(ioaddr + 0x35C); /* Jabber */ 1841 dev->stats.multicast = readl(ioaddr + 0x320);
1839 hmp->stats.rx_crc_errors = readl(ioaddr + 0x360); /* Jabber */ 1842
1840 hmp->stats.rx_frame_errors = readl(ioaddr + 0x364); /* Symbol Errs */ 1843 /* Over+Undersized */
1841 hmp->stats.rx_missed_errors = readl(ioaddr + 0x36C); /* Dropped */ 1844 dev->stats.rx_length_errors = readl(ioaddr + 0x368);
1842 1845 /* Jabber */
1843 return &hmp->stats; 1846 dev->stats.rx_over_errors = readl(ioaddr + 0x35C);
1847 /* Jabber */
1848 dev->stats.rx_crc_errors = readl(ioaddr + 0x360);
1849 /* Symbol Errs */
1850 dev->stats.rx_frame_errors = readl(ioaddr + 0x364);
1851 /* Dropped */
1852 dev->stats.rx_missed_errors = readl(ioaddr + 0x36C);
1853
1854 return &dev->stats;
1844} 1855}
1845 1856
1846static void set_rx_mode(struct net_device *dev) 1857static void set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 68e5ac8832ad..095b17ecf609 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -168,7 +168,6 @@ struct hp100_private {
168 u_char mac1_mode; 168 u_char mac1_mode;
169 u_char mac2_mode; 169 u_char mac2_mode;
170 u_char hash_bytes[8]; 170 u_char hash_bytes[8];
171 struct net_device_stats stats;
172 171
173 /* Rings for busmaster mode: */ 172 /* Rings for busmaster mode: */
174 hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */ 173 hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */
@@ -721,9 +720,10 @@ static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
721 /* Conversion to new PCI API : 720 /* Conversion to new PCI API :
722 * Pages are always aligned and zeroed, no need to it ourself. 721 * Pages are always aligned and zeroed, no need to it ourself.
723 * Doc says should be OK for EISA bus as well - Jean II */ 722 * Doc says should be OK for EISA bus as well - Jean II */
724 if ((lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr)) == NULL) { 723 lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr);
724 if (!lp->page_vaddr_algn) {
725 err = -ENOMEM; 725 err = -ENOMEM;
726 goto out2; 726 goto out_mem_ptr;
727 } 727 }
728 lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn); 728 lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
729 729
@@ -799,6 +799,7 @@ out3:
799 pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f, 799 pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f,
800 lp->page_vaddr_algn, 800 lp->page_vaddr_algn,
801 virt_to_whatever(dev, lp->page_vaddr_algn)); 801 virt_to_whatever(dev, lp->page_vaddr_algn));
802out_mem_ptr:
802 if (mem_ptr_virt) 803 if (mem_ptr_virt)
803 iounmap(mem_ptr_virt); 804 iounmap(mem_ptr_virt);
804out2: 805out2:
@@ -1071,7 +1072,7 @@ static void hp100_mmuinit(struct net_device *dev)
1071 if (lp->mode == 1) 1072 if (lp->mode == 1)
1072 hp100_init_pdls(dev); 1073 hp100_init_pdls(dev);
1073 1074
1074 /* Go to performance page and initalize isr and imr registers */ 1075 /* Go to performance page and initialize isr and imr registers */
1075 hp100_page(PERFORMANCE); 1076 hp100_page(PERFORMANCE);
1076 hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ 1077 hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
1077 hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */ 1078 hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
@@ -1582,8 +1583,8 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
1582 spin_unlock_irqrestore(&lp->lock, flags); 1583 spin_unlock_irqrestore(&lp->lock, flags);
1583 1584
1584 /* Update statistics */ 1585 /* Update statistics */
1585 lp->stats.tx_packets++; 1586 dev->stats.tx_packets++;
1586 lp->stats.tx_bytes += skb->len; 1587 dev->stats.tx_bytes += skb->len;
1587 1588
1588 return NETDEV_TX_OK; 1589 return NETDEV_TX_OK;
1589 1590
@@ -1740,8 +1741,8 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
1740 1741
1741 hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */ 1742 hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */
1742 1743
1743 lp->stats.tx_packets++; 1744 dev->stats.tx_packets++;
1744 lp->stats.tx_bytes += skb->len; 1745 dev->stats.tx_bytes += skb->len;
1745 hp100_ints_on(); 1746 hp100_ints_on();
1746 spin_unlock_irqrestore(&lp->lock, flags); 1747 spin_unlock_irqrestore(&lp->lock, flags);
1747 1748
@@ -1822,7 +1823,7 @@ static void hp100_rx(struct net_device *dev)
1822 printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n", 1823 printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
1823 dev->name, pkt_len); 1824 dev->name, pkt_len);
1824#endif 1825#endif
1825 lp->stats.rx_dropped++; 1826 dev->stats.rx_dropped++;
1826 } else { /* skb successfully allocated */ 1827 } else { /* skb successfully allocated */
1827 1828
1828 u_char *ptr; 1829 u_char *ptr;
@@ -1848,8 +1849,8 @@ static void hp100_rx(struct net_device *dev)
1848 ptr[9], ptr[10], ptr[11]); 1849 ptr[9], ptr[10], ptr[11]);
1849#endif 1850#endif
1850 netif_rx(skb); 1851 netif_rx(skb);
1851 lp->stats.rx_packets++; 1852 dev->stats.rx_packets++;
1852 lp->stats.rx_bytes += pkt_len; 1853 dev->stats.rx_bytes += pkt_len;
1853 } 1854 }
1854 1855
1855 /* Indicate the card that we have got the packet */ 1856 /* Indicate the card that we have got the packet */
@@ -1858,7 +1859,7 @@ static void hp100_rx(struct net_device *dev)
1858 switch (header & 0x00070000) { 1859 switch (header & 0x00070000) {
1859 case (HP100_MULTI_ADDR_HASH << 16): 1860 case (HP100_MULTI_ADDR_HASH << 16):
1860 case (HP100_MULTI_ADDR_NO_HASH << 16): 1861 case (HP100_MULTI_ADDR_NO_HASH << 16):
1861 lp->stats.multicast++; 1862 dev->stats.multicast++;
1862 break; 1863 break;
1863 } 1864 }
1864 } /* end of while(there are packets) loop */ 1865 } /* end of while(there are packets) loop */
@@ -1930,7 +1931,7 @@ static void hp100_rx_bm(struct net_device *dev)
1930 if (ptr->skb == NULL) { 1931 if (ptr->skb == NULL) {
1931 printk("hp100: %s: rx_bm: skb null\n", dev->name); 1932 printk("hp100: %s: rx_bm: skb null\n", dev->name);
1932 /* can happen if we only allocated room for the pdh due to memory shortage. */ 1933 /* can happen if we only allocated room for the pdh due to memory shortage. */
1933 lp->stats.rx_dropped++; 1934 dev->stats.rx_dropped++;
1934 } else { 1935 } else {
1935 skb_trim(ptr->skb, pkt_len); /* Shorten it */ 1936 skb_trim(ptr->skb, pkt_len); /* Shorten it */
1936 ptr->skb->protocol = 1937 ptr->skb->protocol =
@@ -1938,14 +1939,14 @@ static void hp100_rx_bm(struct net_device *dev)
1938 1939
1939 netif_rx(ptr->skb); /* Up and away... */ 1940 netif_rx(ptr->skb); /* Up and away... */
1940 1941
1941 lp->stats.rx_packets++; 1942 dev->stats.rx_packets++;
1942 lp->stats.rx_bytes += pkt_len; 1943 dev->stats.rx_bytes += pkt_len;
1943 } 1944 }
1944 1945
1945 switch (header & 0x00070000) { 1946 switch (header & 0x00070000) {
1946 case (HP100_MULTI_ADDR_HASH << 16): 1947 case (HP100_MULTI_ADDR_HASH << 16):
1947 case (HP100_MULTI_ADDR_NO_HASH << 16): 1948 case (HP100_MULTI_ADDR_NO_HASH << 16):
1948 lp->stats.multicast++; 1949 dev->stats.multicast++;
1949 break; 1950 break;
1950 } 1951 }
1951 } else { 1952 } else {
@@ -1954,7 +1955,7 @@ static void hp100_rx_bm(struct net_device *dev)
1954#endif 1955#endif
1955 if (ptr->skb != NULL) 1956 if (ptr->skb != NULL)
1956 dev_kfree_skb_any(ptr->skb); 1957 dev_kfree_skb_any(ptr->skb);
1957 lp->stats.rx_errors++; 1958 dev->stats.rx_errors++;
1958 } 1959 }
1959 1960
1960 lp->rxrhead = lp->rxrhead->next; 1961 lp->rxrhead = lp->rxrhead->next;
@@ -1992,14 +1993,13 @@ static struct net_device_stats *hp100_get_stats(struct net_device *dev)
1992 hp100_update_stats(dev); 1993 hp100_update_stats(dev);
1993 hp100_ints_on(); 1994 hp100_ints_on();
1994 spin_unlock_irqrestore(&lp->lock, flags); 1995 spin_unlock_irqrestore(&lp->lock, flags);
1995 return &(lp->stats); 1996 return &(dev->stats);
1996} 1997}
1997 1998
1998static void hp100_update_stats(struct net_device *dev) 1999static void hp100_update_stats(struct net_device *dev)
1999{ 2000{
2000 int ioaddr = dev->base_addr; 2001 int ioaddr = dev->base_addr;
2001 u_short val; 2002 u_short val;
2002 struct hp100_private *lp = netdev_priv(dev);
2003 2003
2004#ifdef HP100_DEBUG_B 2004#ifdef HP100_DEBUG_B
2005 hp100_outw(0x4216, TRACE); 2005 hp100_outw(0x4216, TRACE);
@@ -2009,14 +2009,14 @@ static void hp100_update_stats(struct net_device *dev)
2009 /* Note: Statistics counters clear when read. */ 2009 /* Note: Statistics counters clear when read. */
2010 hp100_page(MAC_CTRL); 2010 hp100_page(MAC_CTRL);
2011 val = hp100_inw(DROPPED) & 0x0fff; 2011 val = hp100_inw(DROPPED) & 0x0fff;
2012 lp->stats.rx_errors += val; 2012 dev->stats.rx_errors += val;
2013 lp->stats.rx_over_errors += val; 2013 dev->stats.rx_over_errors += val;
2014 val = hp100_inb(CRC); 2014 val = hp100_inb(CRC);
2015 lp->stats.rx_errors += val; 2015 dev->stats.rx_errors += val;
2016 lp->stats.rx_crc_errors += val; 2016 dev->stats.rx_crc_errors += val;
2017 val = hp100_inb(ABORT); 2017 val = hp100_inb(ABORT);
2018 lp->stats.tx_errors += val; 2018 dev->stats.tx_errors += val;
2019 lp->stats.tx_aborted_errors += val; 2019 dev->stats.tx_aborted_errors += val;
2020 hp100_page(PERFORMANCE); 2020 hp100_page(PERFORMANCE);
2021} 2021}
2022 2022
@@ -2025,7 +2025,6 @@ static void hp100_misc_interrupt(struct net_device *dev)
2025#ifdef HP100_DEBUG_B 2025#ifdef HP100_DEBUG_B
2026 int ioaddr = dev->base_addr; 2026 int ioaddr = dev->base_addr;
2027#endif 2027#endif
2028 struct hp100_private *lp = netdev_priv(dev);
2029 2028
2030#ifdef HP100_DEBUG_B 2029#ifdef HP100_DEBUG_B
2031 int ioaddr = dev->base_addr; 2030 int ioaddr = dev->base_addr;
@@ -2034,8 +2033,8 @@ static void hp100_misc_interrupt(struct net_device *dev)
2034#endif 2033#endif
2035 2034
2036 /* Note: Statistics counters clear when read. */ 2035 /* Note: Statistics counters clear when read. */
2037 lp->stats.rx_errors++; 2036 dev->stats.rx_errors++;
2038 lp->stats.tx_errors++; 2037 dev->stats.tx_errors++;
2039} 2038}
2040 2039
2041static void hp100_clear_stats(struct hp100_private *lp, int ioaddr) 2040static void hp100_clear_stats(struct hp100_private *lp, int ioaddr)
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index b150c102ca5a..eeec7bc2ce74 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1045,7 +1045,7 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1045 DBG(dev, "change_mtu(%d)" NL, new_mtu); 1045 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1046 1046
1047 if (netif_running(ndev)) { 1047 if (netif_running(ndev)) {
1048 /* Check if we really need to reinitalize RX ring */ 1048 /* Check if we really need to reinitialize RX ring */
1049 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu)) 1049 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1050 ret = emac_resize_rx_ring(dev, new_mtu); 1050 ret = emac_resize_rx_ring(dev, new_mtu);
1051 } 1051 }
@@ -2339,11 +2339,11 @@ static int __devinit emac_wait_deps(struct emac_instance *dev)
2339 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph; 2339 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2340 if (dev->blist && dev->blist > emac_boot_list) 2340 if (dev->blist && dev->blist > emac_boot_list)
2341 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu; 2341 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2342 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier); 2342 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2343 wait_event_timeout(emac_probe_wait, 2343 wait_event_timeout(emac_probe_wait,
2344 emac_check_deps(dev, deps), 2344 emac_check_deps(dev, deps),
2345 EMAC_PROBE_DEP_TIMEOUT); 2345 EMAC_PROBE_DEP_TIMEOUT);
2346 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier); 2346 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2347 err = emac_check_deps(dev, deps) ? 0 : -ENODEV; 2347 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2348 for (i = 0; i < EMAC_DEP_COUNT; i++) { 2348 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2349 if (deps[i].node) 2349 if (deps[i].node)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 86438b59fa21..187622f1c816 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -63,6 +63,7 @@ static bool igb_sgmii_active_82575(struct e1000_hw *);
63static s32 igb_reset_init_script_82575(struct e1000_hw *); 63static s32 igb_reset_init_script_82575(struct e1000_hw *);
64static s32 igb_read_mac_addr_82575(struct e1000_hw *); 64static s32 igb_read_mac_addr_82575(struct e1000_hw *);
65static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 65static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
66static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
66 67
67static const u16 e1000_82580_rxpbs_table[] = 68static const u16 e1000_82580_rxpbs_table[] =
68 { 36, 72, 144, 1, 2, 4, 8, 16, 69 { 36, 72, 144, 1, 2, 4, 8, 16,
@@ -70,6 +71,35 @@ static const u16 e1000_82580_rxpbs_table[] =
70#define E1000_82580_RXPBS_TABLE_SIZE \ 71#define E1000_82580_RXPBS_TABLE_SIZE \
71 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) 72 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
72 73
74/**
75 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
76 * @hw: pointer to the HW structure
77 *
78 * Called to determine if the I2C pins are being used for I2C or as an
79 * external MDIO interface since the two options are mutually exclusive.
80 **/
81static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
82{
83 u32 reg = 0;
84 bool ext_mdio = false;
85
86 switch (hw->mac.type) {
87 case e1000_82575:
88 case e1000_82576:
89 reg = rd32(E1000_MDIC);
90 ext_mdio = !!(reg & E1000_MDIC_DEST);
91 break;
92 case e1000_82580:
93 case e1000_i350:
94 reg = rd32(E1000_MDICNFG);
95 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
96 break;
97 default:
98 break;
99 }
100 return ext_mdio;
101}
102
73static s32 igb_get_invariants_82575(struct e1000_hw *hw) 103static s32 igb_get_invariants_82575(struct e1000_hw *hw)
74{ 104{
75 struct e1000_phy_info *phy = &hw->phy; 105 struct e1000_phy_info *phy = &hw->phy;
@@ -130,27 +160,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
130 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 160 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
131 case E1000_CTRL_EXT_LINK_MODE_SGMII: 161 case E1000_CTRL_EXT_LINK_MODE_SGMII:
132 dev_spec->sgmii_active = true; 162 dev_spec->sgmii_active = true;
133 ctrl_ext |= E1000_CTRL_I2C_ENA;
134 break; 163 break;
135 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 164 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
136 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: 165 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
137 hw->phy.media_type = e1000_media_type_internal_serdes; 166 hw->phy.media_type = e1000_media_type_internal_serdes;
138 ctrl_ext |= E1000_CTRL_I2C_ENA;
139 break; 167 break;
140 default: 168 default:
141 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
142 break; 169 break;
143 } 170 }
144 171
145 wr32(E1000_CTRL_EXT, ctrl_ext);
146
147 /*
148 * if using i2c make certain the MDICNFG register is cleared to prevent
149 * communications from being misrouted to the mdic registers
150 */
151 if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
152 wr32(E1000_MDICNFG, 0);
153
154 /* Set mta register count */ 172 /* Set mta register count */
155 mac->mta_reg_count = 128; 173 mac->mta_reg_count = 128;
156 /* Set rar entry count */ 174 /* Set rar entry count */
@@ -228,19 +246,29 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
228 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 246 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
229 phy->reset_delay_us = 100; 247 phy->reset_delay_us = 100;
230 248
249 ctrl_ext = rd32(E1000_CTRL_EXT);
250
231 /* PHY function pointers */ 251 /* PHY function pointers */
232 if (igb_sgmii_active_82575(hw)) { 252 if (igb_sgmii_active_82575(hw)) {
233 phy->ops.reset = igb_phy_hw_reset_sgmii_82575; 253 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
234 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 254 ctrl_ext |= E1000_CTRL_I2C_ENA;
235 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 255 } else {
256 phy->ops.reset = igb_phy_hw_reset;
257 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
258 }
259
260 wr32(E1000_CTRL_EXT, ctrl_ext);
261 igb_reset_mdicnfg_82580(hw);
262
263 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
264 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
265 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
236 } else if (hw->mac.type >= e1000_82580) { 266 } else if (hw->mac.type >= e1000_82580) {
237 phy->ops.reset = igb_phy_hw_reset; 267 phy->ops.read_reg = igb_read_phy_reg_82580;
238 phy->ops.read_reg = igb_read_phy_reg_82580; 268 phy->ops.write_reg = igb_write_phy_reg_82580;
239 phy->ops.write_reg = igb_write_phy_reg_82580;
240 } else { 269 } else {
241 phy->ops.reset = igb_phy_hw_reset; 270 phy->ops.read_reg = igb_read_phy_reg_igp;
242 phy->ops.read_reg = igb_read_phy_reg_igp; 271 phy->ops.write_reg = igb_write_phy_reg_igp;
243 phy->ops.write_reg = igb_write_phy_reg_igp;
244 } 272 }
245 273
246 /* set lan id */ 274 /* set lan id */
@@ -295,6 +323,10 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
295 323
296 if (hw->bus.func == E1000_FUNC_1) 324 if (hw->bus.func == E1000_FUNC_1)
297 mask = E1000_SWFW_PHY1_SM; 325 mask = E1000_SWFW_PHY1_SM;
326 else if (hw->bus.func == E1000_FUNC_2)
327 mask = E1000_SWFW_PHY2_SM;
328 else if (hw->bus.func == E1000_FUNC_3)
329 mask = E1000_SWFW_PHY3_SM;
298 330
299 return igb_acquire_swfw_sync_82575(hw, mask); 331 return igb_acquire_swfw_sync_82575(hw, mask);
300} 332}
@@ -312,6 +344,10 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
312 344
313 if (hw->bus.func == E1000_FUNC_1) 345 if (hw->bus.func == E1000_FUNC_1)
314 mask = E1000_SWFW_PHY1_SM; 346 mask = E1000_SWFW_PHY1_SM;
347 else if (hw->bus.func == E1000_FUNC_2)
348 mask = E1000_SWFW_PHY2_SM;
349 else if (hw->bus.func == E1000_FUNC_3)
350 mask = E1000_SWFW_PHY3_SM;
315 351
316 igb_release_swfw_sync_82575(hw, mask); 352 igb_release_swfw_sync_82575(hw, mask);
317} 353}
@@ -392,6 +428,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
392 s32 ret_val = 0; 428 s32 ret_val = 0;
393 u16 phy_id; 429 u16 phy_id;
394 u32 ctrl_ext; 430 u32 ctrl_ext;
431 u32 mdic;
395 432
396 /* 433 /*
397 * For SGMII PHYs, we try the list of possible addresses until 434 * For SGMII PHYs, we try the list of possible addresses until
@@ -406,6 +443,29 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
406 goto out; 443 goto out;
407 } 444 }
408 445
446 if (igb_sgmii_uses_mdio_82575(hw)) {
447 switch (hw->mac.type) {
448 case e1000_82575:
449 case e1000_82576:
450 mdic = rd32(E1000_MDIC);
451 mdic &= E1000_MDIC_PHY_MASK;
452 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
453 break;
454 case e1000_82580:
455 case e1000_i350:
456 mdic = rd32(E1000_MDICNFG);
457 mdic &= E1000_MDICNFG_PHY_MASK;
458 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
459 break;
460 default:
461 ret_val = -E1000_ERR_PHY;
462 goto out;
463 break;
464 }
465 ret_val = igb_get_phy_id(hw);
466 goto out;
467 }
468
409 /* Power on sgmii phy if it is disabled */ 469 /* Power on sgmii phy if it is disabled */
410 ctrl_ext = rd32(E1000_CTRL_EXT); 470 ctrl_ext = rd32(E1000_CTRL_EXT);
411 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); 471 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
@@ -1493,6 +1553,43 @@ out:
1493} 1553}
1494 1554
1495/** 1555/**
1556 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
1557 * @hw: pointer to the HW structure
1558 *
1559 * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
1560 * the values found in the EEPROM. This addresses an issue in which these
1561 * bits are not restored from EEPROM after reset.
1562 **/
1563static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
1564{
1565 s32 ret_val = 0;
1566 u32 mdicnfg;
1567 u16 nvm_data;
1568
1569 if (hw->mac.type != e1000_82580)
1570 goto out;
1571 if (!igb_sgmii_active_82575(hw))
1572 goto out;
1573
1574 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1575 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1576 &nvm_data);
1577 if (ret_val) {
1578 hw_dbg("NVM Read Error\n");
1579 goto out;
1580 }
1581
1582 mdicnfg = rd32(E1000_MDICNFG);
1583 if (nvm_data & NVM_WORD24_EXT_MDIO)
1584 mdicnfg |= E1000_MDICNFG_EXT_MDIO;
1585 if (nvm_data & NVM_WORD24_COM_MDIO)
1586 mdicnfg |= E1000_MDICNFG_COM_MDIO;
1587 wr32(E1000_MDICNFG, mdicnfg);
1588out:
1589 return ret_val;
1590}
1591
1592/**
1496 * igb_reset_hw_82580 - Reset hardware 1593 * igb_reset_hw_82580 - Reset hardware
1497 * @hw: pointer to the HW structure 1594 * @hw: pointer to the HW structure
1498 * 1595 *
@@ -1567,6 +1664,10 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1567 wr32(E1000_IMC, 0xffffffff); 1664 wr32(E1000_IMC, 0xffffffff);
1568 icr = rd32(E1000_ICR); 1665 icr = rd32(E1000_ICR);
1569 1666
1667 ret_val = igb_reset_mdicnfg_82580(hw);
1668 if (ret_val)
1669 hw_dbg("Could not reset MDICNFG based on EEPROM\n");
1670
1570 /* Install any alternate MAC address into RAR0 */ 1671 /* Install any alternate MAC address into RAR0 */
1571 ret_val = igb_check_alt_mac_addr(hw); 1672 ret_val = igb_check_alt_mac_addr(hw);
1572 1673
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 24d9be64342f..bbd2ec308eb0 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -164,6 +164,8 @@
164#define E1000_SWFW_EEP_SM 0x1 164#define E1000_SWFW_EEP_SM 0x1
165#define E1000_SWFW_PHY0_SM 0x2 165#define E1000_SWFW_PHY0_SM 0x2
166#define E1000_SWFW_PHY1_SM 0x4 166#define E1000_SWFW_PHY1_SM 0x4
167#define E1000_SWFW_PHY2_SM 0x20
168#define E1000_SWFW_PHY3_SM 0x40
167 169
168/* FACTPS Definitions */ 170/* FACTPS Definitions */
169/* Device Control */ 171/* Device Control */
@@ -466,6 +468,11 @@
466 468
467#define E1000_TIMINCA_16NS_SHIFT 24 469#define E1000_TIMINCA_16NS_SHIFT 24
468 470
471#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
472#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
473#define E1000_MDICNFG_PHY_MASK 0x03E00000
474#define E1000_MDICNFG_PHY_SHIFT 21
475
469/* PCI Express Control */ 476/* PCI Express Control */
470#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 477#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
471#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 478#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
@@ -563,6 +570,10 @@
563 570
564#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) 571#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
565 572
573/* Mask bits for fields in Word 0x24 of the NVM */
574#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
575#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */
576
566/* Mask bits for fields in Word 0x0f of the NVM */ 577/* Mask bits for fields in Word 0x0f of the NVM */
567#define NVM_WORD0F_PAUSE_MASK 0x3000 578#define NVM_WORD0F_PAUSE_MASK 0x3000
568#define NVM_WORD0F_ASM_DIR 0x2000 579#define NVM_WORD0F_ASM_DIR 0x2000
@@ -696,12 +707,17 @@
696#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 707#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
697 708
698/* MDI Control */ 709/* MDI Control */
710#define E1000_MDIC_DATA_MASK 0x0000FFFF
711#define E1000_MDIC_REG_MASK 0x001F0000
699#define E1000_MDIC_REG_SHIFT 16 712#define E1000_MDIC_REG_SHIFT 16
713#define E1000_MDIC_PHY_MASK 0x03E00000
700#define E1000_MDIC_PHY_SHIFT 21 714#define E1000_MDIC_PHY_SHIFT 21
701#define E1000_MDIC_OP_WRITE 0x04000000 715#define E1000_MDIC_OP_WRITE 0x04000000
702#define E1000_MDIC_OP_READ 0x08000000 716#define E1000_MDIC_OP_READ 0x08000000
703#define E1000_MDIC_READY 0x10000000 717#define E1000_MDIC_READY 0x10000000
718#define E1000_MDIC_INT_EN 0x20000000
704#define E1000_MDIC_ERROR 0x40000000 719#define E1000_MDIC_ERROR 0x40000000
720#define E1000_MDIC_DEST 0x80000000
705 721
706/* SerDes Control */ 722/* SerDes Control */
707#define E1000_GEN_CTL_READY 0x80000000 723#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index f2ebf927e4bc..26bf6a13d1c1 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1823,12 +1823,10 @@ static void igb_diag_test(struct net_device *netdev,
1823 dev_info(&adapter->pdev->dev, "online testing starting\n"); 1823 dev_info(&adapter->pdev->dev, "online testing starting\n");
1824 1824
1825 /* PHY is powered down when interface is down */ 1825 /* PHY is powered down when interface is down */
1826 if (!netif_carrier_ok(netdev)) { 1826 if (if_running && igb_link_test(adapter, &data[4]))
1827 eth_test->flags |= ETH_TEST_FL_FAILED;
1828 else
1827 data[4] = 0; 1829 data[4] = 0;
1828 } else {
1829 if (igb_link_test(adapter, &data[4]))
1830 eth_test->flags |= ETH_TEST_FL_FAILED;
1831 }
1832 1830
1833 /* Online tests aren't run; pass by default */ 1831 /* Online tests aren't run; pass by default */
1834 data[0] = 0; 1832 data[0] = 0;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index cea37e0837ff..df5dcd23e4fc 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -630,9 +630,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
630 for (; i < adapter->rss_queues; i++) 630 for (; i < adapter->rss_queues; i++)
631 adapter->rx_ring[i]->reg_idx = rbase_offset + 631 adapter->rx_ring[i]->reg_idx = rbase_offset +
632 Q_IDX_82576(i); 632 Q_IDX_82576(i);
633 for (; j < adapter->rss_queues; j++)
634 adapter->tx_ring[j]->reg_idx = rbase_offset +
635 Q_IDX_82576(j);
636 } 633 }
637 case e1000_82575: 634 case e1000_82575:
638 case e1000_82580: 635 case e1000_82580:
@@ -996,7 +993,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
996 993
997 /* Number of supported queues. */ 994 /* Number of supported queues. */
998 adapter->num_rx_queues = adapter->rss_queues; 995 adapter->num_rx_queues = adapter->rss_queues;
999 adapter->num_tx_queues = adapter->rss_queues; 996 if (adapter->vfs_allocated_count)
997 adapter->num_tx_queues = 1;
998 else
999 adapter->num_tx_queues = adapter->rss_queues;
1000 1000
1001 /* start with one vector for every rx queue */ 1001 /* start with one vector for every rx queue */
1002 numvecs = adapter->num_rx_queues; 1002 numvecs = adapter->num_rx_queues;
@@ -1290,7 +1290,13 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1290 wr32(E1000_IAM, 0); 1290 wr32(E1000_IAM, 0);
1291 wr32(E1000_IMC, ~0); 1291 wr32(E1000_IMC, ~0);
1292 wrfl(); 1292 wrfl();
1293 synchronize_irq(adapter->pdev->irq); 1293 if (adapter->msix_entries) {
1294 int i;
1295 for (i = 0; i < adapter->num_q_vectors; i++)
1296 synchronize_irq(adapter->msix_entries[i].vector);
1297 } else {
1298 synchronize_irq(adapter->pdev->irq);
1299 }
1294} 1300}
1295 1301
1296/** 1302/**
@@ -2100,9 +2106,6 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2100#ifdef CONFIG_PCI_IOV 2106#ifdef CONFIG_PCI_IOV
2101 struct pci_dev *pdev = adapter->pdev; 2107 struct pci_dev *pdev = adapter->pdev;
2102 2108
2103 if (adapter->vfs_allocated_count > 7)
2104 adapter->vfs_allocated_count = 7;
2105
2106 if (adapter->vfs_allocated_count) { 2109 if (adapter->vfs_allocated_count) {
2107 adapter->vf_data = kcalloc(adapter->vfs_allocated_count, 2110 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2108 sizeof(struct vf_data_storage), 2111 sizeof(struct vf_data_storage),
@@ -2267,7 +2270,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2267 2270
2268#ifdef CONFIG_PCI_IOV 2271#ifdef CONFIG_PCI_IOV
2269 if (hw->mac.type == e1000_82576) 2272 if (hw->mac.type == e1000_82576)
2270 adapter->vfs_allocated_count = max_vfs; 2273 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
2271 2274
2272#endif /* CONFIG_PCI_IOV */ 2275#endif /* CONFIG_PCI_IOV */
2273 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 2276 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
@@ -2729,14 +2732,16 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2729 } 2732 }
2730 igb_vmm_control(adapter); 2733 igb_vmm_control(adapter);
2731 2734
2732 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | 2735 /*
2733 E1000_MRQC_RSS_FIELD_IPV4_TCP); 2736 * Generate RSS hash based on TCP port numbers and/or
2734 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | 2737 * IPv4/v6 src and dst addresses since UDP cannot be
2735 E1000_MRQC_RSS_FIELD_IPV6_TCP); 2738 * hashed reliably due to IP fragmentation
2736 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | 2739 */
2737 E1000_MRQC_RSS_FIELD_IPV6_UDP); 2740 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2738 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | 2741 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2739 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 2742 E1000_MRQC_RSS_FIELD_IPV6 |
2743 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2744 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
2740 2745
2741 wr32(E1000_MRQC, mrqc); 2746 wr32(E1000_MRQC, mrqc);
2742} 2747}
@@ -4986,6 +4991,10 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4986 4991
4987static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 4992static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4988{ 4993{
4994 /*
4995 * The VF MAC Address is stored in a packed array of bytes
4996 * starting at the second 32 bit word of the msg array
4997 */
4989 unsigned char *addr = (char *)&msg[1]; 4998 unsigned char *addr = (char *)&msg[1];
4990 int err = -1; 4999 int err = -1;
4991 5000
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 5e2b2a8c56c6..ec808fa8dc21 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,7 +48,7 @@
48#define DRV_VERSION "1.0.0-k0" 48#define DRV_VERSION "1.0.0-k0"
49char igbvf_driver_name[] = "igbvf"; 49char igbvf_driver_name[] = "igbvf";
50const char igbvf_driver_version[] = DRV_VERSION; 50const char igbvf_driver_version[] = DRV_VERSION;
51struct pm_qos_request_list *igbvf_driver_pm_qos_req; 51static struct pm_qos_request_list igbvf_driver_pm_qos_req;
52static const char igbvf_driver_string[] = 52static const char igbvf_driver_string[] =
53 "Intel(R) Virtual Function Network Driver"; 53 "Intel(R) Virtual Function Network Driver";
54static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 54static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -2751,7 +2751,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2751 dev_info(&pdev->dev, 2751 dev_info(&pdev->dev,
2752 "PF still in reset state, assigning new address." 2752 "PF still in reset state, assigning new address."
2753 " Is the PF interface up?\n"); 2753 " Is the PF interface up?\n");
2754 random_ether_addr(hw->mac.addr); 2754 dev_hw_addr_random(adapter->netdev, hw->mac.addr);
2755 } else { 2755 } else {
2756 err = hw->mac.ops.read_mac_addr(hw); 2756 err = hw->mac.ops.read_mac_addr(hw);
2757 if (err) { 2757 if (err) {
@@ -2902,8 +2902,8 @@ static int __init igbvf_init_module(void)
2902 printk(KERN_INFO "%s\n", igbvf_copyright); 2902 printk(KERN_INFO "%s\n", igbvf_copyright);
2903 2903
2904 ret = pci_register_driver(&igbvf_driver); 2904 ret = pci_register_driver(&igbvf_driver);
2905 igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 2905 pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
2906 PM_QOS_DEFAULT_VALUE); 2906 PM_QOS_DEFAULT_VALUE);
2907 2907
2908 return ret; 2908 return ret;
2909} 2909}
@@ -2918,8 +2918,7 @@ module_init(igbvf_init_module);
2918static void __exit igbvf_exit_module(void) 2918static void __exit igbvf_exit_module(void)
2919{ 2919{
2920 pci_unregister_driver(&igbvf_driver); 2920 pci_unregister_driver(&igbvf_driver);
2921 pm_qos_remove_request(igbvf_driver_pm_qos_req); 2921 pm_qos_remove_request(&igbvf_driver_pm_qos_req);
2922 igbvf_driver_pm_qos_req = NULL;
2923} 2922}
2924module_exit(igbvf_exit_module); 2923module_exit(igbvf_exit_module);
2925 2924
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index e3b5e9490601..0b3f6df5cff7 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -82,7 +82,6 @@ struct ioc3_private {
82 struct ioc3_etxd *txr; 82 struct ioc3_etxd *txr;
83 struct sk_buff *rx_skbs[512]; 83 struct sk_buff *rx_skbs[512];
84 struct sk_buff *tx_skbs[128]; 84 struct sk_buff *tx_skbs[128];
85 struct net_device_stats stats;
86 int rx_ci; /* RX consumer index */ 85 int rx_ci; /* RX consumer index */
87 int rx_pi; /* RX producer index */ 86 int rx_pi; /* RX producer index */
88 int tx_ci; /* TX consumer index */ 87 int tx_ci; /* TX consumer index */
@@ -504,8 +503,8 @@ static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
504 struct ioc3_private *ip = netdev_priv(dev); 503 struct ioc3_private *ip = netdev_priv(dev);
505 struct ioc3 *ioc3 = ip->regs; 504 struct ioc3 *ioc3 = ip->regs;
506 505
507 ip->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK); 506 dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
508 return &ip->stats; 507 return &dev->stats;
509} 508}
510 509
511static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) 510static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
@@ -576,8 +575,9 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
576 skb->ip_summed = CHECKSUM_UNNECESSARY; 575 skb->ip_summed = CHECKSUM_UNNECESSARY;
577} 576}
578 577
579static inline void ioc3_rx(struct ioc3_private *ip) 578static inline void ioc3_rx(struct net_device *dev)
580{ 579{
580 struct ioc3_private *ip = netdev_priv(dev);
581 struct sk_buff *skb, *new_skb; 581 struct sk_buff *skb, *new_skb;
582 struct ioc3 *ioc3 = ip->regs; 582 struct ioc3 *ioc3 = ip->regs;
583 int rx_entry, n_entry, len; 583 int rx_entry, n_entry, len;
@@ -598,13 +598,13 @@ static inline void ioc3_rx(struct ioc3_private *ip)
598 if (err & ERXBUF_GOODPKT) { 598 if (err & ERXBUF_GOODPKT) {
599 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; 599 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
600 skb_trim(skb, len); 600 skb_trim(skb, len);
601 skb->protocol = eth_type_trans(skb, priv_netdev(ip)); 601 skb->protocol = eth_type_trans(skb, dev);
602 602
603 new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 603 new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
604 if (!new_skb) { 604 if (!new_skb) {
605 /* Ouch, drop packet and just recycle packet 605 /* Ouch, drop packet and just recycle packet
606 to keep the ring filled. */ 606 to keep the ring filled. */
607 ip->stats.rx_dropped++; 607 dev->stats.rx_dropped++;
608 new_skb = skb; 608 new_skb = skb;
609 goto next; 609 goto next;
610 } 610 }
@@ -622,19 +622,19 @@ static inline void ioc3_rx(struct ioc3_private *ip)
622 rxb = (struct ioc3_erxbuf *) new_skb->data; 622 rxb = (struct ioc3_erxbuf *) new_skb->data;
623 skb_reserve(new_skb, RX_OFFSET); 623 skb_reserve(new_skb, RX_OFFSET);
624 624
625 ip->stats.rx_packets++; /* Statistics */ 625 dev->stats.rx_packets++; /* Statistics */
626 ip->stats.rx_bytes += len; 626 dev->stats.rx_bytes += len;
627 } else { 627 } else {
628 /* The frame is invalid and the skb never 628 /* The frame is invalid and the skb never
629 reached the network layer so we can just 629 reached the network layer so we can just
630 recycle it. */ 630 recycle it. */
631 new_skb = skb; 631 new_skb = skb;
632 ip->stats.rx_errors++; 632 dev->stats.rx_errors++;
633 } 633 }
634 if (err & ERXBUF_CRCERR) /* Statistics */ 634 if (err & ERXBUF_CRCERR) /* Statistics */
635 ip->stats.rx_crc_errors++; 635 dev->stats.rx_crc_errors++;
636 if (err & ERXBUF_FRAMERR) 636 if (err & ERXBUF_FRAMERR)
637 ip->stats.rx_frame_errors++; 637 dev->stats.rx_frame_errors++;
638next: 638next:
639 ip->rx_skbs[n_entry] = new_skb; 639 ip->rx_skbs[n_entry] = new_skb;
640 rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1)); 640 rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
@@ -652,8 +652,9 @@ next:
652 ip->rx_ci = rx_entry; 652 ip->rx_ci = rx_entry;
653} 653}
654 654
655static inline void ioc3_tx(struct ioc3_private *ip) 655static inline void ioc3_tx(struct net_device *dev)
656{ 656{
657 struct ioc3_private *ip = netdev_priv(dev);
657 unsigned long packets, bytes; 658 unsigned long packets, bytes;
658 struct ioc3 *ioc3 = ip->regs; 659 struct ioc3 *ioc3 = ip->regs;
659 int tx_entry, o_entry; 660 int tx_entry, o_entry;
@@ -681,12 +682,12 @@ static inline void ioc3_tx(struct ioc3_private *ip)
681 tx_entry = (etcir >> 7) & 127; 682 tx_entry = (etcir >> 7) & 127;
682 } 683 }
683 684
684 ip->stats.tx_packets += packets; 685 dev->stats.tx_packets += packets;
685 ip->stats.tx_bytes += bytes; 686 dev->stats.tx_bytes += bytes;
686 ip->txqlen -= packets; 687 ip->txqlen -= packets;
687 688
688 if (ip->txqlen < 128) 689 if (ip->txqlen < 128)
689 netif_wake_queue(priv_netdev(ip)); 690 netif_wake_queue(dev);
690 691
691 ip->tx_ci = o_entry; 692 ip->tx_ci = o_entry;
692 spin_unlock(&ip->ioc3_lock); 693 spin_unlock(&ip->ioc3_lock);
@@ -699,9 +700,9 @@ static inline void ioc3_tx(struct ioc3_private *ip)
699 * with such error interrupts if something really goes wrong, so we might 700 * with such error interrupts if something really goes wrong, so we might
700 * also consider to take the interface down. 701 * also consider to take the interface down.
701 */ 702 */
702static void ioc3_error(struct ioc3_private *ip, u32 eisr) 703static void ioc3_error(struct net_device *dev, u32 eisr)
703{ 704{
704 struct net_device *dev = priv_netdev(ip); 705 struct ioc3_private *ip = netdev_priv(dev);
705 unsigned char *iface = dev->name; 706 unsigned char *iface = dev->name;
706 707
707 spin_lock(&ip->ioc3_lock); 708 spin_lock(&ip->ioc3_lock);
@@ -747,11 +748,11 @@ static irqreturn_t ioc3_interrupt(int irq, void *_dev)
747 748
748 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | 749 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
749 EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) 750 EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
750 ioc3_error(ip, eisr); 751 ioc3_error(dev, eisr);
751 if (eisr & EISR_RXTIMERINT) 752 if (eisr & EISR_RXTIMERINT)
752 ioc3_rx(ip); 753 ioc3_rx(dev);
753 if (eisr & EISR_TXEXPLICIT) 754 if (eisr & EISR_TXEXPLICIT)
754 ioc3_tx(ip); 755 ioc3_tx(dev);
755 756
756 return IRQ_HANDLED; 757 return IRQ_HANDLED;
757} 758}
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 0dbd1932b72f..36c3060411d2 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -273,7 +273,7 @@ struct OboeSlot
273 __u8 control; /*Slot control/status see below */ 273 __u8 control; /*Slot control/status see below */
274 __u32 address; /*Slot buffer address */ 274 __u32 address; /*Slot buffer address */
275} 275}
276__attribute__ ((packed)); 276__packed;
277 277
278#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS 278#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
279 279
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index ac0443d52e50..58ddb5214916 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -125,7 +125,7 @@ struct irda_class_desc {
125 __u8 bmAdditionalBOFs; 125 __u8 bmAdditionalBOFs;
126 __u8 bIrdaRateSniff; 126 __u8 bIrdaRateSniff;
127 __u8 bMaxUnicastList; 127 __u8 bMaxUnicastList;
128} __attribute__ ((packed)); 128} __packed;
129 129
130/* class specific interface request to get the IrDA-USB class descriptor 130/* class specific interface request to get the IrDA-USB class descriptor
131 * (6.2.5, USB-IrDA class spec 1.0) */ 131 * (6.2.5, USB-IrDA class spec 1.0) */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index b54d3b48045e..1046014dd6c2 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -154,7 +154,7 @@ struct ks959_speedparams {
154 __le32 baudrate; /* baud rate, little endian */ 154 __le32 baudrate; /* baud rate, little endian */
155 __u8 flags; 155 __u8 flags;
156 __u8 reserved[3]; 156 __u8 reserved[3];
157} __attribute__ ((packed)); 157} __packed;
158 158
159#define KS_DATA_5_BITS 0x00 159#define KS_DATA_5_BITS 0x00
160#define KS_DATA_6_BITS 0x01 160#define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 8d713ebac15b..9cc142fcc712 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -117,7 +117,7 @@ struct ksdazzle_speedparams {
117 __le32 baudrate; /* baud rate, little endian */ 117 __le32 baudrate; /* baud rate, little endian */
118 __u8 flags; 118 __u8 flags;
119 __u8 reserved[3]; 119 __u8 reserved[3];
120} __attribute__ ((packed)); 120} __packed;
121 121
122#define KS_DATA_5_BITS 0x00 122#define KS_DATA_5_BITS 0x00
123#define KS_DATA_6_BITS 0x01 123#define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9a828b06a57e..edd5666f0ffb 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -749,7 +749,7 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
749 struct sh_irda_self *self; 749 struct sh_irda_self *self;
750 struct resource *res; 750 struct resource *res;
751 char clk_name[8]; 751 char clk_name[8];
752 unsigned int irq; 752 int irq;
753 int err = -ENOMEM; 753 int err = -ENOMEM;
754 754
755 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 755 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 5c5f99d50341..00b38bccd6d0 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -709,7 +709,7 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
709 struct sh_sir_self *self; 709 struct sh_sir_self *self;
710 struct resource *res; 710 struct resource *res;
711 char clk_name[8]; 711 char clk_name[8];
712 unsigned int irq; 712 int irq;
713 int err = -ENOMEM; 713 int err = -ENOMEM;
714 714
715 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 715 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index d67e48418e55..850ca1c5ee19 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2848,9 +2848,7 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2848 unsigned short ss_device = 0x0000; 2848 unsigned short ss_device = 0x0000;
2849 int ret = 0; 2849 int ret = 0;
2850 2850
2851 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); 2851 for_each_pci_dev(dev) {
2852
2853 while (dev != NULL) {
2854 struct smsc_ircc_subsystem_configuration *conf; 2852 struct smsc_ircc_subsystem_configuration *conf;
2855 2853
2856 /* 2854 /*
@@ -2899,7 +2897,6 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2899 ret = -ENODEV; 2897 ret = -ENODEV;
2900 } 2898 }
2901 } 2899 }
2902 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
2903 } 2900 }
2904 2901
2905 return ret; 2902 return ret;
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 3050d1a0cccf..3f24a1f33022 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -544,9 +544,9 @@ struct ring_descr_hw {
544 struct { 544 struct {
545 u8 addr_res[3]; 545 u8 addr_res[3];
546 volatile u8 status; /* descriptor status */ 546 volatile u8 status; /* descriptor status */
547 } __attribute__((packed)) rd_s; 547 } __packed rd_s;
548 } __attribute((packed)) rd_u; 548 } __packed rd_u;
549} __attribute__ ((packed)); 549} __packed;
550 550
551#define rd_addr rd_u.addr 551#define rd_addr rd_u.addr
552#define rd_status rd_u.rd_s.status 552#define rd_status rd_u.rd_s.status
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ffae480587ae..9e15eb93860e 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -44,11 +44,9 @@
44#include <linux/dca.h> 44#include <linux/dca.h>
45#endif 45#endif
46 46
47#define PFX "ixgbe: " 47/* common prefix used by pr_<> macros */
48#define DPRINTK(nlevel, klevel, fmt, args...) \ 48#undef pr_fmt
49 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
51 __func__ , ## args)))
52 50
53/* TX/RX descriptor defines */ 51/* TX/RX descriptor defines */
54#define IXGBE_DEFAULT_TXD 512 52#define IXGBE_DEFAULT_TXD 512
@@ -112,7 +110,6 @@ struct vf_data_storage {
112 u16 vlans_enabled; 110 u16 vlans_enabled;
113 bool clear_to_send; 111 bool clear_to_send;
114 bool pf_set_mac; 112 bool pf_set_mac;
115 int rar;
116 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 113 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
117 u16 pf_qos; 114 u16 pf_qos;
118}; 115};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a4e2901f2f08..3e06a61da921 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -206,6 +206,14 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
206 s32 status = 0; 206 s32 status = 0;
207 u32 autoc = 0; 207 u32 autoc = 0;
208 208
209 /* Determine 1G link capabilities off of SFP+ type */
210 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
211 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
212 *speed = IXGBE_LINK_SPEED_1GB_FULL;
213 *negotiation = true;
214 goto out;
215 }
216
209 /* 217 /*
210 * Determine link capabilities based on the stored value of AUTOC, 218 * Determine link capabilities based on the stored value of AUTOC,
211 * which represents EEPROM defaults. If AUTOC value has not been 219 * which represents EEPROM defaults. If AUTOC value has not been
@@ -707,9 +715,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
707 715
708out: 716out:
709 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 717 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
710 netif_info(adapter, hw, adapter->netdev, "Smartspeed has" 718 e_info(hw, "Smartspeed has downgraded the link speed from "
711 " downgraded the link speed from the maximum" 719 "the maximum advertised\n");
712 " advertised\n");
713 return status; 720 return status;
714} 721}
715 722
@@ -2088,6 +2095,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2088 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 2095 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2089 u16 ext_ability = 0; 2096 u16 ext_ability = 0;
2090 u8 comp_codes_10g = 0; 2097 u8 comp_codes_10g = 0;
2098 u8 comp_codes_1g = 0;
2091 2099
2092 hw->phy.ops.identify(hw); 2100 hw->phy.ops.identify(hw);
2093 2101
@@ -2168,11 +2176,15 @@ sfp_check:
2168 case ixgbe_phy_sfp_intel: 2176 case ixgbe_phy_sfp_intel:
2169 case ixgbe_phy_sfp_unknown: 2177 case ixgbe_phy_sfp_unknown:
2170 hw->phy.ops.read_i2c_eeprom(hw, 2178 hw->phy.ops.read_i2c_eeprom(hw,
2179 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2180 hw->phy.ops.read_i2c_eeprom(hw,
2171 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); 2181 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2172 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 2182 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2173 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 2183 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2174 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 2184 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2175 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 2185 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2186 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2187 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2176 break; 2188 break;
2177 default: 2189 default:
2178 break; 2190 break;
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 3080afb12bdf..5cf15aa11cac 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -105,12 +105,23 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
105 105
106#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 106#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
107 107
108#ifdef DEBUG 108extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
109extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
110#define hw_dbg(hw, format, arg...) \ 109#define hw_dbg(hw, format, arg...) \
111 printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg) 110 netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
112#else 111#define e_dev_info(format, arg...) \
113#define hw_dbg(hw, format, arg...) do {} while (0) 112 dev_info(&adapter->pdev->dev, format, ## arg)
114#endif 113#define e_dev_warn(format, arg...) \
115 114 dev_warn(&adapter->pdev->dev, format, ## arg)
115#define e_dev_err(format, arg...) \
116 dev_err(&adapter->pdev->dev, format, ## arg)
117#define e_dev_notice(format, arg...) \
118 dev_notice(&adapter->pdev->dev, format, ## arg)
119#define e_info(msglvl, format, arg...) \
120 netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
121#define e_err(msglvl, format, arg...) \
122 netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
123#define e_warn(msglvl, format, arg...) \
124 netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
125#define e_crit(msglvl, format, arg...) \
126 netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
116#endif /* IXGBE_COMMON */ 127#endif /* IXGBE_COMMON */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 4f7a26ab411e..25b02fb425ac 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -346,7 +346,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
346 */ 346 */
347 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 347 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
348 reg &= ~IXGBE_MFLCN_RFCE; 348 reg &= ~IXGBE_MFLCN_RFCE;
349 reg |= IXGBE_MFLCN_RPFCE; 349 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
350 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); 350 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
351out: 351out:
352 return 0; 352 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 71da325dfa80..b53b465e24af 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -121,7 +121,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
121 goto out; 121 goto out;
122 122
123 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 123 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
124 DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n"); 124 e_err(drv, "Enable failed, needs MSI-X\n");
125 err = 1; 125 err = 1;
126 goto out; 126 goto out;
127 } 127 }
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3a93a81872b8..dcebc82c6f4d 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -54,14 +54,14 @@ struct ixgbe_stats {
54 sizeof(((struct ixgbe_adapter *)0)->m), \ 54 sizeof(((struct ixgbe_adapter *)0)->m), \
55 offsetof(struct ixgbe_adapter, m) 55 offsetof(struct ixgbe_adapter, m)
56#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ 56#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
57 sizeof(((struct net_device *)0)->m), \ 57 sizeof(((struct rtnl_link_stats64 *)0)->m), \
58 offsetof(struct net_device, m) 58 offsetof(struct rtnl_link_stats64, m)
59 59
60static struct ixgbe_stats ixgbe_gstrings_stats[] = { 60static struct ixgbe_stats ixgbe_gstrings_stats[] = {
61 {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)}, 61 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
62 {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)}, 62 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
63 {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)}, 63 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
64 {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)}, 64 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
65 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 65 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
66 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 66 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
67 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 67 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
@@ -69,27 +69,27 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
69 {"lsc_int", IXGBE_STAT(lsc_int)}, 69 {"lsc_int", IXGBE_STAT(lsc_int)},
70 {"tx_busy", IXGBE_STAT(tx_busy)}, 70 {"tx_busy", IXGBE_STAT(tx_busy)},
71 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 71 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
72 {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)}, 72 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
73 {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)}, 73 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
74 {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)}, 74 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
75 {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)}, 75 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
76 {"multicast", IXGBE_NETDEV_STAT(stats.multicast)}, 76 {"multicast", IXGBE_NETDEV_STAT(multicast)},
77 {"broadcast", IXGBE_STAT(stats.bprc)}, 77 {"broadcast", IXGBE_STAT(stats.bprc)},
78 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 78 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
79 {"collisions", IXGBE_NETDEV_STAT(stats.collisions)}, 79 {"collisions", IXGBE_NETDEV_STAT(collisions)},
80 {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)}, 80 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
81 {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)}, 81 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
82 {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)}, 82 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
83 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, 83 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
84 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 84 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
85 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 85 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
86 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 86 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
87 {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)}, 87 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
88 {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)}, 88 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
89 {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)}, 89 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
90 {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)}, 90 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
91 {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)}, 91 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
92 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)}, 92 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
93 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 93 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
94 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 94 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
95 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 95 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
@@ -234,6 +234,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
234 case ixgbe_sfp_type_not_present: 234 case ixgbe_sfp_type_not_present:
235 ecmd->port = PORT_NONE; 235 ecmd->port = PORT_NONE;
236 break; 236 break;
237 case ixgbe_sfp_type_1g_cu_core0:
238 case ixgbe_sfp_type_1g_cu_core1:
239 ecmd->port = PORT_TP;
240 ecmd->supported = SUPPORTED_TP;
241 ecmd->advertising = (ADVERTISED_1000baseT_Full |
242 ADVERTISED_TP);
243 break;
237 case ixgbe_sfp_type_unknown: 244 case ixgbe_sfp_type_unknown:
238 default: 245 default:
239 ecmd->port = PORT_OTHER; 246 ecmd->port = PORT_OTHER;
@@ -294,8 +301,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
294 hw->mac.autotry_restart = true; 301 hw->mac.autotry_restart = true;
295 err = hw->mac.ops.setup_link(hw, advertised, true, true); 302 err = hw->mac.ops.setup_link(hw, advertised, true, true);
296 if (err) { 303 if (err) {
297 DPRINTK(PROBE, INFO, 304 e_info(probe, "setup link failed with code %d\n", err);
298 "setup link failed with code %d\n", err);
299 hw->mac.ops.setup_link(hw, old, true, true); 305 hw->mac.ops.setup_link(hw, old, true, true);
300 } 306 }
301 } else { 307 } else {
@@ -992,16 +998,18 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
992 struct ixgbe_adapter *adapter = netdev_priv(netdev); 998 struct ixgbe_adapter *adapter = netdev_priv(netdev);
993 u64 *queue_stat; 999 u64 *queue_stat;
994 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); 1000 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
1001 struct rtnl_link_stats64 temp;
1002 const struct rtnl_link_stats64 *net_stats;
995 int j, k; 1003 int j, k;
996 int i; 1004 int i;
997 char *p = NULL; 1005 char *p = NULL;
998 1006
999 ixgbe_update_stats(adapter); 1007 ixgbe_update_stats(adapter);
1000 dev_get_stats(netdev); 1008 net_stats = dev_get_stats(netdev, &temp);
1001 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1009 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1002 switch (ixgbe_gstrings_stats[i].type) { 1010 switch (ixgbe_gstrings_stats[i].type) {
1003 case NETDEV_STATS: 1011 case NETDEV_STATS:
1004 p = (char *) netdev + 1012 p = (char *) net_stats +
1005 ixgbe_gstrings_stats[i].stat_offset; 1013 ixgbe_gstrings_stats[i].stat_offset;
1006 break; 1014 break;
1007 case IXGBE_STATS: 1015 case IXGBE_STATS:
@@ -1188,9 +1196,9 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1188 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 1196 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
1189 val = readl(adapter->hw.hw_addr + R); \ 1197 val = readl(adapter->hw.hw_addr + R); \
1190 if (val != (_test[pat] & W & M)) { \ 1198 if (val != (_test[pat] & W & M)) { \
1191 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\ 1199 e_err(drv, "pattern test reg %04X failed: got " \
1192 "0x%08X expected 0x%08X\n", \ 1200 "0x%08X expected 0x%08X\n", \
1193 R, val, (_test[pat] & W & M)); \ 1201 R, val, (_test[pat] & W & M)); \
1194 *data = R; \ 1202 *data = R; \
1195 writel(before, adapter->hw.hw_addr + R); \ 1203 writel(before, adapter->hw.hw_addr + R); \
1196 return 1; \ 1204 return 1; \
@@ -1206,8 +1214,8 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1206 writel((W & M), (adapter->hw.hw_addr + R)); \ 1214 writel((W & M), (adapter->hw.hw_addr + R)); \
1207 val = readl(adapter->hw.hw_addr + R); \ 1215 val = readl(adapter->hw.hw_addr + R); \
1208 if ((W & M) != (val & M)) { \ 1216 if ((W & M) != (val & M)) { \
1209 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ 1217 e_err(drv, "set/check reg %04X test failed: got 0x%08X " \
1210 "expected 0x%08X\n", R, (val & M), (W & M)); \ 1218 "expected 0x%08X\n", R, (val & M), (W & M)); \
1211 *data = R; \ 1219 *data = R; \
1212 writel(before, (adapter->hw.hw_addr + R)); \ 1220 writel(before, (adapter->hw.hw_addr + R)); \
1213 return 1; \ 1221 return 1; \
@@ -1240,8 +1248,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1240 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); 1248 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1241 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; 1249 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1242 if (value != after) { 1250 if (value != after) {
1243 DPRINTK(DRV, ERR, "failed STATUS register test got: " 1251 e_err(drv, "failed STATUS register test got: 0x%08X "
1244 "0x%08X expected: 0x%08X\n", after, value); 1252 "expected: 0x%08X\n", after, value);
1245 *data = 1; 1253 *data = 1;
1246 return 1; 1254 return 1;
1247 } 1255 }
@@ -1341,8 +1349,8 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1341 *data = 1; 1349 *data = 1;
1342 return -1; 1350 return -1;
1343 } 1351 }
1344 DPRINTK(HW, INFO, "testing %s interrupt\n", 1352 e_info(hw, "testing %s interrupt\n", shared_int ?
1345 (shared_int ? "shared" : "unshared")); 1353 "shared" : "unshared");
1346 1354
1347 /* Disable all the interrupts */ 1355 /* Disable all the interrupts */
1348 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1356 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
@@ -1847,7 +1855,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
1847 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1855 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1848 /* Offline tests */ 1856 /* Offline tests */
1849 1857
1850 DPRINTK(HW, INFO, "offline testing starting\n"); 1858 e_info(hw, "offline testing starting\n");
1851 1859
1852 /* Link test performed before hardware reset so autoneg doesn't 1860 /* Link test performed before hardware reset so autoneg doesn't
1853 * interfere with test result */ 1861 * interfere with test result */
@@ -1880,17 +1888,17 @@ static void ixgbe_diag_test(struct net_device *netdev,
1880 else 1888 else
1881 ixgbe_reset(adapter); 1889 ixgbe_reset(adapter);
1882 1890
1883 DPRINTK(HW, INFO, "register testing starting\n"); 1891 e_info(hw, "register testing starting\n");
1884 if (ixgbe_reg_test(adapter, &data[0])) 1892 if (ixgbe_reg_test(adapter, &data[0]))
1885 eth_test->flags |= ETH_TEST_FL_FAILED; 1893 eth_test->flags |= ETH_TEST_FL_FAILED;
1886 1894
1887 ixgbe_reset(adapter); 1895 ixgbe_reset(adapter);
1888 DPRINTK(HW, INFO, "eeprom testing starting\n"); 1896 e_info(hw, "eeprom testing starting\n");
1889 if (ixgbe_eeprom_test(adapter, &data[1])) 1897 if (ixgbe_eeprom_test(adapter, &data[1]))
1890 eth_test->flags |= ETH_TEST_FL_FAILED; 1898 eth_test->flags |= ETH_TEST_FL_FAILED;
1891 1899
1892 ixgbe_reset(adapter); 1900 ixgbe_reset(adapter);
1893 DPRINTK(HW, INFO, "interrupt testing starting\n"); 1901 e_info(hw, "interrupt testing starting\n");
1894 if (ixgbe_intr_test(adapter, &data[2])) 1902 if (ixgbe_intr_test(adapter, &data[2]))
1895 eth_test->flags |= ETH_TEST_FL_FAILED; 1903 eth_test->flags |= ETH_TEST_FL_FAILED;
1896 1904
@@ -1898,14 +1906,14 @@ static void ixgbe_diag_test(struct net_device *netdev,
1898 * loopback diagnostic. */ 1906 * loopback diagnostic. */
1899 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 1907 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1900 IXGBE_FLAG_VMDQ_ENABLED)) { 1908 IXGBE_FLAG_VMDQ_ENABLED)) {
1901 DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT " 1909 e_info(hw, "Skip MAC loopback diagnostic in VT "
1902 "mode\n"); 1910 "mode\n");
1903 data[3] = 0; 1911 data[3] = 0;
1904 goto skip_loopback; 1912 goto skip_loopback;
1905 } 1913 }
1906 1914
1907 ixgbe_reset(adapter); 1915 ixgbe_reset(adapter);
1908 DPRINTK(HW, INFO, "loopback testing starting\n"); 1916 e_info(hw, "loopback testing starting\n");
1909 if (ixgbe_loopback_test(adapter, &data[3])) 1917 if (ixgbe_loopback_test(adapter, &data[3]))
1910 eth_test->flags |= ETH_TEST_FL_FAILED; 1918 eth_test->flags |= ETH_TEST_FL_FAILED;
1911 1919
@@ -1916,7 +1924,7 @@ skip_loopback:
1916 if (if_running) 1924 if (if_running)
1917 dev_open(netdev); 1925 dev_open(netdev);
1918 } else { 1926 } else {
1919 DPRINTK(HW, INFO, "online testing starting\n"); 1927 e_info(hw, "online testing starting\n");
1920 /* Online tests */ 1928 /* Online tests */
1921 if (ixgbe_link_test(adapter, &data[4])) 1929 if (ixgbe_link_test(adapter, &data[4]))
1922 eth_test->flags |= ETH_TEST_FL_FAILED; 1930 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -2134,8 +2142,8 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2134 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2142 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2135 if (netdev->features & NETIF_F_LRO) { 2143 if (netdev->features & NETIF_F_LRO) {
2136 netdev->features &= ~NETIF_F_LRO; 2144 netdev->features &= ~NETIF_F_LRO;
2137 DPRINTK(PROBE, INFO, "rx-usecs set to 0, " 2145 e_info(probe, "rx-usecs set to 0, "
2138 "disabling LRO/RSC\n"); 2146 "disabling RSC\n");
2139 } 2147 }
2140 need_reset = true; 2148 need_reset = true;
2141 } 2149 }
@@ -2208,8 +2216,11 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2208{ 2216{
2209 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2217 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2210 bool need_reset = false; 2218 bool need_reset = false;
2219 int rc;
2211 2220
2212 ethtool_op_set_flags(netdev, data); 2221 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
2222 if (rc)
2223 return rc;
2213 2224
2214 /* if state changes we need to update adapter->flags and reset */ 2225 /* if state changes we need to update adapter->flags and reset */
2215 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { 2226 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
@@ -2230,10 +2241,10 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2230 break; 2241 break;
2231 } 2242 }
2232 } else if (!adapter->rx_itr_setting) { 2243 } else if (!adapter->rx_itr_setting) {
2233 netdev->features &= ~ETH_FLAG_LRO; 2244 netdev->features &= ~NETIF_F_LRO;
2234 if (data & ETH_FLAG_LRO) 2245 if (data & ETH_FLAG_LRO)
2235 DPRINTK(PROBE, INFO, "rx-usecs set to 0, " 2246 e_info(probe, "rx-usecs set to 0, "
2236 "LRO/RSC cannot be enabled.\n"); 2247 "LRO/RSC cannot be enabled.\n");
2237 } 2248 }
2238 } 2249 }
2239 2250
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 45182ab41d6b..072327c5e41a 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -25,7 +25,6 @@
25 25
26*******************************************************************************/ 26*******************************************************************************/
27 27
28
29#include "ixgbe.h" 28#include "ixgbe.h"
30#ifdef CONFIG_IXGBE_DCB 29#ifdef CONFIG_IXGBE_DCB
31#include "ixgbe_dcb_82599.h" 30#include "ixgbe_dcb_82599.h"
@@ -165,20 +164,20 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
165 164
166 adapter = netdev_priv(netdev); 165 adapter = netdev_priv(netdev);
167 if (xid >= IXGBE_FCOE_DDP_MAX) { 166 if (xid >= IXGBE_FCOE_DDP_MAX) {
168 DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid); 167 e_warn(drv, "xid=0x%x out-of-range\n", xid);
169 return 0; 168 return 0;
170 } 169 }
171 170
172 fcoe = &adapter->fcoe; 171 fcoe = &adapter->fcoe;
173 if (!fcoe->pool) { 172 if (!fcoe->pool) {
174 DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid); 173 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
175 return 0; 174 return 0;
176 } 175 }
177 176
178 ddp = &fcoe->ddp[xid]; 177 ddp = &fcoe->ddp[xid];
179 if (ddp->sgl) { 178 if (ddp->sgl) {
180 DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n", 179 e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
181 xid, ddp->sgl, ddp->sgc); 180 xid, ddp->sgl, ddp->sgc);
182 return 0; 181 return 0;
183 } 182 }
184 ixgbe_fcoe_clear_ddp(ddp); 183 ixgbe_fcoe_clear_ddp(ddp);
@@ -186,14 +185,14 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
186 /* setup dma from scsi command sgl */ 185 /* setup dma from scsi command sgl */
187 dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); 186 dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
188 if (dmacount == 0) { 187 if (dmacount == 0) {
189 DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid); 188 e_err(drv, "xid 0x%x DMA map error\n", xid);
190 return 0; 189 return 0;
191 } 190 }
192 191
193 /* alloc the udl from our ddp pool */ 192 /* alloc the udl from our ddp pool */
194 ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp); 193 ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
195 if (!ddp->udl) { 194 if (!ddp->udl) {
196 DPRINTK(DRV, ERR, "failed allocated ddp context\n"); 195 e_err(drv, "failed allocated ddp context\n");
197 goto out_noddp_unmap; 196 goto out_noddp_unmap;
198 } 197 }
199 ddp->sgl = sgl; 198 ddp->sgl = sgl;
@@ -206,10 +205,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
206 while (len) { 205 while (len) {
207 /* max number of buffers allowed in one DDP context */ 206 /* max number of buffers allowed in one DDP context */
208 if (j >= IXGBE_BUFFCNT_MAX) { 207 if (j >= IXGBE_BUFFCNT_MAX) {
209 netif_err(adapter, drv, adapter->netdev, 208 e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
210 "xid=%x:%d,%d,%d:addr=%llx " 209 "not enough descriptors\n",
211 "not enough descriptors\n", 210 xid, i, j, dmacount, (u64)addr);
212 xid, i, j, dmacount, (u64)addr);
213 goto out_noddp_free; 211 goto out_noddp_free;
214 } 212 }
215 213
@@ -387,8 +385,8 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
387 struct fc_frame_header *fh; 385 struct fc_frame_header *fh;
388 386
389 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { 387 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
390 DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", 388 e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
391 skb_shinfo(skb)->gso_type); 389 skb_shinfo(skb)->gso_type);
392 return -EINVAL; 390 return -EINVAL;
393 } 391 }
394 392
@@ -414,7 +412,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
414 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF; 412 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
415 break; 413 break;
416 default: 414 default:
417 DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof); 415 e_warn(drv, "unknown sof = 0x%x\n", sof);
418 return -EINVAL; 416 return -EINVAL;
419 } 417 }
420 418
@@ -441,7 +439,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
441 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; 439 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
442 break; 440 break;
443 default: 441 default:
444 DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof); 442 e_warn(drv, "unknown eof = 0x%x\n", eof);
445 return -EINVAL; 443 return -EINVAL;
446 } 444 }
447 445
@@ -517,8 +515,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
517 adapter->pdev, IXGBE_FCPTR_MAX, 515 adapter->pdev, IXGBE_FCPTR_MAX,
518 IXGBE_FCPTR_ALIGN, PAGE_SIZE); 516 IXGBE_FCPTR_ALIGN, PAGE_SIZE);
519 if (!fcoe->pool) 517 if (!fcoe->pool)
520 DPRINTK(DRV, ERR, 518 e_err(drv, "failed to allocated FCoE DDP pool\n");
521 "failed to allocated FCoE DDP pool\n");
522 519
523 spin_lock_init(&fcoe->lock); 520 spin_lock_init(&fcoe->lock);
524 } 521 }
@@ -614,7 +611,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
614 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 611 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
615 goto out_enable; 612 goto out_enable;
616 613
617 DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n"); 614 e_info(drv, "Enabling FCoE offload features.\n");
618 if (netif_running(netdev)) 615 if (netif_running(netdev))
619 netdev->netdev_ops->ndo_stop(netdev); 616 netdev->netdev_ops->ndo_stop(netdev);
620 617
@@ -625,9 +622,6 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
625 netdev->features |= NETIF_F_FCOE_CRC; 622 netdev->features |= NETIF_F_FCOE_CRC;
626 netdev->features |= NETIF_F_FSO; 623 netdev->features |= NETIF_F_FSO;
627 netdev->features |= NETIF_F_FCOE_MTU; 624 netdev->features |= NETIF_F_FCOE_MTU;
628 netdev->vlan_features |= NETIF_F_FCOE_CRC;
629 netdev->vlan_features |= NETIF_F_FSO;
630 netdev->vlan_features |= NETIF_F_FCOE_MTU;
631 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; 625 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
632 626
633 ixgbe_init_interrupt_scheme(adapter); 627 ixgbe_init_interrupt_scheme(adapter);
@@ -660,25 +654,21 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
660 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 654 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
661 goto out_disable; 655 goto out_disable;
662 656
663 DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n"); 657 e_info(drv, "Disabling FCoE offload features.\n");
658 netdev->features &= ~NETIF_F_FCOE_CRC;
659 netdev->features &= ~NETIF_F_FSO;
660 netdev->features &= ~NETIF_F_FCOE_MTU;
661 netdev->fcoe_ddp_xid = 0;
662 netdev_features_change(netdev);
663
664 if (netif_running(netdev)) 664 if (netif_running(netdev))
665 netdev->netdev_ops->ndo_stop(netdev); 665 netdev->netdev_ops->ndo_stop(netdev);
666 666
667 ixgbe_clear_interrupt_scheme(adapter); 667 ixgbe_clear_interrupt_scheme(adapter);
668
669 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 668 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
670 adapter->ring_feature[RING_F_FCOE].indices = 0; 669 adapter->ring_feature[RING_F_FCOE].indices = 0;
671 netdev->features &= ~NETIF_F_FCOE_CRC;
672 netdev->features &= ~NETIF_F_FSO;
673 netdev->features &= ~NETIF_F_FCOE_MTU;
674 netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
675 netdev->vlan_features &= ~NETIF_F_FSO;
676 netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
677 netdev->fcoe_ddp_xid = 0;
678
679 ixgbe_cleanup_fcoe(adapter); 670 ixgbe_cleanup_fcoe(adapter);
680 ixgbe_init_interrupt_scheme(adapter); 671 ixgbe_init_interrupt_scheme(adapter);
681 netdev_features_change(netdev);
682 672
683 if (netif_running(netdev)) 673 if (netif_running(netdev))
684 netdev->netdev_ops->ndo_open(netdev); 674 netdev->netdev_ops->ndo_open(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 74d9b6df3029..7d6a415bcf88 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "2.0.62-k2" 55#define DRV_VERSION "2.0.84-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
@@ -696,19 +696,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
696 /* detected Tx unit hang */ 696 /* detected Tx unit hang */
697 union ixgbe_adv_tx_desc *tx_desc; 697 union ixgbe_adv_tx_desc *tx_desc;
698 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 698 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
699 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 699 e_err(drv, "Detected Tx Unit Hang\n"
700 " Tx Queue <%d>\n" 700 " Tx Queue <%d>\n"
701 " TDH, TDT <%x>, <%x>\n" 701 " TDH, TDT <%x>, <%x>\n"
702 " next_to_use <%x>\n" 702 " next_to_use <%x>\n"
703 " next_to_clean <%x>\n" 703 " next_to_clean <%x>\n"
704 "tx_buffer_info[next_to_clean]\n" 704 "tx_buffer_info[next_to_clean]\n"
705 " time_stamp <%lx>\n" 705 " time_stamp <%lx>\n"
706 " jiffies <%lx>\n", 706 " jiffies <%lx>\n",
707 tx_ring->queue_index, 707 tx_ring->queue_index,
708 IXGBE_READ_REG(hw, tx_ring->head), 708 IXGBE_READ_REG(hw, tx_ring->head),
709 IXGBE_READ_REG(hw, tx_ring->tail), 709 IXGBE_READ_REG(hw, tx_ring->tail),
710 tx_ring->next_to_use, eop, 710 tx_ring->next_to_use, eop,
711 tx_ring->tx_buffer_info[eop].time_stamp, jiffies); 711 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
712 return true; 712 return true;
713 } 713 }
714 714
@@ -812,9 +812,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
812 if (adapter->detect_tx_hung) { 812 if (adapter->detect_tx_hung) {
813 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { 813 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
814 /* schedule immediate reset if we believe we hung */ 814 /* schedule immediate reset if we believe we hung */
815 DPRINTK(PROBE, INFO, 815 e_info(probe, "tx hang %d detected, resetting "
816 "tx hang %d detected, resetting adapter\n", 816 "adapter\n", adapter->tx_timeout_count + 1);
817 adapter->tx_timeout_count + 1);
818 ixgbe_tx_timeout(adapter->netdev); 817 ixgbe_tx_timeout(adapter->netdev);
819 } 818 }
820 } 819 }
@@ -1653,10 +1652,10 @@ static void ixgbe_check_overtemp_task(struct work_struct *work)
1653 return; 1652 return;
1654 break; 1653 break;
1655 } 1654 }
1656 DPRINTK(DRV, ERR, "Network adapter has been stopped because it " 1655 e_crit(drv, "Network adapter has been stopped because it has "
1657 "has over heated. Restart the computer. If the problem " 1656 "over heated. Restart the computer. If the problem "
1658 "persists, power off the system and replace the " 1657 "persists, power off the system and replace the "
1659 "adapter\n"); 1658 "adapter\n");
1660 /* write to clear the interrupt */ 1659 /* write to clear the interrupt */
1661 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); 1660 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
1662 } 1661 }
@@ -1668,7 +1667,7 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1668 1667
1669 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 1668 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1670 (eicr & IXGBE_EICR_GPI_SDP1)) { 1669 (eicr & IXGBE_EICR_GPI_SDP1)) {
1671 DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); 1670 e_crit(probe, "Fan has stopped, replace the adapter\n");
1672 /* write to clear the interrupt */ 1671 /* write to clear the interrupt */
1673 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1672 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1674 } 1673 }
@@ -2154,9 +2153,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2154 handler, 0, adapter->name[vector], 2153 handler, 0, adapter->name[vector],
2155 adapter->q_vector[vector]); 2154 adapter->q_vector[vector]);
2156 if (err) { 2155 if (err) {
2157 DPRINTK(PROBE, ERR, 2156 e_err(probe, "request_irq failed for MSIX interrupt "
2158 "request_irq failed for MSIX interrupt " 2157 "Error: %d\n", err);
2159 "Error: %d\n", err);
2160 goto free_queue_irqs; 2158 goto free_queue_irqs;
2161 } 2159 }
2162 } 2160 }
@@ -2165,8 +2163,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2165 err = request_irq(adapter->msix_entries[vector].vector, 2163 err = request_irq(adapter->msix_entries[vector].vector,
2166 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2164 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
2167 if (err) { 2165 if (err) {
2168 DPRINTK(PROBE, ERR, 2166 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2169 "request_irq for msix_lsc failed: %d\n", err);
2170 goto free_queue_irqs; 2167 goto free_queue_irqs;
2171 } 2168 }
2172 2169
@@ -2352,7 +2349,7 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2352 } 2349 }
2353 2350
2354 if (err) 2351 if (err)
2355 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); 2352 e_err(probe, "request_irq failed, Error %d\n", err);
2356 2353
2357 return err; 2354 return err;
2358} 2355}
@@ -2423,7 +2420,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2423 map_vector_to_rxq(adapter, 0, 0); 2420 map_vector_to_rxq(adapter, 0, 0);
2424 map_vector_to_txq(adapter, 0, 0); 2421 map_vector_to_txq(adapter, 0, 0);
2425 2422
2426 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); 2423 e_info(hw, "Legacy interrupt IVAR setup done\n");
2427} 2424}
2428 2425
2429/** 2426/**
@@ -2803,10 +2800,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2803 /* Perform hash on these packet types */ 2800 /* Perform hash on these packet types */
2804 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 2801 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2805 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 2802 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2806 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2807 | IXGBE_MRQC_RSS_FIELD_IPV6 2803 | IXGBE_MRQC_RSS_FIELD_IPV6
2808 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 2804 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2809 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2810 } 2805 }
2811 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2806 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2812 2807
@@ -2995,6 +2990,48 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2995} 2990}
2996 2991
2997/** 2992/**
2993 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
2994 * @netdev: network interface device structure
2995 *
2996 * Writes unicast address list to the RAR table.
2997 * Returns: -ENOMEM on failure/insufficient address space
2998 * 0 on no addresses written
2999 * X on writing X addresses to the RAR table
3000 **/
3001static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3002{
3003 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3004 struct ixgbe_hw *hw = &adapter->hw;
3005 unsigned int vfn = adapter->num_vfs;
3006 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3007 int count = 0;
3008
3009 /* return ENOMEM indicating insufficient memory for addresses */
3010 if (netdev_uc_count(netdev) > rar_entries)
3011 return -ENOMEM;
3012
3013 if (!netdev_uc_empty(netdev) && rar_entries) {
3014 struct netdev_hw_addr *ha;
3015 /* return error if we do not support writing to RAR table */
3016 if (!hw->mac.ops.set_rar)
3017 return -ENOMEM;
3018
3019 netdev_for_each_uc_addr(ha, netdev) {
3020 if (!rar_entries)
3021 break;
3022 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3023 vfn, IXGBE_RAH_AV);
3024 count++;
3025 }
3026 }
3027 /* write the addresses in reverse order to avoid write combining */
3028 for (; rar_entries > 0 ; rar_entries--)
3029 hw->mac.ops.clear_rar(hw, rar_entries);
3030
3031 return count;
3032}
3033
3034/**
2998 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 3035 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2999 * @netdev: network interface device structure 3036 * @netdev: network interface device structure
3000 * 3037 *
@@ -3007,38 +3044,58 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3007{ 3044{
3008 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3045 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3009 struct ixgbe_hw *hw = &adapter->hw; 3046 struct ixgbe_hw *hw = &adapter->hw;
3010 u32 fctrl; 3047 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3048 int count;
3011 3049
3012 /* Check for Promiscuous and All Multicast modes */ 3050 /* Check for Promiscuous and All Multicast modes */
3013 3051
3014 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3052 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3015 3053
3054 /* clear the bits we are changing the status of */
3055 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3056
3016 if (netdev->flags & IFF_PROMISC) { 3057 if (netdev->flags & IFF_PROMISC) {
3017 hw->addr_ctrl.user_set_promisc = true; 3058 hw->addr_ctrl.user_set_promisc = true;
3018 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3059 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3060 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3019 /* don't hardware filter vlans in promisc mode */ 3061 /* don't hardware filter vlans in promisc mode */
3020 ixgbe_vlan_filter_disable(adapter); 3062 ixgbe_vlan_filter_disable(adapter);
3021 } else { 3063 } else {
3022 if (netdev->flags & IFF_ALLMULTI) { 3064 if (netdev->flags & IFF_ALLMULTI) {
3023 fctrl |= IXGBE_FCTRL_MPE; 3065 fctrl |= IXGBE_FCTRL_MPE;
3024 fctrl &= ~IXGBE_FCTRL_UPE; 3066 vmolr |= IXGBE_VMOLR_MPE;
3025 } else if (!hw->addr_ctrl.uc_set_promisc) { 3067 } else {
3026 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3068 /*
3069 * Write addresses to the MTA, if the attempt fails
3070 * then we should just turn on promiscous mode so
3071 * that we can at least receive multicast traffic
3072 */
3073 hw->mac.ops.update_mc_addr_list(hw, netdev);
3074 vmolr |= IXGBE_VMOLR_ROMPE;
3027 } 3075 }
3028 ixgbe_vlan_filter_enable(adapter); 3076 ixgbe_vlan_filter_enable(adapter);
3029 hw->addr_ctrl.user_set_promisc = false; 3077 hw->addr_ctrl.user_set_promisc = false;
3078 /*
3079 * Write addresses to available RAR registers, if there is not
3080 * sufficient space to store all the addresses then enable
3081 * unicast promiscous mode
3082 */
3083 count = ixgbe_write_uc_addr_list(netdev);
3084 if (count < 0) {
3085 fctrl |= IXGBE_FCTRL_UPE;
3086 vmolr |= IXGBE_VMOLR_ROPE;
3087 }
3030 } 3088 }
3031 3089
3032 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3090 if (adapter->num_vfs) {
3033
3034 /* reprogram secondary unicast list */
3035 hw->mac.ops.update_uc_addr_list(hw, netdev);
3036
3037 /* reprogram multicast list */
3038 hw->mac.ops.update_mc_addr_list(hw, netdev);
3039
3040 if (adapter->num_vfs)
3041 ixgbe_restore_vf_multicasts(adapter); 3091 ixgbe_restore_vf_multicasts(adapter);
3092 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3093 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3094 IXGBE_VMOLR_ROPE);
3095 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3096 }
3097
3098 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3042} 3099}
3043 3100
3044static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 3101static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -3257,8 +3314,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3257 msleep(1); 3314 msleep(1);
3258 } 3315 }
3259 if (k >= IXGBE_MAX_RX_DESC_POLL) { 3316 if (k >= IXGBE_MAX_RX_DESC_POLL) {
3260 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " 3317 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3261 "not set within the polling period\n", rxr); 3318 "the polling period\n", rxr);
3262 } 3319 }
3263 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], 3320 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
3264 (adapter->rx_ring[rxr]->count - 1)); 3321 (adapter->rx_ring[rxr]->count - 1));
@@ -3387,8 +3444,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3387 } while (--wait_loop && 3444 } while (--wait_loop &&
3388 !(txdctl & IXGBE_TXDCTL_ENABLE)); 3445 !(txdctl & IXGBE_TXDCTL_ENABLE));
3389 if (!wait_loop) 3446 if (!wait_loop)
3390 DPRINTK(DRV, ERR, "Could not enable " 3447 e_err(drv, "Could not enable Tx Queue %d\n", j);
3391 "Tx Queue %d\n", j);
3392 } 3448 }
3393 } 3449 }
3394 3450
@@ -3436,8 +3492,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3436 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3492 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3437 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 3493 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3438 if (esdp & IXGBE_ESDP_SDP1) 3494 if (esdp & IXGBE_ESDP_SDP1)
3439 DPRINTK(DRV, CRIT, 3495 e_crit(drv, "Fan has stopped, replace the adapter\n");
3440 "Fan has stopped, replace the adapter\n");
3441 } 3496 }
3442 3497
3443 /* 3498 /*
@@ -3466,7 +3521,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3466 } else { 3521 } else {
3467 err = ixgbe_non_sfp_link_config(hw); 3522 err = ixgbe_non_sfp_link_config(hw);
3468 if (err) 3523 if (err)
3469 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); 3524 e_err(probe, "link_config FAILED %d\n", err);
3470 } 3525 }
3471 3526
3472 for (i = 0; i < adapter->num_tx_queues; i++) 3527 for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3527,19 +3582,19 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3527 case IXGBE_ERR_SFP_NOT_PRESENT: 3582 case IXGBE_ERR_SFP_NOT_PRESENT:
3528 break; 3583 break;
3529 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 3584 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
3530 dev_err(&adapter->pdev->dev, "master disable timed out\n"); 3585 e_dev_err("master disable timed out\n");
3531 break; 3586 break;
3532 case IXGBE_ERR_EEPROM_VERSION: 3587 case IXGBE_ERR_EEPROM_VERSION:
3533 /* We are running on a pre-production device, log a warning */ 3588 /* We are running on a pre-production device, log a warning */
3534 dev_warn(&adapter->pdev->dev, "This device is a pre-production " 3589 e_dev_warn("This device is a pre-production adapter/LOM. "
3535 "adapter/LOM. Please be aware there may be issues " 3590 "Please be aware there may be issuesassociated with "
3536 "associated with your hardware. If you are " 3591 "your hardware. If you are experiencing problems "
3537 "experiencing problems please contact your Intel or " 3592 "please contact your Intel or hardware "
3538 "hardware representative who provided you with this " 3593 "representative who provided you with this "
3539 "hardware.\n"); 3594 "hardware.\n");
3540 break; 3595 break;
3541 default: 3596 default:
3542 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); 3597 e_dev_err("Hardware Error: %d\n", err);
3543 } 3598 }
3544 3599
3545 /* reprogram the RAR[0] in case user changed it. */ 3600 /* reprogram the RAR[0] in case user changed it. */
@@ -3920,12 +3975,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3920 adapter->num_tx_queues = 1; 3975 adapter->num_tx_queues = 1;
3921#ifdef CONFIG_IXGBE_DCB 3976#ifdef CONFIG_IXGBE_DCB
3922 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3977 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3923 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); 3978 e_info(probe, "FCoE enabled with DCB\n");
3924 ixgbe_set_dcb_queues(adapter); 3979 ixgbe_set_dcb_queues(adapter);
3925 } 3980 }
3926#endif 3981#endif
3927 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3982 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3928 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); 3983 e_info(probe, "FCoE enabled with RSS\n");
3929 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3984 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3930 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3985 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3931 ixgbe_set_fdir_queues(adapter); 3986 ixgbe_set_fdir_queues(adapter);
@@ -4038,7 +4093,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4038 * This just means we'll go with either a single MSI 4093 * This just means we'll go with either a single MSI
4039 * vector or fall back to legacy interrupts. 4094 * vector or fall back to legacy interrupts.
4040 */ 4095 */
4041 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); 4096 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4097 "Unable to allocate MSI-X interrupts\n");
4042 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 4098 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4043 kfree(adapter->msix_entries); 4099 kfree(adapter->msix_entries);
4044 adapter->msix_entries = NULL; 4100 adapter->msix_entries = NULL;
@@ -4435,8 +4491,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4435 if (!err) { 4491 if (!err) {
4436 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 4492 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4437 } else { 4493 } else {
4438 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " 4494 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4439 "falling back to legacy. Error: %d\n", err); 4495 "Unable to allocate MSI interrupt, "
4496 "falling back to legacy. Error: %d\n", err);
4440 /* reset err */ 4497 /* reset err */
4441 err = 0; 4498 err = 0;
4442 } 4499 }
@@ -4557,27 +4614,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
4557 4614
4558 err = ixgbe_set_interrupt_capability(adapter); 4615 err = ixgbe_set_interrupt_capability(adapter);
4559 if (err) { 4616 if (err) {
4560 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); 4617 e_dev_err("Unable to setup interrupt capabilities\n");
4561 goto err_set_interrupt; 4618 goto err_set_interrupt;
4562 } 4619 }
4563 4620
4564 err = ixgbe_alloc_q_vectors(adapter); 4621 err = ixgbe_alloc_q_vectors(adapter);
4565 if (err) { 4622 if (err) {
4566 DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " 4623 e_dev_err("Unable to allocate memory for queue vectors\n");
4567 "vectors\n");
4568 goto err_alloc_q_vectors; 4624 goto err_alloc_q_vectors;
4569 } 4625 }
4570 4626
4571 err = ixgbe_alloc_queues(adapter); 4627 err = ixgbe_alloc_queues(adapter);
4572 if (err) { 4628 if (err) {
4573 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); 4629 e_dev_err("Unable to allocate memory for queues\n");
4574 goto err_alloc_queues; 4630 goto err_alloc_queues;
4575 } 4631 }
4576 4632
4577 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " 4633 e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
4578 "Tx Queue count = %u\n", 4634 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
4579 (adapter->num_rx_queues > 1) ? "Enabled" : 4635 adapter->num_rx_queues, adapter->num_tx_queues);
4580 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
4581 4636
4582 set_bit(__IXGBE_DOWN, &adapter->state); 4637 set_bit(__IXGBE_DOWN, &adapter->state);
4583 4638
@@ -4648,15 +4703,13 @@ static void ixgbe_sfp_task(struct work_struct *work)
4648 goto reschedule; 4703 goto reschedule;
4649 ret = hw->phy.ops.reset(hw); 4704 ret = hw->phy.ops.reset(hw);
4650 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4705 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4651 dev_err(&adapter->pdev->dev, "failed to initialize " 4706 e_dev_err("failed to initialize because an unsupported "
4652 "because an unsupported SFP+ module type " 4707 "SFP+ module type was detected.\n");
4653 "was detected.\n" 4708 e_dev_err("Reload the driver after installing a "
4654 "Reload the driver after installing a " 4709 "supported module.\n");
4655 "supported module.\n");
4656 unregister_netdev(adapter->netdev); 4710 unregister_netdev(adapter->netdev);
4657 } else { 4711 } else {
4658 DPRINTK(PROBE, INFO, "detected SFP+: %d\n", 4712 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
4659 hw->phy.sfp_type);
4660 } 4713 }
4661 /* don't need this routine any more */ 4714 /* don't need this routine any more */
4662 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 4715 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
@@ -4730,6 +4783,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4730#ifdef CONFIG_IXGBE_DCB 4783#ifdef CONFIG_IXGBE_DCB
4731 /* Default traffic class to use for FCoE */ 4784 /* Default traffic class to use for FCoE */
4732 adapter->fcoe.tc = IXGBE_FCOE_DEFTC; 4785 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
4786 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4733#endif 4787#endif
4734#endif /* IXGBE_FCOE */ 4788#endif /* IXGBE_FCOE */
4735 } 4789 }
@@ -4783,7 +4837,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4783 4837
4784 /* initialize eeprom parameters */ 4838 /* initialize eeprom parameters */
4785 if (ixgbe_init_eeprom_params_generic(hw)) { 4839 if (ixgbe_init_eeprom_params_generic(hw)) {
4786 dev_err(&pdev->dev, "EEPROM initialization failed\n"); 4840 e_dev_err("EEPROM initialization failed\n");
4787 return -EIO; 4841 return -EIO;
4788 } 4842 }
4789 4843
@@ -4836,8 +4890,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4836err: 4890err:
4837 vfree(tx_ring->tx_buffer_info); 4891 vfree(tx_ring->tx_buffer_info);
4838 tx_ring->tx_buffer_info = NULL; 4892 tx_ring->tx_buffer_info = NULL;
4839 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit " 4893 e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
4840 "descriptor ring\n");
4841 return -ENOMEM; 4894 return -ENOMEM;
4842} 4895}
4843 4896
@@ -4859,7 +4912,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4859 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); 4912 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
4860 if (!err) 4913 if (!err)
4861 continue; 4914 continue;
4862 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); 4915 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
4863 break; 4916 break;
4864 } 4917 }
4865 4918
@@ -4884,8 +4937,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4884 if (!rx_ring->rx_buffer_info) 4937 if (!rx_ring->rx_buffer_info)
4885 rx_ring->rx_buffer_info = vmalloc(size); 4938 rx_ring->rx_buffer_info = vmalloc(size);
4886 if (!rx_ring->rx_buffer_info) { 4939 if (!rx_ring->rx_buffer_info) {
4887 DPRINTK(PROBE, ERR, 4940 e_err(probe, "vmalloc allocation failed for the Rx "
4888 "vmalloc allocation failed for the rx desc ring\n"); 4941 "descriptor ring\n");
4889 goto alloc_failed; 4942 goto alloc_failed;
4890 } 4943 }
4891 memset(rx_ring->rx_buffer_info, 0, size); 4944 memset(rx_ring->rx_buffer_info, 0, size);
@@ -4898,8 +4951,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4898 &rx_ring->dma, GFP_KERNEL); 4951 &rx_ring->dma, GFP_KERNEL);
4899 4952
4900 if (!rx_ring->desc) { 4953 if (!rx_ring->desc) {
4901 DPRINTK(PROBE, ERR, 4954 e_err(probe, "Memory allocation failed for the Rx "
4902 "Memory allocation failed for the rx desc ring\n"); 4955 "descriptor ring\n");
4903 vfree(rx_ring->rx_buffer_info); 4956 vfree(rx_ring->rx_buffer_info);
4904 goto alloc_failed; 4957 goto alloc_failed;
4905 } 4958 }
@@ -4932,7 +4985,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4932 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); 4985 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
4933 if (!err) 4986 if (!err)
4934 continue; 4987 continue;
4935 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); 4988 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
4936 break; 4989 break;
4937 } 4990 }
4938 4991
@@ -5031,8 +5084,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5031 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 5084 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5032 return -EINVAL; 5085 return -EINVAL;
5033 5086
5034 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", 5087 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5035 netdev->mtu, new_mtu);
5036 /* must set new MTU before calling down or up */ 5088 /* must set new MTU before calling down or up */
5037 netdev->mtu = new_mtu; 5089 netdev->mtu = new_mtu;
5038 5090
@@ -5145,8 +5197,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5145 5197
5146 err = pci_enable_device_mem(pdev); 5198 err = pci_enable_device_mem(pdev);
5147 if (err) { 5199 if (err) {
5148 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " 5200 e_dev_err("Cannot enable PCI device from suspend\n");
5149 "suspend\n");
5150 return err; 5201 return err;
5151 } 5202 }
5152 pci_set_master(pdev); 5203 pci_set_master(pdev);
@@ -5155,8 +5206,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5155 5206
5156 err = ixgbe_init_interrupt_scheme(adapter); 5207 err = ixgbe_init_interrupt_scheme(adapter);
5157 if (err) { 5208 if (err) {
5158 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " 5209 e_dev_err("Cannot initialize interrupts for device\n");
5159 "device\n");
5160 return err; 5210 return err;
5161 } 5211 }
5162 5212
@@ -5517,10 +5567,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5517 err = hw->phy.ops.identify_sfp(hw); 5567 err = hw->phy.ops.identify_sfp(hw);
5518 5568
5519 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 5569 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5520 dev_err(&adapter->pdev->dev, "failed to initialize because " 5570 e_dev_err("failed to initialize because an unsupported SFP+ "
5521 "an unsupported SFP+ module type was detected.\n" 5571 "module type was detected.\n");
5522 "Reload the driver after installing a supported " 5572 e_dev_err("Reload the driver after installing a supported "
5523 "module.\n"); 5573 "module.\n");
5524 unregister_netdev(adapter->netdev); 5574 unregister_netdev(adapter->netdev);
5525 return; 5575 return;
5526 } 5576 }
@@ -5549,8 +5599,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5549 set_bit(__IXGBE_FDIR_INIT_DONE, 5599 set_bit(__IXGBE_FDIR_INIT_DONE,
5550 &(adapter->tx_ring[i]->reinit_state)); 5600 &(adapter->tx_ring[i]->reinit_state));
5551 } else { 5601 } else {
5552 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 5602 e_err(probe, "failed to finish FDIR re-initialization, "
5553 "ignored adding FDIR ATR filters\n"); 5603 "ignored adding FDIR ATR filters\n");
5554 } 5604 }
5555 /* Done FDIR Re-initialization, enable transmits */ 5605 /* Done FDIR Re-initialization, enable transmits */
5556 netif_tx_start_all_queues(adapter->netdev); 5606 netif_tx_start_all_queues(adapter->netdev);
@@ -5621,16 +5671,14 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5621 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 5671 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5622 } 5672 }
5623 5673
5624 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " 5674 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5625 "Flow Control: %s\n",
5626 netdev->name,
5627 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 5675 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5628 "10 Gbps" : 5676 "10 Gbps" :
5629 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 5677 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5630 "1 Gbps" : "unknown speed")), 5678 "1 Gbps" : "unknown speed")),
5631 ((flow_rx && flow_tx) ? "RX/TX" : 5679 ((flow_rx && flow_tx) ? "RX/TX" :
5632 (flow_rx ? "RX" : 5680 (flow_rx ? "RX" :
5633 (flow_tx ? "TX" : "None")))); 5681 (flow_tx ? "TX" : "None"))));
5634 5682
5635 netif_carrier_on(netdev); 5683 netif_carrier_on(netdev);
5636 } else { 5684 } else {
@@ -5641,8 +5689,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5641 adapter->link_up = false; 5689 adapter->link_up = false;
5642 adapter->link_speed = 0; 5690 adapter->link_speed = 0;
5643 if (netif_carrier_ok(netdev)) { 5691 if (netif_carrier_ok(netdev)) {
5644 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", 5692 e_info(drv, "NIC Link is Down\n");
5645 netdev->name);
5646 netif_carrier_off(netdev); 5693 netif_carrier_off(netdev);
5647 } 5694 }
5648 } 5695 }
@@ -5818,9 +5865,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5818 break; 5865 break;
5819 default: 5866 default:
5820 if (unlikely(net_ratelimit())) { 5867 if (unlikely(net_ratelimit())) {
5821 DPRINTK(PROBE, WARNING, 5868 e_warn(probe, "partial checksum "
5822 "partial checksum but proto=%x!\n", 5869 "but proto=%x!\n",
5823 skb->protocol); 5870 skb->protocol);
5824 } 5871 }
5825 break; 5872 break;
5826 } 5873 }
@@ -5931,7 +5978,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5931 return count; 5978 return count;
5932 5979
5933dma_error: 5980dma_error:
5934 dev_err(&pdev->dev, "TX DMA map failed\n"); 5981 e_dev_err("TX DMA map failed\n");
5935 5982
5936 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 5983 /* clear timestamp and dma mappings for failed tx_buffer_info map */
5937 tx_buffer_info->dma = 0; 5984 tx_buffer_info->dma = 0;
@@ -6101,21 +6148,26 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6101 struct ixgbe_adapter *adapter = netdev_priv(dev); 6148 struct ixgbe_adapter *adapter = netdev_priv(dev);
6102 int txq = smp_processor_id(); 6149 int txq = smp_processor_id();
6103 6150
6151#ifdef IXGBE_FCOE
6152 if ((skb->protocol == htons(ETH_P_FCOE)) ||
6153 (skb->protocol == htons(ETH_P_FIP))) {
6154 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
6155 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6156 txq += adapter->ring_feature[RING_F_FCOE].mask;
6157 return txq;
6158 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6159 txq = adapter->fcoe.up;
6160 return txq;
6161 }
6162 }
6163#endif
6164
6104 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 6165 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
6105 while (unlikely(txq >= dev->real_num_tx_queues)) 6166 while (unlikely(txq >= dev->real_num_tx_queues))
6106 txq -= dev->real_num_tx_queues; 6167 txq -= dev->real_num_tx_queues;
6107 return txq; 6168 return txq;
6108 } 6169 }
6109 6170
6110#ifdef IXGBE_FCOE
6111 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
6112 ((skb->protocol == htons(ETH_P_FCOE)) ||
6113 (skb->protocol == htons(ETH_P_FIP)))) {
6114 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6115 txq += adapter->ring_feature[RING_F_FCOE].mask;
6116 return txq;
6117 }
6118#endif
6119 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6171 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6120 if (skb->priority == TC_PRIO_CONTROL) 6172 if (skb->priority == TC_PRIO_CONTROL)
6121 txq = adapter->ring_feature[RING_F_DCB].indices-1; 6173 txq = adapter->ring_feature[RING_F_DCB].indices-1;
@@ -6159,18 +6211,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6159 tx_ring = adapter->tx_ring[skb->queue_mapping]; 6211 tx_ring = adapter->tx_ring[skb->queue_mapping];
6160 6212
6161#ifdef IXGBE_FCOE 6213#ifdef IXGBE_FCOE
6162 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6214 /* for FCoE with DCB, we force the priority to what
6163#ifdef CONFIG_IXGBE_DCB 6215 * was specified by the switch */
6164 /* for FCoE with DCB, we force the priority to what 6216 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6165 * was specified by the switch */ 6217 (skb->protocol == htons(ETH_P_FCOE) ||
6166 if ((skb->protocol == htons(ETH_P_FCOE)) || 6218 skb->protocol == htons(ETH_P_FIP))) {
6167 (skb->protocol == htons(ETH_P_FIP))) { 6219 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6168 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6220 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6169 << IXGBE_TX_FLAGS_VLAN_SHIFT); 6221 tx_flags |= ((adapter->fcoe.up << 13)
6170 tx_flags |= ((adapter->fcoe.up << 13) 6222 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6171 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6172 }
6173#endif
6174 /* flag for FCoE offloads */ 6223 /* flag for FCoE offloads */
6175 if (skb->protocol == htons(ETH_P_FCOE)) 6224 if (skb->protocol == htons(ETH_P_FCOE))
6176 tx_flags |= IXGBE_TX_FLAGS_FCOE; 6225 tx_flags |= IXGBE_TX_FLAGS_FCOE;
@@ -6430,8 +6479,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6430 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; 6479 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
6431 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); 6480 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
6432 if (err) { 6481 if (err) {
6433 DPRINTK(PROBE, ERR, 6482 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
6434 "Failed to enable PCI sriov: %d\n", err);
6435 goto err_novfs; 6483 goto err_novfs;
6436 } 6484 }
6437 /* If call to enable VFs succeeded then allocate memory 6485 /* If call to enable VFs succeeded then allocate memory
@@ -6455,9 +6503,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6455 } 6503 }
6456 6504
6457 /* Oh oh */ 6505 /* Oh oh */
6458 DPRINTK(PROBE, ERR, 6506 e_err(probe, "Unable to allocate memory for VF Data Storage - "
6459 "Unable to allocate memory for VF " 6507 "SRIOV disabled\n");
6460 "Data Storage - SRIOV disabled\n");
6461 pci_disable_sriov(adapter->pdev); 6508 pci_disable_sriov(adapter->pdev);
6462 6509
6463err_novfs: 6510err_novfs:
@@ -6514,8 +6561,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6514 err = dma_set_coherent_mask(&pdev->dev, 6561 err = dma_set_coherent_mask(&pdev->dev,
6515 DMA_BIT_MASK(32)); 6562 DMA_BIT_MASK(32));
6516 if (err) { 6563 if (err) {
6517 dev_err(&pdev->dev, "No usable DMA " 6564 dev_err(&pdev->dev,
6518 "configuration, aborting\n"); 6565 "No usable DMA configuration, aborting\n");
6519 goto err_dma; 6566 goto err_dma;
6520 } 6567 }
6521 } 6568 }
@@ -6526,7 +6573,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6526 IORESOURCE_MEM), ixgbe_driver_name); 6573 IORESOURCE_MEM), ixgbe_driver_name);
6527 if (err) { 6574 if (err) {
6528 dev_err(&pdev->dev, 6575 dev_err(&pdev->dev,
6529 "pci_request_selected_regions failed 0x%x\n", err); 6576 "pci_request_selected_regions failed 0x%x\n", err);
6530 goto err_pci_reg; 6577 goto err_pci_reg;
6531 } 6578 }
6532 6579
@@ -6637,8 +6684,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6637 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 6684 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
6638 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 6685 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
6639 if (esdp & IXGBE_ESDP_SDP1) 6686 if (esdp & IXGBE_ESDP_SDP1)
6640 DPRINTK(PROBE, CRIT, 6687 e_crit(probe, "Fan has stopped, replace the adapter\n");
6641 "Fan has stopped, replace the adapter\n");
6642 } 6688 }
6643 6689
6644 /* reset_hw fills in the perm_addr as well */ 6690 /* reset_hw fills in the perm_addr as well */
@@ -6657,13 +6703,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6657 round_jiffies(jiffies + (2 * HZ))); 6703 round_jiffies(jiffies + (2 * HZ)));
6658 err = 0; 6704 err = 0;
6659 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 6705 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
6660 dev_err(&adapter->pdev->dev, "failed to initialize because " 6706 e_dev_err("failed to initialize because an unsupported SFP+ "
6661 "an unsupported SFP+ module type was detected.\n" 6707 "module type was detected.\n");
6662 "Reload the driver after installing a supported " 6708 e_dev_err("Reload the driver after installing a supported "
6663 "module.\n"); 6709 "module.\n");
6664 goto err_sw_init; 6710 goto err_sw_init;
6665 } else if (err) { 6711 } else if (err) {
6666 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); 6712 e_dev_err("HW Init failed: %d\n", err);
6667 goto err_sw_init; 6713 goto err_sw_init;
6668 } 6714 }
6669 6715
@@ -6707,6 +6753,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6707 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 6753 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6708 } 6754 }
6709 } 6755 }
6756 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
6757 netdev->vlan_features |= NETIF_F_FCOE_CRC;
6758 netdev->vlan_features |= NETIF_F_FSO;
6759 netdev->vlan_features |= NETIF_F_FCOE_MTU;
6760 }
6710#endif /* IXGBE_FCOE */ 6761#endif /* IXGBE_FCOE */
6711 if (pci_using_dac) 6762 if (pci_using_dac)
6712 netdev->features |= NETIF_F_HIGHDMA; 6763 netdev->features |= NETIF_F_HIGHDMA;
@@ -6716,7 +6767,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6716 6767
6717 /* make sure the EEPROM is good */ 6768 /* make sure the EEPROM is good */
6718 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { 6769 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
6719 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 6770 e_dev_err("The EEPROM Checksum Is Not Valid\n");
6720 err = -EIO; 6771 err = -EIO;
6721 goto err_eeprom; 6772 goto err_eeprom;
6722 } 6773 }
@@ -6725,7 +6776,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6725 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 6776 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
6726 6777
6727 if (ixgbe_validate_mac_addr(netdev->perm_addr)) { 6778 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
6728 dev_err(&pdev->dev, "invalid MAC address\n"); 6779 e_dev_err("invalid MAC address\n");
6729 err = -EIO; 6780 err = -EIO;
6730 goto err_eeprom; 6781 goto err_eeprom;
6731 } 6782 }
@@ -6760,7 +6811,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6760 hw->mac.ops.get_bus_info(hw); 6811 hw->mac.ops.get_bus_info(hw);
6761 6812
6762 /* print bus type/speed/width info */ 6813 /* print bus type/speed/width info */
6763 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", 6814 e_dev_info("(PCI Express:%s:%s) %pM\n",
6764 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": 6815 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
6765 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), 6816 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
6766 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 6817 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
@@ -6770,20 +6821,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6770 netdev->dev_addr); 6821 netdev->dev_addr);
6771 ixgbe_read_pba_num_generic(hw, &part_num); 6822 ixgbe_read_pba_num_generic(hw, &part_num);
6772 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 6823 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6773 dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n", 6824 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
6774 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 6825 "PBA No: %06x-%03x\n",
6775 (part_num >> 8), (part_num & 0xff)); 6826 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6827 (part_num >> 8), (part_num & 0xff));
6776 else 6828 else
6777 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 6829 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
6778 hw->mac.type, hw->phy.type, 6830 hw->mac.type, hw->phy.type,
6779 (part_num >> 8), (part_num & 0xff)); 6831 (part_num >> 8), (part_num & 0xff));
6780 6832
6781 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 6833 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6782 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 6834 e_dev_warn("PCI-Express bandwidth available for this card is "
6783 "this card is not sufficient for optimal " 6835 "not sufficient for optimal performance.\n");
6784 "performance.\n"); 6836 e_dev_warn("For optimal performance a x8 PCI-Express slot "
6785 dev_warn(&pdev->dev, "For optimal performance a x8 " 6837 "is required.\n");
6786 "PCI-Express slot is required.\n");
6787 } 6838 }
6788 6839
6789 /* save off EEPROM version number */ 6840 /* save off EEPROM version number */
@@ -6794,12 +6845,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6794 6845
6795 if (err == IXGBE_ERR_EEPROM_VERSION) { 6846 if (err == IXGBE_ERR_EEPROM_VERSION) {
6796 /* We are running on a pre-production device, log a warning */ 6847 /* We are running on a pre-production device, log a warning */
6797 dev_warn(&pdev->dev, "This device is a pre-production " 6848 e_dev_warn("This device is a pre-production adapter/LOM. "
6798 "adapter/LOM. Please be aware there may be issues " 6849 "Please be aware there may be issues associated "
6799 "associated with your hardware. If you are " 6850 "with your hardware. If you are experiencing "
6800 "experiencing problems please contact your Intel or " 6851 "problems please contact your Intel or hardware "
6801 "hardware representative who provided you with this " 6852 "representative who provided you with this "
6802 "hardware.\n"); 6853 "hardware.\n");
6803 } 6854 }
6804 strcpy(netdev->name, "eth%d"); 6855 strcpy(netdev->name, "eth%d");
6805 err = register_netdev(netdev); 6856 err = register_netdev(netdev);
@@ -6822,8 +6873,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6822 } 6873 }
6823#endif 6874#endif
6824 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 6875 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6825 DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", 6876 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
6826 adapter->num_vfs);
6827 for (i = 0; i < adapter->num_vfs; i++) 6877 for (i = 0; i < adapter->num_vfs; i++)
6828 ixgbe_vf_configuration(pdev, (i | 0x10000000)); 6878 ixgbe_vf_configuration(pdev, (i | 0x10000000));
6829 } 6879 }
@@ -6831,7 +6881,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6831 /* add san mac addr to netdev */ 6881 /* add san mac addr to netdev */
6832 ixgbe_add_sanmac_netdev(netdev); 6882 ixgbe_add_sanmac_netdev(netdev);
6833 6883
6834 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); 6884 e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
6835 cards_found++; 6885 cards_found++;
6836 return 0; 6886 return 0;
6837 6887
@@ -6921,7 +6971,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6921 pci_release_selected_regions(pdev, pci_select_bars(pdev, 6971 pci_release_selected_regions(pdev, pci_select_bars(pdev,
6922 IORESOURCE_MEM)); 6972 IORESOURCE_MEM));
6923 6973
6924 DPRINTK(PROBE, INFO, "complete\n"); 6974 e_dev_info("complete\n");
6925 6975
6926 free_netdev(netdev); 6976 free_netdev(netdev);
6927 6977
@@ -6971,8 +7021,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
6971 int err; 7021 int err;
6972 7022
6973 if (pci_enable_device_mem(pdev)) { 7023 if (pci_enable_device_mem(pdev)) {
6974 DPRINTK(PROBE, ERR, 7024 e_err(probe, "Cannot re-enable PCI device after reset.\n");
6975 "Cannot re-enable PCI device after reset.\n");
6976 result = PCI_ERS_RESULT_DISCONNECT; 7025 result = PCI_ERS_RESULT_DISCONNECT;
6977 } else { 7026 } else {
6978 pci_set_master(pdev); 7027 pci_set_master(pdev);
@@ -6988,8 +7037,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
6988 7037
6989 err = pci_cleanup_aer_uncorrect_error_status(pdev); 7038 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6990 if (err) { 7039 if (err) {
6991 dev_err(&pdev->dev, 7040 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
6992 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); 7041 "failed 0x%0x\n", err);
6993 /* non-fatal, continue */ 7042 /* non-fatal, continue */
6994 } 7043 }
6995 7044
@@ -7010,7 +7059,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
7010 7059
7011 if (netif_running(netdev)) { 7060 if (netif_running(netdev)) {
7012 if (ixgbe_up(adapter)) { 7061 if (ixgbe_up(adapter)) {
7013 DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); 7062 e_info(probe, "ixgbe_up failed after reset\n");
7014 return; 7063 return;
7015 } 7064 }
7016 } 7065 }
@@ -7046,10 +7095,9 @@ static struct pci_driver ixgbe_driver = {
7046static int __init ixgbe_init_module(void) 7095static int __init ixgbe_init_module(void)
7047{ 7096{
7048 int ret; 7097 int ret;
7049 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name, 7098 pr_info("%s - version %s\n", ixgbe_driver_string,
7050 ixgbe_driver_string, ixgbe_driver_version); 7099 ixgbe_driver_version);
7051 7100 pr_info("%s\n", ixgbe_copyright);
7052 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
7053 7101
7054#ifdef CONFIG_IXGBE_DCA 7102#ifdef CONFIG_IXGBE_DCA
7055 dca_register_notify(&dca_notifier); 7103 dca_register_notify(&dca_notifier);
@@ -7088,18 +7136,17 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7088} 7136}
7089 7137
7090#endif /* CONFIG_IXGBE_DCA */ 7138#endif /* CONFIG_IXGBE_DCA */
7091#ifdef DEBUG 7139
7092/** 7140/**
7093 * ixgbe_get_hw_dev_name - return device name string 7141 * ixgbe_get_hw_dev return device
7094 * used by hardware layer to print debugging information 7142 * used by hardware layer to print debugging information
7095 **/ 7143 **/
7096char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) 7144struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7097{ 7145{
7098 struct ixgbe_adapter *adapter = hw->back; 7146 struct ixgbe_adapter *adapter = hw->back;
7099 return adapter->netdev->name; 7147 return adapter->netdev;
7100} 7148}
7101 7149
7102#endif
7103module_exit(ixgbe_exit_module); 7150module_exit(ixgbe_exit_module);
7104 7151
7105/* ixgbe_main.c */ 7152/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 48325a5beff2..6c0d42e33f21 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -577,6 +577,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
577 * 6 SFP_SR/LR_CORE1 - 82599-specific 577 * 6 SFP_SR/LR_CORE1 - 82599-specific
578 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific 578 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
579 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 579 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
580 * 9 SFP_1g_cu_CORE0 - 82599-specific
581 * 10 SFP_1g_cu_CORE1 - 82599-specific
580 */ 582 */
581 if (hw->mac.type == ixgbe_mac_82598EB) { 583 if (hw->mac.type == ixgbe_mac_82598EB) {
582 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 584 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -625,6 +627,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
625 else 627 else
626 hw->phy.sfp_type = 628 hw->phy.sfp_type =
627 ixgbe_sfp_type_srlr_core1; 629 ixgbe_sfp_type_srlr_core1;
630 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
631 if (hw->bus.lan_id == 0)
632 hw->phy.sfp_type =
633 ixgbe_sfp_type_1g_cu_core0;
634 else
635 hw->phy.sfp_type =
636 ixgbe_sfp_type_1g_cu_core1;
628 else 637 else
629 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 638 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
630 } 639 }
@@ -696,8 +705,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
696 goto out; 705 goto out;
697 } 706 }
698 707
699 /* 1G SFP modules are not supported */ 708 /* Verify supported 1G SFP modules */
700 if (comp_codes_10g == 0) { 709 if (comp_codes_10g == 0 &&
710 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
711 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
701 hw->phy.type = ixgbe_phy_sfp_unsupported; 712 hw->phy.type = ixgbe_phy_sfp_unsupported;
702 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 713 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
703 goto out; 714 goto out;
@@ -711,7 +722,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
711 722
712 /* This is guaranteed to be 82599, no need to check for NULL */ 723 /* This is guaranteed to be 82599, no need to check for NULL */
713 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 724 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
714 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { 725 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
726 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
727 (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
715 /* Make sure we're a supported PHY type */ 728 /* Make sure we're a supported PHY type */
716 if (hw->phy.type == ixgbe_phy_sfp_intel) { 729 if (hw->phy.type == ixgbe_phy_sfp_intel) {
717 status = 0; 730 status = 0;
@@ -742,6 +755,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
742 u16 *data_offset) 755 u16 *data_offset)
743{ 756{
744 u16 sfp_id; 757 u16 sfp_id;
758 u16 sfp_type = hw->phy.sfp_type;
745 759
746 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) 760 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
747 return IXGBE_ERR_SFP_NOT_SUPPORTED; 761 return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -753,6 +767,17 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
753 (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) 767 (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
754 return IXGBE_ERR_SFP_NOT_SUPPORTED; 768 return IXGBE_ERR_SFP_NOT_SUPPORTED;
755 769
770 /*
771 * Limiting active cables and 1G Phys must be initialized as
772 * SR modules
773 */
774 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
775 sfp_type == ixgbe_sfp_type_1g_cu_core0)
776 sfp_type = ixgbe_sfp_type_srlr_core0;
777 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
778 sfp_type == ixgbe_sfp_type_1g_cu_core1)
779 sfp_type = ixgbe_sfp_type_srlr_core1;
780
756 /* Read offset to PHY init contents */ 781 /* Read offset to PHY init contents */
757 hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); 782 hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
758 783
@@ -769,7 +794,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
769 hw->eeprom.ops.read(hw, *list_offset, &sfp_id); 794 hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
770 795
771 while (sfp_id != IXGBE_PHY_INIT_END_NL) { 796 while (sfp_id != IXGBE_PHY_INIT_END_NL) {
772 if (sfp_id == hw->phy.sfp_type) { 797 if (sfp_id == sfp_type) {
773 (*list_offset)++; 798 (*list_offset)++;
774 hw->eeprom.ops.read(hw, *list_offset, data_offset); 799 hw->eeprom.ops.read(hw, *list_offset, data_offset);
775 if ((!*data_offset) || (*data_offset == 0xFFFF)) { 800 if ((!*data_offset) || (*data_offset == 0xFFFF)) {
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index ef4ba834c593..fb3898f12fc5 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -48,6 +48,7 @@
48#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 48#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
49#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 49#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
50#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 50#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
51#define IXGBE_SFF_1GBASET_CAPABLE 0x8
51#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 52#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
52#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 53#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
53#define IXGBE_I2C_EEPROM_READ_MASK 0x100 54#define IXGBE_I2C_EEPROM_READ_MASK 0x100
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index f6cee94ec8e8..49661a138e22 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -25,7 +25,6 @@
25 25
26*******************************************************************************/ 26*******************************************************************************/
27 27
28
29#include <linux/types.h> 28#include <linux/types.h>
30#include <linux/module.h> 29#include <linux/module.h>
31#include <linux/pci.h> 30#include <linux/pci.h>
@@ -138,6 +137,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
138inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 137inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
139{ 138{
140 struct ixgbe_hw *hw = &adapter->hw; 139 struct ixgbe_hw *hw = &adapter->hw;
140 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
141 141
142 /* reset offloads to defaults */ 142 /* reset offloads to defaults */
143 if (adapter->vfinfo[vf].pf_vlan) { 143 if (adapter->vfinfo[vf].pf_vlan) {
@@ -159,26 +159,17 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
159 /* Flush and reset the mta with the new values */ 159 /* Flush and reset the mta with the new values */
160 ixgbe_set_rx_mode(adapter->netdev); 160 ixgbe_set_rx_mode(adapter->netdev);
161 161
162 if (adapter->vfinfo[vf].rar > 0) { 162 hw->mac.ops.clear_rar(hw, rar_entry);
163 adapter->hw.mac.ops.clear_rar(&adapter->hw,
164 adapter->vfinfo[vf].rar);
165 adapter->vfinfo[vf].rar = -1;
166 }
167} 163}
168 164
169int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 165int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
170 int vf, unsigned char *mac_addr) 166 int vf, unsigned char *mac_addr)
171{ 167{
172 struct ixgbe_hw *hw = &adapter->hw; 168 struct ixgbe_hw *hw = &adapter->hw;
173 169 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
174 adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
175 vf, IXGBE_RAH_AV);
176 if (adapter->vfinfo[vf].rar < 0) {
177 DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
178 return -1;
179 }
180 170
181 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); 171 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
172 hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
182 173
183 return 0; 174 return 0;
184} 175}
@@ -194,11 +185,8 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
194 185
195 if (enable) { 186 if (enable) {
196 random_ether_addr(vf_mac_addr); 187 random_ether_addr(vf_mac_addr);
197 DPRINTK(PROBE, INFO, "IOV: VF %d is enabled " 188 e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
198 "mac %02X:%02X:%02X:%02X:%02X:%02X\n", 189 vfn, vf_mac_addr);
199 vfn,
200 vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
201 vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
202 /* 190 /*
203 * Store away the VF "permananet" MAC address, it will ask 191 * Store away the VF "permananet" MAC address, it will ask
204 * for it later. 192 * for it later.
@@ -243,7 +231,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
243 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); 231 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
244 232
245 if (retval) 233 if (retval)
246 printk(KERN_ERR "Error receiving message from VF\n"); 234 pr_err("Error receiving message from VF\n");
247 235
248 /* this is a message we already processed, do nothing */ 236 /* this is a message we already processed, do nothing */
249 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) 237 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
@@ -257,7 +245,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
257 if (msgbuf[0] == IXGBE_VF_RESET) { 245 if (msgbuf[0] == IXGBE_VF_RESET) {
258 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; 246 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
259 u8 *addr = (u8 *)(&msgbuf[1]); 247 u8 *addr = (u8 *)(&msgbuf[1]);
260 DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf); 248 e_info(probe, "VF Reset msg received from vf %d\n", vf);
261 adapter->vfinfo[vf].clear_to_send = false; 249 adapter->vfinfo[vf].clear_to_send = false;
262 ixgbe_vf_reset_msg(adapter, vf); 250 ixgbe_vf_reset_msg(adapter, vf);
263 adapter->vfinfo[vf].clear_to_send = true; 251 adapter->vfinfo[vf].clear_to_send = true;
@@ -310,7 +298,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
310 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); 298 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
311 break; 299 break;
312 default: 300 default:
313 DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]); 301 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
314 retval = IXGBE_ERR_MBX; 302 retval = IXGBE_ERR_MBX;
315 break; 303 break;
316 } 304 }
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index cdd1998f18c7..9587d975d66c 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2214,6 +2214,8 @@ enum ixgbe_sfp_type {
2214 ixgbe_sfp_type_srlr_core1 = 6, 2214 ixgbe_sfp_type_srlr_core1 = 6,
2215 ixgbe_sfp_type_da_act_lmt_core0 = 7, 2215 ixgbe_sfp_type_da_act_lmt_core0 = 7,
2216 ixgbe_sfp_type_da_act_lmt_core1 = 8, 2216 ixgbe_sfp_type_da_act_lmt_core1 = 8,
2217 ixgbe_sfp_type_1g_cu_core0 = 9,
2218 ixgbe_sfp_type_1g_cu_core1 = 10,
2217 ixgbe_sfp_type_not_present = 0xFFFE, 2219 ixgbe_sfp_type_not_present = 0xFFFE,
2218 ixgbe_sfp_type_unknown = 0xFFFF 2220 ixgbe_sfp_type_unknown = 0xFFFF
2219}; 2221};
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index a16cff7e54a3..3e291ccc629d 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1463,18 +1463,10 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1463{ 1463{
1464 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1464 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1465 struct ixgbe_hw *hw = &adapter->hw; 1465 struct ixgbe_hw *hw = &adapter->hw;
1466 struct net_device *v_netdev;
1467 1466
1468 /* add VID to filter table */ 1467 /* add VID to filter table */
1469 if (hw->mac.ops.set_vfta) 1468 if (hw->mac.ops.set_vfta)
1470 hw->mac.ops.set_vfta(hw, vid, 0, true); 1469 hw->mac.ops.set_vfta(hw, vid, 0, true);
1471 /*
1472 * Copy feature flags from netdev to the vlan netdev for this vid.
1473 * This allows things like TSO to bubble down to our vlan device.
1474 */
1475 v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
1476 v_netdev->features |= adapter->netdev->features;
1477 vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
1478} 1470}
1479 1471
1480static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1472static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2229,7 +2221,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2229 if (err) { 2221 if (err) {
2230 dev_info(&pdev->dev, 2222 dev_info(&pdev->dev,
2231 "PF still in reset state, assigning new address\n"); 2223 "PF still in reset state, assigning new address\n");
2232 random_ether_addr(hw->mac.addr); 2224 dev_hw_addr_random(adapter->netdev, hw->mac.addr);
2233 } else { 2225 } else {
2234 err = hw->mac.ops.init_hw(hw); 2226 err = hw->mac.ops.init_hw(hw);
2235 if (err) { 2227 if (err) {
@@ -2935,7 +2927,8 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2935 struct ixgbevf_tx_buffer *tx_buffer_info; 2927 struct ixgbevf_tx_buffer *tx_buffer_info;
2936 unsigned int len; 2928 unsigned int len;
2937 unsigned int total = skb->len; 2929 unsigned int total = skb->len;
2938 unsigned int offset = 0, size, count = 0; 2930 unsigned int offset = 0, size;
2931 int count = 0;
2939 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2932 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2940 unsigned int f; 2933 unsigned int f;
2941 int i; 2934 int i;
@@ -3401,7 +3394,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3401 /* setup the private structure */ 3394 /* setup the private structure */
3402 err = ixgbevf_sw_init(adapter); 3395 err = ixgbevf_sw_init(adapter);
3403 3396
3404#ifdef MAX_SKB_FRAGS
3405 netdev->features = NETIF_F_SG | 3397 netdev->features = NETIF_F_SG |
3406 NETIF_F_IP_CSUM | 3398 NETIF_F_IP_CSUM |
3407 NETIF_F_HW_VLAN_TX | 3399 NETIF_F_HW_VLAN_TX |
@@ -3411,16 +3403,16 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3411 netdev->features |= NETIF_F_IPV6_CSUM; 3403 netdev->features |= NETIF_F_IPV6_CSUM;
3412 netdev->features |= NETIF_F_TSO; 3404 netdev->features |= NETIF_F_TSO;
3413 netdev->features |= NETIF_F_TSO6; 3405 netdev->features |= NETIF_F_TSO6;
3406 netdev->features |= NETIF_F_GRO;
3414 netdev->vlan_features |= NETIF_F_TSO; 3407 netdev->vlan_features |= NETIF_F_TSO;
3415 netdev->vlan_features |= NETIF_F_TSO6; 3408 netdev->vlan_features |= NETIF_F_TSO6;
3416 netdev->vlan_features |= NETIF_F_IP_CSUM; 3409 netdev->vlan_features |= NETIF_F_IP_CSUM;
3410 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3417 netdev->vlan_features |= NETIF_F_SG; 3411 netdev->vlan_features |= NETIF_F_SG;
3418 3412
3419 if (pci_using_dac) 3413 if (pci_using_dac)
3420 netdev->features |= NETIF_F_HIGHDMA; 3414 netdev->features |= NETIF_F_HIGHDMA;
3421 3415
3422#endif /* MAX_SKB_FRAGS */
3423
3424 /* The HW MAC address was set and/or determined in sw_init */ 3416 /* The HW MAC address was set and/or determined in sw_init */
3425 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 3417 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
3426 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 3418 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 3e6aaf9e5ce7..949c1f933644 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -82,11 +82,20 @@ static unsigned short known_revisions[] =
82 82
83static int jazzsonic_open(struct net_device* dev) 83static int jazzsonic_open(struct net_device* dev)
84{ 84{
85 if (request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, "sonic", dev)) { 85 int retval;
86 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); 86
87 return -EAGAIN; 87 retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
88 "sonic", dev);
89 if (retval) {
90 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
91 dev->name, dev->irq);
92 return retval;
88 } 93 }
89 return sonic_open(dev); 94
95 retval = sonic_open(dev);
96 if (retval)
97 free_irq(dev->irq, dev);
98 return retval;
90} 99}
91 100
92static int jazzsonic_close(struct net_device* dev) 101static int jazzsonic_close(struct net_device* dev)
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index f852ab3ae9cf..928b2b83cef5 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -18,6 +18,7 @@
18 18
19/* Supports: 19/* Supports:
20 * The Micrel KS8842 behind the timberdale FPGA 20 * The Micrel KS8842 behind the timberdale FPGA
21 * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
21 */ 22 */
22 23
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -29,11 +30,19 @@
29#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
30#include <linux/ethtool.h> 31#include <linux/ethtool.h>
31#include <linux/ks8842.h> 32#include <linux/ks8842.h>
33#include <linux/dmaengine.h>
34#include <linux/dma-mapping.h>
35#include <linux/scatterlist.h>
32 36
33#define DRV_NAME "ks8842" 37#define DRV_NAME "ks8842"
34 38
35/* Timberdale specific Registers */ 39/* Timberdale specific Registers */
36#define REG_TIMB_RST 0x1c 40#define REG_TIMB_RST 0x1c
41#define REG_TIMB_FIFO 0x20
42#define REG_TIMB_ISR 0x24
43#define REG_TIMB_IER 0x28
44#define REG_TIMB_IAR 0x2C
45#define REQ_TIMB_DMA_RESUME 0x30
37 46
38/* KS8842 registers */ 47/* KS8842 registers */
39 48
@@ -76,6 +85,15 @@
76#define IRQ_RX_ERROR 0x0080 85#define IRQ_RX_ERROR 0x0080
77#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ 86#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
78 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) 87 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
88/* When running via timberdale in DMA mode, the RX interrupt should be
89 enabled in the KS8842, but not in the FPGA IP, since the IP handles
90 RX DMA internally.
91 TX interrupts are not needed it is handled by the FPGA the driver is
92 notified via DMA callbacks.
93*/
94#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
95 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
96#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX)
79#define REG_ISR 0x02 97#define REG_ISR 0x02
80#define REG_RXSR 0x04 98#define REG_RXSR 0x04
81#define RXSR_VALID 0x8000 99#define RXSR_VALID 0x8000
@@ -114,14 +132,53 @@
114#define REG_P1CR4 0x02 132#define REG_P1CR4 0x02
115#define REG_P1SR 0x04 133#define REG_P1SR 0x04
116 134
135/* flags passed by platform_device for configuration */
136#define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */
137#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */
138
139#define DMA_BUFFER_SIZE 2048
140
141struct ks8842_tx_dma_ctl {
142 struct dma_chan *chan;
143 struct dma_async_tx_descriptor *adesc;
144 void *buf;
145 struct scatterlist sg;
146 int channel;
147};
148
149struct ks8842_rx_dma_ctl {
150 struct dma_chan *chan;
151 struct dma_async_tx_descriptor *adesc;
152 struct sk_buff *skb;
153 struct scatterlist sg;
154 struct tasklet_struct tasklet;
155 int channel;
156};
157
158#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
159 ((adapter)->dma_rx.channel != -1))
160
117struct ks8842_adapter { 161struct ks8842_adapter {
118 void __iomem *hw_addr; 162 void __iomem *hw_addr;
119 int irq; 163 int irq;
164 unsigned long conf_flags; /* copy of platform_device config */
120 struct tasklet_struct tasklet; 165 struct tasklet_struct tasklet;
121 spinlock_t lock; /* spinlock to be interrupt safe */ 166 spinlock_t lock; /* spinlock to be interrupt safe */
122 struct platform_device *pdev; 167 struct work_struct timeout_work;
168 struct net_device *netdev;
169 struct device *dev;
170 struct ks8842_tx_dma_ctl dma_tx;
171 struct ks8842_rx_dma_ctl dma_rx;
123}; 172};
124 173
174static void ks8842_dma_rx_cb(void *data);
175static void ks8842_dma_tx_cb(void *data);
176
177static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
178{
179 iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
180}
181
125static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) 182static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
126{ 183{
127 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); 184 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
@@ -191,16 +248,21 @@ static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
191 248
192static void ks8842_reset(struct ks8842_adapter *adapter) 249static void ks8842_reset(struct ks8842_adapter *adapter)
193{ 250{
194 /* The KS8842 goes haywire when doing softare reset 251 if (adapter->conf_flags & MICREL_KS884X) {
195 * a work around in the timberdale IP is implemented to 252 ks8842_write16(adapter, 3, 1, REG_GRR);
196 * do a hardware reset instead 253 msleep(10);
197 ks8842_write16(adapter, 3, 1, REG_GRR); 254 iowrite16(0, adapter->hw_addr + REG_GRR);
198 msleep(10); 255 } else {
199 iowrite16(0, adapter->hw_addr + REG_GRR); 256 /* The KS8842 goes haywire when doing softare reset
200 */ 257 * a work around in the timberdale IP is implemented to
201 iowrite16(32, adapter->hw_addr + REG_SELECT_BANK); 258 * do a hardware reset instead
202 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); 259 ks8842_write16(adapter, 3, 1, REG_GRR);
203 msleep(20); 260 msleep(10);
261 iowrite16(0, adapter->hw_addr + REG_GRR);
262 */
263 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
264 msleep(20);
265 }
204} 266}
205 267
206static void ks8842_update_link_status(struct net_device *netdev, 268static void ks8842_update_link_status(struct net_device *netdev,
@@ -269,8 +331,6 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
269 331
270 /* restart port auto-negotiation */ 332 /* restart port auto-negotiation */
271 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); 333 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
272 /* only advertise 10Mbps */
273 ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
274 334
275 /* Enable the transmitter */ 335 /* Enable the transmitter */
276 ks8842_enable_tx(adapter); 336 ks8842_enable_tx(adapter);
@@ -282,8 +342,19 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
282 ks8842_write16(adapter, 18, 0xffff, REG_ISR); 342 ks8842_write16(adapter, 18, 0xffff, REG_ISR);
283 343
284 /* enable interrupts */ 344 /* enable interrupts */
285 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 345 if (KS8842_USE_DMA(adapter)) {
286 346 /* When running in DMA Mode the RX interrupt is not enabled in
347 timberdale because RX data is received by DMA callbacks
348 it must still be enabled in the KS8842 because it indicates
349 to timberdale when there is RX data for it's DMA FIFOs */
350 iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
351 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
352 } else {
353 if (!(adapter->conf_flags & MICREL_KS884X))
354 iowrite16(ENABLED_IRQS,
355 adapter->hw_addr + REG_TIMB_IER);
356 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
357 }
287 /* enable the switch */ 358 /* enable the switch */
288 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); 359 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
289} 360}
@@ -296,13 +367,28 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
296 for (i = 0; i < ETH_ALEN; i++) 367 for (i = 0; i < ETH_ALEN; i++)
297 dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); 368 dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
298 369
299 /* make sure the switch port uses the same MAC as the QMU */ 370 if (adapter->conf_flags & MICREL_KS884X) {
300 mac = ks8842_read16(adapter, 2, REG_MARL); 371 /*
301 ks8842_write16(adapter, 39, mac, REG_MACAR1); 372 the sequence of saving mac addr between MAC and Switch is
302 mac = ks8842_read16(adapter, 2, REG_MARM); 373 different.
303 ks8842_write16(adapter, 39, mac, REG_MACAR2); 374 */
304 mac = ks8842_read16(adapter, 2, REG_MARH); 375
305 ks8842_write16(adapter, 39, mac, REG_MACAR3); 376 mac = ks8842_read16(adapter, 2, REG_MARL);
377 ks8842_write16(adapter, 39, mac, REG_MACAR3);
378 mac = ks8842_read16(adapter, 2, REG_MARM);
379 ks8842_write16(adapter, 39, mac, REG_MACAR2);
380 mac = ks8842_read16(adapter, 2, REG_MARH);
381 ks8842_write16(adapter, 39, mac, REG_MACAR1);
382 } else {
383
384 /* make sure the switch port uses the same MAC as the QMU */
385 mac = ks8842_read16(adapter, 2, REG_MARL);
386 ks8842_write16(adapter, 39, mac, REG_MACAR1);
387 mac = ks8842_read16(adapter, 2, REG_MARM);
388 ks8842_write16(adapter, 39, mac, REG_MACAR2);
389 mac = ks8842_read16(adapter, 2, REG_MARH);
390 ks8842_write16(adapter, 39, mac, REG_MACAR3);
391 }
306} 392}
307 393
308static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) 394static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
@@ -313,8 +399,25 @@ static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
313 spin_lock_irqsave(&adapter->lock, flags); 399 spin_lock_irqsave(&adapter->lock, flags);
314 for (i = 0; i < ETH_ALEN; i++) { 400 for (i = 0; i < ETH_ALEN; i++) {
315 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); 401 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
316 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], 402 if (!(adapter->conf_flags & MICREL_KS884X))
317 REG_MACAR1 + i); 403 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
404 REG_MACAR1 + i);
405 }
406
407 if (adapter->conf_flags & MICREL_KS884X) {
408 /*
409 the sequence of saving mac addr between MAC and Switch is
410 different.
411 */
412
413 u16 mac;
414
415 mac = ks8842_read16(adapter, 2, REG_MARL);
416 ks8842_write16(adapter, 39, mac, REG_MACAR3);
417 mac = ks8842_read16(adapter, 2, REG_MARM);
418 ks8842_write16(adapter, 39, mac, REG_MACAR2);
419 mac = ks8842_read16(adapter, 2, REG_MARH);
420 ks8842_write16(adapter, 39, mac, REG_MACAR1);
318 } 421 }
319 spin_unlock_irqrestore(&adapter->lock, flags); 422 spin_unlock_irqrestore(&adapter->lock, flags);
320} 423}
@@ -324,15 +427,59 @@ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
324 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; 427 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
325} 428}
326 429
430static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
431{
432 struct ks8842_adapter *adapter = netdev_priv(netdev);
433 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
434 u8 *buf = ctl->buf;
435
436 if (ctl->adesc) {
437 netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
438 /* transfer ongoing */
439 return NETDEV_TX_BUSY;
440 }
441
442 sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
443
444 /* copy data to the TX buffer */
445 /* the control word, enable IRQ, port 1 and the length */
446 *buf++ = 0x00;
447 *buf++ = 0x01; /* Port 1 */
448 *buf++ = skb->len & 0xff;
449 *buf++ = (skb->len >> 8) & 0xff;
450 skb_copy_from_linear_data(skb, buf, skb->len);
451
452 dma_sync_single_range_for_device(adapter->dev,
453 sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
454 DMA_TO_DEVICE);
455
456 /* make sure the length is a multiple of 4 */
457 if (sg_dma_len(&ctl->sg) % 4)
458 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
459
460 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
461 &ctl->sg, 1, DMA_TO_DEVICE,
462 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
463 if (!ctl->adesc)
464 return NETDEV_TX_BUSY;
465
466 ctl->adesc->callback_param = netdev;
467 ctl->adesc->callback = ks8842_dma_tx_cb;
468 ctl->adesc->tx_submit(ctl->adesc);
469
470 netdev->stats.tx_bytes += skb->len;
471
472 dev_kfree_skb(skb);
473
474 return NETDEV_TX_OK;
475}
476
327static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) 477static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
328{ 478{
329 struct ks8842_adapter *adapter = netdev_priv(netdev); 479 struct ks8842_adapter *adapter = netdev_priv(netdev);
330 int len = skb->len; 480 int len = skb->len;
331 u32 *ptr = (u32 *)skb->data;
332 u32 ctrl;
333 481
334 dev_dbg(&adapter->pdev->dev, 482 netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
335 "%s: len %u head %p data %p tail %p end %p\n",
336 __func__, skb->len, skb->head, skb->data, 483 __func__, skb->len, skb->head, skb->data,
337 skb_tail_pointer(skb), skb_end_pointer(skb)); 484 skb_tail_pointer(skb), skb_end_pointer(skb));
338 485
@@ -340,17 +487,34 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
340 if (ks8842_tx_fifo_space(adapter) < len + 8) 487 if (ks8842_tx_fifo_space(adapter) < len + 8)
341 return NETDEV_TX_BUSY; 488 return NETDEV_TX_BUSY;
342 489
343 /* the control word, enable IRQ, port 1 and the length */ 490 if (adapter->conf_flags & KS884X_16BIT) {
344 ctrl = 0x8000 | 0x100 | (len << 16); 491 u16 *ptr16 = (u16 *)skb->data;
345 ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); 492 ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
493 ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
494 netdev->stats.tx_bytes += len;
495
496 /* copy buffer */
497 while (len > 0) {
498 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
499 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
500 len -= sizeof(u32);
501 }
502 } else {
346 503
347 netdev->stats.tx_bytes += len; 504 u32 *ptr = (u32 *)skb->data;
505 u32 ctrl;
506 /* the control word, enable IRQ, port 1 and the length */
507 ctrl = 0x8000 | 0x100 | (len << 16);
508 ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
348 509
349 /* copy buffer */ 510 netdev->stats.tx_bytes += len;
350 while (len > 0) { 511
351 iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); 512 /* copy buffer */
352 len -= sizeof(u32); 513 while (len > 0) {
353 ptr++; 514 iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
515 len -= sizeof(u32);
516 ptr++;
517 }
354 } 518 }
355 519
356 /* enqueue packet */ 520 /* enqueue packet */
@@ -361,54 +525,174 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
361 return NETDEV_TX_OK; 525 return NETDEV_TX_OK;
362} 526}
363 527
364static void ks8842_rx_frame(struct net_device *netdev, 528static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
365 struct ks8842_adapter *adapter) 529{
530 netdev_dbg(netdev, "RX error, status: %x\n", status);
531
532 netdev->stats.rx_errors++;
533 if (status & RXSR_TOO_LONG)
534 netdev->stats.rx_length_errors++;
535 if (status & RXSR_CRC_ERROR)
536 netdev->stats.rx_crc_errors++;
537 if (status & RXSR_RUNT)
538 netdev->stats.rx_frame_errors++;
539}
540
541static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
542 int len)
543{
544 netdev_dbg(netdev, "RX packet, len: %d\n", len);
545
546 netdev->stats.rx_packets++;
547 netdev->stats.rx_bytes += len;
548 if (status & RXSR_MULTICAST)
549 netdev->stats.multicast++;
550}
551
552static int __ks8842_start_new_rx_dma(struct net_device *netdev)
366{ 553{
367 u32 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); 554 struct ks8842_adapter *adapter = netdev_priv(netdev);
368 int len = (status >> 16) & 0x7ff; 555 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
556 struct scatterlist *sg = &ctl->sg;
557 int err;
369 558
370 status &= 0xffff; 559 ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
560 if (ctl->skb) {
561 sg_init_table(sg, 1);
562 sg_dma_address(sg) = dma_map_single(adapter->dev,
563 ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
564 err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
565 if (unlikely(err)) {
566 sg_dma_address(sg) = 0;
567 goto out;
568 }
569
570 sg_dma_len(sg) = DMA_BUFFER_SIZE;
571
572 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
573 sg, 1, DMA_FROM_DEVICE,
574 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
575
576 if (!ctl->adesc)
577 goto out;
578
579 ctl->adesc->callback_param = netdev;
580 ctl->adesc->callback = ks8842_dma_rx_cb;
581 ctl->adesc->tx_submit(ctl->adesc);
582 } else {
583 err = -ENOMEM;
584 sg_dma_address(sg) = 0;
585 goto out;
586 }
371 587
372 dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n", 588 return err;
373 __func__, status); 589out:
590 if (sg_dma_address(sg))
591 dma_unmap_single(adapter->dev, sg_dma_address(sg),
592 DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
593 sg_dma_address(sg) = 0;
594 if (ctl->skb)
595 dev_kfree_skb(ctl->skb);
596
597 ctl->skb = NULL;
598
599 printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
600 return err;
601}
602
603static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
604{
605 struct net_device *netdev = (struct net_device *)arg;
606 struct ks8842_adapter *adapter = netdev_priv(netdev);
607 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
608 struct sk_buff *skb = ctl->skb;
609 dma_addr_t addr = sg_dma_address(&ctl->sg);
610 u32 status;
611
612 ctl->adesc = NULL;
613
614 /* kick next transfer going */
615 __ks8842_start_new_rx_dma(netdev);
616
617 /* now handle the data we got */
618 dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
619
620 status = *((u32 *)skb->data);
621
622 netdev_dbg(netdev, "%s - rx_data: status: %x\n",
623 __func__, status & 0xffff);
374 624
375 /* check the status */ 625 /* check the status */
376 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 626 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
377 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); 627 int len = (status >> 16) & 0x7ff;
378 628
379 dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n", 629 ks8842_update_rx_counters(netdev, status, len);
380 __func__, len);
381 if (skb) {
382 u32 *data;
383 630
384 netdev->stats.rx_packets++; 631 /* reserve 4 bytes which is the status word */
385 netdev->stats.rx_bytes += len; 632 skb_reserve(skb, 4);
386 if (status & RXSR_MULTICAST) 633 skb_put(skb, len);
387 netdev->stats.multicast++;
388 634
389 data = (u32 *)skb_put(skb, len); 635 skb->protocol = eth_type_trans(skb, netdev);
636 netif_rx(skb);
637 } else {
638 ks8842_update_rx_err_counters(netdev, status);
639 dev_kfree_skb(skb);
640 }
641}
390 642
391 ks8842_select_bank(adapter, 17); 643static void ks8842_rx_frame(struct net_device *netdev,
392 while (len > 0) { 644 struct ks8842_adapter *adapter)
393 *data++ = ioread32(adapter->hw_addr + 645{
394 REG_QMU_DATA_LO); 646 u32 status;
395 len -= sizeof(u32); 647 int len;
396 } 648
649 if (adapter->conf_flags & KS884X_16BIT) {
650 status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
651 len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
652 netdev_dbg(netdev, "%s - rx_data: status: %x\n",
653 __func__, status);
654 } else {
655 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
656 len = (status >> 16) & 0x7ff;
657 status &= 0xffff;
658 netdev_dbg(netdev, "%s - rx_data: status: %x\n",
659 __func__, status);
660 }
397 661
662 /* check the status */
663 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
664 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
665
666 if (skb) {
667
668 ks8842_update_rx_counters(netdev, status, len);
669
670 if (adapter->conf_flags & KS884X_16BIT) {
671 u16 *data16 = (u16 *)skb_put(skb, len);
672 ks8842_select_bank(adapter, 17);
673 while (len > 0) {
674 *data16++ = ioread16(adapter->hw_addr +
675 REG_QMU_DATA_LO);
676 *data16++ = ioread16(adapter->hw_addr +
677 REG_QMU_DATA_HI);
678 len -= sizeof(u32);
679 }
680 } else {
681 u32 *data = (u32 *)skb_put(skb, len);
682
683 ks8842_select_bank(adapter, 17);
684 while (len > 0) {
685 *data++ = ioread32(adapter->hw_addr +
686 REG_QMU_DATA_LO);
687 len -= sizeof(u32);
688 }
689 }
398 skb->protocol = eth_type_trans(skb, netdev); 690 skb->protocol = eth_type_trans(skb, netdev);
399 netif_rx(skb); 691 netif_rx(skb);
400 } else 692 } else
401 netdev->stats.rx_dropped++; 693 netdev->stats.rx_dropped++;
402 } else { 694 } else
403 dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status); 695 ks8842_update_rx_err_counters(netdev, status);
404 netdev->stats.rx_errors++;
405 if (status & RXSR_TOO_LONG)
406 netdev->stats.rx_length_errors++;
407 if (status & RXSR_CRC_ERROR)
408 netdev->stats.rx_crc_errors++;
409 if (status & RXSR_RUNT)
410 netdev->stats.rx_frame_errors++;
411 }
412 696
413 /* set high watermark to 3K */ 697 /* set high watermark to 3K */
414 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); 698 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
@@ -423,8 +707,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
423void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) 707void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
424{ 708{
425 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 709 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
426 dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n", 710 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
427 __func__, rx_data);
428 while (rx_data) { 711 while (rx_data) {
429 ks8842_rx_frame(netdev, adapter); 712 ks8842_rx_frame(netdev, adapter);
430 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 713 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
@@ -434,7 +717,7 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
434void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) 717void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
435{ 718{
436 u16 sr = ks8842_read16(adapter, 16, REG_TXSR); 719 u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
437 dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr); 720 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
438 netdev->stats.tx_packets++; 721 netdev->stats.tx_packets++;
439 if (netif_queue_stopped(netdev)) 722 if (netif_queue_stopped(netdev))
440 netif_wake_queue(netdev); 723 netif_wake_queue(netdev);
@@ -443,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
443void ks8842_handle_rx_overrun(struct net_device *netdev, 726void ks8842_handle_rx_overrun(struct net_device *netdev,
444 struct ks8842_adapter *adapter) 727 struct ks8842_adapter *adapter)
445{ 728{
446 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 729 netdev_dbg(netdev, "%s: entry\n", __func__);
447 netdev->stats.rx_errors++; 730 netdev->stats.rx_errors++;
448 netdev->stats.rx_fifo_errors++; 731 netdev->stats.rx_fifo_errors++;
449} 732}
@@ -462,20 +745,32 @@ void ks8842_tasklet(unsigned long arg)
462 spin_unlock_irqrestore(&adapter->lock, flags); 745 spin_unlock_irqrestore(&adapter->lock, flags);
463 746
464 isr = ks8842_read16(adapter, 18, REG_ISR); 747 isr = ks8842_read16(adapter, 18, REG_ISR);
465 dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr); 748 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
749
750 /* when running in DMA mode, do not ack RX interrupts, it is handled
751 internally by timberdale, otherwise it's DMA FIFO:s would stop
752 */
753 if (KS8842_USE_DMA(adapter))
754 isr &= ~IRQ_RX;
466 755
467 /* Ack */ 756 /* Ack */
468 ks8842_write16(adapter, 18, isr, REG_ISR); 757 ks8842_write16(adapter, 18, isr, REG_ISR);
469 758
759 if (!(adapter->conf_flags & MICREL_KS884X))
760 /* Ack in the timberdale IP as well */
761 iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
762
470 if (!netif_running(netdev)) 763 if (!netif_running(netdev))
471 return; 764 return;
472 765
473 if (isr & IRQ_LINK_CHANGE) 766 if (isr & IRQ_LINK_CHANGE)
474 ks8842_update_link_status(netdev, adapter); 767 ks8842_update_link_status(netdev, adapter);
475 768
476 if (isr & (IRQ_RX | IRQ_RX_ERROR)) 769 /* should not get IRQ_RX when running DMA mode */
770 if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
477 ks8842_handle_rx(netdev, adapter); 771 ks8842_handle_rx(netdev, adapter);
478 772
773 /* should only happen when in PIO mode */
479 if (isr & IRQ_TX) 774 if (isr & IRQ_TX)
480 ks8842_handle_tx(netdev, adapter); 775 ks8842_handle_tx(netdev, adapter);
481 776
@@ -494,24 +789,38 @@ void ks8842_tasklet(unsigned long arg)
494 789
495 /* re-enable interrupts, put back the bank selection register */ 790 /* re-enable interrupts, put back the bank selection register */
496 spin_lock_irqsave(&adapter->lock, flags); 791 spin_lock_irqsave(&adapter->lock, flags);
497 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 792 if (KS8842_USE_DMA(adapter))
793 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
794 else
795 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
498 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 796 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
797
798 /* Make sure timberdale continues DMA operations, they are stopped while
799 we are handling the ks8842 because we might change bank */
800 if (KS8842_USE_DMA(adapter))
801 ks8842_resume_dma(adapter);
802
499 spin_unlock_irqrestore(&adapter->lock, flags); 803 spin_unlock_irqrestore(&adapter->lock, flags);
500} 804}
501 805
502static irqreturn_t ks8842_irq(int irq, void *devid) 806static irqreturn_t ks8842_irq(int irq, void *devid)
503{ 807{
504 struct ks8842_adapter *adapter = devid; 808 struct net_device *netdev = devid;
809 struct ks8842_adapter *adapter = netdev_priv(netdev);
505 u16 isr; 810 u16 isr;
506 u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); 811 u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
507 irqreturn_t ret = IRQ_NONE; 812 irqreturn_t ret = IRQ_NONE;
508 813
509 isr = ks8842_read16(adapter, 18, REG_ISR); 814 isr = ks8842_read16(adapter, 18, REG_ISR);
510 dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr); 815 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
511 816
512 if (isr) { 817 if (isr) {
513 /* disable IRQ */ 818 if (KS8842_USE_DMA(adapter))
514 ks8842_write16(adapter, 18, 0x00, REG_IER); 819 /* disable all but RX IRQ, since the FPGA relies on it*/
820 ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
821 else
822 /* disable IRQ */
823 ks8842_write16(adapter, 18, 0x00, REG_IER);
515 824
516 /* schedule tasklet */ 825 /* schedule tasklet */
517 tasklet_schedule(&adapter->tasklet); 826 tasklet_schedule(&adapter->tasklet);
@@ -521,9 +830,151 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
521 830
522 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 831 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
523 832
833 /* After an interrupt, tell timberdale to continue DMA operations.
834 DMA is disabled while we are handling the ks8842 because we might
835 change bank */
836 ks8842_resume_dma(adapter);
837
524 return ret; 838 return ret;
525} 839}
526 840
841static void ks8842_dma_rx_cb(void *data)
842{
843 struct net_device *netdev = data;
844 struct ks8842_adapter *adapter = netdev_priv(netdev);
845
846 netdev_dbg(netdev, "RX DMA finished\n");
847 /* schedule tasklet */
848 if (adapter->dma_rx.adesc)
849 tasklet_schedule(&adapter->dma_rx.tasklet);
850}
851
852static void ks8842_dma_tx_cb(void *data)
853{
854 struct net_device *netdev = data;
855 struct ks8842_adapter *adapter = netdev_priv(netdev);
856 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
857
858 netdev_dbg(netdev, "TX DMA finished\n");
859
860 if (!ctl->adesc)
861 return;
862
863 netdev->stats.tx_packets++;
864 ctl->adesc = NULL;
865
866 if (netif_queue_stopped(netdev))
867 netif_wake_queue(netdev);
868}
869
870static void ks8842_stop_dma(struct ks8842_adapter *adapter)
871{
872 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
873 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
874
875 tx_ctl->adesc = NULL;
876 if (tx_ctl->chan)
877 tx_ctl->chan->device->device_control(tx_ctl->chan,
878 DMA_TERMINATE_ALL, 0);
879
880 rx_ctl->adesc = NULL;
881 if (rx_ctl->chan)
882 rx_ctl->chan->device->device_control(rx_ctl->chan,
883 DMA_TERMINATE_ALL, 0);
884
885 if (sg_dma_address(&rx_ctl->sg))
886 dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
887 DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
888 sg_dma_address(&rx_ctl->sg) = 0;
889
890 dev_kfree_skb(rx_ctl->skb);
891 rx_ctl->skb = NULL;
892}
893
894static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
895{
896 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
897 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
898
899 ks8842_stop_dma(adapter);
900
901 if (tx_ctl->chan)
902 dma_release_channel(tx_ctl->chan);
903 tx_ctl->chan = NULL;
904
905 if (rx_ctl->chan)
906 dma_release_channel(rx_ctl->chan);
907 rx_ctl->chan = NULL;
908
909 tasklet_kill(&rx_ctl->tasklet);
910
911 if (sg_dma_address(&tx_ctl->sg))
912 dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
913 DMA_BUFFER_SIZE, DMA_TO_DEVICE);
914 sg_dma_address(&tx_ctl->sg) = 0;
915
916 kfree(tx_ctl->buf);
917 tx_ctl->buf = NULL;
918}
919
920static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
921{
922 return chan->chan_id == (long)filter_param;
923}
924
925static int ks8842_alloc_dma_bufs(struct net_device *netdev)
926{
927 struct ks8842_adapter *adapter = netdev_priv(netdev);
928 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
929 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
930 int err;
931
932 dma_cap_mask_t mask;
933
934 dma_cap_zero(mask);
935 dma_cap_set(DMA_SLAVE, mask);
936 dma_cap_set(DMA_PRIVATE, mask);
937
938 sg_init_table(&tx_ctl->sg, 1);
939
940 tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
941 (void *)(long)tx_ctl->channel);
942 if (!tx_ctl->chan) {
943 err = -ENODEV;
944 goto err;
945 }
946
947 /* allocate DMA buffer */
948 tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
949 if (!tx_ctl->buf) {
950 err = -ENOMEM;
951 goto err;
952 }
953
954 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
955 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
956 err = dma_mapping_error(adapter->dev,
957 sg_dma_address(&tx_ctl->sg));
958 if (err) {
959 sg_dma_address(&tx_ctl->sg) = 0;
960 goto err;
961 }
962
963 rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
964 (void *)(long)rx_ctl->channel);
965 if (!rx_ctl->chan) {
966 err = -ENODEV;
967 goto err;
968 }
969
970 tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
971 (unsigned long)netdev);
972
973 return 0;
974err:
975 ks8842_dealloc_dma_bufs(adapter);
976 return err;
977}
527 978
528/* Netdevice operations */ 979/* Netdevice operations */
529 980
@@ -532,7 +983,26 @@ static int ks8842_open(struct net_device *netdev)
532 struct ks8842_adapter *adapter = netdev_priv(netdev); 983 struct ks8842_adapter *adapter = netdev_priv(netdev);
533 int err; 984 int err;
534 985
535 dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__); 986 netdev_dbg(netdev, "%s - entry\n", __func__);
987
988 if (KS8842_USE_DMA(adapter)) {
989 err = ks8842_alloc_dma_bufs(netdev);
990
991 if (!err) {
992 /* start RX dma */
993 err = __ks8842_start_new_rx_dma(netdev);
994 if (err)
995 ks8842_dealloc_dma_bufs(adapter);
996 }
997
998 if (err) {
999 printk(KERN_WARNING DRV_NAME
1000 ": Failed to initiate DMA, running PIO\n");
1001 ks8842_dealloc_dma_bufs(adapter);
1002 adapter->dma_rx.channel = -1;
1003 adapter->dma_tx.channel = -1;
1004 }
1005 }
536 1006
537 /* reset the HW */ 1007 /* reset the HW */
538 ks8842_reset_hw(adapter); 1008 ks8842_reset_hw(adapter);
@@ -542,7 +1012,7 @@ static int ks8842_open(struct net_device *netdev)
542 ks8842_update_link_status(netdev, adapter); 1012 ks8842_update_link_status(netdev, adapter);
543 1013
544 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, 1014 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
545 adapter); 1015 netdev);
546 if (err) { 1016 if (err) {
547 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); 1017 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
548 return err; 1018 return err;
@@ -555,10 +1025,15 @@ static int ks8842_close(struct net_device *netdev)
555{ 1025{
556 struct ks8842_adapter *adapter = netdev_priv(netdev); 1026 struct ks8842_adapter *adapter = netdev_priv(netdev);
557 1027
558 dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__); 1028 netdev_dbg(netdev, "%s - entry\n", __func__);
1029
1030 cancel_work_sync(&adapter->timeout_work);
1031
1032 if (KS8842_USE_DMA(adapter))
1033 ks8842_dealloc_dma_bufs(adapter);
559 1034
560 /* free the irq */ 1035 /* free the irq */
561 free_irq(adapter->irq, adapter); 1036 free_irq(adapter->irq, netdev);
562 1037
563 /* disable the switch */ 1038 /* disable the switch */
564 ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); 1039 ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
@@ -572,7 +1047,18 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
572 int ret; 1047 int ret;
573 struct ks8842_adapter *adapter = netdev_priv(netdev); 1048 struct ks8842_adapter *adapter = netdev_priv(netdev);
574 1049
575 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 1050 netdev_dbg(netdev, "%s: entry\n", __func__);
1051
1052 if (KS8842_USE_DMA(adapter)) {
1053 unsigned long flags;
1054 ret = ks8842_tx_frame_dma(skb, netdev);
1055 /* for now only allow one transfer at the time */
1056 spin_lock_irqsave(&adapter->lock, flags);
1057 if (adapter->dma_tx.adesc)
1058 netif_stop_queue(netdev);
1059 spin_unlock_irqrestore(&adapter->lock, flags);
1060 return ret;
1061 }
576 1062
577 ret = ks8842_tx_frame(skb, netdev); 1063 ret = ks8842_tx_frame(skb, netdev);
578 1064
@@ -588,7 +1074,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
588 struct sockaddr *addr = p; 1074 struct sockaddr *addr = p;
589 char *mac = (u8 *)addr->sa_data; 1075 char *mac = (u8 *)addr->sa_data;
590 1076
591 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 1077 netdev_dbg(netdev, "%s: entry\n", __func__);
592 1078
593 if (!is_valid_ether_addr(addr->sa_data)) 1079 if (!is_valid_ether_addr(addr->sa_data))
594 return -EADDRNOTAVAIL; 1080 return -EADDRNOTAVAIL;
@@ -599,17 +1085,26 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
599 return 0; 1085 return 0;
600} 1086}
601 1087
602static void ks8842_tx_timeout(struct net_device *netdev) 1088static void ks8842_tx_timeout_work(struct work_struct *work)
603{ 1089{
604 struct ks8842_adapter *adapter = netdev_priv(netdev); 1090 struct ks8842_adapter *adapter =
1091 container_of(work, struct ks8842_adapter, timeout_work);
1092 struct net_device *netdev = adapter->netdev;
605 unsigned long flags; 1093 unsigned long flags;
606 1094
607 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 1095 netdev_dbg(netdev, "%s: entry\n", __func__);
608 1096
609 spin_lock_irqsave(&adapter->lock, flags); 1097 spin_lock_irqsave(&adapter->lock, flags);
1098
1099 if (KS8842_USE_DMA(adapter))
1100 ks8842_stop_dma(adapter);
1101
610 /* disable interrupts */ 1102 /* disable interrupts */
611 ks8842_write16(adapter, 18, 0, REG_IER); 1103 ks8842_write16(adapter, 18, 0, REG_IER);
612 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); 1104 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
1105
1106 netif_stop_queue(netdev);
1107
613 spin_unlock_irqrestore(&adapter->lock, flags); 1108 spin_unlock_irqrestore(&adapter->lock, flags);
614 1109
615 ks8842_reset_hw(adapter); 1110 ks8842_reset_hw(adapter);
@@ -617,6 +1112,18 @@ static void ks8842_tx_timeout(struct net_device *netdev)
617 ks8842_write_mac_addr(adapter, netdev->dev_addr); 1112 ks8842_write_mac_addr(adapter, netdev->dev_addr);
618 1113
619 ks8842_update_link_status(netdev, adapter); 1114 ks8842_update_link_status(netdev, adapter);
1115
1116 if (KS8842_USE_DMA(adapter))
1117 __ks8842_start_new_rx_dma(netdev);
1118}
1119
1120static void ks8842_tx_timeout(struct net_device *netdev)
1121{
1122 struct ks8842_adapter *adapter = netdev_priv(netdev);
1123
1124 netdev_dbg(netdev, "%s: entry\n", __func__);
1125
1126 schedule_work(&adapter->timeout_work);
620} 1127}
621 1128
622static const struct net_device_ops ks8842_netdev_ops = { 1129static const struct net_device_ops ks8842_netdev_ops = {
@@ -653,7 +1160,11 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
653 SET_NETDEV_DEV(netdev, &pdev->dev); 1160 SET_NETDEV_DEV(netdev, &pdev->dev);
654 1161
655 adapter = netdev_priv(netdev); 1162 adapter = netdev_priv(netdev);
1163 adapter->netdev = netdev;
1164 INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
656 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); 1165 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
1166 adapter->conf_flags = iomem->flags;
1167
657 if (!adapter->hw_addr) 1168 if (!adapter->hw_addr)
658 goto err_ioremap; 1169 goto err_ioremap;
659 1170
@@ -663,7 +1174,18 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
663 goto err_get_irq; 1174 goto err_get_irq;
664 } 1175 }
665 1176
666 adapter->pdev = pdev; 1177 adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
1178
1179 /* DMA is only supported when accessed via timberdale */
1180 if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
1181 (pdata->tx_dma_channel != -1) &&
1182 (pdata->rx_dma_channel != -1)) {
1183 adapter->dma_rx.channel = pdata->rx_dma_channel;
1184 adapter->dma_tx.channel = pdata->tx_dma_channel;
1185 } else {
1186 adapter->dma_rx.channel = -1;
1187 adapter->dma_tx.channel = -1;
1188 }
667 1189
668 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); 1190 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
669 spin_lock_init(&adapter->lock); 1191 spin_lock_init(&adapter->lock);
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7805bbf1d53a..37504a398906 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -1457,7 +1457,6 @@ struct dev_info {
1457 * @adapter: Adapter device information. 1457 * @adapter: Adapter device information.
1458 * @port: Port information. 1458 * @port: Port information.
1459 * @monitor_time_info: Timer to monitor ports. 1459 * @monitor_time_info: Timer to monitor ports.
1460 * @stats: Network statistics.
1461 * @proc_sem: Semaphore for proc accessing. 1460 * @proc_sem: Semaphore for proc accessing.
1462 * @id: Device ID. 1461 * @id: Device ID.
1463 * @mii_if: MII interface information. 1462 * @mii_if: MII interface information.
@@ -1471,7 +1470,6 @@ struct dev_priv {
1471 struct dev_info *adapter; 1470 struct dev_info *adapter;
1472 struct ksz_port port; 1471 struct ksz_port port;
1473 struct ksz_timer_info monitor_timer_info; 1472 struct ksz_timer_info monitor_timer_info;
1474 struct net_device_stats stats;
1475 1473
1476 struct semaphore proc_sem; 1474 struct semaphore proc_sem;
1477 int id; 1475 int id;
@@ -4751,8 +4749,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
4751 hw_send_pkt(hw); 4749 hw_send_pkt(hw);
4752 4750
4753 /* Update transmit statistics. */ 4751 /* Update transmit statistics. */
4754 priv->stats.tx_packets++; 4752 dev->stats.tx_packets++;
4755 priv->stats.tx_bytes += len; 4753 dev->stats.tx_bytes += len;
4756} 4754}
4757 4755
4758/** 4756/**
@@ -5030,7 +5028,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
5030 /* skb->data != skb->head */ 5028 /* skb->data != skb->head */
5031 skb = dev_alloc_skb(packet_len + 2); 5029 skb = dev_alloc_skb(packet_len + 2);
5032 if (!skb) { 5030 if (!skb) {
5033 priv->stats.rx_dropped++; 5031 dev->stats.rx_dropped++;
5034 return -ENOMEM; 5032 return -ENOMEM;
5035 } 5033 }
5036 5034
@@ -5050,8 +5048,8 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
5050 csum_verified(skb); 5048 csum_verified(skb);
5051 5049
5052 /* Update receive statistics. */ 5050 /* Update receive statistics. */
5053 priv->stats.rx_packets++; 5051 dev->stats.rx_packets++;
5054 priv->stats.rx_bytes += packet_len; 5052 dev->stats.rx_bytes += packet_len;
5055 5053
5056 /* Notify upper layer for received packet. */ 5054 /* Notify upper layer for received packet. */
5057 rx_status = netif_rx(skb); 5055 rx_status = netif_rx(skb);
@@ -5291,7 +5289,7 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
5291 } 5289 }
5292 5290
5293 if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) { 5291 if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
5294 priv->stats.rx_fifo_errors++; 5292 dev->stats.rx_fifo_errors++;
5295 hw_resume_rx(hw); 5293 hw_resume_rx(hw);
5296 } 5294 }
5297 5295
@@ -5522,7 +5520,7 @@ static int netdev_open(struct net_device *dev)
5522 priv->promiscuous = 0; 5520 priv->promiscuous = 0;
5523 5521
5524 /* Reset device statistics. */ 5522 /* Reset device statistics. */
5525 memset(&priv->stats, 0, sizeof(struct net_device_stats)); 5523 memset(&dev->stats, 0, sizeof(struct net_device_stats));
5526 memset((void *) port->counter, 0, 5524 memset((void *) port->counter, 0,
5527 (sizeof(u64) * OID_COUNTER_LAST)); 5525 (sizeof(u64) * OID_COUNTER_LAST));
5528 5526
@@ -5622,42 +5620,42 @@ static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
5622 int i; 5620 int i;
5623 int p; 5621 int p;
5624 5622
5625 priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR]; 5623 dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
5626 priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR]; 5624 dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
5627 5625
5628 /* Reset to zero to add count later. */ 5626 /* Reset to zero to add count later. */
5629 priv->stats.multicast = 0; 5627 dev->stats.multicast = 0;
5630 priv->stats.collisions = 0; 5628 dev->stats.collisions = 0;
5631 priv->stats.rx_length_errors = 0; 5629 dev->stats.rx_length_errors = 0;
5632 priv->stats.rx_crc_errors = 0; 5630 dev->stats.rx_crc_errors = 0;
5633 priv->stats.rx_frame_errors = 0; 5631 dev->stats.rx_frame_errors = 0;
5634 priv->stats.tx_window_errors = 0; 5632 dev->stats.tx_window_errors = 0;
5635 5633
5636 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { 5634 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
5637 mib = &hw->port_mib[p]; 5635 mib = &hw->port_mib[p];
5638 5636
5639 priv->stats.multicast += (unsigned long) 5637 dev->stats.multicast += (unsigned long)
5640 mib->counter[MIB_COUNTER_RX_MULTICAST]; 5638 mib->counter[MIB_COUNTER_RX_MULTICAST];
5641 5639
5642 priv->stats.collisions += (unsigned long) 5640 dev->stats.collisions += (unsigned long)
5643 mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION]; 5641 mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
5644 5642
5645 priv->stats.rx_length_errors += (unsigned long)( 5643 dev->stats.rx_length_errors += (unsigned long)(
5646 mib->counter[MIB_COUNTER_RX_UNDERSIZE] + 5644 mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
5647 mib->counter[MIB_COUNTER_RX_FRAGMENT] + 5645 mib->counter[MIB_COUNTER_RX_FRAGMENT] +
5648 mib->counter[MIB_COUNTER_RX_OVERSIZE] + 5646 mib->counter[MIB_COUNTER_RX_OVERSIZE] +
5649 mib->counter[MIB_COUNTER_RX_JABBER]); 5647 mib->counter[MIB_COUNTER_RX_JABBER]);
5650 priv->stats.rx_crc_errors += (unsigned long) 5648 dev->stats.rx_crc_errors += (unsigned long)
5651 mib->counter[MIB_COUNTER_RX_CRC_ERR]; 5649 mib->counter[MIB_COUNTER_RX_CRC_ERR];
5652 priv->stats.rx_frame_errors += (unsigned long)( 5650 dev->stats.rx_frame_errors += (unsigned long)(
5653 mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] + 5651 mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
5654 mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]); 5652 mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
5655 5653
5656 priv->stats.tx_window_errors += (unsigned long) 5654 dev->stats.tx_window_errors += (unsigned long)
5657 mib->counter[MIB_COUNTER_TX_LATE_COLLISION]; 5655 mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
5658 } 5656 }
5659 5657
5660 return &priv->stats; 5658 return &dev->stats;
5661} 5659}
5662 5660
5663/** 5661/**
@@ -5718,7 +5716,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
5718 * from the bridge. 5716 * from the bridge.
5719 */ 5717 */
5720 if ((hw->features & STP_SUPPORT) && !promiscuous && 5718 if ((hw->features & STP_SUPPORT) && !promiscuous &&
5721 dev->br_port) { 5719 (dev->priv_flags & IFF_BRIDGE_PORT)) {
5722 struct ksz_switch *sw = hw->ksz_switch; 5720 struct ksz_switch *sw = hw->ksz_switch;
5723 int port = priv->port.first_port; 5721 int port = priv->port.first_port;
5724 5722
@@ -6812,7 +6810,7 @@ static int stp;
6812static int fast_aging; 6810static int fast_aging;
6813 6811
6814/** 6812/**
6815 * netdev_init - initalize network device. 6813 * netdev_init - initialize network device.
6816 * @dev: Network device. 6814 * @dev: Network device.
6817 * 6815 *
6818 * This function initializes the network device. 6816 * This function initializes the network device.
@@ -6896,13 +6894,12 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
6896 i = j = num = got_num = 0; 6894 i = j = num = got_num = 0;
6897 while (j < MAC_ADDR_LEN) { 6895 while (j < MAC_ADDR_LEN) {
6898 if (macaddr[i]) { 6896 if (macaddr[i]) {
6897 int digit;
6898
6899 got_num = 1; 6899 got_num = 1;
6900 if ('0' <= macaddr[i] && macaddr[i] <= '9') 6900 digit = hex_to_bin(macaddr[i]);
6901 num = num * 16 + macaddr[i] - '0'; 6901 if (digit >= 0)
6902 else if ('A' <= macaddr[i] && macaddr[i] <= 'F') 6902 num = num * 16 + digit;
6903 num = num * 16 + 10 + macaddr[i] - 'A';
6904 else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
6905 num = num * 16 + 10 + macaddr[i] - 'a';
6906 else if (':' == macaddr[i]) 6903 else if (':' == macaddr[i])
6907 got_num = 2; 6904 got_num = 2;
6908 else 6905 else
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 21f8adaa87c1..f06296bfe293 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -248,7 +248,6 @@ struct lance_private {
248 int cur_rx, cur_tx; /* The next free ring entry */ 248 int cur_rx, cur_tx; /* The next free ring entry */
249 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 249 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
250 int dma; 250 int dma;
251 struct net_device_stats stats;
252 unsigned char chip_version; /* See lance_chip_type. */ 251 unsigned char chip_version; /* See lance_chip_type. */
253 spinlock_t devlock; 252 spinlock_t devlock;
254}; 253};
@@ -925,7 +924,7 @@ static void lance_tx_timeout (struct net_device *dev)
925 printk ("%s: transmit timed out, status %4.4x, resetting.\n", 924 printk ("%s: transmit timed out, status %4.4x, resetting.\n",
926 dev->name, inw (ioaddr + LANCE_DATA)); 925 dev->name, inw (ioaddr + LANCE_DATA));
927 outw (0x0004, ioaddr + LANCE_DATA); 926 outw (0x0004, ioaddr + LANCE_DATA);
928 lp->stats.tx_errors++; 927 dev->stats.tx_errors++;
929#ifndef final_version 928#ifndef final_version
930 if (lance_debug > 3) { 929 if (lance_debug > 3) {
931 int i; 930 int i;
@@ -989,7 +988,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
989 988
990 lp->tx_ring[entry].misc = 0x0000; 989 lp->tx_ring[entry].misc = 0x0000;
991 990
992 lp->stats.tx_bytes += skb->len; 991 dev->stats.tx_bytes += skb->len;
993 992
994 /* If any part of this buffer is >16M we must copy it to a low-memory 993 /* If any part of this buffer is >16M we must copy it to a low-memory
995 buffer. */ 994 buffer. */
@@ -1062,13 +1061,16 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
1062 if (status & 0x40000000) { 1061 if (status & 0x40000000) {
1063 /* There was an major error, log it. */ 1062 /* There was an major error, log it. */
1064 int err_status = lp->tx_ring[entry].misc; 1063 int err_status = lp->tx_ring[entry].misc;
1065 lp->stats.tx_errors++; 1064 dev->stats.tx_errors++;
1066 if (err_status & 0x0400) lp->stats.tx_aborted_errors++; 1065 if (err_status & 0x0400)
1067 if (err_status & 0x0800) lp->stats.tx_carrier_errors++; 1066 dev->stats.tx_aborted_errors++;
1068 if (err_status & 0x1000) lp->stats.tx_window_errors++; 1067 if (err_status & 0x0800)
1068 dev->stats.tx_carrier_errors++;
1069 if (err_status & 0x1000)
1070 dev->stats.tx_window_errors++;
1069 if (err_status & 0x4000) { 1071 if (err_status & 0x4000) {
1070 /* Ackk! On FIFO errors the Tx unit is turned off! */ 1072 /* Ackk! On FIFO errors the Tx unit is turned off! */
1071 lp->stats.tx_fifo_errors++; 1073 dev->stats.tx_fifo_errors++;
1072 /* Remove this verbosity later! */ 1074 /* Remove this verbosity later! */
1073 printk("%s: Tx FIFO error! Status %4.4x.\n", 1075 printk("%s: Tx FIFO error! Status %4.4x.\n",
1074 dev->name, csr0); 1076 dev->name, csr0);
@@ -1077,8 +1079,8 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
1077 } 1079 }
1078 } else { 1080 } else {
1079 if (status & 0x18000000) 1081 if (status & 0x18000000)
1080 lp->stats.collisions++; 1082 dev->stats.collisions++;
1081 lp->stats.tx_packets++; 1083 dev->stats.tx_packets++;
1082 } 1084 }
1083 1085
1084 /* We must free the original skb if it's not a data-only copy 1086 /* We must free the original skb if it's not a data-only copy
@@ -1108,8 +1110,10 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
1108 } 1110 }
1109 1111
1110 /* Log misc errors. */ 1112 /* Log misc errors. */
1111 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */ 1113 if (csr0 & 0x4000)
1112 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */ 1114 dev->stats.tx_errors++; /* Tx babble. */
1115 if (csr0 & 0x1000)
1116 dev->stats.rx_errors++; /* Missed a Rx frame. */
1113 if (csr0 & 0x0800) { 1117 if (csr0 & 0x0800) {
1114 printk("%s: Bus master arbitration failure, status %4.4x.\n", 1118 printk("%s: Bus master arbitration failure, status %4.4x.\n",
1115 dev->name, csr0); 1119 dev->name, csr0);
@@ -1155,11 +1159,15 @@ lance_rx(struct net_device *dev)
1155 buffers it's possible for a jabber packet to use two 1159 buffers it's possible for a jabber packet to use two
1156 buffers, with only the last correctly noting the error. */ 1160 buffers, with only the last correctly noting the error. */
1157 if (status & 0x01) /* Only count a general error at the */ 1161 if (status & 0x01) /* Only count a general error at the */
1158 lp->stats.rx_errors++; /* end of a packet.*/ 1162 dev->stats.rx_errors++; /* end of a packet.*/
1159 if (status & 0x20) lp->stats.rx_frame_errors++; 1163 if (status & 0x20)
1160 if (status & 0x10) lp->stats.rx_over_errors++; 1164 dev->stats.rx_frame_errors++;
1161 if (status & 0x08) lp->stats.rx_crc_errors++; 1165 if (status & 0x10)
1162 if (status & 0x04) lp->stats.rx_fifo_errors++; 1166 dev->stats.rx_over_errors++;
1167 if (status & 0x08)
1168 dev->stats.rx_crc_errors++;
1169 if (status & 0x04)
1170 dev->stats.rx_fifo_errors++;
1163 lp->rx_ring[entry].base &= 0x03ffffff; 1171 lp->rx_ring[entry].base &= 0x03ffffff;
1164 } 1172 }
1165 else 1173 else
@@ -1171,7 +1179,7 @@ lance_rx(struct net_device *dev)
1171 if(pkt_len<60) 1179 if(pkt_len<60)
1172 { 1180 {
1173 printk("%s: Runt packet!\n",dev->name); 1181 printk("%s: Runt packet!\n",dev->name);
1174 lp->stats.rx_errors++; 1182 dev->stats.rx_errors++;
1175 } 1183 }
1176 else 1184 else
1177 { 1185 {
@@ -1185,7 +1193,7 @@ lance_rx(struct net_device *dev)
1185 1193
1186 if (i > RX_RING_SIZE -2) 1194 if (i > RX_RING_SIZE -2)
1187 { 1195 {
1188 lp->stats.rx_dropped++; 1196 dev->stats.rx_dropped++;
1189 lp->rx_ring[entry].base |= 0x80000000; 1197 lp->rx_ring[entry].base |= 0x80000000;
1190 lp->cur_rx++; 1198 lp->cur_rx++;
1191 } 1199 }
@@ -1198,8 +1206,8 @@ lance_rx(struct net_device *dev)
1198 pkt_len); 1206 pkt_len);
1199 skb->protocol=eth_type_trans(skb,dev); 1207 skb->protocol=eth_type_trans(skb,dev);
1200 netif_rx(skb); 1208 netif_rx(skb);
1201 lp->stats.rx_packets++; 1209 dev->stats.rx_packets++;
1202 lp->stats.rx_bytes+=pkt_len; 1210 dev->stats.rx_bytes += pkt_len;
1203 } 1211 }
1204 } 1212 }
1205 /* The docs say that the buffer length isn't touched, but Andrew Boyd 1213 /* The docs say that the buffer length isn't touched, but Andrew Boyd
@@ -1225,7 +1233,7 @@ lance_close(struct net_device *dev)
1225 1233
1226 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { 1234 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1227 outw(112, ioaddr+LANCE_ADDR); 1235 outw(112, ioaddr+LANCE_ADDR);
1228 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); 1236 dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1229 } 1237 }
1230 outw(0, ioaddr+LANCE_ADDR); 1238 outw(0, ioaddr+LANCE_ADDR);
1231 1239
@@ -1262,12 +1270,12 @@ static struct net_device_stats *lance_get_stats(struct net_device *dev)
1262 spin_lock_irqsave(&lp->devlock, flags); 1270 spin_lock_irqsave(&lp->devlock, flags);
1263 saved_addr = inw(ioaddr+LANCE_ADDR); 1271 saved_addr = inw(ioaddr+LANCE_ADDR);
1264 outw(112, ioaddr+LANCE_ADDR); 1272 outw(112, ioaddr+LANCE_ADDR);
1265 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); 1273 dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1266 outw(saved_addr, ioaddr+LANCE_ADDR); 1274 outw(saved_addr, ioaddr+LANCE_ADDR);
1267 spin_unlock_irqrestore(&lp->devlock, flags); 1275 spin_unlock_irqrestore(&lp->devlock, flags);
1268 } 1276 }
1269 1277
1270 return &lp->stats; 1278 return &dev->stats;
1271} 1279}
1272 1280
1273/* Set or clear the multicast filter for this adaptor. 1281/* Set or clear the multicast filter for this adaptor.
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 6474c4973d3a..4eea3f70c5cf 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -193,6 +193,35 @@ static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
193#endif 193#endif
194 194
195/** 195/**
196 * * temac_dma_bd_release - Release buffer descriptor rings
197 */
198static void temac_dma_bd_release(struct net_device *ndev)
199{
200 struct temac_local *lp = netdev_priv(ndev);
201 int i;
202
203 for (i = 0; i < RX_BD_NUM; i++) {
204 if (!lp->rx_skb[i])
205 break;
206 else {
207 dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
208 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
209 dev_kfree_skb(lp->rx_skb[i]);
210 }
211 }
212 if (lp->rx_bd_v)
213 dma_free_coherent(ndev->dev.parent,
214 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
215 lp->rx_bd_v, lp->rx_bd_p);
216 if (lp->tx_bd_v)
217 dma_free_coherent(ndev->dev.parent,
218 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
219 lp->tx_bd_v, lp->tx_bd_p);
220 if (lp->rx_skb)
221 kfree(lp->rx_skb);
222}
223
224/**
196 * temac_dma_bd_init - Setup buffer descriptor rings 225 * temac_dma_bd_init - Setup buffer descriptor rings
197 */ 226 */
198static int temac_dma_bd_init(struct net_device *ndev) 227static int temac_dma_bd_init(struct net_device *ndev)
@@ -202,14 +231,29 @@ static int temac_dma_bd_init(struct net_device *ndev)
202 int i; 231 int i;
203 232
204 lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL); 233 lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
234 if (!lp->rx_skb) {
235 dev_err(&ndev->dev,
236 "can't allocate memory for DMA RX buffer\n");
237 goto out;
238 }
205 /* allocate the tx and rx ring buffer descriptors. */ 239 /* allocate the tx and rx ring buffer descriptors. */
206 /* returns a virtual addres and a physical address. */ 240 /* returns a virtual addres and a physical address. */
207 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 241 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
208 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 242 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
209 &lp->tx_bd_p, GFP_KERNEL); 243 &lp->tx_bd_p, GFP_KERNEL);
244 if (!lp->tx_bd_v) {
245 dev_err(&ndev->dev,
246 "unable to allocate DMA TX buffer descriptors");
247 goto out;
248 }
210 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 249 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
211 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 250 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
212 &lp->rx_bd_p, GFP_KERNEL); 251 &lp->rx_bd_p, GFP_KERNEL);
252 if (!lp->rx_bd_v) {
253 dev_err(&ndev->dev,
254 "unable to allocate DMA RX buffer descriptors");
255 goto out;
256 }
213 257
214 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); 258 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
215 for (i = 0; i < TX_BD_NUM; i++) { 259 for (i = 0; i < TX_BD_NUM; i++) {
@@ -227,7 +271,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
227 271
228 if (skb == 0) { 272 if (skb == 0) {
229 dev_err(&ndev->dev, "alloc_skb error %d\n", i); 273 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
230 return -1; 274 goto out;
231 } 275 }
232 lp->rx_skb[i] = skb; 276 lp->rx_skb[i] = skb;
233 /* returns physical address of skb->data */ 277 /* returns physical address of skb->data */
@@ -258,6 +302,10 @@ static int temac_dma_bd_init(struct net_device *ndev)
258 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); 302 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
259 303
260 return 0; 304 return 0;
305
306out:
307 temac_dma_bd_release(ndev);
308 return -ENOMEM;
261} 309}
262 310
263/* --------------------------------------------------------------------- 311/* ---------------------------------------------------------------------
@@ -449,7 +497,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
449 return (0); 497 return (0);
450} 498}
451 499
452/* Initilize temac */ 500/* Initialize temac */
453static void temac_device_reset(struct net_device *ndev) 501static void temac_device_reset(struct net_device *ndev)
454{ 502{
455 struct temac_local *lp = netdev_priv(ndev); 503 struct temac_local *lp = netdev_priv(ndev);
@@ -505,7 +553,10 @@ static void temac_device_reset(struct net_device *ndev)
505 } 553 }
506 lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); 554 lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
507 555
508 temac_dma_bd_init(ndev); 556 if (temac_dma_bd_init(ndev)) {
557 dev_err(&ndev->dev,
558 "temac_device_reset descriptor allocation failed\n");
559 }
509 560
510 temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0); 561 temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
511 temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0); 562 temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
@@ -837,6 +888,8 @@ static int temac_stop(struct net_device *ndev)
837 phy_disconnect(lp->phy_dev); 888 phy_disconnect(lp->phy_dev);
838 lp->phy_dev = NULL; 889 lp->phy_dev = NULL;
839 890
891 temac_dma_bd_release(ndev);
892
840 return 0; 893 return 0;
841} 894}
842 895
@@ -862,6 +915,7 @@ static const struct net_device_ops temac_netdev_ops = {
862 .ndo_stop = temac_stop, 915 .ndo_stop = temac_stop,
863 .ndo_start_xmit = temac_start_xmit, 916 .ndo_start_xmit = temac_start_xmit,
864 .ndo_set_mac_address = netdev_set_mac_address, 917 .ndo_set_mac_address = netdev_set_mac_address,
918 .ndo_validate_addr = eth_validate_addr,
865 //.ndo_set_multicast_list = temac_set_multicast_list, 919 //.ndo_set_multicast_list = temac_set_multicast_list,
866#ifdef CONFIG_NET_POLL_CONTROLLER 920#ifdef CONFIG_NET_POLL_CONTROLLER
867 .ndo_poll_controller = temac_poll_controller, 921 .ndo_poll_controller = temac_poll_controller,
@@ -978,19 +1032,22 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
978 dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); 1032 dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
979 } else { 1033 } else {
980 dev_err(&op->dev, "unable to map DMA registers\n"); 1034 dev_err(&op->dev, "unable to map DMA registers\n");
1035 of_node_put(np);
981 goto err_iounmap; 1036 goto err_iounmap;
982 } 1037 }
983 } 1038 }
984 1039
985 lp->rx_irq = irq_of_parse_and_map(np, 0); 1040 lp->rx_irq = irq_of_parse_and_map(np, 0);
986 lp->tx_irq = irq_of_parse_and_map(np, 1); 1041 lp->tx_irq = irq_of_parse_and_map(np, 1);
1042
1043 of_node_put(np); /* Finished with the DMA node; drop the reference */
1044
987 if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { 1045 if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
988 dev_err(&op->dev, "could not determine irqs\n"); 1046 dev_err(&op->dev, "could not determine irqs\n");
989 rc = -ENOMEM; 1047 rc = -ENOMEM;
990 goto err_iounmap_2; 1048 goto err_iounmap_2;
991 } 1049 }
992 1050
993 of_node_put(np); /* Finished with the DMA node; drop the reference */
994 1051
995 /* Retrieve the MAC address */ 1052 /* Retrieve the MAC address */
996 addr = of_get_property(op->dev.of_node, "local-mac-address", &size); 1053 addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 72b7949c91b1..9a0996795321 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -58,11 +58,13 @@
58#include <linux/tcp.h> 58#include <linux/tcp.h>
59#include <linux/percpu.h> 59#include <linux/percpu.h>
60#include <net/net_namespace.h> 60#include <net/net_namespace.h>
61#include <linux/u64_stats_sync.h>
61 62
62struct pcpu_lstats { 63struct pcpu_lstats {
63 unsigned long packets; 64 u64 packets;
64 unsigned long bytes; 65 u64 bytes;
65 unsigned long drops; 66 struct u64_stats_sync syncp;
67 unsigned long drops;
66}; 68};
67 69
68/* 70/*
@@ -86,31 +88,40 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
86 88
87 len = skb->len; 89 len = skb->len;
88 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { 90 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
91 u64_stats_update_begin(&lb_stats->syncp);
89 lb_stats->bytes += len; 92 lb_stats->bytes += len;
90 lb_stats->packets++; 93 lb_stats->packets++;
94 u64_stats_update_end(&lb_stats->syncp);
91 } else 95 } else
92 lb_stats->drops++; 96 lb_stats->drops++;
93 97
94 return NETDEV_TX_OK; 98 return NETDEV_TX_OK;
95} 99}
96 100
97static struct net_device_stats *loopback_get_stats(struct net_device *dev) 101static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
102 struct rtnl_link_stats64 *stats)
98{ 103{
99 const struct pcpu_lstats __percpu *pcpu_lstats; 104 const struct pcpu_lstats __percpu *pcpu_lstats;
100 struct net_device_stats *stats = &dev->stats; 105 u64 bytes = 0;
101 unsigned long bytes = 0; 106 u64 packets = 0;
102 unsigned long packets = 0; 107 u64 drops = 0;
103 unsigned long drops = 0;
104 int i; 108 int i;
105 109
106 pcpu_lstats = (void __percpu __force *)dev->ml_priv; 110 pcpu_lstats = (void __percpu __force *)dev->ml_priv;
107 for_each_possible_cpu(i) { 111 for_each_possible_cpu(i) {
108 const struct pcpu_lstats *lb_stats; 112 const struct pcpu_lstats *lb_stats;
113 u64 tbytes, tpackets;
114 unsigned int start;
109 115
110 lb_stats = per_cpu_ptr(pcpu_lstats, i); 116 lb_stats = per_cpu_ptr(pcpu_lstats, i);
111 bytes += lb_stats->bytes; 117 do {
112 packets += lb_stats->packets; 118 start = u64_stats_fetch_begin(&lb_stats->syncp);
119 tbytes = lb_stats->bytes;
120 tpackets = lb_stats->packets;
121 } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
113 drops += lb_stats->drops; 122 drops += lb_stats->drops;
123 bytes += tbytes;
124 packets += tpackets;
114 } 125 }
115 stats->rx_packets = packets; 126 stats->rx_packets = packets;
116 stats->tx_packets = packets; 127 stats->tx_packets = packets;
@@ -158,7 +169,7 @@ static void loopback_dev_free(struct net_device *dev)
158static const struct net_device_ops loopback_ops = { 169static const struct net_device_ops loopback_ops = {
159 .ndo_init = loopback_dev_init, 170 .ndo_init = loopback_dev_init,
160 .ndo_start_xmit= loopback_xmit, 171 .ndo_start_xmit= loopback_xmit,
161 .ndo_get_stats = loopback_get_stats, 172 .ndo_get_stats64 = loopback_get_stats64,
162}; 173};
163 174
164/* 175/*
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 1136c9a22b67..3832fa4961dd 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -157,6 +157,8 @@ static void dayna_block_output(struct net_device *dev, int count,
157#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) 157#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
158#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) 158#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
159 159
160#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
161
160/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 162/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
161static void slow_sane_get_8390_hdr(struct net_device *dev, 163static void slow_sane_get_8390_hdr(struct net_device *dev,
162 struct e8390_pkt_hdr *hdr, int ring_page); 164 struct e8390_pkt_hdr *hdr, int ring_page);
@@ -164,8 +166,8 @@ static void slow_sane_block_input(struct net_device *dev, int count,
164 struct sk_buff *skb, int ring_offset); 166 struct sk_buff *skb, int ring_offset);
165static void slow_sane_block_output(struct net_device *dev, int count, 167static void slow_sane_block_output(struct net_device *dev, int count,
166 const unsigned char *buf, int start_page); 168 const unsigned char *buf, int start_page);
167static void word_memcpy_tocard(void *tp, const void *fp, int count); 169static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
168static void word_memcpy_fromcard(void *tp, const void *fp, int count); 170static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
169 171
170static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) 172static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
171{ 173{
@@ -245,9 +247,9 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
245 unsigned long outdata = 0xA5A0B5B0; 247 unsigned long outdata = 0xA5A0B5B0;
246 unsigned long indata = 0x00000000; 248 unsigned long indata = 0x00000000;
247 /* Try writing 32 bits */ 249 /* Try writing 32 bits */
248 memcpy(membase, &outdata, 4); 250 memcpy_toio(membase, &outdata, 4);
249 /* Now compare them */ 251 /* Now compare them */
250 if (memcmp((char *)&outdata, (char *)membase, 4) == 0) 252 if (memcmp_withio(&outdata, membase, 4) == 0)
251 return ACCESS_32; 253 return ACCESS_32;
252 /* Write 16 bit output */ 254 /* Write 16 bit output */
253 word_memcpy_tocard(membase, &outdata, 4); 255 word_memcpy_tocard(membase, &outdata, 4);
@@ -554,7 +556,7 @@ static int __init mac8390_initdev(struct net_device *dev,
554 case MAC8390_APPLE: 556 case MAC8390_APPLE:
555 switch (mac8390_testio(dev->mem_start)) { 557 switch (mac8390_testio(dev->mem_start)) {
556 case ACCESS_UNKNOWN: 558 case ACCESS_UNKNOWN:
557 pr_info("Don't know how to access card memory!\n"); 559 pr_err("Don't know how to access card memory!\n");
558 return -ENODEV; 560 return -ENODEV;
559 break; 561 break;
560 562
@@ -641,12 +643,13 @@ static int __init mac8390_initdev(struct net_device *dev,
641 643
642static int mac8390_open(struct net_device *dev) 644static int mac8390_open(struct net_device *dev)
643{ 645{
646 int err;
647
644 __ei_open(dev); 648 __ei_open(dev);
645 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { 649 err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev);
646 pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq); 650 if (err)
647 return -EAGAIN; 651 pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq);
648 } 652 return err;
649 return 0;
650} 653}
651 654
652static int mac8390_close(struct net_device *dev) 655static int mac8390_close(struct net_device *dev)
@@ -731,7 +734,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
731 struct e8390_pkt_hdr *hdr, int ring_page) 734 struct e8390_pkt_hdr *hdr, int ring_page)
732{ 735{
733 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 736 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
734 memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4); 737 memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
735 /* Fix endianness */ 738 /* Fix endianness */
736 hdr->count = swab16(hdr->count); 739 hdr->count = swab16(hdr->count);
737} 740}
@@ -745,14 +748,13 @@ static void sane_block_input(struct net_device *dev, int count,
745 if (xfer_start + count > ei_status.rmem_end) { 748 if (xfer_start + count > ei_status.rmem_end) {
746 /* We must wrap the input move. */ 749 /* We must wrap the input move. */
747 int semi_count = ei_status.rmem_end - xfer_start; 750 int semi_count = ei_status.rmem_end - xfer_start;
748 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, 751 memcpy_fromio(skb->data, dev->mem_start + xfer_base,
749 semi_count); 752 semi_count);
750 count -= semi_count; 753 count -= semi_count;
751 memcpy_toio(skb->data + semi_count, 754 memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
752 (char *)ei_status.rmem_start, count);
753 } else {
754 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
755 count); 755 count);
756 } else {
757 memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
756 } 758 }
757} 759}
758 760
@@ -761,7 +763,7 @@ static void sane_block_output(struct net_device *dev, int count,
761{ 763{
762 long shmem = (start_page - WD_START_PG)<<8; 764 long shmem = (start_page - WD_START_PG)<<8;
763 765
764 memcpy_toio((char *)dev->mem_start + shmem, buf, count); 766 memcpy_toio(dev->mem_start + shmem, buf, count);
765} 767}
766 768
767/* dayna block input/output */ 769/* dayna block input/output */
@@ -812,7 +814,7 @@ static void slow_sane_get_8390_hdr(struct net_device *dev,
812 int ring_page) 814 int ring_page)
813{ 815{
814 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 816 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
815 word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4); 817 word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4);
816 /* Register endianism - fix here rather than 8390.c */ 818 /* Register endianism - fix here rather than 8390.c */
817 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); 819 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
818} 820}
@@ -826,15 +828,14 @@ static void slow_sane_block_input(struct net_device *dev, int count,
826 if (xfer_start + count > ei_status.rmem_end) { 828 if (xfer_start + count > ei_status.rmem_end) {
827 /* We must wrap the input move. */ 829 /* We must wrap the input move. */
828 int semi_count = ei_status.rmem_end - xfer_start; 830 int semi_count = ei_status.rmem_end - xfer_start;
829 word_memcpy_fromcard(skb->data, 831 word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
830 (char *)dev->mem_start + xfer_base,
831 semi_count); 832 semi_count);
832 count -= semi_count; 833 count -= semi_count;
833 word_memcpy_fromcard(skb->data + semi_count, 834 word_memcpy_fromcard(skb->data + semi_count,
834 (char *)ei_status.rmem_start, count); 835 ei_status.rmem_start, count);
835 } else { 836 } else {
836 word_memcpy_fromcard(skb->data, 837 word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
837 (char *)dev->mem_start + xfer_base, count); 838 count);
838 } 839 }
839} 840}
840 841
@@ -843,12 +844,12 @@ static void slow_sane_block_output(struct net_device *dev, int count,
843{ 844{
844 long shmem = (start_page - WD_START_PG)<<8; 845 long shmem = (start_page - WD_START_PG)<<8;
845 846
846 word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count); 847 word_memcpy_tocard(dev->mem_start + shmem, buf, count);
847} 848}
848 849
849static void word_memcpy_tocard(void *tp, const void *fp, int count) 850static void word_memcpy_tocard(unsigned long tp, const void *fp, int count)
850{ 851{
851 volatile unsigned short *to = tp; 852 volatile unsigned short *to = (void *)tp;
852 const unsigned short *from = fp; 853 const unsigned short *from = fp;
853 854
854 count++; 855 count++;
@@ -858,10 +859,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
858 *to++ = *from++; 859 *to++ = *from++;
859} 860}
860 861
861static void word_memcpy_fromcard(void *tp, const void *fp, int count) 862static void word_memcpy_fromcard(void *tp, unsigned long fp, int count)
862{ 863{
863 unsigned short *to = tp; 864 unsigned short *to = tp;
864 const volatile unsigned short *from = fp; 865 const volatile unsigned short *from = (const void *)fp;
865 866
866 count++; 867 count++;
867 count /= 2; 868 count /= 2;
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 69fa4ef64dd2..669b317974a8 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -110,7 +110,6 @@ static unsigned int net_debug = NET_DEBUG;
110 110
111/* Information that need to be kept for each board. */ 111/* Information that need to be kept for each board. */
112struct net_local { 112struct net_local {
113 struct net_device_stats stats;
114 int chip_type; /* one of: CS8900, CS8920, CS8920M */ 113 int chip_type; /* one of: CS8900, CS8920, CS8920M */
115 char chip_revision; /* revision letter of the chip ('A'...) */ 114 char chip_revision; /* revision letter of the chip ('A'...) */
116 int send_cmd; /* the propercommand used to send a packet. */ 115 int send_cmd; /* the propercommand used to send a packet. */
@@ -444,13 +443,18 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
444 net_rx(dev); 443 net_rx(dev);
445 break; 444 break;
446 case ISQ_TRANSMITTER_EVENT: 445 case ISQ_TRANSMITTER_EVENT:
447 lp->stats.tx_packets++; 446 dev->stats.tx_packets++;
448 netif_wake_queue(dev); 447 netif_wake_queue(dev);
449 if ((status & TX_OK) == 0) lp->stats.tx_errors++; 448 if ((status & TX_OK) == 0)
450 if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++; 449 dev->stats.tx_errors++;
451 if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++; 450 if (status & TX_LOST_CRS)
452 if (status & TX_LATE_COL) lp->stats.tx_window_errors++; 451 dev->stats.tx_carrier_errors++;
453 if (status & TX_16_COL) lp->stats.tx_aborted_errors++; 452 if (status & TX_SQE_ERROR)
453 dev->stats.tx_heartbeat_errors++;
454 if (status & TX_LATE_COL)
455 dev->stats.tx_window_errors++;
456 if (status & TX_16_COL)
457 dev->stats.tx_aborted_errors++;
454 break; 458 break;
455 case ISQ_BUFFER_EVENT: 459 case ISQ_BUFFER_EVENT:
456 if (status & READY_FOR_TX) { 460 if (status & READY_FOR_TX) {
@@ -469,10 +473,10 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
469 } 473 }
470 break; 474 break;
471 case ISQ_RX_MISS_EVENT: 475 case ISQ_RX_MISS_EVENT:
472 lp->stats.rx_missed_errors += (status >>6); 476 dev->stats.rx_missed_errors += (status >> 6);
473 break; 477 break;
474 case ISQ_TX_COL_EVENT: 478 case ISQ_TX_COL_EVENT:
475 lp->stats.collisions += (status >>6); 479 dev->stats.collisions += (status >> 6);
476 break; 480 break;
477 } 481 }
478 } 482 }
@@ -483,19 +487,22 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
483static void 487static void
484net_rx(struct net_device *dev) 488net_rx(struct net_device *dev)
485{ 489{
486 struct net_local *lp = netdev_priv(dev);
487 struct sk_buff *skb; 490 struct sk_buff *skb;
488 int status, length; 491 int status, length;
489 492
490 status = readreg(dev, PP_RxStatus); 493 status = readreg(dev, PP_RxStatus);
491 if ((status & RX_OK) == 0) { 494 if ((status & RX_OK) == 0) {
492 lp->stats.rx_errors++; 495 dev->stats.rx_errors++;
493 if (status & RX_RUNT) lp->stats.rx_length_errors++; 496 if (status & RX_RUNT)
494 if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++; 497 dev->stats.rx_length_errors++;
495 if (status & RX_CRC_ERROR) if (!(status & (RX_EXTRA_DATA|RX_RUNT))) 498 if (status & RX_EXTRA_DATA)
499 dev->stats.rx_length_errors++;
500 if ((status & RX_CRC_ERROR) &&
501 !(status & (RX_EXTRA_DATA|RX_RUNT)))
496 /* per str 172 */ 502 /* per str 172 */
497 lp->stats.rx_crc_errors++; 503 dev->stats.rx_crc_errors++;
498 if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++; 504 if (status & RX_DRIBBLE)
505 dev->stats.rx_frame_errors++;
499 return; 506 return;
500 } 507 }
501 508
@@ -504,7 +511,7 @@ net_rx(struct net_device *dev)
504 skb = alloc_skb(length, GFP_ATOMIC); 511 skb = alloc_skb(length, GFP_ATOMIC);
505 if (skb == NULL) { 512 if (skb == NULL) {
506 printk("%s: Memory squeeze, dropping packet.\n", dev->name); 513 printk("%s: Memory squeeze, dropping packet.\n", dev->name);
507 lp->stats.rx_dropped++; 514 dev->stats.rx_dropped++;
508 return; 515 return;
509 } 516 }
510 skb_put(skb, length); 517 skb_put(skb, length);
@@ -519,8 +526,8 @@ net_rx(struct net_device *dev)
519 526
520 skb->protocol=eth_type_trans(skb,dev); 527 skb->protocol=eth_type_trans(skb,dev);
521 netif_rx(skb); 528 netif_rx(skb);
522 lp->stats.rx_packets++; 529 dev->stats.rx_packets++;
523 lp->stats.rx_bytes += length; 530 dev->stats.rx_bytes += length;
524} 531}
525 532
526/* The inverse routine to net_open(). */ 533/* The inverse routine to net_open(). */
@@ -548,16 +555,15 @@ net_close(struct net_device *dev)
548static struct net_device_stats * 555static struct net_device_stats *
549net_get_stats(struct net_device *dev) 556net_get_stats(struct net_device *dev)
550{ 557{
551 struct net_local *lp = netdev_priv(dev);
552 unsigned long flags; 558 unsigned long flags;
553 559
554 local_irq_save(flags); 560 local_irq_save(flags);
555 /* Update the statistics from the device registers. */ 561 /* Update the statistics from the device registers. */
556 lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); 562 dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
557 lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6); 563 dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
558 local_irq_restore(flags); 564 local_irq_restore(flags);
559 565
560 return &lp->stats; 566 return &dev->stats;
561} 567}
562 568
563static void set_multicast_list(struct net_device *dev) 569static void set_multicast_list(struct net_device *dev)
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 40797fbdca9f..ff2f158ab0b9 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1082,7 +1082,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1082 if (!phydev) 1082 if (!phydev)
1083 return -ENODEV; 1083 return -ENODEV;
1084 1084
1085 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 1085 return phy_mii_ioctl(phydev, rq, cmd);
1086} 1086}
1087 1087
1088static const struct net_device_ops macb_netdev_ops = { 1088static const struct net_device_ops macb_netdev_ops = {
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index adb54fe2d82a..c93679ee6994 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -140,21 +140,40 @@ static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
140 140
141static int macsonic_open(struct net_device* dev) 141static int macsonic_open(struct net_device* dev)
142{ 142{
143 if (request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { 143 int retval;
144 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); 144
145 return -EAGAIN; 145 retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST,
146 "sonic", dev);
147 if (retval) {
148 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
149 dev->name, dev->irq);
150 goto err;
146 } 151 }
147 /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes 152 /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes
148 * in at priority level 3. However, we sometimes get the level 2 inter- 153 * in at priority level 3. However, we sometimes get the level 2 inter-
149 * rupt as well, which must prevent re-entrance of the sonic handler. 154 * rupt as well, which must prevent re-entrance of the sonic handler.
150 */ 155 */
151 if (dev->irq == IRQ_AUTO_3) 156 if (dev->irq == IRQ_AUTO_3) {
152 if (request_irq(IRQ_NUBUS_9, macsonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { 157 retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt,
153 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9); 158 IRQ_FLG_FAST, "sonic", dev);
154 free_irq(dev->irq, dev); 159 if (retval) {
155 return -EAGAIN; 160 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
161 dev->name, IRQ_NUBUS_9);
162 goto err_irq;
156 } 163 }
157 return sonic_open(dev); 164 }
165 retval = sonic_open(dev);
166 if (retval)
167 goto err_irq_nubus;
168 return 0;
169
170err_irq_nubus:
171 if (dev->irq == IRQ_AUTO_3)
172 free_irq(IRQ_NUBUS_9, dev);
173err_irq:
174 free_irq(dev->irq, dev);
175err:
176 return retval;
158} 177}
159 178
160static int macsonic_close(struct net_device* dev) 179static int macsonic_close(struct net_device* dev)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f15fe2cf72ae..0ef0eb0db945 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -37,8 +37,14 @@ struct macvlan_port {
37 struct net_device *dev; 37 struct net_device *dev;
38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu;
40}; 41};
41 42
43#define macvlan_port_get_rcu(dev) \
44 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
45#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
46#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
47
42static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, 48static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
43 const unsigned char *addr) 49 const unsigned char *addr)
44{ 50{
@@ -145,15 +151,17 @@ static void macvlan_broadcast(struct sk_buff *skb,
145} 151}
146 152
147/* called under rcu_read_lock() from netif_receive_skb */ 153/* called under rcu_read_lock() from netif_receive_skb */
148static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port, 154static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
149 struct sk_buff *skb)
150{ 155{
156 struct macvlan_port *port;
151 const struct ethhdr *eth = eth_hdr(skb); 157 const struct ethhdr *eth = eth_hdr(skb);
152 const struct macvlan_dev *vlan; 158 const struct macvlan_dev *vlan;
153 const struct macvlan_dev *src; 159 const struct macvlan_dev *src;
154 struct net_device *dev; 160 struct net_device *dev;
155 unsigned int len; 161 unsigned int len = 0;
162 int ret = NET_RX_DROP;
156 163
164 port = macvlan_port_get_rcu(skb->dev);
157 if (is_multicast_ether_addr(eth->h_dest)) { 165 if (is_multicast_ether_addr(eth->h_dest)) {
158 src = macvlan_hash_lookup(port, eth->h_source); 166 src = macvlan_hash_lookup(port, eth->h_source);
159 if (!src) 167 if (!src)
@@ -188,14 +196,16 @@ static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
188 } 196 }
189 len = skb->len + ETH_HLEN; 197 len = skb->len + ETH_HLEN;
190 skb = skb_share_check(skb, GFP_ATOMIC); 198 skb = skb_share_check(skb, GFP_ATOMIC);
191 macvlan_count_rx(vlan, len, skb != NULL, 0);
192 if (!skb) 199 if (!skb)
193 return NULL; 200 goto out;
194 201
195 skb->dev = dev; 202 skb->dev = dev;
196 skb->pkt_type = PACKET_HOST; 203 skb->pkt_type = PACKET_HOST;
197 204
198 vlan->receive(skb); 205 ret = vlan->receive(skb);
206
207out:
208 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
199 return NULL; 209 return NULL;
200} 210}
201 211
@@ -424,29 +434,38 @@ static void macvlan_uninit(struct net_device *dev)
424 free_percpu(vlan->rx_stats); 434 free_percpu(vlan->rx_stats);
425} 435}
426 436
427static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev) 437static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
438 struct rtnl_link_stats64 *stats)
428{ 439{
429 struct net_device_stats *stats = &dev->stats;
430 struct macvlan_dev *vlan = netdev_priv(dev); 440 struct macvlan_dev *vlan = netdev_priv(dev);
431 441
432 dev_txq_stats_fold(dev, stats); 442 dev_txq_stats_fold(dev, stats);
433 443
434 if (vlan->rx_stats) { 444 if (vlan->rx_stats) {
435 struct macvlan_rx_stats *p, rx = {0}; 445 struct macvlan_rx_stats *p, accum = {0};
446 u64 rx_packets, rx_bytes, rx_multicast;
447 unsigned int start;
436 int i; 448 int i;
437 449
438 for_each_possible_cpu(i) { 450 for_each_possible_cpu(i) {
439 p = per_cpu_ptr(vlan->rx_stats, i); 451 p = per_cpu_ptr(vlan->rx_stats, i);
440 rx.rx_packets += p->rx_packets; 452 do {
441 rx.rx_bytes += p->rx_bytes; 453 start = u64_stats_fetch_begin_bh(&p->syncp);
442 rx.rx_errors += p->rx_errors; 454 rx_packets = p->rx_packets;
443 rx.multicast += p->multicast; 455 rx_bytes = p->rx_bytes;
456 rx_multicast = p->rx_multicast;
457 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
458 accum.rx_packets += rx_packets;
459 accum.rx_bytes += rx_bytes;
460 accum.rx_multicast += rx_multicast;
461 /* rx_errors is an ulong, updated without syncp protection */
462 accum.rx_errors += p->rx_errors;
444 } 463 }
445 stats->rx_packets = rx.rx_packets; 464 stats->rx_packets = accum.rx_packets;
446 stats->rx_bytes = rx.rx_bytes; 465 stats->rx_bytes = accum.rx_bytes;
447 stats->rx_errors = rx.rx_errors; 466 stats->rx_errors = accum.rx_errors;
448 stats->rx_dropped = rx.rx_errors; 467 stats->rx_dropped = accum.rx_errors;
449 stats->multicast = rx.multicast; 468 stats->multicast = accum.rx_multicast;
450 } 469 }
451 return stats; 470 return stats;
452} 471}
@@ -495,7 +514,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
495 .ndo_change_rx_flags = macvlan_change_rx_flags, 514 .ndo_change_rx_flags = macvlan_change_rx_flags,
496 .ndo_set_mac_address = macvlan_set_mac_address, 515 .ndo_set_mac_address = macvlan_set_mac_address,
497 .ndo_set_multicast_list = macvlan_set_multicast_list, 516 .ndo_set_multicast_list = macvlan_set_multicast_list,
498 .ndo_get_stats = macvlan_dev_get_stats, 517 .ndo_get_stats64 = macvlan_dev_get_stats64,
499 .ndo_validate_addr = eth_validate_addr, 518 .ndo_validate_addr = eth_validate_addr,
500}; 519};
501 520
@@ -521,6 +540,7 @@ static int macvlan_port_create(struct net_device *dev)
521{ 540{
522 struct macvlan_port *port; 541 struct macvlan_port *port;
523 unsigned int i; 542 unsigned int i;
543 int err;
524 544
525 if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) 545 if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
526 return -EINVAL; 546 return -EINVAL;
@@ -533,19 +553,32 @@ static int macvlan_port_create(struct net_device *dev)
533 INIT_LIST_HEAD(&port->vlans); 553 INIT_LIST_HEAD(&port->vlans);
534 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 554 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
535 INIT_HLIST_HEAD(&port->vlan_hash[i]); 555 INIT_HLIST_HEAD(&port->vlan_hash[i]);
536 rcu_assign_pointer(dev->macvlan_port, port); 556
537 return 0; 557 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
558 if (err)
559 kfree(port);
560
561 dev->priv_flags |= IFF_MACVLAN_PORT;
562 return err;
538} 563}
539 564
540static void macvlan_port_destroy(struct net_device *dev) 565static void macvlan_port_rcu_free(struct rcu_head *head)
541{ 566{
542 struct macvlan_port *port = dev->macvlan_port; 567 struct macvlan_port *port;
543 568
544 rcu_assign_pointer(dev->macvlan_port, NULL); 569 port = container_of(head, struct macvlan_port, rcu);
545 synchronize_rcu();
546 kfree(port); 570 kfree(port);
547} 571}
548 572
573static void macvlan_port_destroy(struct net_device *dev)
574{
575 struct macvlan_port *port = macvlan_port_get(dev);
576
577 dev->priv_flags &= ~IFF_MACVLAN_PORT;
578 netdev_rx_handler_unregister(dev);
579 call_rcu(&port->rcu, macvlan_port_rcu_free);
580}
581
549static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) 582static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
550{ 583{
551 if (tb[IFLA_ADDRESS]) { 584 if (tb[IFLA_ADDRESS]) {
@@ -621,12 +654,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
621 if (!tb[IFLA_ADDRESS]) 654 if (!tb[IFLA_ADDRESS])
622 random_ether_addr(dev->dev_addr); 655 random_ether_addr(dev->dev_addr);
623 656
624 if (lowerdev->macvlan_port == NULL) { 657 if (!macvlan_port_exists(lowerdev)) {
625 err = macvlan_port_create(lowerdev); 658 err = macvlan_port_create(lowerdev);
626 if (err < 0) 659 if (err < 0)
627 return err; 660 return err;
628 } 661 }
629 port = lowerdev->macvlan_port; 662 port = macvlan_port_get(lowerdev);
630 663
631 vlan->lowerdev = lowerdev; 664 vlan->lowerdev = lowerdev;
632 vlan->dev = dev; 665 vlan->dev = dev;
@@ -736,10 +769,11 @@ static int macvlan_device_event(struct notifier_block *unused,
736 struct macvlan_dev *vlan, *next; 769 struct macvlan_dev *vlan, *next;
737 struct macvlan_port *port; 770 struct macvlan_port *port;
738 771
739 port = dev->macvlan_port; 772 if (!macvlan_port_exists(dev))
740 if (port == NULL)
741 return NOTIFY_DONE; 773 return NOTIFY_DONE;
742 774
775 port = macvlan_port_get(dev);
776
743 switch (event) { 777 switch (event) {
744 case NETDEV_CHANGE: 778 case NETDEV_CHANGE:
745 list_for_each_entry(vlan, &port->vlans, list) 779 list_for_each_entry(vlan, &port->vlans, list)
@@ -773,14 +807,12 @@ static int __init macvlan_init_module(void)
773 int err; 807 int err;
774 808
775 register_netdevice_notifier(&macvlan_notifier_block); 809 register_netdevice_notifier(&macvlan_notifier_block);
776 macvlan_handle_frame_hook = macvlan_handle_frame;
777 810
778 err = macvlan_link_register(&macvlan_link_ops); 811 err = macvlan_link_register(&macvlan_link_ops);
779 if (err < 0) 812 if (err < 0)
780 goto err1; 813 goto err1;
781 return 0; 814 return 0;
782err1: 815err1:
783 macvlan_handle_frame_hook = NULL;
784 unregister_netdevice_notifier(&macvlan_notifier_block); 816 unregister_netdevice_notifier(&macvlan_notifier_block);
785 return err; 817 return err;
786} 818}
@@ -788,7 +820,6 @@ err1:
788static void __exit macvlan_cleanup_module(void) 820static void __exit macvlan_cleanup_module(void)
789{ 821{
790 rtnl_link_unregister(&macvlan_link_ops); 822 rtnl_link_unregister(&macvlan_link_ops);
791 macvlan_handle_frame_hook = NULL;
792 unregister_netdevice_notifier(&macvlan_notifier_block); 823 unregister_netdevice_notifier(&macvlan_notifier_block);
793} 824}
794 825
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ff02b836c3c4..3b1c54a9c6ef 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -58,7 +58,7 @@ static struct proto macvtap_proto = {
58 * only has one tap, the interface numbers assure that the 58 * only has one tap, the interface numbers assure that the
59 * device nodes are unique. 59 * device nodes are unique.
60 */ 60 */
61static unsigned int macvtap_major; 61static dev_t macvtap_major;
62#define MACVTAP_NUM_DEVS 65536 62#define MACVTAP_NUM_DEVS 65536
63static struct class *macvtap_class; 63static struct class *macvtap_class;
64static struct cdev macvtap_cdev; 64static struct cdev macvtap_cdev;
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index f599294fa8ab..68aaa42d0ced 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -101,8 +101,8 @@ static void catas_reset(struct work_struct *work)
101 ret = mlx4_restart_one(priv->dev.pdev); 101 ret = mlx4_restart_one(priv->dev.pdev);
102 /* 'priv' now is not valid */ 102 /* 'priv' now is not valid */
103 if (ret) 103 if (ret)
104 printk(KERN_ERR "mlx4 %s: Reset failed (%d)\n", 104 pr_err("mlx4 %s: Reset failed (%d)\n",
105 pci_name(pdev), ret); 105 pci_name(pdev), ret);
106 else { 106 else {
107 dev = pci_get_drvdata(pdev); 107 dev = pci_get_drvdata(pdev);
108 mlx4_dbg(dev, "Reset succeeded\n"); 108 mlx4_dbg(dev, "Reset succeeded\n");
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index d5afd037cd7d..b275238fe70d 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -387,6 +387,42 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
387 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size; 387 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
388} 388}
389 389
390static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
391{
392 struct mlx4_en_priv *priv = netdev_priv(dev);
393 struct mlx4_en_dev *mdev = priv->mdev;
394 int rc = 0;
395 int changed = 0;
396
397 if (data & ~ETH_FLAG_LRO)
398 return -EOPNOTSUPP;
399
400 if (data & ETH_FLAG_LRO) {
401 if (mdev->profile.num_lro == 0)
402 return -EOPNOTSUPP;
403 if (!(dev->features & NETIF_F_LRO))
404 changed = 1;
405 } else if (dev->features & NETIF_F_LRO) {
406 changed = 1;
407 }
408
409 if (changed) {
410 if (netif_running(dev)) {
411 mutex_lock(&mdev->state_lock);
412 mlx4_en_stop_port(dev);
413 }
414 dev->features ^= NETIF_F_LRO;
415 if (netif_running(dev)) {
416 rc = mlx4_en_start_port(dev);
417 if (rc)
418 en_err(priv, "Failed to restart port\n");
419 mutex_unlock(&mdev->state_lock);
420 }
421 }
422
423 return rc;
424}
425
390const struct ethtool_ops mlx4_en_ethtool_ops = { 426const struct ethtool_ops mlx4_en_ethtool_ops = {
391 .get_drvinfo = mlx4_en_get_drvinfo, 427 .get_drvinfo = mlx4_en_get_drvinfo,
392 .get_settings = mlx4_en_get_settings, 428 .get_settings = mlx4_en_get_settings,
@@ -415,7 +451,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
415 .get_ringparam = mlx4_en_get_ringparam, 451 .get_ringparam = mlx4_en_get_ringparam,
416 .set_ringparam = mlx4_en_set_ringparam, 452 .set_ringparam = mlx4_en_set_ringparam,
417 .get_flags = ethtool_op_get_flags, 453 .get_flags = ethtool_op_get_flags,
418 .set_flags = ethtool_op_set_flags, 454 .set_flags = mlx4_ethtool_op_set_flags,
419}; 455};
420 456
421 457
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index cbabf14f95d0..97934f1ec53a 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -79,6 +79,29 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
79MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." 79MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
80 " Per priority bit mask"); 80 " Per priority bit mask");
81 81
82int en_print(const char *level, const struct mlx4_en_priv *priv,
83 const char *format, ...)
84{
85 va_list args;
86 struct va_format vaf;
87 int i;
88
89 va_start(args, format);
90
91 vaf.fmt = format;
92 vaf.va = &args;
93 if (priv->registered)
94 i = printk("%s%s: %s: %pV",
95 level, DRV_NAME, priv->dev->name, &vaf);
96 else
97 i = printk("%s%s: %s: Port %d: %pV",
98 level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
99 priv->port, &vaf);
100 va_end(args);
101
102 return i;
103}
104
82static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) 105static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
83{ 106{
84 struct mlx4_en_profile *params = &mdev->profile; 107 struct mlx4_en_profile *params = &mdev->profile;
@@ -152,15 +175,11 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
152 175
153static void *mlx4_en_add(struct mlx4_dev *dev) 176static void *mlx4_en_add(struct mlx4_dev *dev)
154{ 177{
155 static int mlx4_en_version_printed;
156 struct mlx4_en_dev *mdev; 178 struct mlx4_en_dev *mdev;
157 int i; 179 int i;
158 int err; 180 int err;
159 181
160 if (!mlx4_en_version_printed) { 182 printk_once(KERN_INFO "%s", mlx4_en_version);
161 printk(KERN_INFO "%s", mlx4_en_version);
162 mlx4_en_version_printed++;
163 }
164 183
165 mdev = kzalloc(sizeof *mdev, GFP_KERNEL); 184 mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
166 if (!mdev) { 185 if (!mdev) {
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 96180c0ec206..a0d8a26f5a02 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -961,6 +961,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
961 } 961 }
962 962
963 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 963 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
964 dev->dev_id = port - 1;
964 965
965 /* 966 /*
966 * Initialize driver private data 967 * Initialize driver private data
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 423053482ed5..6d7b2bf210ce 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -110,7 +110,7 @@ struct mlx4_eqe {
110 u32 raw[6]; 110 u32 raw[6];
111 struct { 111 struct {
112 __be32 cqn; 112 __be32 cqn;
113 } __attribute__((packed)) comp; 113 } __packed comp;
114 struct { 114 struct {
115 u16 reserved1; 115 u16 reserved1;
116 __be16 token; 116 __be16 token;
@@ -118,27 +118,27 @@ struct mlx4_eqe {
118 u8 reserved3[3]; 118 u8 reserved3[3];
119 u8 status; 119 u8 status;
120 __be64 out_param; 120 __be64 out_param;
121 } __attribute__((packed)) cmd; 121 } __packed cmd;
122 struct { 122 struct {
123 __be32 qpn; 123 __be32 qpn;
124 } __attribute__((packed)) qp; 124 } __packed qp;
125 struct { 125 struct {
126 __be32 srqn; 126 __be32 srqn;
127 } __attribute__((packed)) srq; 127 } __packed srq;
128 struct { 128 struct {
129 __be32 cqn; 129 __be32 cqn;
130 u32 reserved1; 130 u32 reserved1;
131 u8 reserved2[3]; 131 u8 reserved2[3];
132 u8 syndrome; 132 u8 syndrome;
133 } __attribute__((packed)) cq_err; 133 } __packed cq_err;
134 struct { 134 struct {
135 u32 reserved1[2]; 135 u32 reserved1[2];
136 __be32 port; 136 __be32 port;
137 } __attribute__((packed)) port_change; 137 } __packed port_change;
138 } event; 138 } event;
139 u8 reserved3[3]; 139 u8 reserved3[3];
140 u8 owner; 140 u8 owner;
141} __attribute__((packed)); 141} __packed;
142 142
143static void eq_set_ci(struct mlx4_eq *eq, int req_not) 143static void eq_set_ci(struct mlx4_eq *eq, int req_not)
144{ 144{
@@ -475,10 +475,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
475 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); 475 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
476 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) { 476 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
477 if (i % 4 == 0) 477 if (i % 4 == 0)
478 printk("[%02x] ", i * 4); 478 pr_cont("[%02x] ", i * 4);
479 printk(" %08x", be32_to_cpup(mailbox->buf + i * 4)); 479 pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
480 if ((i + 1) % 4 == 0) 480 if ((i + 1) % 4 == 0)
481 printk("\n"); 481 pr_cont("\n");
482 } 482 }
483 } 483 }
484 484
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index e3e0d54a7c87..5102ab1ac561 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1050,8 +1050,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1050 int err; 1050 int err;
1051 int port; 1051 int port;
1052 1052
1053 printk(KERN_INFO PFX "Initializing %s\n", 1053 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
1054 pci_name(pdev));
1055 1054
1056 err = pci_enable_device(pdev); 1055 err = pci_enable_device(pdev);
1057 if (err) { 1056 if (err) {
@@ -1216,12 +1215,7 @@ err_disable_pdev:
1216static int __devinit mlx4_init_one(struct pci_dev *pdev, 1215static int __devinit mlx4_init_one(struct pci_dev *pdev,
1217 const struct pci_device_id *id) 1216 const struct pci_device_id *id)
1218{ 1217{
1219 static int mlx4_version_printed; 1218 printk_once(KERN_INFO "%s", mlx4_version);
1220
1221 if (!mlx4_version_printed) {
1222 printk(KERN_INFO "%s", mlx4_version);
1223 ++mlx4_version_printed;
1224 }
1225 1219
1226 return __mlx4_init_one(pdev, id); 1220 return __mlx4_init_one(pdev, id);
1227} 1221}
@@ -1301,17 +1295,17 @@ static struct pci_driver mlx4_driver = {
1301static int __init mlx4_verify_params(void) 1295static int __init mlx4_verify_params(void)
1302{ 1296{
1303 if ((log_num_mac < 0) || (log_num_mac > 7)) { 1297 if ((log_num_mac < 0) || (log_num_mac > 7)) {
1304 printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac); 1298 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
1305 return -1; 1299 return -1;
1306 } 1300 }
1307 1301
1308 if ((log_num_vlan < 0) || (log_num_vlan > 7)) { 1302 if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
1309 printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan); 1303 pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);
1310 return -1; 1304 return -1;
1311 } 1305 }
1312 1306
1313 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { 1307 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
1314 printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 1308 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1315 return -1; 1309 return -1;
1316 } 1310 }
1317 1311
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 13343e884999..0da5bb7285b4 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -48,7 +48,6 @@
48#include <linux/mlx4/doorbell.h> 48#include <linux/mlx4/doorbell.h>
49 49
50#define DRV_NAME "mlx4_core" 50#define DRV_NAME "mlx4_core"
51#define PFX DRV_NAME ": "
52#define DRV_VERSION "0.01" 51#define DRV_VERSION "0.01"
53#define DRV_RELDATE "May 1, 2007" 52#define DRV_RELDATE "May 1, 2007"
54 53
@@ -88,17 +87,17 @@ extern int mlx4_debug_level;
88#endif /* CONFIG_MLX4_DEBUG */ 87#endif /* CONFIG_MLX4_DEBUG */
89 88
90#define mlx4_dbg(mdev, format, arg...) \ 89#define mlx4_dbg(mdev, format, arg...) \
91 do { \ 90do { \
92 if (mlx4_debug_level) \ 91 if (mlx4_debug_level) \
93 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \ 92 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
94 } while (0) 93} while (0)
95 94
96#define mlx4_err(mdev, format, arg...) \ 95#define mlx4_err(mdev, format, arg...) \
97 dev_err(&mdev->pdev->dev, format, ## arg) 96 dev_err(&mdev->pdev->dev, format, ##arg)
98#define mlx4_info(mdev, format, arg...) \ 97#define mlx4_info(mdev, format, arg...) \
99 dev_info(&mdev->pdev->dev, format, ## arg) 98 dev_info(&mdev->pdev->dev, format, ##arg)
100#define mlx4_warn(mdev, format, arg...) \ 99#define mlx4_warn(mdev, format, arg...) \
101 dev_warn(&mdev->pdev->dev, format, ## arg) 100 dev_warn(&mdev->pdev->dev, format, ##arg)
102 101
103struct mlx4_bitmap { 102struct mlx4_bitmap {
104 u32 last; 103 u32 last;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index b55e46c8b682..449210994ee9 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -52,40 +52,8 @@
52#define DRV_VERSION "1.4.1.1" 52#define DRV_VERSION "1.4.1.1"
53#define DRV_RELDATE "June 2009" 53#define DRV_RELDATE "June 2009"
54 54
55
56#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
57 56
58#define en_print(level, priv, format, arg...) \
59 { \
60 if ((priv)->registered) \
61 printk(level "%s: %s: " format, DRV_NAME, \
62 (priv->dev)->name, ## arg); \
63 else \
64 printk(level "%s: %s: Port %d: " format, \
65 DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
66 (priv)->port, ## arg); \
67 }
68
69#define en_dbg(mlevel, priv, format, arg...) \
70 { \
71 if (NETIF_MSG_##mlevel & priv->msg_enable) \
72 en_print(KERN_DEBUG, priv, format, ## arg) \
73 }
74#define en_warn(priv, format, arg...) \
75 en_print(KERN_WARNING, priv, format, ## arg)
76#define en_err(priv, format, arg...) \
77 en_print(KERN_ERR, priv, format, ## arg)
78
79#define mlx4_err(mdev, format, arg...) \
80 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
81 dev_name(&mdev->pdev->dev) , ## arg)
82#define mlx4_info(mdev, format, arg...) \
83 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
84 dev_name(&mdev->pdev->dev) , ## arg)
85#define mlx4_warn(mdev, format, arg...) \
86 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
87 dev_name(&mdev->pdev->dev) , ## arg)
88
89/* 57/*
90 * Device constants 58 * Device constants
91 */ 59 */
@@ -568,4 +536,34 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
568 * Globals 536 * Globals
569 */ 537 */
570extern const struct ethtool_ops mlx4_en_ethtool_ops; 538extern const struct ethtool_ops mlx4_en_ethtool_ops;
539
540
541
542/*
543 * printk / logging functions
544 */
545
546int en_print(const char *level, const struct mlx4_en_priv *priv,
547 const char *format, ...) __attribute__ ((format (printf, 3, 4)));
548
549#define en_dbg(mlevel, priv, format, arg...) \
550do { \
551 if (NETIF_MSG_##mlevel & priv->msg_enable) \
552 en_print(KERN_DEBUG, priv, format, ##arg); \
553} while (0)
554#define en_warn(priv, format, arg...) \
555 en_print(KERN_WARNING, priv, format, ##arg)
556#define en_err(priv, format, arg...) \
557 en_print(KERN_ERR, priv, format, ##arg)
558
559#define mlx4_err(mdev, format, arg...) \
560 pr_err("%s %s: " format, DRV_NAME, \
561 dev_name(&mdev->pdev->dev), ##arg)
562#define mlx4_info(mdev, format, arg...) \
563 pr_info("%s %s: " format, DRV_NAME, \
564 dev_name(&mdev->pdev->dev), ##arg)
565#define mlx4_warn(mdev, format, arg...) \
566 pr_warning("%s %s: " format, DRV_NAME, \
567 dev_name(&mdev->pdev->dev), ##arg)
568
571#endif 569#endif
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 3dc69be4949f..9c188bdd7f4f 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -58,7 +58,7 @@ struct mlx4_mpt_entry {
58 __be32 mtt_sz; 58 __be32 mtt_sz;
59 __be32 entity_size; 59 __be32 entity_size;
60 __be32 first_byte_offset; 60 __be32 first_byte_offset;
61} __attribute__((packed)); 61} __packed;
62 62
63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) 63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
64#define MLX4_MPT_FLAG_FREE (0x3UL << 28) 64#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 73bb8ea6f54a..2d488abcf62d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1640,6 +1640,11 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1640 } 1640 }
1641} 1641}
1642 1642
1643static int mv643xx_eth_set_flags(struct net_device *dev, u32 data)
1644{
1645 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO);
1646}
1647
1643static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) 1648static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1644{ 1649{
1645 if (sset == ETH_SS_STATS) 1650 if (sset == ETH_SS_STATS)
@@ -1665,7 +1670,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1665 .get_strings = mv643xx_eth_get_strings, 1670 .get_strings = mv643xx_eth_get_strings,
1666 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1671 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
1667 .get_flags = ethtool_op_get_flags, 1672 .get_flags = ethtool_op_get_flags,
1668 .set_flags = ethtool_op_set_flags, 1673 .set_flags = mv643xx_eth_set_flags,
1669 .get_sset_count = mv643xx_eth_get_sset_count, 1674 .get_sset_count = mv643xx_eth_get_sset_count,
1670}; 1675};
1671 1676
@@ -2456,7 +2461,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2456 struct mv643xx_eth_private *mp = netdev_priv(dev); 2461 struct mv643xx_eth_private *mp = netdev_priv(dev);
2457 2462
2458 if (mp->phy != NULL) 2463 if (mp->phy != NULL)
2459 return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd); 2464 return phy_mii_ioctl(mp->phy, ifr, cmd);
2460 2465
2461 return -EOPNOTSUPP; 2466 return -EOPNOTSUPP;
2462} 2467}
@@ -2670,7 +2675,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2670 * Detect hardware parameters. 2675 * Detect hardware parameters.
2671 */ 2676 */
2672 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; 2677 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2673 msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024; 2678 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2679 pd->tx_csum_limit : 9 * 1024;
2674 infer_hw_params(msp); 2680 infer_hw_params(msp);
2675 2681
2676 platform_set_drvdata(pdev, msp); 2682 platform_set_drvdata(pdev, msp);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e0b47cc8a86e..d771d1650d60 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1730,8 +1730,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
1730 if (csum_enabled) 1730 if (csum_enabled)
1731 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 1731 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
1732 else { 1732 else {
1733 u32 flags = ethtool_op_get_flags(netdev); 1733 netdev->features &= ~NETIF_F_LRO;
1734 err = ethtool_op_set_flags(netdev, (flags & ~ETH_FLAG_LRO));
1735 mgp->csum_flag = 0; 1734 mgp->csum_flag = 0;
1736 1735
1737 } 1736 }
@@ -1900,6 +1899,11 @@ static u32 myri10ge_get_msglevel(struct net_device *netdev)
1900 return mgp->msg_enable; 1899 return mgp->msg_enable;
1901} 1900}
1902 1901
1902static int myri10ge_set_flags(struct net_device *netdev, u32 value)
1903{
1904 return ethtool_op_set_flags(netdev, value, ETH_FLAG_LRO);
1905}
1906
1903static const struct ethtool_ops myri10ge_ethtool_ops = { 1907static const struct ethtool_ops myri10ge_ethtool_ops = {
1904 .get_settings = myri10ge_get_settings, 1908 .get_settings = myri10ge_get_settings,
1905 .get_drvinfo = myri10ge_get_drvinfo, 1909 .get_drvinfo = myri10ge_get_drvinfo,
@@ -1920,7 +1924,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
1920 .set_msglevel = myri10ge_set_msglevel, 1924 .set_msglevel = myri10ge_set_msglevel,
1921 .get_msglevel = myri10ge_get_msglevel, 1925 .get_msglevel = myri10ge_get_msglevel,
1922 .get_flags = ethtool_op_get_flags, 1926 .get_flags = ethtool_op_get_flags,
1923 .set_flags = ethtool_op_set_flags 1927 .set_flags = myri10ge_set_flags
1924}; 1928};
1925 1929
1926static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) 1930static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 1a57c3da1f49..04e552aa14ec 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -1079,7 +1079,7 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic
1079 1079
1080 mp->dev = dev; 1080 mp->dev = dev;
1081 dev->watchdog_timeo = 5*HZ; 1081 dev->watchdog_timeo = 5*HZ;
1082 dev->irq = op->irqs[0]; 1082 dev->irq = op->archdata.irqs[0];
1083 dev->netdev_ops = &myri_ops; 1083 dev->netdev_ops = &myri_ops;
1084 1084
1085 /* Register interrupt handler now. */ 1085 /* Register interrupt handler now. */
@@ -1172,12 +1172,12 @@ static struct of_platform_driver myri_sbus_driver = {
1172 1172
1173static int __init myri_sbus_init(void) 1173static int __init myri_sbus_init(void)
1174{ 1174{
1175 return of_register_driver(&myri_sbus_driver, &of_bus_type); 1175 return of_register_platform_driver(&myri_sbus_driver);
1176} 1176}
1177 1177
1178static void __exit myri_sbus_exit(void) 1178static void __exit myri_sbus_exit(void)
1179{ 1179{
1180 of_unregister_driver(&myri_sbus_driver); 1180 of_unregister_platform_driver(&myri_sbus_driver);
1181} 1181}
1182 1182
1183module_init(myri_sbus_init); 1183module_init(myri_sbus_init);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 2a17b503feaa..a6033d48b5cc 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -548,7 +548,6 @@ struct netdev_private {
548 dma_addr_t tx_dma[TX_RING_SIZE]; 548 dma_addr_t tx_dma[TX_RING_SIZE];
549 struct net_device *dev; 549 struct net_device *dev;
550 struct napi_struct napi; 550 struct napi_struct napi;
551 struct net_device_stats stats;
552 /* Media monitoring timer */ 551 /* Media monitoring timer */
553 struct timer_list timer; 552 struct timer_list timer;
554 /* Frequently used values: keep some adjacent for cache effect */ 553 /* Frequently used values: keep some adjacent for cache effect */
@@ -1906,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev)
1906 enable_irq(dev->irq); 1905 enable_irq(dev->irq);
1907 1906
1908 dev->trans_start = jiffies; /* prevent tx timeout */ 1907 dev->trans_start = jiffies; /* prevent tx timeout */
1909 np->stats.tx_errors++; 1908 dev->stats.tx_errors++;
1910 netif_wake_queue(dev); 1909 netif_wake_queue(dev);
1911} 1910}
1912 1911
@@ -2009,7 +2008,7 @@ static void drain_tx(struct net_device *dev)
2009 np->tx_dma[i], np->tx_skbuff[i]->len, 2008 np->tx_dma[i], np->tx_skbuff[i]->len,
2010 PCI_DMA_TODEVICE); 2009 PCI_DMA_TODEVICE);
2011 dev_kfree_skb(np->tx_skbuff[i]); 2010 dev_kfree_skb(np->tx_skbuff[i]);
2012 np->stats.tx_dropped++; 2011 dev->stats.tx_dropped++;
2013 } 2012 }
2014 np->tx_skbuff[i] = NULL; 2013 np->tx_skbuff[i] = NULL;
2015 } 2014 }
@@ -2115,7 +2114,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
2115 writel(TxOn, ioaddr + ChipCmd); 2114 writel(TxOn, ioaddr + ChipCmd);
2116 } else { 2115 } else {
2117 dev_kfree_skb_irq(skb); 2116 dev_kfree_skb_irq(skb);
2118 np->stats.tx_dropped++; 2117 dev->stats.tx_dropped++;
2119 } 2118 }
2120 spin_unlock_irqrestore(&np->lock, flags); 2119 spin_unlock_irqrestore(&np->lock, flags);
2121 2120
@@ -2140,20 +2139,20 @@ static void netdev_tx_done(struct net_device *dev)
2140 dev->name, np->dirty_tx, 2139 dev->name, np->dirty_tx,
2141 le32_to_cpu(np->tx_ring[entry].cmd_status)); 2140 le32_to_cpu(np->tx_ring[entry].cmd_status));
2142 if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) { 2141 if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
2143 np->stats.tx_packets++; 2142 dev->stats.tx_packets++;
2144 np->stats.tx_bytes += np->tx_skbuff[entry]->len; 2143 dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
2145 } else { /* Various Tx errors */ 2144 } else { /* Various Tx errors */
2146 int tx_status = 2145 int tx_status =
2147 le32_to_cpu(np->tx_ring[entry].cmd_status); 2146 le32_to_cpu(np->tx_ring[entry].cmd_status);
2148 if (tx_status & (DescTxAbort|DescTxExcColl)) 2147 if (tx_status & (DescTxAbort|DescTxExcColl))
2149 np->stats.tx_aborted_errors++; 2148 dev->stats.tx_aborted_errors++;
2150 if (tx_status & DescTxFIFO) 2149 if (tx_status & DescTxFIFO)
2151 np->stats.tx_fifo_errors++; 2150 dev->stats.tx_fifo_errors++;
2152 if (tx_status & DescTxCarrier) 2151 if (tx_status & DescTxCarrier)
2153 np->stats.tx_carrier_errors++; 2152 dev->stats.tx_carrier_errors++;
2154 if (tx_status & DescTxOOWCol) 2153 if (tx_status & DescTxOOWCol)
2155 np->stats.tx_window_errors++; 2154 dev->stats.tx_window_errors++;
2156 np->stats.tx_errors++; 2155 dev->stats.tx_errors++;
2157 } 2156 }
2158 pci_unmap_single(np->pci_dev,np->tx_dma[entry], 2157 pci_unmap_single(np->pci_dev,np->tx_dma[entry],
2159 np->tx_skbuff[entry]->len, 2158 np->tx_skbuff[entry]->len,
@@ -2301,7 +2300,7 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2301 "buffers, entry %#08x " 2300 "buffers, entry %#08x "
2302 "status %#08x.\n", dev->name, 2301 "status %#08x.\n", dev->name,
2303 np->cur_rx, desc_status); 2302 np->cur_rx, desc_status);
2304 np->stats.rx_length_errors++; 2303 dev->stats.rx_length_errors++;
2305 2304
2306 /* The RX state machine has probably 2305 /* The RX state machine has probably
2307 * locked up beneath us. Follow the 2306 * locked up beneath us. Follow the
@@ -2321,15 +2320,15 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2321 2320
2322 } else { 2321 } else {
2323 /* There was an error. */ 2322 /* There was an error. */
2324 np->stats.rx_errors++; 2323 dev->stats.rx_errors++;
2325 if (desc_status & (DescRxAbort|DescRxOver)) 2324 if (desc_status & (DescRxAbort|DescRxOver))
2326 np->stats.rx_over_errors++; 2325 dev->stats.rx_over_errors++;
2327 if (desc_status & (DescRxLong|DescRxRunt)) 2326 if (desc_status & (DescRxLong|DescRxRunt))
2328 np->stats.rx_length_errors++; 2327 dev->stats.rx_length_errors++;
2329 if (desc_status & (DescRxInvalid|DescRxAlign)) 2328 if (desc_status & (DescRxInvalid|DescRxAlign))
2330 np->stats.rx_frame_errors++; 2329 dev->stats.rx_frame_errors++;
2331 if (desc_status & DescRxCRC) 2330 if (desc_status & DescRxCRC)
2332 np->stats.rx_crc_errors++; 2331 dev->stats.rx_crc_errors++;
2333 } 2332 }
2334 } else if (pkt_len > np->rx_buf_sz) { 2333 } else if (pkt_len > np->rx_buf_sz) {
2335 /* if this is the tail of a double buffer 2334 /* if this is the tail of a double buffer
@@ -2364,8 +2363,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2364 } 2363 }
2365 skb->protocol = eth_type_trans(skb, dev); 2364 skb->protocol = eth_type_trans(skb, dev);
2366 netif_receive_skb(skb); 2365 netif_receive_skb(skb);
2367 np->stats.rx_packets++; 2366 dev->stats.rx_packets++;
2368 np->stats.rx_bytes += pkt_len; 2367 dev->stats.rx_bytes += pkt_len;
2369 } 2368 }
2370 entry = (++np->cur_rx) % RX_RING_SIZE; 2369 entry = (++np->cur_rx) % RX_RING_SIZE;
2371 np->rx_head_desc = &np->rx_ring[entry]; 2370 np->rx_head_desc = &np->rx_ring[entry];
@@ -2428,17 +2427,17 @@ static void netdev_error(struct net_device *dev, int intr_status)
2428 printk(KERN_NOTICE "%s: Rx status FIFO overrun\n", 2427 printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
2429 dev->name); 2428 dev->name);
2430 } 2429 }
2431 np->stats.rx_fifo_errors++; 2430 dev->stats.rx_fifo_errors++;
2432 np->stats.rx_errors++; 2431 dev->stats.rx_errors++;
2433 } 2432 }
2434 /* Hmmmmm, it's not clear how to recover from PCI faults. */ 2433 /* Hmmmmm, it's not clear how to recover from PCI faults. */
2435 if (intr_status & IntrPCIErr) { 2434 if (intr_status & IntrPCIErr) {
2436 printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, 2435 printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
2437 intr_status & IntrPCIErr); 2436 intr_status & IntrPCIErr);
2438 np->stats.tx_fifo_errors++; 2437 dev->stats.tx_fifo_errors++;
2439 np->stats.tx_errors++; 2438 dev->stats.tx_errors++;
2440 np->stats.rx_fifo_errors++; 2439 dev->stats.rx_fifo_errors++;
2441 np->stats.rx_errors++; 2440 dev->stats.rx_errors++;
2442 } 2441 }
2443 spin_unlock(&np->lock); 2442 spin_unlock(&np->lock);
2444} 2443}
@@ -2446,11 +2445,10 @@ static void netdev_error(struct net_device *dev, int intr_status)
2446static void __get_stats(struct net_device *dev) 2445static void __get_stats(struct net_device *dev)
2447{ 2446{
2448 void __iomem * ioaddr = ns_ioaddr(dev); 2447 void __iomem * ioaddr = ns_ioaddr(dev);
2449 struct netdev_private *np = netdev_priv(dev);
2450 2448
2451 /* The chip only need report frame silently dropped. */ 2449 /* The chip only need report frame silently dropped. */
2452 np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); 2450 dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2453 np->stats.rx_missed_errors += readl(ioaddr + RxMissed); 2451 dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2454} 2452}
2455 2453
2456static struct net_device_stats *get_stats(struct net_device *dev) 2454static struct net_device_stats *get_stats(struct net_device *dev)
@@ -2463,7 +2461,7 @@ static struct net_device_stats *get_stats(struct net_device *dev)
2463 __get_stats(dev); 2461 __get_stats(dev);
2464 spin_unlock_irq(&np->lock); 2462 spin_unlock_irq(&np->lock);
2465 2463
2466 return &np->stats; 2464 return &dev->stats;
2467} 2465}
2468 2466
2469#ifdef CONFIG_NET_POLL_CONTROLLER 2467#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 3a41b6a84a68..12612127a087 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -255,6 +255,19 @@ out_free_rq:
255} 255}
256 256
257static void 257static void
258nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
259{
260
261 netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
262 adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
263 NX_CDRP_CMD_DESTROY_RX_CTX);
264
265 netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
266 adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
267 NX_CDRP_CMD_DESTROY_TX_CTX);
268}
269
270static void
258nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) 271nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
259{ 272{
260 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 273 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -685,7 +698,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
685 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 698 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
686 if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) 699 if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
687 goto done; 700 goto done;
688 701 if (reset_devices)
702 nx_fw_cmd_reset_ctx(adapter);
689 err = nx_fw_cmd_create_rx_ctx(adapter); 703 err = nx_fw_cmd_create_rx_ctx(adapter);
690 if (err) 704 if (err)
691 goto err_out_free; 705 goto err_out_free;
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 20f7c58bd092..b30de24f4a52 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -887,12 +887,19 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
887 struct netxen_adapter *adapter = netdev_priv(netdev); 887 struct netxen_adapter *adapter = netdev_priv(netdev);
888 int hw_lro; 888 int hw_lro;
889 889
890 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) 890 if (data & ~ETH_FLAG_LRO)
891 return -EINVAL; 891 return -EINVAL;
892 892
893 ethtool_op_set_flags(netdev, data); 893 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
894 return -EINVAL;
894 895
895 hw_lro = (data & ETH_FLAG_LRO) ? NETXEN_NIC_LRO_ENABLED : 0; 896 if (data & ETH_FLAG_LRO) {
897 hw_lro = NETXEN_NIC_LRO_ENABLED;
898 netdev->features |= NETIF_F_LRO;
899 } else {
900 hw_lro = 0;
901 netdev->features &= ~NETIF_F_LRO;
902 }
896 903
897 if (netxen_config_hw_lro(adapter, hw_lro)) 904 if (netxen_config_hw_lro(adapter, hw_lro))
898 return -EIO; 905 return -EIO;
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 9bddb5fa7a96..33618edc61f9 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -185,7 +185,6 @@ static void ni52_xmt_int(struct net_device *dev);
185static void ni52_rnr_int(struct net_device *dev); 185static void ni52_rnr_int(struct net_device *dev);
186 186
187struct priv { 187struct priv {
188 struct net_device_stats stats;
189 char __iomem *base; 188 char __iomem *base;
190 char __iomem *mapped; 189 char __iomem *mapped;
191 char __iomem *memtop; 190 char __iomem *memtop;
@@ -972,10 +971,10 @@ static void ni52_rcv_int(struct net_device *dev)
972 memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen); 971 memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
973 skb->protocol = eth_type_trans(skb, dev); 972 skb->protocol = eth_type_trans(skb, dev);
974 netif_rx(skb); 973 netif_rx(skb);
975 p->stats.rx_packets++; 974 dev->stats.rx_packets++;
976 p->stats.rx_bytes += totlen; 975 dev->stats.rx_bytes += totlen;
977 } else 976 } else
978 p->stats.rx_dropped++; 977 dev->stats.rx_dropped++;
979 } else { 978 } else {
980 int rstat; 979 int rstat;
981 /* free all RBD's until RBD_LAST is set */ 980 /* free all RBD's until RBD_LAST is set */
@@ -993,12 +992,12 @@ static void ni52_rcv_int(struct net_device *dev)
993 writew(0, &rbd->status); 992 writew(0, &rbd->status);
994 printk(KERN_ERR "%s: received oversized frame! length: %d\n", 993 printk(KERN_ERR "%s: received oversized frame! length: %d\n",
995 dev->name, totlen); 994 dev->name, totlen);
996 p->stats.rx_dropped++; 995 dev->stats.rx_dropped++;
997 } 996 }
998 } else {/* frame !(ok), only with 'save-bad-frames' */ 997 } else {/* frame !(ok), only with 'save-bad-frames' */
999 printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n", 998 printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
1000 dev->name, status); 999 dev->name, status);
1001 p->stats.rx_errors++; 1000 dev->stats.rx_errors++;
1002 } 1001 }
1003 writeb(0, &p->rfd_top->stat_high); 1002 writeb(0, &p->rfd_top->stat_high);
1004 writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */ 1003 writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
@@ -1043,7 +1042,7 @@ static void ni52_rnr_int(struct net_device *dev)
1043{ 1042{
1044 struct priv *p = netdev_priv(dev); 1043 struct priv *p = netdev_priv(dev);
1045 1044
1046 p->stats.rx_errors++; 1045 dev->stats.rx_errors++;
1047 1046
1048 wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ 1047 wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
1049 writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */ 1048 writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
@@ -1076,29 +1075,29 @@ static void ni52_xmt_int(struct net_device *dev)
1076 printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name); 1075 printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
1077 1076
1078 if (status & STAT_OK) { 1077 if (status & STAT_OK) {
1079 p->stats.tx_packets++; 1078 dev->stats.tx_packets++;
1080 p->stats.collisions += (status & TCMD_MAXCOLLMASK); 1079 dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
1081 } else { 1080 } else {
1082 p->stats.tx_errors++; 1081 dev->stats.tx_errors++;
1083 if (status & TCMD_LATECOLL) { 1082 if (status & TCMD_LATECOLL) {
1084 printk(KERN_ERR "%s: late collision detected.\n", 1083 printk(KERN_ERR "%s: late collision detected.\n",
1085 dev->name); 1084 dev->name);
1086 p->stats.collisions++; 1085 dev->stats.collisions++;
1087 } else if (status & TCMD_NOCARRIER) { 1086 } else if (status & TCMD_NOCARRIER) {
1088 p->stats.tx_carrier_errors++; 1087 dev->stats.tx_carrier_errors++;
1089 printk(KERN_ERR "%s: no carrier detected.\n", 1088 printk(KERN_ERR "%s: no carrier detected.\n",
1090 dev->name); 1089 dev->name);
1091 } else if (status & TCMD_LOSTCTS) 1090 } else if (status & TCMD_LOSTCTS)
1092 printk(KERN_ERR "%s: loss of CTS detected.\n", 1091 printk(KERN_ERR "%s: loss of CTS detected.\n",
1093 dev->name); 1092 dev->name);
1094 else if (status & TCMD_UNDERRUN) { 1093 else if (status & TCMD_UNDERRUN) {
1095 p->stats.tx_fifo_errors++; 1094 dev->stats.tx_fifo_errors++;
1096 printk(KERN_ERR "%s: DMA underrun detected.\n", 1095 printk(KERN_ERR "%s: DMA underrun detected.\n",
1097 dev->name); 1096 dev->name);
1098 } else if (status & TCMD_MAXCOLL) { 1097 } else if (status & TCMD_MAXCOLL) {
1099 printk(KERN_ERR "%s: Max. collisions exceeded.\n", 1098 printk(KERN_ERR "%s: Max. collisions exceeded.\n",
1100 dev->name); 1099 dev->name);
1101 p->stats.collisions += 16; 1100 dev->stats.collisions += 16;
1102 } 1101 }
1103 } 1102 }
1104#if (NUM_XMIT_BUFFS > 1) 1103#if (NUM_XMIT_BUFFS > 1)
@@ -1286,12 +1285,12 @@ static struct net_device_stats *ni52_get_stats(struct net_device *dev)
1286 ovrn = readw(&p->scb->ovrn_errs); 1285 ovrn = readw(&p->scb->ovrn_errs);
1287 writew(0, &p->scb->ovrn_errs); 1286 writew(0, &p->scb->ovrn_errs);
1288 1287
1289 p->stats.rx_crc_errors += crc; 1288 dev->stats.rx_crc_errors += crc;
1290 p->stats.rx_fifo_errors += ovrn; 1289 dev->stats.rx_fifo_errors += ovrn;
1291 p->stats.rx_frame_errors += aln; 1290 dev->stats.rx_frame_errors += aln;
1292 p->stats.rx_dropped += rsc; 1291 dev->stats.rx_dropped += rsc;
1293 1292
1294 return &p->stats; 1293 return &dev->stats;
1295} 1294}
1296 1295
1297/******************************************************** 1296/********************************************************
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 63e8e3893bd6..404f2d552888 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -28,10 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29 29
30#include <linux/io.h> 30#include <linux/io.h>
31
32#ifdef CONFIG_SPARC64
33#include <linux/of_device.h> 31#include <linux/of_device.h>
34#endif
35 32
36#include "niu.h" 33#include "niu.h"
37 34
@@ -3330,10 +3327,12 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3330 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { 3327 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3331 if (p->index == addr) { 3328 if (p->index == addr) {
3332 *link = pp; 3329 *link = pp;
3333 break; 3330 goto found;
3334 } 3331 }
3335 } 3332 }
3333 BUG();
3336 3334
3335found:
3337 return p; 3336 return p;
3338} 3337}
3339 3338
@@ -7920,14 +7919,7 @@ static int niu_phys_id(struct net_device *dev, u32 data)
7920 7919
7921static int niu_set_flags(struct net_device *dev, u32 data) 7920static int niu_set_flags(struct net_device *dev, u32 data)
7922{ 7921{
7923 if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE)) 7922 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7924 return -EOPNOTSUPP;
7925
7926 if (data & ETH_FLAG_RXHASH)
7927 dev->features |= NETIF_F_RXHASH;
7928 else
7929 dev->features &= ~NETIF_F_RXHASH;
7930 return 0;
7931} 7923}
7932 7924
7933static const struct ethtool_ops niu_ethtool_ops = { 7925static const struct ethtool_ops niu_ethtool_ops = {
@@ -9119,12 +9111,12 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9119 if (!int_prop) 9111 if (!int_prop)
9120 return -ENODEV; 9112 return -ENODEV;
9121 9113
9122 for (i = 0; i < op->num_irqs; i++) { 9114 for (i = 0; i < op->archdata.num_irqs; i++) {
9123 ldg_num_map[i] = int_prop[i]; 9115 ldg_num_map[i] = int_prop[i];
9124 np->ldg[i].irq = op->irqs[i]; 9116 np->ldg[i].irq = op->archdata.irqs[i];
9125 } 9117 }
9126 9118
9127 np->num_ldg = op->num_irqs; 9119 np->num_ldg = op->archdata.num_irqs;
9128 9120
9129 return 0; 9121 return 0;
9130#else 9122#else
@@ -10254,14 +10246,14 @@ static int __init niu_init(void)
10254 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 10246 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10255 10247
10256#ifdef CONFIG_SPARC64 10248#ifdef CONFIG_SPARC64
10257 err = of_register_driver(&niu_of_driver, &of_bus_type); 10249 err = of_register_platform_driver(&niu_of_driver);
10258#endif 10250#endif
10259 10251
10260 if (!err) { 10252 if (!err) {
10261 err = pci_register_driver(&niu_pci_driver); 10253 err = pci_register_driver(&niu_pci_driver);
10262#ifdef CONFIG_SPARC64 10254#ifdef CONFIG_SPARC64
10263 if (err) 10255 if (err)
10264 of_unregister_driver(&niu_of_driver); 10256 of_unregister_platform_driver(&niu_of_driver);
10265#endif 10257#endif
10266 } 10258 }
10267 10259
@@ -10272,7 +10264,7 @@ static void __exit niu_exit(void)
10272{ 10264{
10273 pci_unregister_driver(&niu_pci_driver); 10265 pci_unregister_driver(&niu_pci_driver);
10274#ifdef CONFIG_SPARC64 10266#ifdef CONFIG_SPARC64
10275 of_unregister_driver(&niu_of_driver); 10267 of_unregister_platform_driver(&niu_of_driver);
10276#endif 10268#endif
10277} 10269}
10278 10270
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index d6715465f35d..a41fa8ebe05f 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3236,7 +3236,7 @@ struct niu_phy_ops {
3236 int (*link_status)(struct niu *np, int *); 3236 int (*link_status)(struct niu *np, int *);
3237}; 3237};
3238 3238
3239struct of_device; 3239struct platform_device;
3240struct niu { 3240struct niu {
3241 void __iomem *regs; 3241 void __iomem *regs;
3242 struct net_device *dev; 3242 struct net_device *dev;
@@ -3297,7 +3297,7 @@ struct niu {
3297 struct niu_vpd vpd; 3297 struct niu_vpd vpd;
3298 u32 eeprom_len; 3298 u32 eeprom_len;
3299 3299
3300 struct of_device *op; 3300 struct platform_device *op;
3301 void __iomem *vir_regs_1; 3301 void __iomem *vir_regs_1;
3302 void __iomem *vir_regs_2; 3302 void __iomem *vir_regs_2;
3303}; 3303};
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index e88e97cd1b10..5a3488f76b38 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -424,7 +424,6 @@ struct rx_info {
424 424
425 425
426struct ns83820 { 426struct ns83820 {
427 struct net_device_stats stats;
428 u8 __iomem *base; 427 u8 __iomem *base;
429 428
430 struct pci_dev *pci_dev; 429 struct pci_dev *pci_dev;
@@ -918,9 +917,9 @@ static void rx_irq(struct net_device *ndev)
918 if (unlikely(!skb)) 917 if (unlikely(!skb))
919 goto netdev_mangle_me_harder_failed; 918 goto netdev_mangle_me_harder_failed;
920 if (cmdsts & CMDSTS_DEST_MULTI) 919 if (cmdsts & CMDSTS_DEST_MULTI)
921 dev->stats.multicast ++; 920 ndev->stats.multicast++;
922 dev->stats.rx_packets ++; 921 ndev->stats.rx_packets++;
923 dev->stats.rx_bytes += len; 922 ndev->stats.rx_bytes += len;
924 if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) { 923 if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
925 skb->ip_summed = CHECKSUM_UNNECESSARY; 924 skb->ip_summed = CHECKSUM_UNNECESSARY;
926 } else { 925 } else {
@@ -940,7 +939,7 @@ static void rx_irq(struct net_device *ndev)
940#endif 939#endif
941 if (NET_RX_DROP == rx_rc) { 940 if (NET_RX_DROP == rx_rc) {
942netdev_mangle_me_harder_failed: 941netdev_mangle_me_harder_failed:
943 dev->stats.rx_dropped ++; 942 ndev->stats.rx_dropped++;
944 } 943 }
945 } else { 944 } else {
946 kfree_skb(skb); 945 kfree_skb(skb);
@@ -1008,11 +1007,11 @@ static void do_tx_done(struct net_device *ndev)
1008 dma_addr_t addr; 1007 dma_addr_t addr;
1009 1008
1010 if (cmdsts & CMDSTS_ERR) 1009 if (cmdsts & CMDSTS_ERR)
1011 dev->stats.tx_errors ++; 1010 ndev->stats.tx_errors++;
1012 if (cmdsts & CMDSTS_OK) 1011 if (cmdsts & CMDSTS_OK)
1013 dev->stats.tx_packets ++; 1012 ndev->stats.tx_packets++;
1014 if (cmdsts & CMDSTS_OK) 1013 if (cmdsts & CMDSTS_OK)
1015 dev->stats.tx_bytes += cmdsts & 0xffff; 1014 ndev->stats.tx_bytes += cmdsts & 0xffff;
1016 1015
1017 dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", 1016 dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
1018 tx_done_idx, dev->tx_free_idx, cmdsts); 1017 tx_done_idx, dev->tx_free_idx, cmdsts);
@@ -1212,20 +1211,21 @@ again:
1212 1211
1213static void ns83820_update_stats(struct ns83820 *dev) 1212static void ns83820_update_stats(struct ns83820 *dev)
1214{ 1213{
1214 struct net_device *ndev = dev->ndev;
1215 u8 __iomem *base = dev->base; 1215 u8 __iomem *base = dev->base;
1216 1216
1217 /* the DP83820 will freeze counters, so we need to read all of them */ 1217 /* the DP83820 will freeze counters, so we need to read all of them */
1218 dev->stats.rx_errors += readl(base + 0x60) & 0xffff; 1218 ndev->stats.rx_errors += readl(base + 0x60) & 0xffff;
1219 dev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff; 1219 ndev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff;
1220 dev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff; 1220 ndev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff;
1221 dev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff; 1221 ndev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff;
1222 /*dev->stats.rx_symbol_errors +=*/ readl(base + 0x70); 1222 /*ndev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
1223 dev->stats.rx_length_errors += readl(base + 0x74) & 0xffff; 1223 ndev->stats.rx_length_errors += readl(base + 0x74) & 0xffff;
1224 dev->stats.rx_length_errors += readl(base + 0x78) & 0xffff; 1224 ndev->stats.rx_length_errors += readl(base + 0x78) & 0xffff;
1225 /*dev->stats.rx_badopcode_errors += */ readl(base + 0x7c); 1225 /*ndev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
1226 /*dev->stats.rx_pause_count += */ readl(base + 0x80); 1226 /*ndev->stats.rx_pause_count += */ readl(base + 0x80);
1227 /*dev->stats.tx_pause_count += */ readl(base + 0x84); 1227 /*ndev->stats.tx_pause_count += */ readl(base + 0x84);
1228 dev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff; 1228 ndev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff;
1229} 1229}
1230 1230
1231static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) 1231static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
@@ -1237,7 +1237,7 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
1237 ns83820_update_stats(dev); 1237 ns83820_update_stats(dev);
1238 spin_unlock_irq(&dev->misc_lock); 1238 spin_unlock_irq(&dev->misc_lock);
1239 1239
1240 return &dev->stats; 1240 return &ndev->stats;
1241} 1241}
1242 1242
1243/* Let ethtool retrieve info */ 1243/* Let ethtool retrieve info */
@@ -1464,12 +1464,12 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr)
1464 1464
1465 if (unlikely(ISR_RXSOVR & isr)) { 1465 if (unlikely(ISR_RXSOVR & isr)) {
1466 //printk("overrun: rxsovr\n"); 1466 //printk("overrun: rxsovr\n");
1467 dev->stats.rx_fifo_errors ++; 1467 ndev->stats.rx_fifo_errors++;
1468 } 1468 }
1469 1469
1470 if (unlikely(ISR_RXORN & isr)) { 1470 if (unlikely(ISR_RXORN & isr)) {
1471 //printk("overrun: rxorn\n"); 1471 //printk("overrun: rxorn\n");
1472 dev->stats.rx_fifo_errors ++; 1472 ndev->stats.rx_fifo_errors++;
1473 } 1473 }
1474 1474
1475 if ((ISR_RXRCMP & isr) && dev->rx_info.up) 1475 if ((ISR_RXRCMP & isr) && dev->rx_info.up)
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 000e792d57c0..b264f0f45605 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -620,7 +620,7 @@ static int octeon_mgmt_ioctl(struct net_device *netdev,
620 if (!p->phydev) 620 if (!p->phydev)
621 return -EINVAL; 621 return -EINVAL;
622 622
623 return phy_mii_ioctl(p->phydev, if_mii(rq), cmd); 623 return phy_mii_ioctl(p->phydev, rq, cmd);
624} 624}
625 625
626static void octeon_mgmt_adjust_link(struct net_device *netdev) 626static void octeon_mgmt_adjust_link(struct net_device *netdev)
@@ -1067,7 +1067,7 @@ static const struct net_device_ops octeon_mgmt_ops = {
1067#endif 1067#endif
1068}; 1068};
1069 1069
1070static int __init octeon_mgmt_probe(struct platform_device *pdev) 1070static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1071{ 1071{
1072 struct resource *res_irq; 1072 struct resource *res_irq;
1073 struct net_device *netdev; 1073 struct net_device *netdev;
@@ -1124,7 +1124,7 @@ err:
1124 return -ENOENT; 1124 return -ENOENT;
1125} 1125}
1126 1126
1127static int __exit octeon_mgmt_remove(struct platform_device *pdev) 1127static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
1128{ 1128{
1129 struct net_device *netdev = dev_get_drvdata(&pdev->dev); 1129 struct net_device *netdev = dev_get_drvdata(&pdev->dev);
1130 1130
@@ -1139,7 +1139,7 @@ static struct platform_driver octeon_mgmt_driver = {
1139 .owner = THIS_MODULE, 1139 .owner = THIS_MODULE,
1140 }, 1140 },
1141 .probe = octeon_mgmt_probe, 1141 .probe = octeon_mgmt_probe,
1142 .remove = __exit_p(octeon_mgmt_remove), 1142 .remove = __devexit_p(octeon_mgmt_remove),
1143}; 1143};
1144 1144
1145extern void octeon_mdiobus_force_mod_depencency(void); 1145extern void octeon_mdiobus_force_mod_depencency(void);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cecdbbd549ec..4accd83d3dfe 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -685,7 +685,7 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
685} 685}
686 686
687static struct phy_driver bcm5411_driver = { 687static struct phy_driver bcm5411_driver = {
688 .phy_id = 0x00206070, 688 .phy_id = PHY_ID_BCM5411,
689 .phy_id_mask = 0xfffffff0, 689 .phy_id_mask = 0xfffffff0,
690 .name = "Broadcom BCM5411", 690 .name = "Broadcom BCM5411",
691 .features = PHY_GBIT_FEATURES | 691 .features = PHY_GBIT_FEATURES |
@@ -700,7 +700,7 @@ static struct phy_driver bcm5411_driver = {
700}; 700};
701 701
702static struct phy_driver bcm5421_driver = { 702static struct phy_driver bcm5421_driver = {
703 .phy_id = 0x002060e0, 703 .phy_id = PHY_ID_BCM5421,
704 .phy_id_mask = 0xfffffff0, 704 .phy_id_mask = 0xfffffff0,
705 .name = "Broadcom BCM5421", 705 .name = "Broadcom BCM5421",
706 .features = PHY_GBIT_FEATURES | 706 .features = PHY_GBIT_FEATURES |
@@ -715,7 +715,7 @@ static struct phy_driver bcm5421_driver = {
715}; 715};
716 716
717static struct phy_driver bcm5461_driver = { 717static struct phy_driver bcm5461_driver = {
718 .phy_id = 0x002060c0, 718 .phy_id = PHY_ID_BCM5461,
719 .phy_id_mask = 0xfffffff0, 719 .phy_id_mask = 0xfffffff0,
720 .name = "Broadcom BCM5461", 720 .name = "Broadcom BCM5461",
721 .features = PHY_GBIT_FEATURES | 721 .features = PHY_GBIT_FEATURES |
@@ -730,7 +730,7 @@ static struct phy_driver bcm5461_driver = {
730}; 730};
731 731
732static struct phy_driver bcm5464_driver = { 732static struct phy_driver bcm5464_driver = {
733 .phy_id = 0x002060b0, 733 .phy_id = PHY_ID_BCM5464,
734 .phy_id_mask = 0xfffffff0, 734 .phy_id_mask = 0xfffffff0,
735 .name = "Broadcom BCM5464", 735 .name = "Broadcom BCM5464",
736 .features = PHY_GBIT_FEATURES | 736 .features = PHY_GBIT_FEATURES |
@@ -745,7 +745,7 @@ static struct phy_driver bcm5464_driver = {
745}; 745};
746 746
747static struct phy_driver bcm5481_driver = { 747static struct phy_driver bcm5481_driver = {
748 .phy_id = 0x0143bca0, 748 .phy_id = PHY_ID_BCM5481,
749 .phy_id_mask = 0xfffffff0, 749 .phy_id_mask = 0xfffffff0,
750 .name = "Broadcom BCM5481", 750 .name = "Broadcom BCM5481",
751 .features = PHY_GBIT_FEATURES | 751 .features = PHY_GBIT_FEATURES |
@@ -760,7 +760,7 @@ static struct phy_driver bcm5481_driver = {
760}; 760};
761 761
762static struct phy_driver bcm5482_driver = { 762static struct phy_driver bcm5482_driver = {
763 .phy_id = 0x0143bcb0, 763 .phy_id = PHY_ID_BCM5482,
764 .phy_id_mask = 0xfffffff0, 764 .phy_id_mask = 0xfffffff0,
765 .name = "Broadcom BCM5482", 765 .name = "Broadcom BCM5482",
766 .features = PHY_GBIT_FEATURES | 766 .features = PHY_GBIT_FEATURES |
@@ -834,6 +834,21 @@ static struct phy_driver bcmac131_driver = {
834 .driver = { .owner = THIS_MODULE }, 834 .driver = { .owner = THIS_MODULE },
835}; 835};
836 836
837static struct phy_driver bcm5241_driver = {
838 .phy_id = PHY_ID_BCM5241,
839 .phy_id_mask = 0xfffffff0,
840 .name = "Broadcom BCM5241",
841 .features = PHY_BASIC_FEATURES |
842 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
843 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
844 .config_init = brcm_fet_config_init,
845 .config_aneg = genphy_config_aneg,
846 .read_status = genphy_read_status,
847 .ack_interrupt = brcm_fet_ack_interrupt,
848 .config_intr = brcm_fet_config_intr,
849 .driver = { .owner = THIS_MODULE },
850};
851
837static int __init broadcom_init(void) 852static int __init broadcom_init(void)
838{ 853{
839 int ret; 854 int ret;
@@ -868,8 +883,13 @@ static int __init broadcom_init(void)
868 ret = phy_driver_register(&bcmac131_driver); 883 ret = phy_driver_register(&bcmac131_driver);
869 if (ret) 884 if (ret)
870 goto out_ac131; 885 goto out_ac131;
886 ret = phy_driver_register(&bcm5241_driver);
887 if (ret)
888 goto out_5241;
871 return ret; 889 return ret;
872 890
891out_5241:
892 phy_driver_unregister(&bcmac131_driver);
873out_ac131: 893out_ac131:
874 phy_driver_unregister(&bcm57780_driver); 894 phy_driver_unregister(&bcm57780_driver);
875out_57780: 895out_57780:
@@ -894,6 +914,7 @@ out_5411:
894 914
895static void __exit broadcom_exit(void) 915static void __exit broadcom_exit(void)
896{ 916{
917 phy_driver_unregister(&bcm5241_driver);
897 phy_driver_unregister(&bcmac131_driver); 918 phy_driver_unregister(&bcmac131_driver);
898 phy_driver_unregister(&bcm57780_driver); 919 phy_driver_unregister(&bcm57780_driver);
899 phy_driver_unregister(&bcm50610m_driver); 920 phy_driver_unregister(&bcm50610m_driver);
@@ -910,16 +931,17 @@ module_init(broadcom_init);
910module_exit(broadcom_exit); 931module_exit(broadcom_exit);
911 932
912static struct mdio_device_id broadcom_tbl[] = { 933static struct mdio_device_id broadcom_tbl[] = {
913 { 0x00206070, 0xfffffff0 }, 934 { PHY_ID_BCM5411, 0xfffffff0 },
914 { 0x002060e0, 0xfffffff0 }, 935 { PHY_ID_BCM5421, 0xfffffff0 },
915 { 0x002060c0, 0xfffffff0 }, 936 { PHY_ID_BCM5461, 0xfffffff0 },
916 { 0x002060b0, 0xfffffff0 }, 937 { PHY_ID_BCM5464, 0xfffffff0 },
917 { 0x0143bca0, 0xfffffff0 }, 938 { PHY_ID_BCM5482, 0xfffffff0 },
918 { 0x0143bcb0, 0xfffffff0 }, 939 { PHY_ID_BCM5482, 0xfffffff0 },
919 { PHY_ID_BCM50610, 0xfffffff0 }, 940 { PHY_ID_BCM50610, 0xfffffff0 },
920 { PHY_ID_BCM50610M, 0xfffffff0 }, 941 { PHY_ID_BCM50610M, 0xfffffff0 },
921 { PHY_ID_BCM57780, 0xfffffff0 }, 942 { PHY_ID_BCM57780, 0xfffffff0 },
922 { PHY_ID_BCMAC131, 0xfffffff0 }, 943 { PHY_ID_BCMAC131, 0xfffffff0 },
944 { PHY_ID_BCM5241, 0xfffffff0 },
923 { } 945 { }
924}; 946};
925 947
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 439adafeacb1..3f2583f18a39 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -116,6 +116,8 @@ static struct phy_driver ip175c_driver = {
116 .config_init = &ip175c_config_init, 116 .config_init = &ip175c_config_init,
117 .config_aneg = &ip175c_config_aneg, 117 .config_aneg = &ip175c_config_aneg,
118 .read_status = &ip175c_read_status, 118 .read_status = &ip175c_read_status,
119 .suspend = genphy_suspend,
120 .resume = genphy_resume,
119 .driver = { .owner = THIS_MODULE,}, 121 .driver = { .owner = THIS_MODULE,},
120}; 122};
121 123
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 78b74e83ce5d..0101f2bdf400 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -29,6 +29,7 @@
29#include <linux/mii.h> 29#include <linux/mii.h>
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/marvell_phy.h>
32 33
33#include <asm/io.h> 34#include <asm/io.h>
34#include <asm/irq.h> 35#include <asm/irq.h>
@@ -48,8 +49,6 @@
48#define MII_M1145_RGMII_RX_DELAY 0x0080 49#define MII_M1145_RGMII_RX_DELAY 0x0080
49#define MII_M1145_RGMII_TX_DELAY 0x0002 50#define MII_M1145_RGMII_TX_DELAY 0x0002
50 51
51#define M1145_DEV_FLAGS_RESISTANCE 0x00000001
52
53#define MII_M1111_PHY_LED_CONTROL 0x18 52#define MII_M1111_PHY_LED_CONTROL 0x18
54#define MII_M1111_PHY_LED_DIRECT 0x4100 53#define MII_M1111_PHY_LED_DIRECT 0x4100
55#define MII_M1111_PHY_LED_COMBINE 0x411c 54#define MII_M1111_PHY_LED_COMBINE 0x411c
@@ -69,6 +68,15 @@
69#define MII_M1111_COPPER 0 68#define MII_M1111_COPPER 0
70#define MII_M1111_FIBER 1 69#define MII_M1111_FIBER 1
71 70
71#define MII_88E1121_PHY_MSCR_PAGE 2
72#define MII_88E1121_PHY_MSCR_REG 21
73#define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5)
74#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
75#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
76
77#define MII_88EC048_PHY_MSCR1_REG 16
78#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6)
79
72#define MII_88E1121_PHY_LED_CTRL 16 80#define MII_88E1121_PHY_LED_CTRL 16
73#define MII_88E1121_PHY_LED_PAGE 3 81#define MII_88E1121_PHY_LED_PAGE 3
74#define MII_88E1121_PHY_LED_DEF 0x0030 82#define MII_88E1121_PHY_LED_DEF 0x0030
@@ -180,7 +188,30 @@ static int marvell_config_aneg(struct phy_device *phydev)
180 188
181static int m88e1121_config_aneg(struct phy_device *phydev) 189static int m88e1121_config_aneg(struct phy_device *phydev)
182{ 190{
183 int err, temp; 191 int err, oldpage, mscr;
192
193 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
194
195 err = phy_write(phydev, MII_88E1121_PHY_PAGE,
196 MII_88E1121_PHY_MSCR_PAGE);
197 if (err < 0)
198 return err;
199 mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
200 MII_88E1121_PHY_MSCR_DELAY_MASK;
201
202 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
203 mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
204 MII_88E1121_PHY_MSCR_TX_DELAY);
205 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
206 mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
207 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
208 mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
209
210 err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
211 if (err < 0)
212 return err;
213
214 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
184 215
185 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 216 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
186 if (err < 0) 217 if (err < 0)
@@ -191,17 +222,42 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
191 if (err < 0) 222 if (err < 0)
192 return err; 223 return err;
193 224
194 temp = phy_read(phydev, MII_88E1121_PHY_PAGE); 225 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
195 226
196 phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); 227 phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
197 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); 228 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
198 phy_write(phydev, MII_88E1121_PHY_PAGE, temp); 229 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
199 230
200 err = genphy_config_aneg(phydev); 231 err = genphy_config_aneg(phydev);
201 232
202 return err; 233 return err;
203} 234}
204 235
236static int m88ec048_config_aneg(struct phy_device *phydev)
237{
238 int err, oldpage, mscr;
239
240 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
241
242 err = phy_write(phydev, MII_88E1121_PHY_PAGE,
243 MII_88E1121_PHY_MSCR_PAGE);
244 if (err < 0)
245 return err;
246
247 mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG);
248 mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD;
249
250 err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
251 if (err < 0)
252 return err;
253
254 err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
255 if (err < 0)
256 return err;
257
258 return m88e1121_config_aneg(phydev);
259}
260
205static int m88e1111_config_init(struct phy_device *phydev) 261static int m88e1111_config_init(struct phy_device *phydev)
206{ 262{
207 int err; 263 int err;
@@ -350,7 +406,10 @@ static int m88e1118_config_init(struct phy_device *phydev)
350 return err; 406 return err;
351 407
352 /* Adjust LED Control */ 408 /* Adjust LED Control */
353 err = phy_write(phydev, 0x10, 0x021e); 409 if (phydev->dev_flags & MARVELL_PHY_M1118_DNS323_LEDS)
410 err = phy_write(phydev, 0x10, 0x1100);
411 else
412 err = phy_write(phydev, 0x10, 0x021e);
354 if (err < 0) 413 if (err < 0)
355 return err; 414 return err;
356 415
@@ -398,7 +457,7 @@ static int m88e1145_config_init(struct phy_device *phydev)
398 if (err < 0) 457 if (err < 0)
399 return err; 458 return err;
400 459
401 if (phydev->dev_flags & M1145_DEV_FLAGS_RESISTANCE) { 460 if (phydev->dev_flags & MARVELL_PHY_M1145_FLAGS_RESISTANCE) {
402 err = phy_write(phydev, 0x1d, 0x0012); 461 err = phy_write(phydev, 0x1d, 0x0012);
403 if (err < 0) 462 if (err < 0)
404 return err; 463 return err;
@@ -529,8 +588,8 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
529 588
530static struct phy_driver marvell_drivers[] = { 589static struct phy_driver marvell_drivers[] = {
531 { 590 {
532 .phy_id = 0x01410c60, 591 .phy_id = MARVELL_PHY_ID_88E1101,
533 .phy_id_mask = 0xfffffff0, 592 .phy_id_mask = MARVELL_PHY_ID_MASK,
534 .name = "Marvell 88E1101", 593 .name = "Marvell 88E1101",
535 .features = PHY_GBIT_FEATURES, 594 .features = PHY_GBIT_FEATURES,
536 .flags = PHY_HAS_INTERRUPT, 595 .flags = PHY_HAS_INTERRUPT,
@@ -541,8 +600,8 @@ static struct phy_driver marvell_drivers[] = {
541 .driver = { .owner = THIS_MODULE }, 600 .driver = { .owner = THIS_MODULE },
542 }, 601 },
543 { 602 {
544 .phy_id = 0x01410c90, 603 .phy_id = MARVELL_PHY_ID_88E1112,
545 .phy_id_mask = 0xfffffff0, 604 .phy_id_mask = MARVELL_PHY_ID_MASK,
546 .name = "Marvell 88E1112", 605 .name = "Marvell 88E1112",
547 .features = PHY_GBIT_FEATURES, 606 .features = PHY_GBIT_FEATURES,
548 .flags = PHY_HAS_INTERRUPT, 607 .flags = PHY_HAS_INTERRUPT,
@@ -554,8 +613,8 @@ static struct phy_driver marvell_drivers[] = {
554 .driver = { .owner = THIS_MODULE }, 613 .driver = { .owner = THIS_MODULE },
555 }, 614 },
556 { 615 {
557 .phy_id = 0x01410cc0, 616 .phy_id = MARVELL_PHY_ID_88E1111,
558 .phy_id_mask = 0xfffffff0, 617 .phy_id_mask = MARVELL_PHY_ID_MASK,
559 .name = "Marvell 88E1111", 618 .name = "Marvell 88E1111",
560 .features = PHY_GBIT_FEATURES, 619 .features = PHY_GBIT_FEATURES,
561 .flags = PHY_HAS_INTERRUPT, 620 .flags = PHY_HAS_INTERRUPT,
@@ -567,8 +626,8 @@ static struct phy_driver marvell_drivers[] = {
567 .driver = { .owner = THIS_MODULE }, 626 .driver = { .owner = THIS_MODULE },
568 }, 627 },
569 { 628 {
570 .phy_id = 0x01410e10, 629 .phy_id = MARVELL_PHY_ID_88E1118,
571 .phy_id_mask = 0xfffffff0, 630 .phy_id_mask = MARVELL_PHY_ID_MASK,
572 .name = "Marvell 88E1118", 631 .name = "Marvell 88E1118",
573 .features = PHY_GBIT_FEATURES, 632 .features = PHY_GBIT_FEATURES,
574 .flags = PHY_HAS_INTERRUPT, 633 .flags = PHY_HAS_INTERRUPT,
@@ -580,8 +639,8 @@ static struct phy_driver marvell_drivers[] = {
580 .driver = {.owner = THIS_MODULE,}, 639 .driver = {.owner = THIS_MODULE,},
581 }, 640 },
582 { 641 {
583 .phy_id = 0x01410cb0, 642 .phy_id = MARVELL_PHY_ID_88E1121R,
584 .phy_id_mask = 0xfffffff0, 643 .phy_id_mask = MARVELL_PHY_ID_MASK,
585 .name = "Marvell 88E1121R", 644 .name = "Marvell 88E1121R",
586 .features = PHY_GBIT_FEATURES, 645 .features = PHY_GBIT_FEATURES,
587 .flags = PHY_HAS_INTERRUPT, 646 .flags = PHY_HAS_INTERRUPT,
@@ -593,8 +652,21 @@ static struct phy_driver marvell_drivers[] = {
593 .driver = { .owner = THIS_MODULE }, 652 .driver = { .owner = THIS_MODULE },
594 }, 653 },
595 { 654 {
596 .phy_id = 0x01410cd0, 655 .phy_id = MARVELL_PHY_ID_88EC048,
597 .phy_id_mask = 0xfffffff0, 656 .phy_id_mask = MARVELL_PHY_ID_MASK,
657 .name = "Marvell 88EC048",
658 .features = PHY_GBIT_FEATURES,
659 .flags = PHY_HAS_INTERRUPT,
660 .config_aneg = &m88ec048_config_aneg,
661 .read_status = &marvell_read_status,
662 .ack_interrupt = &marvell_ack_interrupt,
663 .config_intr = &marvell_config_intr,
664 .did_interrupt = &m88e1121_did_interrupt,
665 .driver = { .owner = THIS_MODULE },
666 },
667 {
668 .phy_id = MARVELL_PHY_ID_88E1145,
669 .phy_id_mask = MARVELL_PHY_ID_MASK,
598 .name = "Marvell 88E1145", 670 .name = "Marvell 88E1145",
599 .features = PHY_GBIT_FEATURES, 671 .features = PHY_GBIT_FEATURES,
600 .flags = PHY_HAS_INTERRUPT, 672 .flags = PHY_HAS_INTERRUPT,
@@ -606,8 +678,8 @@ static struct phy_driver marvell_drivers[] = {
606 .driver = { .owner = THIS_MODULE }, 678 .driver = { .owner = THIS_MODULE },
607 }, 679 },
608 { 680 {
609 .phy_id = 0x01410e30, 681 .phy_id = MARVELL_PHY_ID_88E1240,
610 .phy_id_mask = 0xfffffff0, 682 .phy_id_mask = MARVELL_PHY_ID_MASK,
611 .name = "Marvell 88E1240", 683 .name = "Marvell 88E1240",
612 .features = PHY_GBIT_FEATURES, 684 .features = PHY_GBIT_FEATURES,
613 .flags = PHY_HAS_INTERRUPT, 685 .flags = PHY_HAS_INTERRUPT,
@@ -657,6 +729,7 @@ static struct mdio_device_id marvell_tbl[] = {
657 { 0x01410cb0, 0xfffffff0 }, 729 { 0x01410cb0, 0xfffffff0 },
658 { 0x01410cd0, 0xfffffff0 }, 730 { 0x01410cd0, 0xfffffff0 },
659 { 0x01410e30, 0xfffffff0 }, 731 { 0x01410e30, 0xfffffff0 },
732 { 0x01410e90, 0xfffffff0 },
660 { } 733 { }
661}; 734};
662 735
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index f443d43edd80..bd12ba941be5 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -85,7 +85,7 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
85 return 0; 85 return 0;
86} 86}
87 87
88static int __init octeon_mdiobus_probe(struct platform_device *pdev) 88static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
89{ 89{
90 struct octeon_mdiobus *bus; 90 struct octeon_mdiobus *bus;
91 union cvmx_smix_en smi_en; 91 union cvmx_smix_en smi_en;
@@ -143,7 +143,7 @@ err:
143 return err; 143 return err;
144} 144}
145 145
146static int __exit octeon_mdiobus_remove(struct platform_device *pdev) 146static int __devexit octeon_mdiobus_remove(struct platform_device *pdev)
147{ 147{
148 struct octeon_mdiobus *bus; 148 struct octeon_mdiobus *bus;
149 union cvmx_smix_en smi_en; 149 union cvmx_smix_en smi_en;
@@ -163,7 +163,7 @@ static struct platform_driver octeon_mdiobus_driver = {
163 .owner = THIS_MODULE, 163 .owner = THIS_MODULE,
164 }, 164 },
165 .probe = octeon_mdiobus_probe, 165 .probe = octeon_mdiobus_probe,
166 .remove = __exit_p(octeon_mdiobus_remove), 166 .remove = __devexit_p(octeon_mdiobus_remove),
167}; 167};
168 168
169void octeon_mdiobus_force_mod_depencency(void) 169void octeon_mdiobus_force_mod_depencency(void)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0692f750c404..8bb7db676a5c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -12,7 +12,8 @@
12 * Free Software Foundation; either version 2 of the License, or (at your 12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 * Support : ksz9021 , vsc8201, ks8001 15 * Support : ksz9021 1000/100/10 phy from Micrel
16 * ks8001, ks8737, ks8721, ks8041, ks8051 100/10 phy
16 */ 17 */
17 18
18#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -20,37 +21,146 @@
20#include <linux/phy.h> 21#include <linux/phy.h>
21 22
22#define PHY_ID_KSZ9021 0x00221611 23#define PHY_ID_KSZ9021 0x00221611
23#define PHY_ID_VSC8201 0x000FC413 24#define PHY_ID_KS8737 0x00221720
25#define PHY_ID_KS8041 0x00221510
26#define PHY_ID_KS8051 0x00221550
27/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
24#define PHY_ID_KS8001 0x0022161A 28#define PHY_ID_KS8001 0x0022161A
25 29
30/* general Interrupt control/status reg in vendor specific block. */
31#define MII_KSZPHY_INTCS 0x1B
32#define KSZPHY_INTCS_JABBER (1 << 15)
33#define KSZPHY_INTCS_RECEIVE_ERR (1 << 14)
34#define KSZPHY_INTCS_PAGE_RECEIVE (1 << 13)
35#define KSZPHY_INTCS_PARELLEL (1 << 12)
36#define KSZPHY_INTCS_LINK_PARTNER_ACK (1 << 11)
37#define KSZPHY_INTCS_LINK_DOWN (1 << 10)
38#define KSZPHY_INTCS_REMOTE_FAULT (1 << 9)
39#define KSZPHY_INTCS_LINK_UP (1 << 8)
40#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
41 KSZPHY_INTCS_LINK_DOWN)
42
43/* general PHY control reg in vendor specific block. */
44#define MII_KSZPHY_CTRL 0x1F
45/* bitmap of PHY register to set interrupt mode */
46#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
47#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
48#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
49
50static int kszphy_ack_interrupt(struct phy_device *phydev)
51{
52 /* bit[7..0] int status, which is a read and clear register. */
53 int rc;
54
55 rc = phy_read(phydev, MII_KSZPHY_INTCS);
56
57 return (rc < 0) ? rc : 0;
58}
59
60static int kszphy_set_interrupt(struct phy_device *phydev)
61{
62 int temp;
63 temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ?
64 KSZPHY_INTCS_ALL : 0;
65 return phy_write(phydev, MII_KSZPHY_INTCS, temp);
66}
67
68static int kszphy_config_intr(struct phy_device *phydev)
69{
70 int temp, rc;
71
72 /* set the interrupt pin active low */
73 temp = phy_read(phydev, MII_KSZPHY_CTRL);
74 temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH;
75 phy_write(phydev, MII_KSZPHY_CTRL, temp);
76 rc = kszphy_set_interrupt(phydev);
77 return rc < 0 ? rc : 0;
78}
79
80static int ksz9021_config_intr(struct phy_device *phydev)
81{
82 int temp, rc;
83
84 /* set the interrupt pin active low */
85 temp = phy_read(phydev, MII_KSZPHY_CTRL);
86 temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH;
87 phy_write(phydev, MII_KSZPHY_CTRL, temp);
88 rc = kszphy_set_interrupt(phydev);
89 return rc < 0 ? rc : 0;
90}
91
92static int ks8737_config_intr(struct phy_device *phydev)
93{
94 int temp, rc;
95
96 /* set the interrupt pin active low */
97 temp = phy_read(phydev, MII_KSZPHY_CTRL);
98 temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH;
99 phy_write(phydev, MII_KSZPHY_CTRL, temp);
100 rc = kszphy_set_interrupt(phydev);
101 return rc < 0 ? rc : 0;
102}
26 103
27static int kszphy_config_init(struct phy_device *phydev) 104static int kszphy_config_init(struct phy_device *phydev)
28{ 105{
29 return 0; 106 return 0;
30} 107}
31 108
109static struct phy_driver ks8737_driver = {
110 .phy_id = PHY_ID_KS8737,
111 .phy_id_mask = 0x00fffff0,
112 .name = "Micrel KS8737",
113 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
114 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
115 .config_init = kszphy_config_init,
116 .config_aneg = genphy_config_aneg,
117 .read_status = genphy_read_status,
118 .ack_interrupt = kszphy_ack_interrupt,
119 .config_intr = ks8737_config_intr,
120 .driver = { .owner = THIS_MODULE,},
121};
122
123static struct phy_driver ks8041_driver = {
124 .phy_id = PHY_ID_KS8041,
125 .phy_id_mask = 0x00fffff0,
126 .name = "Micrel KS8041",
127 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
128 | SUPPORTED_Asym_Pause),
129 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
130 .config_init = kszphy_config_init,
131 .config_aneg = genphy_config_aneg,
132 .read_status = genphy_read_status,
133 .ack_interrupt = kszphy_ack_interrupt,
134 .config_intr = kszphy_config_intr,
135 .driver = { .owner = THIS_MODULE,},
136};
32 137
33static struct phy_driver ks8001_driver = { 138static struct phy_driver ks8051_driver = {
34 .phy_id = PHY_ID_KS8001, 139 .phy_id = PHY_ID_KS8051,
35 .name = "Micrel KS8001",
36 .phy_id_mask = 0x00fffff0, 140 .phy_id_mask = 0x00fffff0,
37 .features = PHY_BASIC_FEATURES, 141 .name = "Micrel KS8051",
38 .flags = PHY_POLL, 142 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
143 | SUPPORTED_Asym_Pause),
144 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
39 .config_init = kszphy_config_init, 145 .config_init = kszphy_config_init,
40 .config_aneg = genphy_config_aneg, 146 .config_aneg = genphy_config_aneg,
41 .read_status = genphy_read_status, 147 .read_status = genphy_read_status,
148 .ack_interrupt = kszphy_ack_interrupt,
149 .config_intr = kszphy_config_intr,
42 .driver = { .owner = THIS_MODULE,}, 150 .driver = { .owner = THIS_MODULE,},
43}; 151};
44 152
45static struct phy_driver vsc8201_driver = { 153static struct phy_driver ks8001_driver = {
46 .phy_id = PHY_ID_VSC8201, 154 .phy_id = PHY_ID_KS8001,
47 .name = "Micrel VSC8201", 155 .name = "Micrel KS8001 or KS8721",
48 .phy_id_mask = 0x00fffff0, 156 .phy_id_mask = 0x00fffff0,
49 .features = PHY_BASIC_FEATURES, 157 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
50 .flags = PHY_POLL, 158 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
51 .config_init = kszphy_config_init, 159 .config_init = kszphy_config_init,
52 .config_aneg = genphy_config_aneg, 160 .config_aneg = genphy_config_aneg,
53 .read_status = genphy_read_status, 161 .read_status = genphy_read_status,
162 .ack_interrupt = kszphy_ack_interrupt,
163 .config_intr = kszphy_config_intr,
54 .driver = { .owner = THIS_MODULE,}, 164 .driver = { .owner = THIS_MODULE,},
55}; 165};
56 166
@@ -58,11 +168,14 @@ static struct phy_driver ksz9021_driver = {
58 .phy_id = PHY_ID_KSZ9021, 168 .phy_id = PHY_ID_KSZ9021,
59 .phy_id_mask = 0x000fff10, 169 .phy_id_mask = 0x000fff10,
60 .name = "Micrel KSZ9021 Gigabit PHY", 170 .name = "Micrel KSZ9021 Gigabit PHY",
61 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause, 171 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
62 .flags = PHY_POLL, 172 | SUPPORTED_Asym_Pause),
173 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
63 .config_init = kszphy_config_init, 174 .config_init = kszphy_config_init,
64 .config_aneg = genphy_config_aneg, 175 .config_aneg = genphy_config_aneg,
65 .read_status = genphy_read_status, 176 .read_status = genphy_read_status,
177 .ack_interrupt = kszphy_ack_interrupt,
178 .config_intr = ksz9021_config_intr,
66 .driver = { .owner = THIS_MODULE, }, 179 .driver = { .owner = THIS_MODULE, },
67}; 180};
68 181
@@ -73,17 +186,29 @@ static int __init ksphy_init(void)
73 ret = phy_driver_register(&ks8001_driver); 186 ret = phy_driver_register(&ks8001_driver);
74 if (ret) 187 if (ret)
75 goto err1; 188 goto err1;
76 ret = phy_driver_register(&vsc8201_driver); 189
190 ret = phy_driver_register(&ksz9021_driver);
77 if (ret) 191 if (ret)
78 goto err2; 192 goto err2;
79 193
80 ret = phy_driver_register(&ksz9021_driver); 194 ret = phy_driver_register(&ks8737_driver);
81 if (ret) 195 if (ret)
82 goto err3; 196 goto err3;
197 ret = phy_driver_register(&ks8041_driver);
198 if (ret)
199 goto err4;
200 ret = phy_driver_register(&ks8051_driver);
201 if (ret)
202 goto err5;
203
83 return 0; 204 return 0;
84 205
206err5:
207 phy_driver_unregister(&ks8041_driver);
208err4:
209 phy_driver_unregister(&ks8737_driver);
85err3: 210err3:
86 phy_driver_unregister(&vsc8201_driver); 211 phy_driver_unregister(&ksz9021_driver);
87err2: 212err2:
88 phy_driver_unregister(&ks8001_driver); 213 phy_driver_unregister(&ks8001_driver);
89err1: 214err1:
@@ -93,8 +218,10 @@ err1:
93static void __exit ksphy_exit(void) 218static void __exit ksphy_exit(void)
94{ 219{
95 phy_driver_unregister(&ks8001_driver); 220 phy_driver_unregister(&ks8001_driver);
96 phy_driver_unregister(&vsc8201_driver); 221 phy_driver_unregister(&ks8737_driver);
97 phy_driver_unregister(&ksz9021_driver); 222 phy_driver_unregister(&ksz9021_driver);
223 phy_driver_unregister(&ks8041_driver);
224 phy_driver_unregister(&ks8051_driver);
98} 225}
99 226
100module_init(ksphy_init); 227module_init(ksphy_init);
@@ -106,8 +233,10 @@ MODULE_LICENSE("GPL");
106 233
107static struct mdio_device_id micrel_tbl[] = { 234static struct mdio_device_id micrel_tbl[] = {
108 { PHY_ID_KSZ9021, 0x000fff10 }, 235 { PHY_ID_KSZ9021, 0x000fff10 },
109 { PHY_ID_VSC8201, 0x00fffff0 },
110 { PHY_ID_KS8001, 0x00fffff0 }, 236 { PHY_ID_KS8001, 0x00fffff0 },
237 { PHY_ID_KS8737, 0x00fffff0 },
238 { PHY_ID_KS8041, 0x00fffff0 },
239 { PHY_ID_KS8051, 0x00fffff0 },
111 { } 240 { }
112}; 241};
113 242
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 64be4664ccab..5130db8f5c4e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -309,8 +309,9 @@ EXPORT_SYMBOL(phy_ethtool_gset);
309 * current state. Use at own risk. 309 * current state. Use at own risk.
310 */ 310 */
311int phy_mii_ioctl(struct phy_device *phydev, 311int phy_mii_ioctl(struct phy_device *phydev,
312 struct mii_ioctl_data *mii_data, int cmd) 312 struct ifreq *ifr, int cmd)
313{ 313{
314 struct mii_ioctl_data *mii_data = if_mii(ifr);
314 u16 val = mii_data->val_in; 315 u16 val = mii_data->val_in;
315 316
316 switch (cmd) { 317 switch (cmd) {
@@ -360,6 +361,11 @@ int phy_mii_ioctl(struct phy_device *phydev,
360 } 361 }
361 break; 362 break;
362 363
364 case SIOCSHWTSTAMP:
365 if (phydev->drv->hwtstamp)
366 return phydev->drv->hwtstamp(phydev, ifr);
367 /* fall through */
368
363 default: 369 default:
364 return -EOPNOTSUPP; 370 return -EOPNOTSUPP;
365 } 371 }
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1a99bb244106..c0761197c07e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -460,6 +460,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
460 } 460 }
461 461
462 phydev->attached_dev = dev; 462 phydev->attached_dev = dev;
463 dev->phydev = phydev;
463 464
464 phydev->dev_flags = flags; 465 phydev->dev_flags = flags;
465 466
@@ -513,6 +514,7 @@ EXPORT_SYMBOL(phy_attach);
513 */ 514 */
514void phy_detach(struct phy_device *phydev) 515void phy_detach(struct phy_device *phydev)
515{ 516{
517 phydev->attached_dev->phydev = NULL;
516 phydev->attached_dev = NULL; 518 phydev->attached_dev = NULL;
517 519
518 /* If the device had no specific driver before (i.e. - it 520 /* If the device had no specific driver before (i.e. - it
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1b2c29150202..6695a51e09e9 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -40,7 +40,6 @@
40#include <linux/if_arp.h> 40#include <linux/if_arp.h>
41#include <linux/ip.h> 41#include <linux/ip.h>
42#include <linux/tcp.h> 42#include <linux/tcp.h>
43#include <linux/smp_lock.h>
44#include <linux/spinlock.h> 43#include <linux/spinlock.h>
45#include <linux/rwsem.h> 44#include <linux/rwsem.h>
46#include <linux/stddef.h> 45#include <linux/stddef.h>
@@ -69,7 +68,6 @@
69 68
70#define MPHDRLEN 6 /* multilink protocol header length */ 69#define MPHDRLEN 6 /* multilink protocol header length */
71#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ 70#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
72#define MIN_FRAG_SIZE 64
73 71
74/* 72/*
75 * An instance of /dev/ppp can be associated with either a ppp 73 * An instance of /dev/ppp can be associated with either a ppp
@@ -181,6 +179,7 @@ struct channel {
181 * channel.downl. 179 * channel.downl.
182 */ 180 */
183 181
182static DEFINE_MUTEX(ppp_mutex);
184static atomic_t ppp_unit_count = ATOMIC_INIT(0); 183static atomic_t ppp_unit_count = ATOMIC_INIT(0);
185static atomic_t channel_count = ATOMIC_INIT(0); 184static atomic_t channel_count = ATOMIC_INIT(0);
186 185
@@ -363,7 +362,6 @@ static const int npindex_to_ethertype[NUM_NP] = {
363 */ 362 */
364static int ppp_open(struct inode *inode, struct file *file) 363static int ppp_open(struct inode *inode, struct file *file)
365{ 364{
366 cycle_kernel_lock();
367 /* 365 /*
368 * This could (should?) be enforced by the permissions on /dev/ppp. 366 * This could (should?) be enforced by the permissions on /dev/ppp.
369 */ 367 */
@@ -539,14 +537,9 @@ static int get_filter(void __user *arg, struct sock_filter **p)
539 } 537 }
540 538
541 len = uprog.len * sizeof(struct sock_filter); 539 len = uprog.len * sizeof(struct sock_filter);
542 code = kmalloc(len, GFP_KERNEL); 540 code = memdup_user(uprog.filter, len);
543 if (code == NULL) 541 if (IS_ERR(code))
544 return -ENOMEM; 542 return PTR_ERR(code);
545
546 if (copy_from_user(code, uprog.filter, len)) {
547 kfree(code);
548 return -EFAULT;
549 }
550 543
551 err = sk_chk_filter(code, uprog.len); 544 err = sk_chk_filter(code, uprog.len);
552 if (err) { 545 if (err) {
@@ -588,7 +581,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
588 * this fd and reopening /dev/ppp. 581 * this fd and reopening /dev/ppp.
589 */ 582 */
590 err = -EINVAL; 583 err = -EINVAL;
591 lock_kernel(); 584 mutex_lock(&ppp_mutex);
592 if (pf->kind == INTERFACE) { 585 if (pf->kind == INTERFACE) {
593 ppp = PF_TO_PPP(pf); 586 ppp = PF_TO_PPP(pf);
594 if (file == ppp->owner) 587 if (file == ppp->owner)
@@ -600,7 +593,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
600 } else 593 } else
601 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", 594 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
602 atomic_long_read(&file->f_count)); 595 atomic_long_read(&file->f_count));
603 unlock_kernel(); 596 mutex_unlock(&ppp_mutex);
604 return err; 597 return err;
605 } 598 }
606 599
@@ -608,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
608 struct channel *pch; 601 struct channel *pch;
609 struct ppp_channel *chan; 602 struct ppp_channel *chan;
610 603
611 lock_kernel(); 604 mutex_lock(&ppp_mutex);
612 pch = PF_TO_CHANNEL(pf); 605 pch = PF_TO_CHANNEL(pf);
613 606
614 switch (cmd) { 607 switch (cmd) {
@@ -630,7 +623,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
630 err = chan->ops->ioctl(chan, cmd, arg); 623 err = chan->ops->ioctl(chan, cmd, arg);
631 up_read(&pch->chan_sem); 624 up_read(&pch->chan_sem);
632 } 625 }
633 unlock_kernel(); 626 mutex_unlock(&ppp_mutex);
634 return err; 627 return err;
635 } 628 }
636 629
@@ -640,7 +633,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
640 return -EINVAL; 633 return -EINVAL;
641 } 634 }
642 635
643 lock_kernel(); 636 mutex_lock(&ppp_mutex);
644 ppp = PF_TO_PPP(pf); 637 ppp = PF_TO_PPP(pf);
645 switch (cmd) { 638 switch (cmd) {
646 case PPPIOCSMRU: 639 case PPPIOCSMRU:
@@ -788,7 +781,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
788 default: 781 default:
789 err = -ENOTTY; 782 err = -ENOTTY;
790 } 783 }
791 unlock_kernel(); 784 mutex_unlock(&ppp_mutex);
792 return err; 785 return err;
793} 786}
794 787
@@ -801,7 +794,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
801 struct ppp_net *pn; 794 struct ppp_net *pn;
802 int __user *p = (int __user *)arg; 795 int __user *p = (int __user *)arg;
803 796
804 lock_kernel(); 797 mutex_lock(&ppp_mutex);
805 switch (cmd) { 798 switch (cmd) {
806 case PPPIOCNEWUNIT: 799 case PPPIOCNEWUNIT:
807 /* Create a new ppp unit */ 800 /* Create a new ppp unit */
@@ -852,7 +845,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
852 default: 845 default:
853 err = -ENOTTY; 846 err = -ENOTTY;
854 } 847 }
855 unlock_kernel(); 848 mutex_unlock(&ppp_mutex);
856 return err; 849 return err;
857} 850}
858 851
@@ -1933,9 +1926,9 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1933 /* If the queue is getting long, don't wait any longer for packets 1926 /* If the queue is getting long, don't wait any longer for packets
1934 before the start of the queue. */ 1927 before the start of the queue. */
1935 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { 1928 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
1936 struct sk_buff *skb = skb_peek(&ppp->mrq); 1929 struct sk_buff *mskb = skb_peek(&ppp->mrq);
1937 if (seq_before(ppp->minseq, skb->sequence)) 1930 if (seq_before(ppp->minseq, mskb->sequence))
1938 ppp->minseq = skb->sequence; 1931 ppp->minseq = mskb->sequence;
1939 } 1932 }
1940 1933
1941 /* Pull completed packets off the queue and receive them. */ 1934 /* Pull completed packets off the queue and receive them. */
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 805b64d1e893..344ef330e123 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) 89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) 90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
91 91
92static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
93static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 92static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
94 93
95static const struct proto_ops pppoe_ops; 94static const struct proto_ops pppoe_ops;
@@ -949,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
949 948
950abort: 949abort:
951 kfree_skb(skb); 950 kfree_skb(skb);
952 return 1; 951 return 0;
953} 952}
954 953
955/************************************************************************ 954/************************************************************************
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
index 0a88b535197a..f7e51b7d7049 100644
--- a/drivers/net/ps3_gelic_wireless.h
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -74,7 +74,7 @@ struct gelic_eurus_common_cfg {
74 u16 bss_type; /* infra or adhoc */ 74 u16 bss_type; /* infra or adhoc */
75 u16 auth_method; /* shared key or open */ 75 u16 auth_method; /* shared key or open */
76 u16 op_mode; /* B/G */ 76 u16 op_mode; /* B/G */
77} __attribute__((packed)); 77} __packed;
78 78
79 79
80/* for GELIC_EURUS_CMD_WEP_CFG */ 80/* for GELIC_EURUS_CMD_WEP_CFG */
@@ -88,7 +88,7 @@ struct gelic_eurus_wep_cfg {
88 /* all fields are big endian */ 88 /* all fields are big endian */
89 u16 security; 89 u16 security;
90 u8 key[4][16]; 90 u8 key[4][16];
91} __attribute__((packed)); 91} __packed;
92 92
93/* for GELIC_EURUS_CMD_WPA_CFG */ 93/* for GELIC_EURUS_CMD_WPA_CFG */
94enum gelic_eurus_wpa_security { 94enum gelic_eurus_wpa_security {
@@ -120,7 +120,7 @@ struct gelic_eurus_wpa_cfg {
120 u16 security; 120 u16 security;
121 u16 psk_type; /* psk key encoding type */ 121 u16 psk_type; /* psk key encoding type */
122 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */ 122 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
123} __attribute__((packed)); 123} __packed;
124 124
125/* for GELIC_EURUS_CMD_{START,GET}_SCAN */ 125/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
126enum gelic_eurus_scan_capability { 126enum gelic_eurus_scan_capability {
@@ -171,7 +171,7 @@ struct gelic_eurus_scan_info {
171 __be32 reserved3; 171 __be32 reserved3;
172 __be32 reserved4; 172 __be32 reserved4;
173 u8 elements[0]; /* ie */ 173 u8 elements[0]; /* ie */
174} __attribute__ ((packed)); 174} __packed;
175 175
176/* the hypervisor returns bbs up to 16 */ 176/* the hypervisor returns bbs up to 16 */
177#define GELIC_EURUS_MAX_SCAN (16) 177#define GELIC_EURUS_MAX_SCAN (16)
@@ -193,7 +193,7 @@ struct gelic_wl_scan_info {
193struct gelic_eurus_rssi_info { 193struct gelic_eurus_rssi_info {
194 /* big endian */ 194 /* big endian */
195 __be16 rssi; 195 __be16 rssi;
196} __attribute__ ((packed)); 196} __packed;
197 197
198 198
199/* for 'stat' member of gelic_wl_info */ 199/* for 'stat' member of gelic_wl_info */
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 54ebb65ada18..6168a130f33f 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -5,6 +5,8 @@
5 * See LICENSE.qla3xxx for copyright and licensing details. 5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
8#include <linux/kernel.h> 10#include <linux/kernel.h>
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/types.h> 12#include <linux/types.h>
@@ -36,14 +38,16 @@
36 38
37#include "qla3xxx.h" 39#include "qla3xxx.h"
38 40
39#define DRV_NAME "qla3xxx" 41#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver" 42#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.03.00-k5" 43#define DRV_VERSION "v2.03.00-k5"
42#define PFX DRV_NAME " "
43 44
44static const char ql3xxx_driver_name[] = DRV_NAME; 45static const char ql3xxx_driver_name[] = DRV_NAME;
45static const char ql3xxx_driver_version[] = DRV_VERSION; 46static const char ql3xxx_driver_version[] = DRV_VERSION;
46 47
48#define TIMED_OUT_MSG \
49"Timed out waiting for management port to get free before issuing command\n"
50
47MODULE_AUTHOR("QLogic Corporation"); 51MODULE_AUTHOR("QLogic Corporation");
48MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); 52MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
49MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
@@ -73,24 +77,24 @@ MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
73/* 77/*
74 * These are the known PHY's which are used 78 * These are the known PHY's which are used
75 */ 79 */
76typedef enum { 80enum PHY_DEVICE_TYPE {
77 PHY_TYPE_UNKNOWN = 0, 81 PHY_TYPE_UNKNOWN = 0,
78 PHY_VITESSE_VSC8211, 82 PHY_VITESSE_VSC8211,
79 PHY_AGERE_ET1011C, 83 PHY_AGERE_ET1011C,
80 MAX_PHY_DEV_TYPES 84 MAX_PHY_DEV_TYPES
81} PHY_DEVICE_et; 85};
82 86
83typedef struct { 87struct PHY_DEVICE_INFO {
84 PHY_DEVICE_et phyDevice; 88 const enum PHY_DEVICE_TYPE phyDevice;
85 u32 phyIdOUI; 89 const u32 phyIdOUI;
86 u16 phyIdModel; 90 const u16 phyIdModel;
87 char *name; 91 const char *name;
88} PHY_DEVICE_INFO_t; 92};
89 93
90static const PHY_DEVICE_INFO_t PHY_DEVICES[] = 94static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
91 {{PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, 95 {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
92 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, 96 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
93 {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, 97 {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
94}; 98};
95 99
96 100
@@ -100,7 +104,8 @@ static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
100static int ql_sem_spinlock(struct ql3_adapter *qdev, 104static int ql_sem_spinlock(struct ql3_adapter *qdev,
101 u32 sem_mask, u32 sem_bits) 105 u32 sem_mask, u32 sem_bits)
102{ 106{
103 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 107 struct ql3xxx_port_registers __iomem *port_regs =
108 qdev->mem_map_registers;
104 u32 value; 109 u32 value;
105 unsigned int seconds = 3; 110 unsigned int seconds = 3;
106 111
@@ -111,20 +116,22 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
111 if ((value & (sem_mask >> 16)) == sem_bits) 116 if ((value & (sem_mask >> 16)) == sem_bits)
112 return 0; 117 return 0;
113 ssleep(1); 118 ssleep(1);
114 } while(--seconds); 119 } while (--seconds);
115 return -1; 120 return -1;
116} 121}
117 122
118static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) 123static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
119{ 124{
120 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 125 struct ql3xxx_port_registers __iomem *port_regs =
126 qdev->mem_map_registers;
121 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); 127 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
122 readl(&port_regs->CommonRegs.semaphoreReg); 128 readl(&port_regs->CommonRegs.semaphoreReg);
123} 129}
124 130
125static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) 131static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
126{ 132{
127 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 133 struct ql3xxx_port_registers __iomem *port_regs =
134 qdev->mem_map_registers;
128 u32 value; 135 u32 value;
129 136
130 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); 137 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
@@ -139,32 +146,28 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
139{ 146{
140 int i = 0; 147 int i = 0;
141 148
142 while (1) { 149 while (i < 10) {
143 if (!ql_sem_lock(qdev, 150 if (i)
144 QL_DRVR_SEM_MASK, 151 ssleep(1);
145 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 152
146 * 2) << 1)) { 153 if (ql_sem_lock(qdev,
147 if (i < 10) { 154 QL_DRVR_SEM_MASK,
148 ssleep(1); 155 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
149 i++; 156 * 2) << 1)) {
150 } else { 157 netdev_printk(KERN_DEBUG, qdev->ndev,
151 printk(KERN_ERR PFX "%s: Timed out waiting for " 158 "driver lock acquired\n");
152 "driver lock...\n",
153 qdev->ndev->name);
154 return 0;
155 }
156 } else {
157 printk(KERN_DEBUG PFX
158 "%s: driver lock acquired.\n",
159 qdev->ndev->name);
160 return 1; 159 return 1;
161 } 160 }
162 } 161 }
162
163 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
164 return 0;
163} 165}
164 166
165static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) 167static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
166{ 168{
167 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 169 struct ql3xxx_port_registers __iomem *port_regs =
170 qdev->mem_map_registers;
168 171
169 writel(((ISP_CONTROL_NP_MASK << 16) | page), 172 writel(((ISP_CONTROL_NP_MASK << 16) | page),
170 &port_regs->CommonRegs.ispControlStatus); 173 &port_regs->CommonRegs.ispControlStatus);
@@ -172,8 +175,7 @@ static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
172 qdev->current_page = page; 175 qdev->current_page = page;
173} 176}
174 177
175static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, 178static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
176 u32 __iomem * reg)
177{ 179{
178 u32 value; 180 u32 value;
179 unsigned long hw_flags; 181 unsigned long hw_flags;
@@ -185,8 +187,7 @@ static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
185 return value; 187 return value;
186} 188}
187 189
188static u32 ql_read_common_reg(struct ql3_adapter *qdev, 190static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
189 u32 __iomem * reg)
190{ 191{
191 return readl(reg); 192 return readl(reg);
192} 193}
@@ -199,7 +200,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
199 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 200 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
200 201
201 if (qdev->current_page != 0) 202 if (qdev->current_page != 0)
202 ql_set_register_page(qdev,0); 203 ql_set_register_page(qdev, 0);
203 value = readl(reg); 204 value = readl(reg);
204 205
205 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 206 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -209,7 +210,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
209static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) 210static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
210{ 211{
211 if (qdev->current_page != 0) 212 if (qdev->current_page != 0)
212 ql_set_register_page(qdev,0); 213 ql_set_register_page(qdev, 0);
213 return readl(reg); 214 return readl(reg);
214} 215}
215 216
@@ -243,7 +244,7 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
243 u32 __iomem *reg, u32 value) 244 u32 __iomem *reg, u32 value)
244{ 245{
245 if (qdev->current_page != 0) 246 if (qdev->current_page != 0)
246 ql_set_register_page(qdev,0); 247 ql_set_register_page(qdev, 0);
247 writel(value, reg); 248 writel(value, reg);
248 readl(reg); 249 readl(reg);
249} 250}
@@ -255,7 +256,7 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
255 u32 __iomem *reg, u32 value) 256 u32 __iomem *reg, u32 value)
256{ 257{
257 if (qdev->current_page != 1) 258 if (qdev->current_page != 1)
258 ql_set_register_page(qdev,1); 259 ql_set_register_page(qdev, 1);
259 writel(value, reg); 260 writel(value, reg);
260 readl(reg); 261 readl(reg);
261} 262}
@@ -267,14 +268,15 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
267 u32 __iomem *reg, u32 value) 268 u32 __iomem *reg, u32 value)
268{ 269{
269 if (qdev->current_page != 2) 270 if (qdev->current_page != 2)
270 ql_set_register_page(qdev,2); 271 ql_set_register_page(qdev, 2);
271 writel(value, reg); 272 writel(value, reg);
272 readl(reg); 273 readl(reg);
273} 274}
274 275
275static void ql_disable_interrupts(struct ql3_adapter *qdev) 276static void ql_disable_interrupts(struct ql3_adapter *qdev)
276{ 277{
277 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 278 struct ql3xxx_port_registers __iomem *port_regs =
279 qdev->mem_map_registers;
278 280
279 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, 281 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
280 (ISP_IMR_ENABLE_INT << 16)); 282 (ISP_IMR_ENABLE_INT << 16));
@@ -283,7 +285,8 @@ static void ql_disable_interrupts(struct ql3_adapter *qdev)
283 285
284static void ql_enable_interrupts(struct ql3_adapter *qdev) 286static void ql_enable_interrupts(struct ql3_adapter *qdev)
285{ 287{
286 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 288 struct ql3xxx_port_registers __iomem *port_regs =
289 qdev->mem_map_registers;
287 290
288 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, 291 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
289 ((0xff << 16) | ISP_IMR_ENABLE_INT)); 292 ((0xff << 16) | ISP_IMR_ENABLE_INT));
@@ -308,8 +311,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
308 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 311 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
309 qdev->lrg_buffer_len); 312 qdev->lrg_buffer_len);
310 if (unlikely(!lrg_buf_cb->skb)) { 313 if (unlikely(!lrg_buf_cb->skb)) {
311 printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n", 314 netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
312 qdev->ndev->name);
313 qdev->lrg_buf_skb_check++; 315 qdev->lrg_buf_skb_check++;
314 } else { 316 } else {
315 /* 317 /*
@@ -323,9 +325,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
323 QL_HEADER_SPACE, 325 QL_HEADER_SPACE,
324 PCI_DMA_FROMDEVICE); 326 PCI_DMA_FROMDEVICE);
325 err = pci_dma_mapping_error(qdev->pdev, map); 327 err = pci_dma_mapping_error(qdev->pdev, map);
326 if(err) { 328 if (err) {
327 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 329 netdev_err(qdev->ndev,
328 qdev->ndev->name, err); 330 "PCI mapping failed with error: %d\n",
331 err);
329 dev_kfree_skb(lrg_buf_cb->skb); 332 dev_kfree_skb(lrg_buf_cb->skb);
330 lrg_buf_cb->skb = NULL; 333 lrg_buf_cb->skb = NULL;
331 334
@@ -350,10 +353,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
350static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter 353static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
351 *qdev) 354 *qdev)
352{ 355{
353 struct ql_rcv_buf_cb *lrg_buf_cb; 356 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
354 357
355 if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) { 358 if (lrg_buf_cb != NULL) {
356 if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL) 359 qdev->lrg_buf_free_head = lrg_buf_cb->next;
360 if (qdev->lrg_buf_free_head == NULL)
357 qdev->lrg_buf_free_tail = NULL; 361 qdev->lrg_buf_free_tail = NULL;
358 qdev->lrg_buf_free_count--; 362 qdev->lrg_buf_free_count--;
359 } 363 }
@@ -374,13 +378,13 @@ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
374static void fm93c56a_select(struct ql3_adapter *qdev) 378static void fm93c56a_select(struct ql3_adapter *qdev)
375{ 379{
376 struct ql3xxx_port_registers __iomem *port_regs = 380 struct ql3xxx_port_registers __iomem *port_regs =
377 qdev->mem_map_registers; 381 qdev->mem_map_registers;
382 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
378 383
379 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
380 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
381 ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 386 ql_write_nvram_reg(qdev, spir,
382 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 387 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
383 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
384} 388}
385 389
386/* 390/*
@@ -393,51 +397,40 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
393 u32 dataBit; 397 u32 dataBit;
394 u32 previousBit; 398 u32 previousBit;
395 struct ql3xxx_port_registers __iomem *port_regs = 399 struct ql3xxx_port_registers __iomem *port_regs =
396 qdev->mem_map_registers; 400 qdev->mem_map_registers;
401 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
397 402
398 /* Clock in a zero, then do the start bit */ 403 /* Clock in a zero, then do the start bit */
399 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 404 ql_write_nvram_reg(qdev, spir,
400 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 405 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
401 AUBURN_EEPROM_DO_1); 406 AUBURN_EEPROM_DO_1));
402 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 407 ql_write_nvram_reg(qdev, spir,
403 ISP_NVRAM_MASK | qdev-> 408 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
404 eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 409 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
405 AUBURN_EEPROM_CLK_RISE); 410 ql_write_nvram_reg(qdev, spir,
406 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 411 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
407 ISP_NVRAM_MASK | qdev-> 412 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
408 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
409 AUBURN_EEPROM_CLK_FALL);
410 413
411 mask = 1 << (FM93C56A_CMD_BITS - 1); 414 mask = 1 << (FM93C56A_CMD_BITS - 1);
412 /* Force the previous data bit to be different */ 415 /* Force the previous data bit to be different */
413 previousBit = 0xffff; 416 previousBit = 0xffff;
414 for (i = 0; i < FM93C56A_CMD_BITS; i++) { 417 for (i = 0; i < FM93C56A_CMD_BITS; i++) {
415 dataBit = 418 dataBit = (cmd & mask)
416 (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; 419 ? AUBURN_EEPROM_DO_1
420 : AUBURN_EEPROM_DO_0;
417 if (previousBit != dataBit) { 421 if (previousBit != dataBit) {
418 /* 422 /* If the bit changed, change the DO state to match */
419 * If the bit changed, then change the DO state to 423 ql_write_nvram_reg(qdev, spir,
420 * match 424 (ISP_NVRAM_MASK |
421 */ 425 qdev->eeprom_cmd_data | dataBit));
422 ql_write_nvram_reg(qdev,
423 &port_regs->CommonRegs.
424 serialPortInterfaceReg,
425 ISP_NVRAM_MASK | qdev->
426 eeprom_cmd_data | dataBit);
427 previousBit = dataBit; 426 previousBit = dataBit;
428 } 427 }
429 ql_write_nvram_reg(qdev, 428 ql_write_nvram_reg(qdev, spir,
430 &port_regs->CommonRegs. 429 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
431 serialPortInterfaceReg, 430 dataBit | AUBURN_EEPROM_CLK_RISE));
432 ISP_NVRAM_MASK | qdev-> 431 ql_write_nvram_reg(qdev, spir,
433 eeprom_cmd_data | dataBit | 432 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
434 AUBURN_EEPROM_CLK_RISE); 433 dataBit | AUBURN_EEPROM_CLK_FALL));
435 ql_write_nvram_reg(qdev,
436 &port_regs->CommonRegs.
437 serialPortInterfaceReg,
438 ISP_NVRAM_MASK | qdev->
439 eeprom_cmd_data | dataBit |
440 AUBURN_EEPROM_CLK_FALL);
441 cmd = cmd << 1; 434 cmd = cmd << 1;
442 } 435 }
443 436
@@ -445,33 +438,24 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
445 /* Force the previous data bit to be different */ 438 /* Force the previous data bit to be different */
446 previousBit = 0xffff; 439 previousBit = 0xffff;
447 for (i = 0; i < addrBits; i++) { 440 for (i = 0; i < addrBits; i++) {
448 dataBit = 441 dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
449 (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : 442 : AUBURN_EEPROM_DO_0;
450 AUBURN_EEPROM_DO_0;
451 if (previousBit != dataBit) { 443 if (previousBit != dataBit) {
452 /* 444 /*
453 * If the bit changed, then change the DO state to 445 * If the bit changed, then change the DO state to
454 * match 446 * match
455 */ 447 */
456 ql_write_nvram_reg(qdev, 448 ql_write_nvram_reg(qdev, spir,
457 &port_regs->CommonRegs. 449 (ISP_NVRAM_MASK |
458 serialPortInterfaceReg, 450 qdev->eeprom_cmd_data | dataBit));
459 ISP_NVRAM_MASK | qdev->
460 eeprom_cmd_data | dataBit);
461 previousBit = dataBit; 451 previousBit = dataBit;
462 } 452 }
463 ql_write_nvram_reg(qdev, 453 ql_write_nvram_reg(qdev, spir,
464 &port_regs->CommonRegs. 454 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
465 serialPortInterfaceReg, 455 dataBit | AUBURN_EEPROM_CLK_RISE));
466 ISP_NVRAM_MASK | qdev-> 456 ql_write_nvram_reg(qdev, spir,
467 eeprom_cmd_data | dataBit | 457 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
468 AUBURN_EEPROM_CLK_RISE); 458 dataBit | AUBURN_EEPROM_CLK_FALL));
469 ql_write_nvram_reg(qdev,
470 &port_regs->CommonRegs.
471 serialPortInterfaceReg,
472 ISP_NVRAM_MASK | qdev->
473 eeprom_cmd_data | dataBit |
474 AUBURN_EEPROM_CLK_FALL);
475 eepromAddr = eepromAddr << 1; 459 eepromAddr = eepromAddr << 1;
476 } 460 }
477} 461}
@@ -482,10 +466,11 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
482static void fm93c56a_deselect(struct ql3_adapter *qdev) 466static void fm93c56a_deselect(struct ql3_adapter *qdev)
483{ 467{
484 struct ql3xxx_port_registers __iomem *port_regs = 468 struct ql3xxx_port_registers __iomem *port_regs =
485 qdev->mem_map_registers; 469 qdev->mem_map_registers;
470 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
471
486 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
487 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
488 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
489} 474}
490 475
491/* 476/*
@@ -497,29 +482,23 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
497 u32 data = 0; 482 u32 data = 0;
498 u32 dataBit; 483 u32 dataBit;
499 struct ql3xxx_port_registers __iomem *port_regs = 484 struct ql3xxx_port_registers __iomem *port_regs =
500 qdev->mem_map_registers; 485 qdev->mem_map_registers;
486 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
501 487
502 /* Read the data bits */ 488 /* Read the data bits */
503 /* The first bit is a dummy. Clock right over it. */ 489 /* The first bit is a dummy. Clock right over it. */
504 for (i = 0; i < dataBits; i++) { 490 for (i = 0; i < dataBits; i++) {
505 ql_write_nvram_reg(qdev, 491 ql_write_nvram_reg(qdev, spir,
506 &port_regs->CommonRegs. 492 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
507 serialPortInterfaceReg, 493 AUBURN_EEPROM_CLK_RISE);
508 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 494 ql_write_nvram_reg(qdev, spir,
509 AUBURN_EEPROM_CLK_RISE); 495 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
510 ql_write_nvram_reg(qdev, 496 AUBURN_EEPROM_CLK_FALL);
511 &port_regs->CommonRegs. 497 dataBit = (ql_read_common_reg(qdev, spir) &
512 serialPortInterfaceReg, 498 AUBURN_EEPROM_DI_1) ? 1 : 0;
513 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
514 AUBURN_EEPROM_CLK_FALL);
515 dataBit =
516 (ql_read_common_reg
517 (qdev,
518 &port_regs->CommonRegs.
519 serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
520 data = (data << 1) | dataBit; 499 data = (data << 1) | dataBit;
521 } 500 }
522 *value = (u16) data; 501 *value = (u16)data;
523} 502}
524 503
525/* 504/*
@@ -551,13 +530,12 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
551 530
552 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 531 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
553 532
554 pEEPROMData = (u16 *) & qdev->nvram_data; 533 pEEPROMData = (u16 *)&qdev->nvram_data;
555 qdev->eeprom_cmd_data = 0; 534 qdev->eeprom_cmd_data = 0;
556 if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, 535 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
557 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 536 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
558 2) << 10)) { 537 2) << 10)) {
559 printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", 538 pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
560 __func__);
561 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 539 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
562 return -1; 540 return -1;
563 } 541 }
@@ -570,8 +548,8 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
570 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); 548 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
571 549
572 if (checksum != 0) { 550 if (checksum != 0) {
573 printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", 551 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
574 qdev->ndev->name, checksum); 552 checksum);
575 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 553 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
576 return -1; 554 return -1;
577 } 555 }
@@ -587,7 +565,7 @@ static const u32 PHYAddr[2] = {
587static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) 565static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
588{ 566{
589 struct ql3xxx_port_registers __iomem *port_regs = 567 struct ql3xxx_port_registers __iomem *port_regs =
590 qdev->mem_map_registers; 568 qdev->mem_map_registers;
591 u32 temp; 569 u32 temp;
592 int count = 1000; 570 int count = 1000;
593 571
@@ -604,7 +582,7 @@ static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
604static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) 582static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
605{ 583{
606 struct ql3xxx_port_registers __iomem *port_regs = 584 struct ql3xxx_port_registers __iomem *port_regs =
607 qdev->mem_map_registers; 585 qdev->mem_map_registers;
608 u32 scanControl; 586 u32 scanControl;
609 587
610 if (qdev->numPorts > 1) { 588 if (qdev->numPorts > 1) {
@@ -632,7 +610,7 @@ static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
632{ 610{
633 u8 ret; 611 u8 ret;
634 struct ql3xxx_port_registers __iomem *port_regs = 612 struct ql3xxx_port_registers __iomem *port_regs =
635 qdev->mem_map_registers; 613 qdev->mem_map_registers;
636 614
637 /* See if scan mode is enabled before we turn it off */ 615 /* See if scan mode is enabled before we turn it off */
638 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & 616 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
@@ -662,17 +640,13 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
662 u16 regAddr, u16 value, u32 phyAddr) 640 u16 regAddr, u16 value, u32 phyAddr)
663{ 641{
664 struct ql3xxx_port_registers __iomem *port_regs = 642 struct ql3xxx_port_registers __iomem *port_regs =
665 qdev->mem_map_registers; 643 qdev->mem_map_registers;
666 u8 scanWasEnabled; 644 u8 scanWasEnabled;
667 645
668 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 646 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
669 647
670 if (ql_wait_for_mii_ready(qdev)) { 648 if (ql_wait_for_mii_ready(qdev)) {
671 if (netif_msg_link(qdev)) 649 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
672 printk(KERN_WARNING PFX
673 "%s Timed out waiting for management port to "
674 "get free before issuing command.\n",
675 qdev->ndev->name);
676 return -1; 650 return -1;
677 } 651 }
678 652
@@ -683,11 +657,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
683 657
684 /* Wait for write to complete 9/10/04 SJP */ 658 /* Wait for write to complete 9/10/04 SJP */
685 if (ql_wait_for_mii_ready(qdev)) { 659 if (ql_wait_for_mii_ready(qdev)) {
686 if (netif_msg_link(qdev)) 660 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
687 printk(KERN_WARNING PFX
688 "%s: Timed out waiting for management port to "
689 "get free before issuing command.\n",
690 qdev->ndev->name);
691 return -1; 661 return -1;
692 } 662 }
693 663
@@ -698,21 +668,17 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
698} 668}
699 669
700static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, 670static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
701 u16 * value, u32 phyAddr) 671 u16 *value, u32 phyAddr)
702{ 672{
703 struct ql3xxx_port_registers __iomem *port_regs = 673 struct ql3xxx_port_registers __iomem *port_regs =
704 qdev->mem_map_registers; 674 qdev->mem_map_registers;
705 u8 scanWasEnabled; 675 u8 scanWasEnabled;
706 u32 temp; 676 u32 temp;
707 677
708 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 678 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
709 679
710 if (ql_wait_for_mii_ready(qdev)) { 680 if (ql_wait_for_mii_ready(qdev)) {
711 if (netif_msg_link(qdev)) 681 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
712 printk(KERN_WARNING PFX
713 "%s: Timed out waiting for management port to "
714 "get free before issuing command.\n",
715 qdev->ndev->name);
716 return -1; 682 return -1;
717 } 683 }
718 684
@@ -727,11 +693,7 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
727 693
728 /* Wait for the read to complete */ 694 /* Wait for the read to complete */
729 if (ql_wait_for_mii_ready(qdev)) { 695 if (ql_wait_for_mii_ready(qdev)) {
730 if (netif_msg_link(qdev)) 696 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
731 printk(KERN_WARNING PFX
732 "%s: Timed out waiting for management port to "
733 "get free after issuing command.\n",
734 qdev->ndev->name);
735 return -1; 697 return -1;
736 } 698 }
737 699
@@ -747,16 +709,12 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
747static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) 709static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
748{ 710{
749 struct ql3xxx_port_registers __iomem *port_regs = 711 struct ql3xxx_port_registers __iomem *port_regs =
750 qdev->mem_map_registers; 712 qdev->mem_map_registers;
751 713
752 ql_mii_disable_scan_mode(qdev); 714 ql_mii_disable_scan_mode(qdev);
753 715
754 if (ql_wait_for_mii_ready(qdev)) { 716 if (ql_wait_for_mii_ready(qdev)) {
755 if (netif_msg_link(qdev)) 717 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
756 printk(KERN_WARNING PFX
757 "%s: Timed out waiting for management port to "
758 "get free before issuing command.\n",
759 qdev->ndev->name);
760 return -1; 718 return -1;
761 } 719 }
762 720
@@ -767,11 +725,7 @@ static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
767 725
768 /* Wait for write to complete. */ 726 /* Wait for write to complete. */
769 if (ql_wait_for_mii_ready(qdev)) { 727 if (ql_wait_for_mii_ready(qdev)) {
770 if (netif_msg_link(qdev)) 728 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
771 printk(KERN_WARNING PFX
772 "%s: Timed out waiting for management port to "
773 "get free before issuing command.\n",
774 qdev->ndev->name);
775 return -1; 729 return -1;
776 } 730 }
777 731
@@ -784,16 +738,12 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
784{ 738{
785 u32 temp; 739 u32 temp;
786 struct ql3xxx_port_registers __iomem *port_regs = 740 struct ql3xxx_port_registers __iomem *port_regs =
787 qdev->mem_map_registers; 741 qdev->mem_map_registers;
788 742
789 ql_mii_disable_scan_mode(qdev); 743 ql_mii_disable_scan_mode(qdev);
790 744
791 if (ql_wait_for_mii_ready(qdev)) { 745 if (ql_wait_for_mii_ready(qdev)) {
792 if (netif_msg_link(qdev)) 746 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
793 printk(KERN_WARNING PFX
794 "%s: Timed out waiting for management port to "
795 "get free before issuing command.\n",
796 qdev->ndev->name);
797 return -1; 747 return -1;
798 } 748 }
799 749
@@ -808,11 +758,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
808 758
809 /* Wait for the read to complete */ 759 /* Wait for the read to complete */
810 if (ql_wait_for_mii_ready(qdev)) { 760 if (ql_wait_for_mii_ready(qdev)) {
811 if (netif_msg_link(qdev)) 761 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
812 printk(KERN_WARNING PFX
813 "%s: Timed out waiting for management port to "
814 "get free before issuing command.\n",
815 qdev->ndev->name);
816 return -1; 762 return -1;
817 } 763 }
818 764
@@ -898,7 +844,7 @@ static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
898 844
899static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) 845static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
900{ 846{
901 printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name); 847 netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
902 /* power down device bit 11 = 1 */ 848 /* power down device bit 11 = 1 */
903 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); 849 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
904 /* enable diagnostic mode bit 2 = 1 */ 850 /* enable diagnostic mode bit 2 = 1 */
@@ -918,7 +864,8 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
918 /* point to hidden reg 0x2806 */ 864 /* point to hidden reg 0x2806 */
919 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); 865 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
920 /* Write new PHYAD w/bit 5 set */ 866 /* Write new PHYAD w/bit 5 set */
921 ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); 867 ql_mii_write_reg_ex(qdev, 0x11,
868 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
922 /* 869 /*
923 * Disable diagnostic mode bit 2 = 0 870 * Disable diagnostic mode bit 2 = 0
924 * Power up device bit 11 = 0 871 * Power up device bit 11 = 0
@@ -929,21 +876,19 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
929 ql_mii_write_reg(qdev, 0x1c, 0xfaf0); 876 ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
930} 877}
931 878
932static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, 879static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
933 u16 phyIdReg0, u16 phyIdReg1) 880 u16 phyIdReg0, u16 phyIdReg1)
934{ 881{
935 PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; 882 enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
936 u32 oui; 883 u32 oui;
937 u16 model; 884 u16 model;
938 int i; 885 int i;
939 886
940 if (phyIdReg0 == 0xffff) { 887 if (phyIdReg0 == 0xffff)
941 return result; 888 return result;
942 }
943 889
944 if (phyIdReg1 == 0xffff) { 890 if (phyIdReg1 == 0xffff)
945 return result; 891 return result;
946 }
947 892
948 /* oui is split between two registers */ 893 /* oui is split between two registers */
949 oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); 894 oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
@@ -951,15 +896,13 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
951 model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; 896 model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
952 897
953 /* Scan table for this PHY */ 898 /* Scan table for this PHY */
954 for(i = 0; i < MAX_PHY_DEV_TYPES; i++) { 899 for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
955 if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) 900 if ((oui == PHY_DEVICES[i].phyIdOUI) &&
956 { 901 (model == PHY_DEVICES[i].phyIdModel)) {
902 netdev_info(qdev->ndev, "Phy: %s\n",
903 PHY_DEVICES[i].name);
957 result = PHY_DEVICES[i].phyDevice; 904 result = PHY_DEVICES[i].phyDevice;
958 905 break;
959 printk(KERN_INFO "%s: Phy: %s\n",
960 qdev->ndev->name, PHY_DEVICES[i].name);
961
962 break;
963 } 906 }
964 } 907 }
965 908
@@ -970,9 +913,8 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev)
970{ 913{
971 u16 reg; 914 u16 reg;
972 915
973 switch(qdev->phyType) { 916 switch (qdev->phyType) {
974 case PHY_AGERE_ET1011C: 917 case PHY_AGERE_ET1011C: {
975 {
976 if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) 918 if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
977 return 0; 919 return 0;
978 920
@@ -980,20 +922,20 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev)
980 break; 922 break;
981 } 923 }
982 default: 924 default:
983 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) 925 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
984 return 0; 926 return 0;
985 927
986 reg = (((reg & 0x18) >> 3) & 3); 928 reg = (((reg & 0x18) >> 3) & 3);
987 } 929 }
988 930
989 switch(reg) { 931 switch (reg) {
990 case 2: 932 case 2:
991 return SPEED_1000; 933 return SPEED_1000;
992 case 1: 934 case 1:
993 return SPEED_100; 935 return SPEED_100;
994 case 0: 936 case 0:
995 return SPEED_10; 937 return SPEED_10;
996 default: 938 default:
997 return -1; 939 return -1;
998 } 940 }
999} 941}
@@ -1002,17 +944,15 @@ static int ql_is_full_dup(struct ql3_adapter *qdev)
1002{ 944{
1003 u16 reg; 945 u16 reg;
1004 946
1005 switch(qdev->phyType) { 947 switch (qdev->phyType) {
1006 case PHY_AGERE_ET1011C: 948 case PHY_AGERE_ET1011C: {
1007 {
1008 if (ql_mii_read_reg(qdev, 0x1A, &reg)) 949 if (ql_mii_read_reg(qdev, 0x1A, &reg))
1009 return 0; 950 return 0;
1010 951
1011 return ((reg & 0x0080) && (reg & 0x1000)) != 0; 952 return ((reg & 0x0080) && (reg & 0x1000)) != 0;
1012 } 953 }
1013 case PHY_VITESSE_VSC8211: 954 case PHY_VITESSE_VSC8211:
1014 default: 955 default: {
1015 {
1016 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) 956 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
1017 return 0; 957 return 0;
1018 return (reg & PHY_AUX_DUPLEX_STAT) != 0; 958 return (reg & PHY_AUX_DUPLEX_STAT) != 0;
@@ -1040,17 +980,15 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1040 980
1041 /* Determine the PHY we are using by reading the ID's */ 981 /* Determine the PHY we are using by reading the ID's */
1042 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); 982 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
1043 if(err != 0) { 983 if (err != 0) {
1044 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", 984 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
1045 qdev->ndev->name); 985 return err;
1046 return err;
1047 } 986 }
1048 987
1049 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); 988 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
1050 if(err != 0) { 989 if (err != 0) {
1051 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", 990 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
1052 qdev->ndev->name); 991 return err;
1053 return err;
1054 } 992 }
1055 993
1056 /* Check if we have a Agere PHY */ 994 /* Check if we have a Agere PHY */
@@ -1058,24 +996,22 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1058 996
1059 /* Determine which MII address we should be using 997 /* Determine which MII address we should be using
1060 determined by the index of the card */ 998 determined by the index of the card */
1061 if (qdev->mac_index == 0) { 999 if (qdev->mac_index == 0)
1062 miiAddr = MII_AGERE_ADDR_1; 1000 miiAddr = MII_AGERE_ADDR_1;
1063 } else { 1001 else
1064 miiAddr = MII_AGERE_ADDR_2; 1002 miiAddr = MII_AGERE_ADDR_2;
1065 }
1066 1003
1067 err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); 1004 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1068 if(err != 0) { 1005 if (err != 0) {
1069 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", 1006 netdev_err(qdev->ndev,
1070 qdev->ndev->name); 1007 "Could not read from reg PHY_ID_0_REG after Agere detected\n");
1071 return err; 1008 return err;
1072 } 1009 }
1073 1010
1074 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); 1011 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1075 if(err != 0) { 1012 if (err != 0) {
1076 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", 1013 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1077 qdev->ndev->name); 1014 return err;
1078 return err;
1079 } 1015 }
1080 1016
1081 /* We need to remember to initialize the Agere PHY */ 1017 /* We need to remember to initialize the Agere PHY */
@@ -1090,7 +1026,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1090 /* need this here so address gets changed */ 1026 /* need this here so address gets changed */
1091 phyAgereSpecificInit(qdev, miiAddr); 1027 phyAgereSpecificInit(qdev, miiAddr);
1092 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { 1028 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1093 printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); 1029 netdev_err(qdev->ndev, "PHY is unknown\n");
1094 return -EIO; 1030 return -EIO;
1095 } 1031 }
1096 1032
@@ -1103,7 +1039,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1103static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) 1039static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1104{ 1040{
1105 struct ql3xxx_port_registers __iomem *port_regs = 1041 struct ql3xxx_port_registers __iomem *port_regs =
1106 qdev->mem_map_registers; 1042 qdev->mem_map_registers;
1107 u32 value; 1043 u32 value;
1108 1044
1109 if (enable) 1045 if (enable)
@@ -1123,7 +1059,7 @@ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1123static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) 1059static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1124{ 1060{
1125 struct ql3xxx_port_registers __iomem *port_regs = 1061 struct ql3xxx_port_registers __iomem *port_regs =
1126 qdev->mem_map_registers; 1062 qdev->mem_map_registers;
1127 u32 value; 1063 u32 value;
1128 1064
1129 if (enable) 1065 if (enable)
@@ -1143,7 +1079,7 @@ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1143static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) 1079static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1144{ 1080{
1145 struct ql3xxx_port_registers __iomem *port_regs = 1081 struct ql3xxx_port_registers __iomem *port_regs =
1146 qdev->mem_map_registers; 1082 qdev->mem_map_registers;
1147 u32 value; 1083 u32 value;
1148 1084
1149 if (enable) 1085 if (enable)
@@ -1163,7 +1099,7 @@ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1163static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) 1099static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1164{ 1100{
1165 struct ql3xxx_port_registers __iomem *port_regs = 1101 struct ql3xxx_port_registers __iomem *port_regs =
1166 qdev->mem_map_registers; 1102 qdev->mem_map_registers;
1167 u32 value; 1103 u32 value;
1168 1104
1169 if (enable) 1105 if (enable)
@@ -1183,7 +1119,7 @@ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1183static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) 1119static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1184{ 1120{
1185 struct ql3xxx_port_registers __iomem *port_regs = 1121 struct ql3xxx_port_registers __iomem *port_regs =
1186 qdev->mem_map_registers; 1122 qdev->mem_map_registers;
1187 u32 value; 1123 u32 value;
1188 1124
1189 if (enable) 1125 if (enable)
@@ -1205,7 +1141,7 @@ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1205static int ql_is_fiber(struct ql3_adapter *qdev) 1141static int ql_is_fiber(struct ql3_adapter *qdev)
1206{ 1142{
1207 struct ql3xxx_port_registers __iomem *port_regs = 1143 struct ql3xxx_port_registers __iomem *port_regs =
1208 qdev->mem_map_registers; 1144 qdev->mem_map_registers;
1209 u32 bitToCheck = 0; 1145 u32 bitToCheck = 0;
1210 u32 temp; 1146 u32 temp;
1211 1147
@@ -1235,7 +1171,7 @@ static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1235static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) 1171static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1236{ 1172{
1237 struct ql3xxx_port_registers __iomem *port_regs = 1173 struct ql3xxx_port_registers __iomem *port_regs =
1238 qdev->mem_map_registers; 1174 qdev->mem_map_registers;
1239 u32 bitToCheck = 0; 1175 u32 bitToCheck = 0;
1240 u32 temp; 1176 u32 temp;
1241 1177
@@ -1250,18 +1186,11 @@ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1250 1186
1251 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1187 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1252 if (temp & bitToCheck) { 1188 if (temp & bitToCheck) {
1253 if (netif_msg_link(qdev)) 1189 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1254 printk(KERN_INFO PFX
1255 "%s: Auto-Negotiate complete.\n",
1256 qdev->ndev->name);
1257 return 1; 1190 return 1;
1258 } else {
1259 if (netif_msg_link(qdev))
1260 printk(KERN_WARNING PFX
1261 "%s: Auto-Negotiate incomplete.\n",
1262 qdev->ndev->name);
1263 return 0;
1264 } 1191 }
1192 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1193 return 0;
1265} 1194}
1266 1195
1267/* 1196/*
@@ -1278,7 +1207,7 @@ static int ql_is_neg_pause(struct ql3_adapter *qdev)
1278static int ql_auto_neg_error(struct ql3_adapter *qdev) 1207static int ql_auto_neg_error(struct ql3_adapter *qdev)
1279{ 1208{
1280 struct ql3xxx_port_registers __iomem *port_regs = 1209 struct ql3xxx_port_registers __iomem *port_regs =
1281 qdev->mem_map_registers; 1210 qdev->mem_map_registers;
1282 u32 bitToCheck = 0; 1211 u32 bitToCheck = 0;
1283 u32 temp; 1212 u32 temp;
1284 1213
@@ -1316,7 +1245,7 @@ static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1316static int ql_link_down_detect(struct ql3_adapter *qdev) 1245static int ql_link_down_detect(struct ql3_adapter *qdev)
1317{ 1246{
1318 struct ql3xxx_port_registers __iomem *port_regs = 1247 struct ql3xxx_port_registers __iomem *port_regs =
1319 qdev->mem_map_registers; 1248 qdev->mem_map_registers;
1320 u32 bitToCheck = 0; 1249 u32 bitToCheck = 0;
1321 u32 temp; 1250 u32 temp;
1322 1251
@@ -1340,7 +1269,7 @@ static int ql_link_down_detect(struct ql3_adapter *qdev)
1340static int ql_link_down_detect_clear(struct ql3_adapter *qdev) 1269static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1341{ 1270{
1342 struct ql3xxx_port_registers __iomem *port_regs = 1271 struct ql3xxx_port_registers __iomem *port_regs =
1343 qdev->mem_map_registers; 1272 qdev->mem_map_registers;
1344 1273
1345 switch (qdev->mac_index) { 1274 switch (qdev->mac_index) {
1346 case 0: 1275 case 0:
@@ -1370,7 +1299,7 @@ static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1370static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) 1299static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1371{ 1300{
1372 struct ql3xxx_port_registers __iomem *port_regs = 1301 struct ql3xxx_port_registers __iomem *port_regs =
1373 qdev->mem_map_registers; 1302 qdev->mem_map_registers;
1374 u32 bitToCheck = 0; 1303 u32 bitToCheck = 0;
1375 u32 temp; 1304 u32 temp;
1376 1305
@@ -1387,16 +1316,13 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1387 1316
1388 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1317 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1389 if (temp & bitToCheck) { 1318 if (temp & bitToCheck) {
1390 if (netif_msg_link(qdev)) 1319 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1391 printk(KERN_DEBUG PFX 1320 "not link master\n");
1392 "%s: is not link master.\n", qdev->ndev->name);
1393 return 0; 1321 return 0;
1394 } else {
1395 if (netif_msg_link(qdev))
1396 printk(KERN_DEBUG PFX
1397 "%s: is link master.\n", qdev->ndev->name);
1398 return 1;
1399 } 1322 }
1323
1324 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1325 return 1;
1400} 1326}
1401 1327
1402static void ql_phy_reset_ex(struct ql3_adapter *qdev) 1328static void ql_phy_reset_ex(struct ql3_adapter *qdev)
@@ -1410,19 +1336,20 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1410 u16 reg; 1336 u16 reg;
1411 u16 portConfiguration; 1337 u16 portConfiguration;
1412 1338
1413 if(qdev->phyType == PHY_AGERE_ET1011C) { 1339 if (qdev->phyType == PHY_AGERE_ET1011C)
1414 /* turn off external loopback */
1415 ql_mii_write_reg(qdev, 0x13, 0x0000); 1340 ql_mii_write_reg(qdev, 0x13, 0x0000);
1416 } 1341 /* turn off external loopback */
1417 1342
1418 if(qdev->mac_index == 0) 1343 if (qdev->mac_index == 0)
1419 portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration; 1344 portConfiguration =
1345 qdev->nvram_data.macCfg_port0.portConfiguration;
1420 else 1346 else
1421 portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration; 1347 portConfiguration =
1348 qdev->nvram_data.macCfg_port1.portConfiguration;
1422 1349
1423 /* Some HBA's in the field are set to 0 and they need to 1350 /* Some HBA's in the field are set to 0 and they need to
1424 be reinterpreted with a default value */ 1351 be reinterpreted with a default value */
1425 if(portConfiguration == 0) 1352 if (portConfiguration == 0)
1426 portConfiguration = PORT_CONFIG_DEFAULT; 1353 portConfiguration = PORT_CONFIG_DEFAULT;
1427 1354
1428 /* Set the 1000 advertisements */ 1355 /* Set the 1000 advertisements */
@@ -1430,8 +1357,8 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1430 PHYAddr[qdev->mac_index]); 1357 PHYAddr[qdev->mac_index]);
1431 reg &= ~PHY_GIG_ALL_PARAMS; 1358 reg &= ~PHY_GIG_ALL_PARAMS;
1432 1359
1433 if(portConfiguration & PORT_CONFIG_1000MB_SPEED) { 1360 if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
1434 if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) 1361 if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
1435 reg |= PHY_GIG_ADV_1000F; 1362 reg |= PHY_GIG_ADV_1000F;
1436 else 1363 else
1437 reg |= PHY_GIG_ADV_1000H; 1364 reg |= PHY_GIG_ADV_1000H;
@@ -1445,29 +1372,27 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1445 PHYAddr[qdev->mac_index]); 1372 PHYAddr[qdev->mac_index]);
1446 reg &= ~PHY_NEG_ALL_PARAMS; 1373 reg &= ~PHY_NEG_ALL_PARAMS;
1447 1374
1448 if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) 1375 if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
1449 reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; 1376 reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
1450 1377
1451 if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { 1378 if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
1452 if(portConfiguration & PORT_CONFIG_100MB_SPEED) 1379 if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1453 reg |= PHY_NEG_ADV_100F; 1380 reg |= PHY_NEG_ADV_100F;
1454 1381
1455 if(portConfiguration & PORT_CONFIG_10MB_SPEED) 1382 if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1456 reg |= PHY_NEG_ADV_10F; 1383 reg |= PHY_NEG_ADV_10F;
1457 } 1384 }
1458 1385
1459 if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { 1386 if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
1460 if(portConfiguration & PORT_CONFIG_100MB_SPEED) 1387 if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1461 reg |= PHY_NEG_ADV_100H; 1388 reg |= PHY_NEG_ADV_100H;
1462 1389
1463 if(portConfiguration & PORT_CONFIG_10MB_SPEED) 1390 if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1464 reg |= PHY_NEG_ADV_10H; 1391 reg |= PHY_NEG_ADV_10H;
1465 } 1392 }
1466 1393
1467 if(portConfiguration & 1394 if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
1468 PORT_CONFIG_1000MB_SPEED) {
1469 reg |= 1; 1395 reg |= 1;
1470 }
1471 1396
1472 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, 1397 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1473 PHYAddr[qdev->mac_index]); 1398 PHYAddr[qdev->mac_index]);
@@ -1492,7 +1417,7 @@ static void ql_phy_init_ex(struct ql3_adapter *qdev)
1492static u32 ql_get_link_state(struct ql3_adapter *qdev) 1417static u32 ql_get_link_state(struct ql3_adapter *qdev)
1493{ 1418{
1494 struct ql3xxx_port_registers __iomem *port_regs = 1419 struct ql3xxx_port_registers __iomem *port_regs =
1495 qdev->mem_map_registers; 1420 qdev->mem_map_registers;
1496 u32 bitToCheck = 0; 1421 u32 bitToCheck = 0;
1497 u32 temp, linkState; 1422 u32 temp, linkState;
1498 1423
@@ -1504,22 +1429,22 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev)
1504 bitToCheck = PORT_STATUS_UP1; 1429 bitToCheck = PORT_STATUS_UP1;
1505 break; 1430 break;
1506 } 1431 }
1432
1507 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1433 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1508 if (temp & bitToCheck) { 1434 if (temp & bitToCheck)
1509 linkState = LS_UP; 1435 linkState = LS_UP;
1510 } else { 1436 else
1511 linkState = LS_DOWN; 1437 linkState = LS_DOWN;
1512 } 1438
1513 return linkState; 1439 return linkState;
1514} 1440}
1515 1441
1516static int ql_port_start(struct ql3_adapter *qdev) 1442static int ql_port_start(struct ql3_adapter *qdev)
1517{ 1443{
1518 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1444 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1519 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1445 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1520 2) << 7)) { 1446 2) << 7)) {
1521 printk(KERN_ERR "%s: Could not get hw lock for GIO\n", 1447 netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1522 qdev->ndev->name);
1523 return -1; 1448 return -1;
1524 } 1449 }
1525 1450
@@ -1537,19 +1462,16 @@ static int ql_port_start(struct ql3_adapter *qdev)
1537static int ql_finish_auto_neg(struct ql3_adapter *qdev) 1462static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1538{ 1463{
1539 1464
1540 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1465 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1541 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1466 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1542 2) << 7)) 1467 2) << 7))
1543 return -1; 1468 return -1;
1544 1469
1545 if (!ql_auto_neg_error(qdev)) { 1470 if (!ql_auto_neg_error(qdev)) {
1546 if (test_bit(QL_LINK_MASTER,&qdev->flags)) { 1471 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1547 /* configure the MAC */ 1472 /* configure the MAC */
1548 if (netif_msg_link(qdev)) 1473 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1549 printk(KERN_DEBUG PFX 1474 "Configuring link\n");
1550 "%s: Configuring link.\n",
1551 qdev->ndev->
1552 name);
1553 ql_mac_cfg_soft_reset(qdev, 1); 1475 ql_mac_cfg_soft_reset(qdev, 1);
1554 ql_mac_cfg_gig(qdev, 1476 ql_mac_cfg_gig(qdev,
1555 (ql_get_link_speed 1477 (ql_get_link_speed
@@ -1564,43 +1486,32 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1564 ql_mac_cfg_soft_reset(qdev, 0); 1486 ql_mac_cfg_soft_reset(qdev, 0);
1565 1487
1566 /* enable the MAC */ 1488 /* enable the MAC */
1567 if (netif_msg_link(qdev)) 1489 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1568 printk(KERN_DEBUG PFX 1490 "Enabling mac\n");
1569 "%s: Enabling mac.\n",
1570 qdev->ndev->
1571 name);
1572 ql_mac_enable(qdev, 1); 1491 ql_mac_enable(qdev, 1);
1573 } 1492 }
1574 1493
1575 qdev->port_link_state = LS_UP; 1494 qdev->port_link_state = LS_UP;
1576 netif_start_queue(qdev->ndev); 1495 netif_start_queue(qdev->ndev);
1577 netif_carrier_on(qdev->ndev); 1496 netif_carrier_on(qdev->ndev);
1578 if (netif_msg_link(qdev)) 1497 netif_info(qdev, link, qdev->ndev,
1579 printk(KERN_INFO PFX 1498 "Link is up at %d Mbps, %s duplex\n",
1580 "%s: Link is up at %d Mbps, %s duplex.\n", 1499 ql_get_link_speed(qdev),
1581 qdev->ndev->name, 1500 ql_is_link_full_dup(qdev) ? "full" : "half");
1582 ql_get_link_speed(qdev),
1583 ql_is_link_full_dup(qdev)
1584 ? "full" : "half");
1585 1501
1586 } else { /* Remote error detected */ 1502 } else { /* Remote error detected */
1587 1503
1588 if (test_bit(QL_LINK_MASTER,&qdev->flags)) { 1504 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1589 if (netif_msg_link(qdev)) 1505 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1590 printk(KERN_DEBUG PFX 1506 "Remote error detected. Calling ql_port_start()\n");
1591 "%s: Remote error detected. "
1592 "Calling ql_port_start().\n",
1593 qdev->ndev->
1594 name);
1595 /* 1507 /*
1596 * ql_port_start() is shared code and needs 1508 * ql_port_start() is shared code and needs
1597 * to lock the PHY on it's own. 1509 * to lock the PHY on it's own.
1598 */ 1510 */
1599 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1511 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1600 if(ql_port_start(qdev)) {/* Restart port */ 1512 if (ql_port_start(qdev)) /* Restart port */
1601 return -1; 1513 return -1;
1602 } else 1514 return 0;
1603 return 0;
1604 } 1515 }
1605 } 1516 }
1606 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1517 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
@@ -1619,33 +1530,28 @@ static void ql_link_state_machine_work(struct work_struct *work)
1619 1530
1620 curr_link_state = ql_get_link_state(qdev); 1531 curr_link_state = ql_get_link_state(qdev);
1621 1532
1622 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { 1533 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1623 if (netif_msg_link(qdev)) 1534 netif_info(qdev, link, qdev->ndev,
1624 printk(KERN_INFO PFX 1535 "Reset in progress, skip processing link state\n");
1625 "%s: Reset in progress, skip processing link "
1626 "state.\n", qdev->ndev->name);
1627 1536
1628 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1537 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1629 1538
1630 /* Restart timer on 2 second interval. */ 1539 /* Restart timer on 2 second interval. */
1631 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\ 1540 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1632 1541
1633 return; 1542 return;
1634 } 1543 }
1635 1544
1636 switch (qdev->port_link_state) { 1545 switch (qdev->port_link_state) {
1637 default: 1546 default:
1638 if (test_bit(QL_LINK_MASTER,&qdev->flags)) { 1547 if (test_bit(QL_LINK_MASTER, &qdev->flags))
1639 ql_port_start(qdev); 1548 ql_port_start(qdev);
1640 }
1641 qdev->port_link_state = LS_DOWN; 1549 qdev->port_link_state = LS_DOWN;
1642 /* Fall Through */ 1550 /* Fall Through */
1643 1551
1644 case LS_DOWN: 1552 case LS_DOWN:
1645 if (curr_link_state == LS_UP) { 1553 if (curr_link_state == LS_UP) {
1646 if (netif_msg_link(qdev)) 1554 netif_info(qdev, link, qdev->ndev, "Link is up\n");
1647 printk(KERN_INFO PFX "%s: Link is up.\n",
1648 qdev->ndev->name);
1649 if (ql_is_auto_neg_complete(qdev)) 1555 if (ql_is_auto_neg_complete(qdev))
1650 ql_finish_auto_neg(qdev); 1556 ql_finish_auto_neg(qdev);
1651 1557
@@ -1662,9 +1568,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
1662 * back up 1568 * back up
1663 */ 1569 */
1664 if (curr_link_state == LS_DOWN) { 1570 if (curr_link_state == LS_DOWN) {
1665 if (netif_msg_link(qdev)) 1571 netif_info(qdev, link, qdev->ndev, "Link is down\n");
1666 printk(KERN_INFO PFX "%s: Link is down.\n",
1667 qdev->ndev->name);
1668 qdev->port_link_state = LS_DOWN; 1572 qdev->port_link_state = LS_DOWN;
1669 } 1573 }
1670 if (ql_link_down_detect(qdev)) 1574 if (ql_link_down_detect(qdev))
@@ -1683,9 +1587,9 @@ static void ql_link_state_machine_work(struct work_struct *work)
1683static void ql_get_phy_owner(struct ql3_adapter *qdev) 1587static void ql_get_phy_owner(struct ql3_adapter *qdev)
1684{ 1588{
1685 if (ql_this_adapter_controls_port(qdev)) 1589 if (ql_this_adapter_controls_port(qdev))
1686 set_bit(QL_LINK_MASTER,&qdev->flags); 1590 set_bit(QL_LINK_MASTER, &qdev->flags);
1687 else 1591 else
1688 clear_bit(QL_LINK_MASTER,&qdev->flags); 1592 clear_bit(QL_LINK_MASTER, &qdev->flags);
1689} 1593}
1690 1594
1691/* 1595/*
@@ -1695,7 +1599,7 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
1695{ 1599{
1696 ql_mii_enable_scan_mode(qdev); 1600 ql_mii_enable_scan_mode(qdev);
1697 1601
1698 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { 1602 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1699 if (ql_this_adapter_controls_port(qdev)) 1603 if (ql_this_adapter_controls_port(qdev))
1700 ql_petbi_init_ex(qdev); 1604 ql_petbi_init_ex(qdev);
1701 } else { 1605 } else {
@@ -1705,18 +1609,18 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
1705} 1609}
1706 1610
1707/* 1611/*
1708 * MII_Setup needs to be called before taking the PHY out of reset so that the 1612 * MII_Setup needs to be called before taking the PHY out of reset
1709 * management interface clock speed can be set properly. It would be better if 1613 * so that the management interface clock speed can be set properly.
1710 * we had a way to disable MDC until after the PHY is out of reset, but we 1614 * It would be better if we had a way to disable MDC until after the
1711 * don't have that capability. 1615 * PHY is out of reset, but we don't have that capability.
1712 */ 1616 */
1713static int ql_mii_setup(struct ql3_adapter *qdev) 1617static int ql_mii_setup(struct ql3_adapter *qdev)
1714{ 1618{
1715 u32 reg; 1619 u32 reg;
1716 struct ql3xxx_port_registers __iomem *port_regs = 1620 struct ql3xxx_port_registers __iomem *port_regs =
1717 qdev->mem_map_registers; 1621 qdev->mem_map_registers;
1718 1622
1719 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1623 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1720 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1624 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1721 2) << 7)) 1625 2) << 7))
1722 return -1; 1626 return -1;
@@ -1735,24 +1639,24 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
1735 return 0; 1639 return 0;
1736} 1640}
1737 1641
1642#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \
1643 SUPPORTED_FIBRE | \
1644 SUPPORTED_Autoneg)
1645#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \
1646 SUPPORTED_10baseT_Full | \
1647 SUPPORTED_100baseT_Half | \
1648 SUPPORTED_100baseT_Full | \
1649 SUPPORTED_1000baseT_Half | \
1650 SUPPORTED_1000baseT_Full | \
1651 SUPPORTED_Autoneg | \
1652 SUPPORTED_TP); \
1653
1738static u32 ql_supported_modes(struct ql3_adapter *qdev) 1654static u32 ql_supported_modes(struct ql3_adapter *qdev)
1739{ 1655{
1740 u32 supported; 1656 if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1657 return SUPPORTED_OPTICAL_MODES;
1741 1658
1742 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { 1659 return SUPPORTED_TP_MODES;
1743 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1744 | SUPPORTED_Autoneg;
1745 } else {
1746 supported = SUPPORTED_10baseT_Half
1747 | SUPPORTED_10baseT_Full
1748 | SUPPORTED_100baseT_Half
1749 | SUPPORTED_100baseT_Full
1750 | SUPPORTED_1000baseT_Half
1751 | SUPPORTED_1000baseT_Full
1752 | SUPPORTED_Autoneg | SUPPORTED_TP;
1753 }
1754
1755 return supported;
1756} 1660}
1757 1661
1758static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) 1662static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
@@ -1760,9 +1664,9 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1760 int status; 1664 int status;
1761 unsigned long hw_flags; 1665 unsigned long hw_flags;
1762 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1666 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1763 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1667 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1764 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1668 (QL_RESOURCE_BITS_BASE_CODE |
1765 2) << 7)) { 1669 (qdev->mac_index) * 2) << 7)) {
1766 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1670 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1767 return 0; 1671 return 0;
1768 } 1672 }
@@ -1777,9 +1681,9 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
1777 u32 status; 1681 u32 status;
1778 unsigned long hw_flags; 1682 unsigned long hw_flags;
1779 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1683 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1780 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1684 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1781 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1685 (QL_RESOURCE_BITS_BASE_CODE |
1782 2) << 7)) { 1686 (qdev->mac_index) * 2) << 7)) {
1783 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1687 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1784 return 0; 1688 return 0;
1785 } 1689 }
@@ -1794,9 +1698,9 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1794 int status; 1698 int status;
1795 unsigned long hw_flags; 1699 unsigned long hw_flags;
1796 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1700 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1797 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1701 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1798 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1702 (QL_RESOURCE_BITS_BASE_CODE |
1799 2) << 7)) { 1703 (qdev->mac_index) * 2) << 7)) {
1800 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1801 return 0; 1705 return 0;
1802 } 1706 }
@@ -1806,7 +1710,6 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1806 return status; 1710 return status;
1807} 1711}
1808 1712
1809
1810static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 1713static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1811{ 1714{
1812 struct ql3_adapter *qdev = netdev_priv(ndev); 1715 struct ql3_adapter *qdev = netdev_priv(ndev);
@@ -1814,7 +1717,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1814 ecmd->transceiver = XCVR_INTERNAL; 1717 ecmd->transceiver = XCVR_INTERNAL;
1815 ecmd->supported = ql_supported_modes(qdev); 1718 ecmd->supported = ql_supported_modes(qdev);
1816 1719
1817 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { 1720 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1818 ecmd->port = PORT_FIBRE; 1721 ecmd->port = PORT_FIBRE;
1819 } else { 1722 } else {
1820 ecmd->port = PORT_TP; 1723 ecmd->port = PORT_TP;
@@ -1855,10 +1758,11 @@ static void ql_get_pauseparam(struct net_device *ndev,
1855 struct ethtool_pauseparam *pause) 1758 struct ethtool_pauseparam *pause)
1856{ 1759{
1857 struct ql3_adapter *qdev = netdev_priv(ndev); 1760 struct ql3_adapter *qdev = netdev_priv(ndev);
1858 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 1761 struct ql3xxx_port_registers __iomem *port_regs =
1762 qdev->mem_map_registers;
1859 1763
1860 u32 reg; 1764 u32 reg;
1861 if(qdev->mac_index == 0) 1765 if (qdev->mac_index == 0)
1862 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); 1766 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1863 else 1767 else
1864 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); 1768 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
@@ -1885,12 +1789,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1885 1789
1886 while (lrg_buf_cb) { 1790 while (lrg_buf_cb) {
1887 if (!lrg_buf_cb->skb) { 1791 if (!lrg_buf_cb->skb) {
1888 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 1792 lrg_buf_cb->skb =
1889 qdev->lrg_buffer_len); 1793 netdev_alloc_skb(qdev->ndev,
1794 qdev->lrg_buffer_len);
1890 if (unlikely(!lrg_buf_cb->skb)) { 1795 if (unlikely(!lrg_buf_cb->skb)) {
1891 printk(KERN_DEBUG PFX 1796 netdev_printk(KERN_DEBUG, qdev->ndev,
1892 "%s: Failed netdev_alloc_skb().\n", 1797 "Failed netdev_alloc_skb()\n");
1893 qdev->ndev->name);
1894 break; 1798 break;
1895 } else { 1799 } else {
1896 /* 1800 /*
@@ -1905,9 +1809,10 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1905 PCI_DMA_FROMDEVICE); 1809 PCI_DMA_FROMDEVICE);
1906 1810
1907 err = pci_dma_mapping_error(qdev->pdev, map); 1811 err = pci_dma_mapping_error(qdev->pdev, map);
1908 if(err) { 1812 if (err) {
1909 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 1813 netdev_err(qdev->ndev,
1910 qdev->ndev->name, err); 1814 "PCI mapping failed with error: %d\n",
1815 err);
1911 dev_kfree_skb(lrg_buf_cb->skb); 1816 dev_kfree_skb(lrg_buf_cb->skb);
1912 lrg_buf_cb->skb = NULL; 1817 lrg_buf_cb->skb = NULL;
1913 break; 1818 break;
@@ -1915,9 +1820,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1915 1820
1916 1821
1917 lrg_buf_cb->buf_phy_addr_low = 1822 lrg_buf_cb->buf_phy_addr_low =
1918 cpu_to_le32(LS_64BITS(map)); 1823 cpu_to_le32(LS_64BITS(map));
1919 lrg_buf_cb->buf_phy_addr_high = 1824 lrg_buf_cb->buf_phy_addr_high =
1920 cpu_to_le32(MS_64BITS(map)); 1825 cpu_to_le32(MS_64BITS(map));
1921 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 1826 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1922 dma_unmap_len_set(lrg_buf_cb, maplen, 1827 dma_unmap_len_set(lrg_buf_cb, maplen,
1923 qdev->lrg_buffer_len - 1828 qdev->lrg_buffer_len -
@@ -1937,7 +1842,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1937 */ 1842 */
1938static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) 1843static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1939{ 1844{
1940 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 1845 struct ql3xxx_port_registers __iomem *port_regs =
1846 qdev->mem_map_registers;
1847
1941 if (qdev->small_buf_release_cnt >= 16) { 1848 if (qdev->small_buf_release_cnt >= 16) {
1942 while (qdev->small_buf_release_cnt >= 16) { 1849 while (qdev->small_buf_release_cnt >= 16) {
1943 qdev->small_buf_q_producer_index++; 1850 qdev->small_buf_q_producer_index++;
@@ -1961,7 +1868,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1961 struct bufq_addr_element *lrg_buf_q_ele; 1868 struct bufq_addr_element *lrg_buf_q_ele;
1962 int i; 1869 int i;
1963 struct ql_rcv_buf_cb *lrg_buf_cb; 1870 struct ql_rcv_buf_cb *lrg_buf_cb;
1964 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 1871 struct ql3xxx_port_registers __iomem *port_regs =
1872 qdev->mem_map_registers;
1965 1873
1966 if ((qdev->lrg_buf_free_count >= 8) && 1874 if ((qdev->lrg_buf_free_count >= 8) &&
1967 (qdev->lrg_buf_release_cnt >= 16)) { 1875 (qdev->lrg_buf_release_cnt >= 16)) {
@@ -1989,7 +1897,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1989 1897
1990 qdev->lrg_buf_q_producer_index++; 1898 qdev->lrg_buf_q_producer_index++;
1991 1899
1992 if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) 1900 if (qdev->lrg_buf_q_producer_index ==
1901 qdev->num_lbufq_entries)
1993 qdev->lrg_buf_q_producer_index = 0; 1902 qdev->lrg_buf_q_producer_index = 0;
1994 1903
1995 if (qdev->lrg_buf_q_producer_index == 1904 if (qdev->lrg_buf_q_producer_index ==
@@ -2011,23 +1920,26 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2011 int i; 1920 int i;
2012 int retval = 0; 1921 int retval = 0;
2013 1922
2014 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1923 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2015 printk(KERN_WARNING "Frame short but, frame was padded and sent.\n"); 1924 netdev_warn(qdev->ndev,
1925 "Frame too short but it was padded and sent\n");
2016 } 1926 }
2017 1927
2018 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1928 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
2019 1929
2020 /* Check the transmit response flags for any errors */ 1930 /* Check the transmit response flags for any errors */
2021 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1931 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2022 printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); 1932 netdev_err(qdev->ndev,
1933 "Frame too short to be legal, frame not sent\n");
2023 1934
2024 qdev->ndev->stats.tx_errors++; 1935 qdev->ndev->stats.tx_errors++;
2025 retval = -EIO; 1936 retval = -EIO;
2026 goto frame_not_sent; 1937 goto frame_not_sent;
2027 } 1938 }
2028 1939
2029 if(tx_cb->seg_count == 0) { 1940 if (tx_cb->seg_count == 0) {
2030 printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); 1941 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1942 mac_rsp->transaction_id);
2031 1943
2032 qdev->ndev->stats.tx_errors++; 1944 qdev->ndev->stats.tx_errors++;
2033 retval = -EIO; 1945 retval = -EIO;
@@ -2073,7 +1985,7 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
2073 qdev->lrg_buf_release_cnt++; 1985 qdev->lrg_buf_release_cnt++;
2074 if (++qdev->lrg_buf_index == qdev->num_large_buffers) 1986 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
2075 qdev->lrg_buf_index = 0; 1987 qdev->lrg_buf_index = 0;
2076 return(lrg_buf_cb); 1988 return lrg_buf_cb;
2077} 1989}
2078 1990
2079/* 1991/*
@@ -2177,12 +2089,11 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2177 if (checksum & 2089 if (checksum &
2178 (IB_IP_IOCB_RSP_3032_ICE | 2090 (IB_IP_IOCB_RSP_3032_ICE |
2179 IB_IP_IOCB_RSP_3032_CE)) { 2091 IB_IP_IOCB_RSP_3032_CE)) {
2180 printk(KERN_ERR 2092 netdev_err(ndev,
2181 "%s: Bad checksum for this %s packet, checksum = %x.\n", 2093 "%s: Bad checksum for this %s packet, checksum = %x\n",
2182 __func__, 2094 __func__,
2183 ((checksum & 2095 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2184 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : 2096 "TCP" : "UDP"), checksum);
2185 "UDP"),checksum);
2186 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2097 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2187 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2098 (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2188 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2099 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
@@ -2215,8 +2126,8 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2215 net_rsp = qdev->rsp_current; 2126 net_rsp = qdev->rsp_current;
2216 rmb(); 2127 rmb();
2217 /* 2128 /*
2218 * Fix 4032 chipe undocumented "feature" where bit-8 is set if the 2129 * Fix 4032 chip's undocumented "feature" where bit-8 is set
2219 * inbound completion is for a VLAN. 2130 * if the inbound completion is for a VLAN.
2220 */ 2131 */
2221 if (qdev->device_id == QL3032_DEVICE_ID) 2132 if (qdev->device_id == QL3032_DEVICE_ID)
2222 net_rsp->opcode &= 0x7f; 2133 net_rsp->opcode &= 0x7f;
@@ -2242,22 +2153,18 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2242 net_rsp); 2153 net_rsp);
2243 (*rx_cleaned)++; 2154 (*rx_cleaned)++;
2244 break; 2155 break;
2245 default: 2156 default: {
2246 { 2157 u32 *tmp = (u32 *)net_rsp;
2247 u32 *tmp = (u32 *) net_rsp; 2158 netdev_err(ndev,
2248 printk(KERN_ERR PFX 2159 "Hit default case, not handled!\n"
2249 "%s: Hit default case, not " 2160 " dropping the packet, opcode = %x\n"
2250 "handled!\n" 2161 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2251 " dropping the packet, opcode = " 2162 net_rsp->opcode,
2252 "%x.\n", 2163 (unsigned long int)tmp[0],
2253 ndev->name, net_rsp->opcode); 2164 (unsigned long int)tmp[1],
2254 printk(KERN_ERR PFX 2165 (unsigned long int)tmp[2],
2255 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2166 (unsigned long int)tmp[3]);
2256 (unsigned long int)tmp[0], 2167 }
2257 (unsigned long int)tmp[1],
2258 (unsigned long int)tmp[2],
2259 (unsigned long int)tmp[3]);
2260 }
2261 } 2168 }
2262 2169
2263 qdev->rsp_consumer_index++; 2170 qdev->rsp_consumer_index++;
@@ -2280,7 +2187,8 @@ static int ql_poll(struct napi_struct *napi, int budget)
2280 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2187 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2281 int rx_cleaned = 0, tx_cleaned = 0; 2188 int rx_cleaned = 0, tx_cleaned = 0;
2282 unsigned long hw_flags; 2189 unsigned long hw_flags;
2283 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2190 struct ql3xxx_port_registers __iomem *port_regs =
2191 qdev->mem_map_registers;
2284 2192
2285 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); 2193 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2286 2194
@@ -2303,15 +2211,14 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2303 2211
2304 struct net_device *ndev = dev_id; 2212 struct net_device *ndev = dev_id;
2305 struct ql3_adapter *qdev = netdev_priv(ndev); 2213 struct ql3_adapter *qdev = netdev_priv(ndev);
2306 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2214 struct ql3xxx_port_registers __iomem *port_regs =
2215 qdev->mem_map_registers;
2307 u32 value; 2216 u32 value;
2308 int handled = 1; 2217 int handled = 1;
2309 u32 var; 2218 u32 var;
2310 2219
2311 port_regs = qdev->mem_map_registers; 2220 value = ql_read_common_reg_l(qdev,
2312 2221 &port_regs->CommonRegs.ispControlStatus);
2313 value =
2314 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2315 2222
2316 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2223 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2317 spin_lock(&qdev->adapter_lock); 2224 spin_lock(&qdev->adapter_lock);
@@ -2319,7 +2226,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2319 netif_carrier_off(qdev->ndev); 2226 netif_carrier_off(qdev->ndev);
2320 ql_disable_interrupts(qdev); 2227 ql_disable_interrupts(qdev);
2321 qdev->port_link_state = LS_DOWN; 2228 qdev->port_link_state = LS_DOWN;
2322 set_bit(QL_RESET_ACTIVE,&qdev->flags) ; 2229 set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
2323 2230
2324 if (value & ISP_CONTROL_FE) { 2231 if (value & ISP_CONTROL_FE) {
2325 /* 2232 /*
@@ -2328,69 +2235,53 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2328 var = 2235 var =
2329 ql_read_page0_reg_l(qdev, 2236 ql_read_page0_reg_l(qdev,
2330 &port_regs->PortFatalErrStatus); 2237 &port_regs->PortFatalErrStatus);
2331 printk(KERN_WARNING PFX 2238 netdev_warn(ndev,
2332 "%s: Resetting chip. PortFatalErrStatus " 2239 "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2333 "register = 0x%x\n", ndev->name, var); 2240 var);
2334 set_bit(QL_RESET_START,&qdev->flags) ; 2241 set_bit(QL_RESET_START, &qdev->flags) ;
2335 } else { 2242 } else {
2336 /* 2243 /*
2337 * Soft Reset Requested. 2244 * Soft Reset Requested.
2338 */ 2245 */
2339 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; 2246 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
2340 printk(KERN_ERR PFX 2247 netdev_err(ndev,
2341 "%s: Another function issued a reset to the " 2248 "Another function issued a reset to the chip. ISR value = %x\n",
2342 "chip. ISR value = %x.\n", ndev->name, value); 2249 value);
2343 } 2250 }
2344 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2251 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2345 spin_unlock(&qdev->adapter_lock); 2252 spin_unlock(&qdev->adapter_lock);
2346 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2253 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2347 ql_disable_interrupts(qdev); 2254 ql_disable_interrupts(qdev);
2348 if (likely(napi_schedule_prep(&qdev->napi))) { 2255 if (likely(napi_schedule_prep(&qdev->napi)))
2349 __napi_schedule(&qdev->napi); 2256 __napi_schedule(&qdev->napi);
2350 } 2257 } else
2351 } else {
2352 return IRQ_NONE; 2258 return IRQ_NONE;
2353 }
2354 2259
2355 return IRQ_RETVAL(handled); 2260 return IRQ_RETVAL(handled);
2356} 2261}
2357 2262
2358/* 2263/*
2359 * Get the total number of segments needed for the 2264 * Get the total number of segments needed for the given number of fragments.
2360 * given number of fragments. This is necessary because 2265 * This is necessary because outbound address lists (OAL) will be used when
2361 * outbound address lists (OAL) will be used when more than 2266 * more than two frags are given. Each address list has 5 addr/len pairs.
2362 * two frags are given. Each address list has 5 addr/len 2267 * The 5th pair in each OAL is used to point to the next OAL if more frags
2363 * pairs. The 5th pair in each AOL is used to point to 2268 * are coming. That is why the frags:segment count ratio is not linear.
2364 * the next AOL if more frags are coming.
2365 * That is why the frags:segment count ratio is not linear.
2366 */ 2269 */
2367static int ql_get_seg_count(struct ql3_adapter *qdev, 2270static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2368 unsigned short frags)
2369{ 2271{
2370 if (qdev->device_id == QL3022_DEVICE_ID) 2272 if (qdev->device_id == QL3022_DEVICE_ID)
2371 return 1; 2273 return 1;
2372 2274
2373 switch(frags) { 2275 if (frags <= 2)
2374 case 0: return 1; /* just the skb->data seg */ 2276 return frags + 1;
2375 case 1: return 2; /* skb->data + 1 frag */ 2277 else if (frags <= 6)
2376 case 2: return 3; /* skb->data + 2 frags */ 2278 return frags + 2;
2377 case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */ 2279 else if (frags <= 10)
2378 case 4: return 6; 2280 return frags + 3;
2379 case 5: return 7; 2281 else if (frags <= 14)
2380 case 6: return 8; 2282 return frags + 4;
2381 case 7: return 10; 2283 else if (frags <= 18)
2382 case 8: return 11; 2284 return frags + 5;
2383 case 9: return 12;
2384 case 10: return 13;
2385 case 11: return 15;
2386 case 12: return 16;
2387 case 13: return 17;
2388 case 14: return 18;
2389 case 15: return 20;
2390 case 16: return 21;
2391 case 17: return 22;
2392 case 18: return 23;
2393 }
2394 return -1; 2285 return -1;
2395} 2286}
2396 2287
@@ -2413,8 +2304,8 @@ static void ql_hw_csum_setup(const struct sk_buff *skb,
2413} 2304}
2414 2305
2415/* 2306/*
2416 * Map the buffers for this transmit. This will return 2307 * Map the buffers for this transmit.
2417 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2308 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2418 */ 2309 */
2419static int ql_send_map(struct ql3_adapter *qdev, 2310static int ql_send_map(struct ql3_adapter *qdev,
2420 struct ob_mac_iocb_req *mac_iocb_ptr, 2311 struct ob_mac_iocb_req *mac_iocb_ptr,
@@ -2437,9 +2328,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
2437 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2328 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2438 2329
2439 err = pci_dma_mapping_error(qdev->pdev, map); 2330 err = pci_dma_mapping_error(qdev->pdev, map);
2440 if(err) { 2331 if (err) {
2441 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 2332 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2442 qdev->ndev->name, err); 2333 err);
2443 2334
2444 return NETDEV_TX_BUSY; 2335 return NETDEV_TX_BUSY;
2445 } 2336 }
@@ -2455,65 +2346,67 @@ static int ql_send_map(struct ql3_adapter *qdev,
2455 if (seg_cnt == 1) { 2346 if (seg_cnt == 1) {
2456 /* Terminate the last segment. */ 2347 /* Terminate the last segment. */
2457 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2348 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2458 } else { 2349 return NETDEV_TX_OK;
2459 oal = tx_cb->oal; 2350 }
2460 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { 2351 oal = tx_cb->oal;
2461 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2352 for (completed_segs = 0;
2462 oal_entry++; 2353 completed_segs < frag_cnt;
2463 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ 2354 completed_segs++, seg++) {
2464 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ 2355 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2465 (seg == 12 && seg_cnt > 13) || /* but necessary. */ 2356 oal_entry++;
2466 (seg == 17 && seg_cnt > 18)) { 2357 /*
2467 /* Continuation entry points to outbound address list. */ 2358 * Check for continuation requirements.
2468 map = pci_map_single(qdev->pdev, oal, 2359 * It's strange but necessary.
2469 sizeof(struct oal), 2360 * Continuation entry points to outbound address list.
2470 PCI_DMA_TODEVICE); 2361 */
2471 2362 if ((seg == 2 && seg_cnt > 3) ||
2472 err = pci_dma_mapping_error(qdev->pdev, map); 2363 (seg == 7 && seg_cnt > 8) ||
2473 if(err) { 2364 (seg == 12 && seg_cnt > 13) ||
2474 2365 (seg == 17 && seg_cnt > 18)) {
2475 printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", 2366 map = pci_map_single(qdev->pdev, oal,
2476 qdev->ndev->name, err); 2367 sizeof(struct oal),
2477 goto map_error; 2368 PCI_DMA_TODEVICE);
2478 }
2479
2480 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2481 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2482 oal_entry->len =
2483 cpu_to_le32(sizeof(struct oal) |
2484 OAL_CONT_ENTRY);
2485 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2486 map);
2487 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2488 sizeof(struct oal));
2489 oal_entry = (struct oal_entry *)oal;
2490 oal++;
2491 seg++;
2492 }
2493
2494 map =
2495 pci_map_page(qdev->pdev, frag->page,
2496 frag->page_offset, frag->size,
2497 PCI_DMA_TODEVICE);
2498 2369
2499 err = pci_dma_mapping_error(qdev->pdev, map); 2370 err = pci_dma_mapping_error(qdev->pdev, map);
2500 if(err) { 2371 if (err) {
2501 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", 2372 netdev_err(qdev->ndev,
2502 qdev->ndev->name, err); 2373 "PCI mapping outbound address list with error: %d\n",
2374 err);
2503 goto map_error; 2375 goto map_error;
2504 } 2376 }
2505 2377
2506 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2378 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2507 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2379 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2508 oal_entry->len = cpu_to_le32(frag->size); 2380 oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2381 OAL_CONT_ENTRY);
2509 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2382 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2510 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2383 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2511 frag->size); 2384 sizeof(struct oal));
2385 oal_entry = (struct oal_entry *)oal;
2386 oal++;
2387 seg++;
2388 }
2389
2390 map = pci_map_page(qdev->pdev, frag->page,
2391 frag->page_offset, frag->size,
2392 PCI_DMA_TODEVICE);
2393
2394 err = pci_dma_mapping_error(qdev->pdev, map);
2395 if (err) {
2396 netdev_err(qdev->ndev,
2397 "PCI mapping frags failed with error: %d\n",
2398 err);
2399 goto map_error;
2512 } 2400 }
2513 /* Terminate the last segment. */
2514 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2515 }
2516 2401
2402 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2403 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2404 oal_entry->len = cpu_to_le32(frag->size);
2405 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2406 dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
2407 }
2408 /* Terminate the last segment. */
2409 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2517 return NETDEV_TX_OK; 2410 return NETDEV_TX_OK;
2518 2411
2519map_error: 2412map_error:
@@ -2525,13 +2418,18 @@ map_error:
2525 seg = 1; 2418 seg = 1;
2526 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2419 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2527 oal = tx_cb->oal; 2420 oal = tx_cb->oal;
2528 for (i=0; i<completed_segs; i++,seg++) { 2421 for (i = 0; i < completed_segs; i++, seg++) {
2529 oal_entry++; 2422 oal_entry++;
2530 2423
2531 if((seg == 2 && seg_cnt > 3) || /* Check for continuation */ 2424 /*
2532 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ 2425 * Check for continuation requirements.
2533 (seg == 12 && seg_cnt > 13) || /* but necessary. */ 2426 * It's strange but necessary.
2534 (seg == 17 && seg_cnt > 18)) { 2427 */
2428
2429 if ((seg == 2 && seg_cnt > 3) ||
2430 (seg == 7 && seg_cnt > 8) ||
2431 (seg == 12 && seg_cnt > 13) ||
2432 (seg == 17 && seg_cnt > 18)) {
2535 pci_unmap_single(qdev->pdev, 2433 pci_unmap_single(qdev->pdev,
2536 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2434 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2537 dma_unmap_len(&tx_cb->map[seg], maplen), 2435 dma_unmap_len(&tx_cb->map[seg], maplen),
@@ -2570,19 +2468,20 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2570 struct net_device *ndev) 2468 struct net_device *ndev)
2571{ 2469{
2572 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2470 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2573 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2471 struct ql3xxx_port_registers __iomem *port_regs =
2472 qdev->mem_map_registers;
2574 struct ql_tx_buf_cb *tx_cb; 2473 struct ql_tx_buf_cb *tx_cb;
2575 u32 tot_len = skb->len; 2474 u32 tot_len = skb->len;
2576 struct ob_mac_iocb_req *mac_iocb_ptr; 2475 struct ob_mac_iocb_req *mac_iocb_ptr;
2577 2476
2578 if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2477 if (unlikely(atomic_read(&qdev->tx_count) < 2))
2579 return NETDEV_TX_BUSY; 2478 return NETDEV_TX_BUSY;
2580 }
2581 2479
2582 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2480 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2583 if((tx_cb->seg_count = ql_get_seg_count(qdev, 2481 tx_cb->seg_count = ql_get_seg_count(qdev,
2584 (skb_shinfo(skb)->nr_frags))) == -1) { 2482 skb_shinfo(skb)->nr_frags);
2585 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2483 if (tx_cb->seg_count == -1) {
2484 netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2586 return NETDEV_TX_OK; 2485 return NETDEV_TX_OK;
2587 } 2486 }
2588 2487
@@ -2598,8 +2497,8 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2598 skb->ip_summed == CHECKSUM_PARTIAL) 2497 skb->ip_summed == CHECKSUM_PARTIAL)
2599 ql_hw_csum_setup(skb, mac_iocb_ptr); 2498 ql_hw_csum_setup(skb, mac_iocb_ptr);
2600 2499
2601 if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { 2500 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2602 printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); 2501 netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2603 return NETDEV_TX_BUSY; 2502 return NETDEV_TX_BUSY;
2604 } 2503 }
2605 2504
@@ -2612,9 +2511,9 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2612 &port_regs->CommonRegs.reqQProducerIndex, 2511 &port_regs->CommonRegs.reqQProducerIndex,
2613 qdev->req_producer_index); 2512 qdev->req_producer_index);
2614 2513
2615 if (netif_msg_tx_queued(qdev)) 2514 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2616 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", 2515 "tx queued, slot %d, len %d\n",
2617 ndev->name, qdev->req_producer_index, skb->len); 2516 qdev->req_producer_index, skb->len);
2618 2517
2619 atomic_dec(&qdev->tx_count); 2518 atomic_dec(&qdev->tx_count);
2620 return NETDEV_TX_OK; 2519 return NETDEV_TX_OK;
@@ -2632,8 +2531,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2632 2531
2633 if ((qdev->req_q_virt_addr == NULL) || 2532 if ((qdev->req_q_virt_addr == NULL) ||
2634 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2533 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2635 printk(KERN_ERR PFX "%s: reqQ failed.\n", 2534 netdev_err(qdev->ndev, "reqQ failed\n");
2636 qdev->ndev->name);
2637 return -ENOMEM; 2535 return -ENOMEM;
2638 } 2536 }
2639 2537
@@ -2646,25 +2544,22 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2646 2544
2647 if ((qdev->rsp_q_virt_addr == NULL) || 2545 if ((qdev->rsp_q_virt_addr == NULL) ||
2648 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2546 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2649 printk(KERN_ERR PFX 2547 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2650 "%s: rspQ allocation failed\n",
2651 qdev->ndev->name);
2652 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2548 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2653 qdev->req_q_virt_addr, 2549 qdev->req_q_virt_addr,
2654 qdev->req_q_phy_addr); 2550 qdev->req_q_phy_addr);
2655 return -ENOMEM; 2551 return -ENOMEM;
2656 } 2552 }
2657 2553
2658 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); 2554 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2659 2555
2660 return 0; 2556 return 0;
2661} 2557}
2662 2558
2663static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2559static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2664{ 2560{
2665 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { 2561 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2666 printk(KERN_INFO PFX 2562 netdev_info(qdev->ndev, "Already done\n");
2667 "%s: Already done.\n", qdev->ndev->name);
2668 return; 2563 return;
2669 } 2564 }
2670 2565
@@ -2680,34 +2575,34 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2680 2575
2681 qdev->rsp_q_virt_addr = NULL; 2576 qdev->rsp_q_virt_addr = NULL;
2682 2577
2683 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); 2578 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2684} 2579}
2685 2580
2686static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2581static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2687{ 2582{
2688 /* Create Large Buffer Queue */ 2583 /* Create Large Buffer Queue */
2689 qdev->lrg_buf_q_size = 2584 qdev->lrg_buf_q_size =
2690 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2585 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2691 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2586 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2692 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2587 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2693 else 2588 else
2694 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2589 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2695 2590
2696 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); 2591 qdev->lrg_buf =
2592 kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
2593 GFP_KERNEL);
2697 if (qdev->lrg_buf == NULL) { 2594 if (qdev->lrg_buf == NULL) {
2698 printk(KERN_ERR PFX 2595 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2699 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2700 return -ENOMEM; 2596 return -ENOMEM;
2701 } 2597 }
2702 2598
2703 qdev->lrg_buf_q_alloc_virt_addr = 2599 qdev->lrg_buf_q_alloc_virt_addr =
2704 pci_alloc_consistent(qdev->pdev, 2600 pci_alloc_consistent(qdev->pdev,
2705 qdev->lrg_buf_q_alloc_size, 2601 qdev->lrg_buf_q_alloc_size,
2706 &qdev->lrg_buf_q_alloc_phy_addr); 2602 &qdev->lrg_buf_q_alloc_phy_addr);
2707 2603
2708 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2604 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2709 printk(KERN_ERR PFX 2605 netdev_err(qdev->ndev, "lBufQ failed\n");
2710 "%s: lBufQ failed\n", qdev->ndev->name);
2711 return -ENOMEM; 2606 return -ENOMEM;
2712 } 2607 }
2713 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2608 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@@ -2715,21 +2610,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2715 2610
2716 /* Create Small Buffer Queue */ 2611 /* Create Small Buffer Queue */
2717 qdev->small_buf_q_size = 2612 qdev->small_buf_q_size =
2718 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2613 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2719 if (qdev->small_buf_q_size < PAGE_SIZE) 2614 if (qdev->small_buf_q_size < PAGE_SIZE)
2720 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2615 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2721 else 2616 else
2722 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2617 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2723 2618
2724 qdev->small_buf_q_alloc_virt_addr = 2619 qdev->small_buf_q_alloc_virt_addr =
2725 pci_alloc_consistent(qdev->pdev, 2620 pci_alloc_consistent(qdev->pdev,
2726 qdev->small_buf_q_alloc_size, 2621 qdev->small_buf_q_alloc_size,
2727 &qdev->small_buf_q_alloc_phy_addr); 2622 &qdev->small_buf_q_alloc_phy_addr);
2728 2623
2729 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2624 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2730 printk(KERN_ERR PFX 2625 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2731 "%s: Small Buffer Queue allocation failed.\n",
2732 qdev->ndev->name);
2733 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2626 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2734 qdev->lrg_buf_q_alloc_virt_addr, 2627 qdev->lrg_buf_q_alloc_virt_addr,
2735 qdev->lrg_buf_q_alloc_phy_addr); 2628 qdev->lrg_buf_q_alloc_phy_addr);
@@ -2738,18 +2631,17 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2738 2631
2739 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2632 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2740 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2633 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2741 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); 2634 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2742 return 0; 2635 return 0;
2743} 2636}
2744 2637
2745static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2638static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2746{ 2639{
2747 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { 2640 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2748 printk(KERN_INFO PFX 2641 netdev_info(qdev->ndev, "Already done\n");
2749 "%s: Already done.\n", qdev->ndev->name);
2750 return; 2642 return;
2751 } 2643 }
2752 if(qdev->lrg_buf) kfree(qdev->lrg_buf); 2644 kfree(qdev->lrg_buf);
2753 pci_free_consistent(qdev->pdev, 2645 pci_free_consistent(qdev->pdev,
2754 qdev->lrg_buf_q_alloc_size, 2646 qdev->lrg_buf_q_alloc_size,
2755 qdev->lrg_buf_q_alloc_virt_addr, 2647 qdev->lrg_buf_q_alloc_virt_addr,
@@ -2764,7 +2656,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2764 2656
2765 qdev->small_buf_q_virt_addr = NULL; 2657 qdev->small_buf_q_virt_addr = NULL;
2766 2658
2767 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); 2659 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2768} 2660}
2769 2661
2770static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2662static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
@@ -2774,18 +2666,16 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2774 2666
2775 /* Currently we allocate on one of memory and use it for smallbuffers */ 2667 /* Currently we allocate on one of memory and use it for smallbuffers */
2776 qdev->small_buf_total_size = 2668 qdev->small_buf_total_size =
2777 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2669 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2778 QL_SMALL_BUFFER_SIZE); 2670 QL_SMALL_BUFFER_SIZE);
2779 2671
2780 qdev->small_buf_virt_addr = 2672 qdev->small_buf_virt_addr =
2781 pci_alloc_consistent(qdev->pdev, 2673 pci_alloc_consistent(qdev->pdev,
2782 qdev->small_buf_total_size, 2674 qdev->small_buf_total_size,
2783 &qdev->small_buf_phy_addr); 2675 &qdev->small_buf_phy_addr);
2784 2676
2785 if (qdev->small_buf_virt_addr == NULL) { 2677 if (qdev->small_buf_virt_addr == NULL) {
2786 printk(KERN_ERR PFX 2678 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2787 "%s: Failed to get small buffer memory.\n",
2788 qdev->ndev->name);
2789 return -ENOMEM; 2679 return -ENOMEM;
2790 } 2680 }
2791 2681
@@ -2804,15 +2694,14 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2804 small_buf_q_entry++; 2694 small_buf_q_entry++;
2805 } 2695 }
2806 qdev->small_buf_index = 0; 2696 qdev->small_buf_index = 0;
2807 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); 2697 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2808 return 0; 2698 return 0;
2809} 2699}
2810 2700
2811static void ql_free_small_buffers(struct ql3_adapter *qdev) 2701static void ql_free_small_buffers(struct ql3_adapter *qdev)
2812{ 2702{
2813 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { 2703 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2814 printk(KERN_INFO PFX 2704 netdev_info(qdev->ndev, "Already done\n");
2815 "%s: Already done.\n", qdev->ndev->name);
2816 return; 2705 return;
2817 } 2706 }
2818 if (qdev->small_buf_virt_addr != NULL) { 2707 if (qdev->small_buf_virt_addr != NULL) {
@@ -2874,11 +2763,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2874 qdev->lrg_buffer_len); 2763 qdev->lrg_buffer_len);
2875 if (unlikely(!skb)) { 2764 if (unlikely(!skb)) {
2876 /* Better luck next round */ 2765 /* Better luck next round */
2877 printk(KERN_ERR PFX 2766 netdev_err(qdev->ndev,
2878 "%s: large buff alloc failed, " 2767 "large buff alloc failed for %d bytes at index %d\n",
2879 "for %d bytes at index %d.\n", 2768 qdev->lrg_buffer_len * 2, i);
2880 qdev->ndev->name,
2881 qdev->lrg_buffer_len * 2, i);
2882 ql_free_large_buffers(qdev); 2769 ql_free_large_buffers(qdev);
2883 return -ENOMEM; 2770 return -ENOMEM;
2884 } else { 2771 } else {
@@ -2899,9 +2786,10 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2899 PCI_DMA_FROMDEVICE); 2786 PCI_DMA_FROMDEVICE);
2900 2787
2901 err = pci_dma_mapping_error(qdev->pdev, map); 2788 err = pci_dma_mapping_error(qdev->pdev, map);
2902 if(err) { 2789 if (err) {
2903 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 2790 netdev_err(qdev->ndev,
2904 qdev->ndev->name, err); 2791 "PCI mapping failed with error: %d\n",
2792 err);
2905 ql_free_large_buffers(qdev); 2793 ql_free_large_buffers(qdev);
2906 return -ENOMEM; 2794 return -ENOMEM;
2907 } 2795 }
@@ -2926,10 +2814,8 @@ static void ql_free_send_free_list(struct ql3_adapter *qdev)
2926 2814
2927 tx_cb = &qdev->tx_buf[0]; 2815 tx_cb = &qdev->tx_buf[0];
2928 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2816 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2929 if (tx_cb->oal) { 2817 kfree(tx_cb->oal);
2930 kfree(tx_cb->oal); 2818 tx_cb->oal = NULL;
2931 tx_cb->oal = NULL;
2932 }
2933 tx_cb++; 2819 tx_cb++;
2934 } 2820 }
2935} 2821}
@@ -2938,8 +2824,7 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)
2938{ 2824{
2939 struct ql_tx_buf_cb *tx_cb; 2825 struct ql_tx_buf_cb *tx_cb;
2940 int i; 2826 int i;
2941 struct ob_mac_iocb_req *req_q_curr = 2827 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2942 qdev->req_q_virt_addr;
2943 2828
2944 /* Create free list of transmit buffers */ 2829 /* Create free list of transmit buffers */
2945 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2830 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
@@ -2960,23 +2845,22 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2960 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2845 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2961 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2846 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2962 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2847 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2963 } 2848 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2964 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2965 /* 2849 /*
2966 * Bigger buffers, so less of them. 2850 * Bigger buffers, so less of them.
2967 */ 2851 */
2968 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2852 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2969 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2853 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2970 } else { 2854 } else {
2971 printk(KERN_ERR PFX 2855 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2972 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n", 2856 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2973 qdev->ndev->name);
2974 return -ENOMEM; 2857 return -ENOMEM;
2975 } 2858 }
2976 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2859 qdev->num_large_buffers =
2860 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2977 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2861 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2978 qdev->max_frame_size = 2862 qdev->max_frame_size =
2979 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2863 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2980 2864
2981 /* 2865 /*
2982 * First allocate a page of shared memory and use it for shadow 2866 * First allocate a page of shared memory and use it for shadow
@@ -2984,51 +2868,44 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2984 * Network Completion Queue Producer Index Register 2868 * Network Completion Queue Producer Index Register
2985 */ 2869 */
2986 qdev->shadow_reg_virt_addr = 2870 qdev->shadow_reg_virt_addr =
2987 pci_alloc_consistent(qdev->pdev, 2871 pci_alloc_consistent(qdev->pdev,
2988 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2872 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2989 2873
2990 if (qdev->shadow_reg_virt_addr != NULL) { 2874 if (qdev->shadow_reg_virt_addr != NULL) {
2991 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; 2875 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2992 qdev->req_consumer_index_phy_addr_high = 2876 qdev->req_consumer_index_phy_addr_high =
2993 MS_64BITS(qdev->shadow_reg_phy_addr); 2877 MS_64BITS(qdev->shadow_reg_phy_addr);
2994 qdev->req_consumer_index_phy_addr_low = 2878 qdev->req_consumer_index_phy_addr_low =
2995 LS_64BITS(qdev->shadow_reg_phy_addr); 2879 LS_64BITS(qdev->shadow_reg_phy_addr);
2996 2880
2997 qdev->prsp_producer_index = 2881 qdev->prsp_producer_index =
2998 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2882 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2999 qdev->rsp_producer_index_phy_addr_high = 2883 qdev->rsp_producer_index_phy_addr_high =
3000 qdev->req_consumer_index_phy_addr_high; 2884 qdev->req_consumer_index_phy_addr_high;
3001 qdev->rsp_producer_index_phy_addr_low = 2885 qdev->rsp_producer_index_phy_addr_low =
3002 qdev->req_consumer_index_phy_addr_low + 8; 2886 qdev->req_consumer_index_phy_addr_low + 8;
3003 } else { 2887 } else {
3004 printk(KERN_ERR PFX 2888 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
3005 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
3006 return -ENOMEM; 2889 return -ENOMEM;
3007 } 2890 }
3008 2891
3009 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2892 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
3010 printk(KERN_ERR PFX 2893 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
3011 "%s: ql_alloc_net_req_rsp_queues failed.\n",
3012 qdev->ndev->name);
3013 goto err_req_rsp; 2894 goto err_req_rsp;
3014 } 2895 }
3015 2896
3016 if (ql_alloc_buffer_queues(qdev) != 0) { 2897 if (ql_alloc_buffer_queues(qdev) != 0) {
3017 printk(KERN_ERR PFX 2898 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
3018 "%s: ql_alloc_buffer_queues failed.\n",
3019 qdev->ndev->name);
3020 goto err_buffer_queues; 2899 goto err_buffer_queues;
3021 } 2900 }
3022 2901
3023 if (ql_alloc_small_buffers(qdev) != 0) { 2902 if (ql_alloc_small_buffers(qdev) != 0) {
3024 printk(KERN_ERR PFX 2903 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
3025 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
3026 goto err_small_buffers; 2904 goto err_small_buffers;
3027 } 2905 }
3028 2906
3029 if (ql_alloc_large_buffers(qdev) != 0) { 2907 if (ql_alloc_large_buffers(qdev) != 0) {
3030 printk(KERN_ERR PFX 2908 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
3031 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
3032 goto err_small_buffers; 2909 goto err_small_buffers;
3033 } 2910 }
3034 2911
@@ -3076,7 +2953,7 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev)
3076 struct ql3xxx_local_ram_registers __iomem *local_ram = 2953 struct ql3xxx_local_ram_registers __iomem *local_ram =
3077 (void __iomem *)qdev->mem_map_registers; 2954 (void __iomem *)qdev->mem_map_registers;
3078 2955
3079 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2956 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
3080 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2957 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3081 2) << 4)) 2958 2) << 4))
3082 return -1; 2959 return -1;
@@ -3132,18 +3009,20 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev)
3132static int ql_adapter_initialize(struct ql3_adapter *qdev) 3009static int ql_adapter_initialize(struct ql3_adapter *qdev)
3133{ 3010{
3134 u32 value; 3011 u32 value;
3135 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3012 struct ql3xxx_port_registers __iomem *port_regs =
3013 qdev->mem_map_registers;
3014 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3136 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3137 (void __iomem *)port_regs; 3016 (void __iomem *)port_regs;
3138 u32 delay = 10; 3017 u32 delay = 10;
3139 int status = 0; 3018 int status = 0;
3140 unsigned long hw_flags = 0; 3019 unsigned long hw_flags = 0;
3141 3020
3142 if(ql_mii_setup(qdev)) 3021 if (ql_mii_setup(qdev))
3143 return -1; 3022 return -1;
3144 3023
3145 /* Bring out PHY out of reset */ 3024 /* Bring out PHY out of reset */
3146 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 3025 ql_write_common_reg(qdev, spir,
3147 (ISP_SERIAL_PORT_IF_WE | 3026 (ISP_SERIAL_PORT_IF_WE |
3148 (ISP_SERIAL_PORT_IF_WE << 16))); 3027 (ISP_SERIAL_PORT_IF_WE << 16)));
3149 /* Give the PHY time to come out of reset. */ 3028 /* Give the PHY time to come out of reset. */
@@ -3152,13 +3031,13 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3152 netif_carrier_off(qdev->ndev); 3031 netif_carrier_off(qdev->ndev);
3153 3032
3154 /* V2 chip fix for ARS-39168. */ 3033 /* V2 chip fix for ARS-39168. */
3155 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 3034 ql_write_common_reg(qdev, spir,
3156 (ISP_SERIAL_PORT_IF_SDE | 3035 (ISP_SERIAL_PORT_IF_SDE |
3157 (ISP_SERIAL_PORT_IF_SDE << 16))); 3036 (ISP_SERIAL_PORT_IF_SDE << 16)));
3158 3037
3159 /* Request Queue Registers */ 3038 /* Request Queue Registers */
3160 *((u32 *) (qdev->preq_consumer_index)) = 0; 3039 *((u32 *)(qdev->preq_consumer_index)) = 0;
3161 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); 3040 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3162 qdev->req_producer_index = 0; 3041 qdev->req_producer_index = 0;
3163 3042
3164 ql_write_page1_reg(qdev, 3043 ql_write_page1_reg(qdev,
@@ -3208,7 +3087,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3208 &hmem_regs->rxLargeQBaseAddrLow, 3087 &hmem_regs->rxLargeQBaseAddrLow,
3209 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3088 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3210 3089
3211 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); 3090 ql_write_page1_reg(qdev,
3091 &hmem_regs->rxLargeQLength,
3092 qdev->num_lbufq_entries);
3212 3093
3213 ql_write_page1_reg(qdev, 3094 ql_write_page1_reg(qdev,
3214 &hmem_regs->rxLargeBufferLength, 3095 &hmem_regs->rxLargeBufferLength,
@@ -3258,7 +3139,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3258 if ((value & PORT_STATUS_IC) == 0) { 3139 if ((value & PORT_STATUS_IC) == 0) {
3259 3140
3260 /* Chip has not been configured yet, so let it rip. */ 3141 /* Chip has not been configured yet, so let it rip. */
3261 if(ql_init_misc_registers(qdev)) { 3142 if (ql_init_misc_registers(qdev)) {
3262 status = -1; 3143 status = -1;
3263 goto out; 3144 goto out;
3264 } 3145 }
@@ -3268,7 +3149,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3268 3149
3269 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3150 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3270 3151
3271 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3152 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3272 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3153 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3273 * 2) << 13)) { 3154 * 2) << 13)) {
3274 status = -1; 3155 status = -1;
@@ -3291,7 +3172,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3291 &port_regs->mac0MaxFrameLengthReg, 3172 &port_regs->mac0MaxFrameLengthReg,
3292 qdev->max_frame_size); 3173 qdev->max_frame_size);
3293 3174
3294 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3175 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3295 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3176 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3296 2) << 7)) { 3177 2) << 7)) {
3297 status = -1; 3178 status = -1;
@@ -3353,8 +3234,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3353 } while (--delay); 3234 } while (--delay);
3354 3235
3355 if (delay == 0) { 3236 if (delay == 0) {
3356 printk(KERN_ERR PFX 3237 netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3357 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
3358 status = -1; 3238 status = -1;
3359 goto out; 3239 goto out;
3360 } 3240 }
@@ -3385,7 +3265,8 @@ out:
3385 */ 3265 */
3386static int ql_adapter_reset(struct ql3_adapter *qdev) 3266static int ql_adapter_reset(struct ql3_adapter *qdev)
3387{ 3267{
3388 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3268 struct ql3xxx_port_registers __iomem *port_regs =
3269 qdev->mem_map_registers;
3389 int status = 0; 3270 int status = 0;
3390 u16 value; 3271 u16 value;
3391 int max_wait_time; 3272 int max_wait_time;
@@ -3396,17 +3277,14 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3396 /* 3277 /*
3397 * Issue soft reset to chip. 3278 * Issue soft reset to chip.
3398 */ 3279 */
3399 printk(KERN_DEBUG PFX 3280 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3400 "%s: Issue soft reset to chip.\n",
3401 qdev->ndev->name);
3402 ql_write_common_reg(qdev, 3281 ql_write_common_reg(qdev,
3403 &port_regs->CommonRegs.ispControlStatus, 3282 &port_regs->CommonRegs.ispControlStatus,
3404 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3283 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3405 3284
3406 /* Wait 3 seconds for reset to complete. */ 3285 /* Wait 3 seconds for reset to complete. */
3407 printk(KERN_DEBUG PFX 3286 netdev_printk(KERN_DEBUG, qdev->ndev,
3408 "%s: Wait 10 milliseconds for reset to complete.\n", 3287 "Wait 10 milliseconds for reset to complete\n");
3409 qdev->ndev->name);
3410 3288
3411 /* Wait until the firmware tells us the Soft Reset is done */ 3289 /* Wait until the firmware tells us the Soft Reset is done */
3412 max_wait_time = 5; 3290 max_wait_time = 5;
@@ -3427,8 +3305,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3427 value = 3305 value =
3428 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3306 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3429 if (value & ISP_CONTROL_RI) { 3307 if (value & ISP_CONTROL_RI) {
3430 printk(KERN_DEBUG PFX 3308 netdev_printk(KERN_DEBUG, qdev->ndev,
3431 "ql_adapter_reset: clearing RI after reset.\n"); 3309 "clearing RI after reset\n");
3432 ql_write_common_reg(qdev, 3310 ql_write_common_reg(qdev,
3433 &port_regs->CommonRegs. 3311 &port_regs->CommonRegs.
3434 ispControlStatus, 3312 ispControlStatus,
@@ -3448,13 +3326,11 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3448 */ 3326 */
3449 max_wait_time = 5; 3327 max_wait_time = 5;
3450 do { 3328 do {
3451 value = 3329 value = ql_read_common_reg(qdev,
3452 ql_read_common_reg(qdev, 3330 &port_regs->CommonRegs.
3453 &port_regs->CommonRegs. 3331 ispControlStatus);
3454 ispControlStatus); 3332 if ((value & ISP_CONTROL_FSR) == 0)
3455 if ((value & ISP_CONTROL_FSR) == 0) {
3456 break; 3333 break;
3457 }
3458 ssleep(1); 3334 ssleep(1);
3459 } while ((--max_wait_time)); 3335 } while ((--max_wait_time));
3460 } 3336 }
@@ -3468,7 +3344,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3468 3344
3469static void ql_set_mac_info(struct ql3_adapter *qdev) 3345static void ql_set_mac_info(struct ql3_adapter *qdev)
3470{ 3346{
3471 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3347 struct ql3xxx_port_registers __iomem *port_regs =
3348 qdev->mem_map_registers;
3472 u32 value, port_status; 3349 u32 value, port_status;
3473 u8 func_number; 3350 u8 func_number;
3474 3351
@@ -3484,9 +3361,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3484 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3361 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3485 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3362 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3486 if (port_status & PORT_STATUS_SM0) 3363 if (port_status & PORT_STATUS_SM0)
3487 set_bit(QL_LINK_OPTICAL,&qdev->flags); 3364 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3488 else 3365 else
3489 clear_bit(QL_LINK_OPTICAL,&qdev->flags); 3366 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3490 break; 3367 break;
3491 3368
3492 case ISP_CONTROL_FN1_NET: 3369 case ISP_CONTROL_FN1_NET:
@@ -3495,17 +3372,17 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3495 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3372 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3496 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3373 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3497 if (port_status & PORT_STATUS_SM1) 3374 if (port_status & PORT_STATUS_SM1)
3498 set_bit(QL_LINK_OPTICAL,&qdev->flags); 3375 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3499 else 3376 else
3500 clear_bit(QL_LINK_OPTICAL,&qdev->flags); 3377 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3501 break; 3378 break;
3502 3379
3503 case ISP_CONTROL_FN0_SCSI: 3380 case ISP_CONTROL_FN0_SCSI:
3504 case ISP_CONTROL_FN1_SCSI: 3381 case ISP_CONTROL_FN1_SCSI:
3505 default: 3382 default:
3506 printk(KERN_DEBUG PFX 3383 netdev_printk(KERN_DEBUG, qdev->ndev,
3507 "%s: Invalid function number, ispControlStatus = 0x%x\n", 3384 "Invalid function number, ispControlStatus = 0x%x\n",
3508 qdev->ndev->name,value); 3385 value);
3509 break; 3386 break;
3510 } 3387 }
3511 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3388 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
@@ -3516,32 +3393,26 @@ static void ql_display_dev_info(struct net_device *ndev)
3516 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3393 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3517 struct pci_dev *pdev = qdev->pdev; 3394 struct pci_dev *pdev = qdev->pdev;
3518 3395
3519 printk(KERN_INFO PFX 3396 netdev_info(ndev,
3520 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", 3397 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3521 DRV_NAME, qdev->index, qdev->chip_rev_id, 3398 DRV_NAME, qdev->index, qdev->chip_rev_id,
3522 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", 3399 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3523 qdev->pci_slot); 3400 qdev->pci_slot);
3524 printk(KERN_INFO PFX 3401 netdev_info(ndev, "%s Interface\n",
3525 "%s Interface.\n", 3402 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3526 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
3527 3403
3528 /* 3404 /*
3529 * Print PCI bus width/type. 3405 * Print PCI bus width/type.
3530 */ 3406 */
3531 printk(KERN_INFO PFX 3407 netdev_info(ndev, "Bus interface is %s %s\n",
3532 "Bus interface is %s %s.\n", 3408 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3533 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3409 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3534 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3535 3410
3536 printk(KERN_INFO PFX 3411 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3537 "mem IO base address adjusted = 0x%p\n", 3412 qdev->mem_map_registers);
3538 qdev->mem_map_registers); 3413 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3539 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3540 3414
3541 if (netif_msg_probe(qdev)) 3415 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3542 printk(KERN_INFO PFX
3543 "%s: MAC address %pM\n",
3544 ndev->name, ndev->dev_addr);
3545} 3416}
3546 3417
3547static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3418static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
@@ -3552,17 +3423,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3552 netif_stop_queue(ndev); 3423 netif_stop_queue(ndev);
3553 netif_carrier_off(ndev); 3424 netif_carrier_off(ndev);
3554 3425
3555 clear_bit(QL_ADAPTER_UP,&qdev->flags); 3426 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3556 clear_bit(QL_LINK_MASTER,&qdev->flags); 3427 clear_bit(QL_LINK_MASTER, &qdev->flags);
3557 3428
3558 ql_disable_interrupts(qdev); 3429 ql_disable_interrupts(qdev);
3559 3430
3560 free_irq(qdev->pdev->irq, ndev); 3431 free_irq(qdev->pdev->irq, ndev);
3561 3432
3562 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3433 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3563 printk(KERN_INFO PFX 3434 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3564 "%s: calling pci_disable_msi().\n", qdev->ndev->name); 3435 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3565 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3566 pci_disable_msi(qdev->pdev); 3436 pci_disable_msi(qdev->pdev);
3567 } 3437 }
3568 3438
@@ -3576,17 +3446,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3576 3446
3577 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3447 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3578 if (ql_wait_for_drvr_lock(qdev)) { 3448 if (ql_wait_for_drvr_lock(qdev)) {
3579 if ((soft_reset = ql_adapter_reset(qdev))) { 3449 soft_reset = ql_adapter_reset(qdev);
3580 printk(KERN_ERR PFX 3450 if (soft_reset) {
3581 "%s: ql_adapter_reset(%d) FAILED!\n", 3451 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3582 ndev->name, qdev->index); 3452 qdev->index);
3583 } 3453 }
3584 printk(KERN_ERR PFX 3454 netdev_err(ndev,
3585 "%s: Releaseing driver lock via chip reset.\n",ndev->name); 3455 "Releasing driver lock via chip reset\n");
3586 } else { 3456 } else {
3587 printk(KERN_ERR PFX 3457 netdev_err(ndev,
3588 "%s: Could not acquire driver lock to do " 3458 "Could not acquire driver lock to do reset!\n");
3589 "reset!\n", ndev->name);
3590 retval = -1; 3459 retval = -1;
3591 } 3460 }
3592 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3461 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -3603,56 +3472,50 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3603 unsigned long hw_flags; 3472 unsigned long hw_flags;
3604 3473
3605 if (ql_alloc_mem_resources(qdev)) { 3474 if (ql_alloc_mem_resources(qdev)) {
3606 printk(KERN_ERR PFX 3475 netdev_err(ndev, "Unable to allocate buffers\n");
3607 "%s Unable to allocate buffers.\n", ndev->name);
3608 return -ENOMEM; 3476 return -ENOMEM;
3609 } 3477 }
3610 3478
3611 if (qdev->msi) { 3479 if (qdev->msi) {
3612 if (pci_enable_msi(qdev->pdev)) { 3480 if (pci_enable_msi(qdev->pdev)) {
3613 printk(KERN_ERR PFX 3481 netdev_err(ndev,
3614 "%s: User requested MSI, but MSI failed to " 3482 "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
3615 "initialize. Continuing without MSI.\n",
3616 qdev->ndev->name);
3617 qdev->msi = 0; 3483 qdev->msi = 0;
3618 } else { 3484 } else {
3619 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name); 3485 netdev_info(ndev, "MSI Enabled...\n");
3620 set_bit(QL_MSI_ENABLED,&qdev->flags); 3486 set_bit(QL_MSI_ENABLED, &qdev->flags);
3621 irq_flags &= ~IRQF_SHARED; 3487 irq_flags &= ~IRQF_SHARED;
3622 } 3488 }
3623 } 3489 }
3624 3490
3625 if ((err = request_irq(qdev->pdev->irq, 3491 err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3626 ql3xxx_isr, 3492 irq_flags, ndev->name, ndev);
3627 irq_flags, ndev->name, ndev))) { 3493 if (err) {
3628 printk(KERN_ERR PFX 3494 netdev_err(ndev,
3629 "%s: Failed to reserve interrupt %d already in use.\n", 3495 "Failed to reserve interrupt %d - already in use\n",
3630 ndev->name, qdev->pdev->irq); 3496 qdev->pdev->irq);
3631 goto err_irq; 3497 goto err_irq;
3632 } 3498 }
3633 3499
3634 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3500 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3635 3501
3636 if ((err = ql_wait_for_drvr_lock(qdev))) { 3502 err = ql_wait_for_drvr_lock(qdev);
3637 if ((err = ql_adapter_initialize(qdev))) { 3503 if (err) {
3638 printk(KERN_ERR PFX 3504 err = ql_adapter_initialize(qdev);
3639 "%s: Unable to initialize adapter.\n", 3505 if (err) {
3640 ndev->name); 3506 netdev_err(ndev, "Unable to initialize adapter\n");
3641 goto err_init; 3507 goto err_init;
3642 } 3508 }
3643 printk(KERN_ERR PFX 3509 netdev_err(ndev, "Releasing driver lock\n");
3644 "%s: Releaseing driver lock.\n",ndev->name);
3645 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3510 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3646 } else { 3511 } else {
3647 printk(KERN_ERR PFX 3512 netdev_err(ndev, "Could not acquire driver lock\n");
3648 "%s: Could not acquire driver lock.\n",
3649 ndev->name);
3650 goto err_lock; 3513 goto err_lock;
3651 } 3514 }
3652 3515
3653 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3516 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3654 3517
3655 set_bit(QL_ADAPTER_UP,&qdev->flags); 3518 set_bit(QL_ADAPTER_UP, &qdev->flags);
3656 3519
3657 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3520 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3658 3521
@@ -3666,11 +3529,9 @@ err_lock:
3666 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3529 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3667 free_irq(qdev->pdev->irq, ndev); 3530 free_irq(qdev->pdev->irq, ndev);
3668err_irq: 3531err_irq:
3669 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3532 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3670 printk(KERN_INFO PFX 3533 netdev_info(ndev, "calling pci_disable_msi()\n");
3671 "%s: calling pci_disable_msi().\n", 3534 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3672 qdev->ndev->name);
3673 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3674 pci_disable_msi(qdev->pdev); 3535 pci_disable_msi(qdev->pdev);
3675 } 3536 }
3676 return err; 3537 return err;
@@ -3678,10 +3539,9 @@ err_irq:
3678 3539
3679static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3540static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3680{ 3541{
3681 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) { 3542 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3682 printk(KERN_ERR PFX 3543 netdev_err(qdev->ndev,
3683 "%s: Driver up/down cycle failed, " 3544 "Driver up/down cycle failed, closing device\n");
3684 "closing device\n",qdev->ndev->name);
3685 rtnl_lock(); 3545 rtnl_lock();
3686 dev_close(qdev->ndev); 3546 dev_close(qdev->ndev);
3687 rtnl_unlock(); 3547 rtnl_unlock();
@@ -3698,24 +3558,24 @@ static int ql3xxx_close(struct net_device *ndev)
3698 * Wait for device to recover from a reset. 3558 * Wait for device to recover from a reset.
3699 * (Rarely happens, but possible.) 3559 * (Rarely happens, but possible.)
3700 */ 3560 */
3701 while (!test_bit(QL_ADAPTER_UP,&qdev->flags)) 3561 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3702 msleep(50); 3562 msleep(50);
3703 3563
3704 ql_adapter_down(qdev,QL_DO_RESET); 3564 ql_adapter_down(qdev, QL_DO_RESET);
3705 return 0; 3565 return 0;
3706} 3566}
3707 3567
3708static int ql3xxx_open(struct net_device *ndev) 3568static int ql3xxx_open(struct net_device *ndev)
3709{ 3569{
3710 struct ql3_adapter *qdev = netdev_priv(ndev); 3570 struct ql3_adapter *qdev = netdev_priv(ndev);
3711 return (ql_adapter_up(qdev)); 3571 return ql_adapter_up(qdev);
3712} 3572}
3713 3573
3714static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3715{ 3575{
3716 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3576 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3717 struct ql3xxx_port_registers __iomem *port_regs = 3577 struct ql3xxx_port_registers __iomem *port_regs =
3718 qdev->mem_map_registers; 3578 qdev->mem_map_registers;
3719 struct sockaddr *addr = p; 3579 struct sockaddr *addr = p;
3720 unsigned long hw_flags; 3580 unsigned long hw_flags;
3721 3581
@@ -3750,7 +3610,7 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
3750{ 3610{
3751 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3611 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3752 3612
3753 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name); 3613 netdev_err(ndev, "Resetting...\n");
3754 /* 3614 /*
3755 * Stop the queues, we've got a problem. 3615 * Stop the queues, we've got a problem.
3756 */ 3616 */
@@ -3770,11 +3630,12 @@ static void ql_reset_work(struct work_struct *work)
3770 u32 value; 3630 u32 value;
3771 struct ql_tx_buf_cb *tx_cb; 3631 struct ql_tx_buf_cb *tx_cb;
3772 int max_wait_time, i; 3632 int max_wait_time, i;
3773 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3633 struct ql3xxx_port_registers __iomem *port_regs =
3634 qdev->mem_map_registers;
3774 unsigned long hw_flags; 3635 unsigned long hw_flags;
3775 3636
3776 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) { 3637 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
3777 clear_bit(QL_LINK_MASTER,&qdev->flags); 3638 clear_bit(QL_LINK_MASTER, &qdev->flags);
3778 3639
3779 /* 3640 /*
3780 * Loop through the active list and return the skb. 3641 * Loop through the active list and return the skb.
@@ -3783,17 +3644,19 @@ static void ql_reset_work(struct work_struct *work)
3783 int j; 3644 int j;
3784 tx_cb = &qdev->tx_buf[i]; 3645 tx_cb = &qdev->tx_buf[i];
3785 if (tx_cb->skb) { 3646 if (tx_cb->skb) {
3786 printk(KERN_DEBUG PFX 3647 netdev_printk(KERN_DEBUG, ndev,
3787 "%s: Freeing lost SKB.\n", 3648 "Freeing lost SKB\n");
3788 qdev->ndev->name);
3789 pci_unmap_single(qdev->pdev, 3649 pci_unmap_single(qdev->pdev,
3790 dma_unmap_addr(&tx_cb->map[0], mapaddr), 3650 dma_unmap_addr(&tx_cb->map[0],
3651 mapaddr),
3791 dma_unmap_len(&tx_cb->map[0], maplen), 3652 dma_unmap_len(&tx_cb->map[0], maplen),
3792 PCI_DMA_TODEVICE); 3653 PCI_DMA_TODEVICE);
3793 for(j=1;j<tx_cb->seg_count;j++) { 3654 for (j = 1; j < tx_cb->seg_count; j++) {
3794 pci_unmap_page(qdev->pdev, 3655 pci_unmap_page(qdev->pdev,
3795 dma_unmap_addr(&tx_cb->map[j],mapaddr), 3656 dma_unmap_addr(&tx_cb->map[j],
3796 dma_unmap_len(&tx_cb->map[j],maplen), 3657 mapaddr),
3658 dma_unmap_len(&tx_cb->map[j],
3659 maplen),
3797 PCI_DMA_TODEVICE); 3660 PCI_DMA_TODEVICE);
3798 } 3661 }
3799 dev_kfree_skb(tx_cb->skb); 3662 dev_kfree_skb(tx_cb->skb);
@@ -3801,8 +3664,7 @@ static void ql_reset_work(struct work_struct *work)
3801 } 3664 }
3802 } 3665 }
3803 3666
3804 printk(KERN_ERR PFX 3667 netdev_err(ndev, "Clearing NRI after reset\n");
3805 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3806 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3668 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3807 ql_write_common_reg(qdev, 3669 ql_write_common_reg(qdev,
3808 &port_regs->CommonRegs. 3670 &port_regs->CommonRegs.
@@ -3818,16 +3680,14 @@ static void ql_reset_work(struct work_struct *work)
3818 3680
3819 ispControlStatus); 3681 ispControlStatus);
3820 if ((value & ISP_CONTROL_SR) == 0) { 3682 if ((value & ISP_CONTROL_SR) == 0) {
3821 printk(KERN_DEBUG PFX 3683 netdev_printk(KERN_DEBUG, ndev,
3822 "%s: reset completed.\n", 3684 "reset completed\n");
3823 qdev->ndev->name);
3824 break; 3685 break;
3825 } 3686 }
3826 3687
3827 if (value & ISP_CONTROL_RI) { 3688 if (value & ISP_CONTROL_RI) {
3828 printk(KERN_DEBUG PFX 3689 netdev_printk(KERN_DEBUG, ndev,
3829 "%s: clearing NRI after reset.\n", 3690 "clearing NRI after reset\n");
3830 qdev->ndev->name);
3831 ql_write_common_reg(qdev, 3691 ql_write_common_reg(qdev,
3832 &port_regs-> 3692 &port_regs->
3833 CommonRegs. 3693 CommonRegs.
@@ -3848,21 +3708,19 @@ static void ql_reset_work(struct work_struct *work)
3848 * Set the reset flags and clear the board again. 3708 * Set the reset flags and clear the board again.
3849 * Nothing else to do... 3709 * Nothing else to do...
3850 */ 3710 */
3851 printk(KERN_ERR PFX 3711 netdev_err(ndev,
3852 "%s: Timed out waiting for reset to " 3712 "Timed out waiting for reset to complete\n");
3853 "complete.\n", ndev->name); 3713 netdev_err(ndev, "Do a reset\n");
3854 printk(KERN_ERR PFX 3714 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3855 "%s: Do a reset.\n", ndev->name); 3715 clear_bit(QL_RESET_START, &qdev->flags);
3856 clear_bit(QL_RESET_PER_SCSI,&qdev->flags); 3716 ql_cycle_adapter(qdev, QL_DO_RESET);
3857 clear_bit(QL_RESET_START,&qdev->flags);
3858 ql_cycle_adapter(qdev,QL_DO_RESET);
3859 return; 3717 return;
3860 } 3718 }
3861 3719
3862 clear_bit(QL_RESET_ACTIVE,&qdev->flags); 3720 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3863 clear_bit(QL_RESET_PER_SCSI,&qdev->flags); 3721 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3864 clear_bit(QL_RESET_START,&qdev->flags); 3722 clear_bit(QL_RESET_START, &qdev->flags);
3865 ql_cycle_adapter(qdev,QL_NO_RESET); 3723 ql_cycle_adapter(qdev, QL_NO_RESET);
3866 } 3724 }
3867} 3725}
3868 3726
@@ -3876,7 +3734,8 @@ static void ql_tx_timeout_work(struct work_struct *work)
3876 3734
3877static void ql_get_board_info(struct ql3_adapter *qdev) 3735static void ql_get_board_info(struct ql3_adapter *qdev)
3878{ 3736{
3879 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3737 struct ql3xxx_port_registers __iomem *port_regs =
3738 qdev->mem_map_registers;
3880 u32 value; 3739 u32 value;
3881 3740
3882 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3741 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
@@ -3915,20 +3774,18 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3915{ 3774{
3916 struct net_device *ndev = NULL; 3775 struct net_device *ndev = NULL;
3917 struct ql3_adapter *qdev = NULL; 3776 struct ql3_adapter *qdev = NULL;
3918 static int cards_found = 0; 3777 static int cards_found;
3919 int uninitialized_var(pci_using_dac), err; 3778 int uninitialized_var(pci_using_dac), err;
3920 3779
3921 err = pci_enable_device(pdev); 3780 err = pci_enable_device(pdev);
3922 if (err) { 3781 if (err) {
3923 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3782 pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3924 pci_name(pdev));
3925 goto err_out; 3783 goto err_out;
3926 } 3784 }
3927 3785
3928 err = pci_request_regions(pdev, DRV_NAME); 3786 err = pci_request_regions(pdev, DRV_NAME);
3929 if (err) { 3787 if (err) {
3930 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3788 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3931 pci_name(pdev));
3932 goto err_out_disable_pdev; 3789 goto err_out_disable_pdev;
3933 } 3790 }
3934 3791
@@ -3943,15 +3800,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3943 } 3800 }
3944 3801
3945 if (err) { 3802 if (err) {
3946 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3803 pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3947 pci_name(pdev));
3948 goto err_out_free_regions; 3804 goto err_out_free_regions;
3949 } 3805 }
3950 3806
3951 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3807 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3952 if (!ndev) { 3808 if (!ndev) {
3953 printk(KERN_ERR PFX "%s could not alloc etherdev\n", 3809 pr_err("%s could not alloc etherdev\n", pci_name(pdev));
3954 pci_name(pdev));
3955 err = -ENOMEM; 3810 err = -ENOMEM;
3956 goto err_out_free_regions; 3811 goto err_out_free_regions;
3957 } 3812 }
@@ -3978,8 +3833,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3978 3833
3979 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3834 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3980 if (!qdev->mem_map_registers) { 3835 if (!qdev->mem_map_registers) {
3981 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3836 pr_err("%s: cannot map device registers\n", pci_name(pdev));
3982 pci_name(pdev));
3983 err = -EIO; 3837 err = -EIO;
3984 goto err_out_free_ndev; 3838 goto err_out_free_ndev;
3985 } 3839 }
@@ -3998,9 +3852,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3998 3852
3999 /* make sure the EEPROM is good */ 3853 /* make sure the EEPROM is good */
4000 if (ql_get_nvram_params(qdev)) { 3854 if (ql_get_nvram_params(qdev)) {
4001 printk(KERN_ALERT PFX 3855 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
4002 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", 3856 __func__, qdev->index);
4003 qdev->index);
4004 err = -EIO; 3857 err = -EIO;
4005 goto err_out_iounmap; 3858 goto err_out_iounmap;
4006 } 3859 }
@@ -4026,14 +3879,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4026 * Set the Maximum Memory Read Byte Count value. We do this to handle 3879 * Set the Maximum Memory Read Byte Count value. We do this to handle
4027 * jumbo frames. 3880 * jumbo frames.
4028 */ 3881 */
4029 if (qdev->pci_x) { 3882 if (qdev->pci_x)
4030 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3883 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
4031 }
4032 3884
4033 err = register_netdev(ndev); 3885 err = register_netdev(ndev);
4034 if (err) { 3886 if (err) {
4035 printk(KERN_ERR PFX "%s: cannot register net device\n", 3887 pr_err("%s: cannot register net device\n", pci_name(pdev));
4036 pci_name(pdev));
4037 goto err_out_iounmap; 3888 goto err_out_iounmap;
4038 } 3889 }
4039 3890
@@ -4052,10 +3903,10 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4052 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3903 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
4053 qdev->adapter_timer.data = (unsigned long)qdev; 3904 qdev->adapter_timer.data = (unsigned long)qdev;
4054 3905
4055 if(!cards_found) { 3906 if (!cards_found) {
4056 printk(KERN_ALERT PFX "%s\n", DRV_STRING); 3907 pr_alert("%s\n", DRV_STRING);
4057 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", 3908 pr_alert("Driver name: %s, Version: %s\n",
4058 DRV_NAME, DRV_VERSION); 3909 DRV_NAME, DRV_VERSION);
4059 } 3910 }
4060 ql_display_dev_info(ndev); 3911 ql_display_dev_info(ndev);
4061 3912
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 896d40df9a13..970389331bbc 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 2 54#define _QLCNIC_LINUX_SUBVERSION 7
55#define QLCNIC_LINUX_VERSIONID "5.0.2" 55#define QLCNIC_LINUX_VERSIONID "5.0.7"
56#define QLCNIC_DRV_IDC_VER 0x01 56#define QLCNIC_DRV_IDC_VER 0x01
57 57
58#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 58#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
@@ -68,6 +68,7 @@
68#define QLCNIC_DECODE_VERSION(v) \ 68#define QLCNIC_DECODE_VERSION(v) \
69 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) 69 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
70 70
71#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
71#define QLCNIC_NUM_FLASH_SECTORS (64) 72#define QLCNIC_NUM_FLASH_SECTORS (64)
72#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) 73#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
73#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ 74#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
@@ -112,8 +113,10 @@
112#define TX_UDPV6_PKT 0x0c 113#define TX_UDPV6_PKT 0x0c
113 114
114/* Tx defines */ 115/* Tx defines */
115#define MAX_BUFFERS_PER_CMD 32 116#define MAX_TSO_HEADER_DESC 2
116#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4) 117#define MGMT_CMD_DESC_RESV 4
118#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
119 + MGMT_CMD_DESC_RESV)
117#define QLCNIC_MAX_TX_TIMEOUTS 2 120#define QLCNIC_MAX_TX_TIMEOUTS 2
118 121
119/* 122/*
@@ -197,8 +200,7 @@ struct cmd_desc_type0 {
197 200
198 __le64 addr_buffer4; 201 __le64 addr_buffer4;
199 202
200 __le32 reserved2; 203 u8 eth_addr[ETH_ALEN];
201 __le16 reserved;
202 __le16 vlan_TCI; 204 __le16 vlan_TCI;
203 205
204} __attribute__ ((aligned(64))); 206} __attribute__ ((aligned(64)));
@@ -315,6 +317,8 @@ struct uni_data_desc{
315#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032 317#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
316#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080 318#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
317 319
320#define QLCNIC_MSIX_TABLE_OFFSET 0x44
321
318/* Flash memory map */ 322/* Flash memory map */
319#define QLCNIC_BRDCFG_START 0x4000 /* board config */ 323#define QLCNIC_BRDCFG_START 0x4000 /* board config */
320#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ 324#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
@@ -367,7 +371,7 @@ struct qlcnic_recv_crb {
367 */ 371 */
368struct qlcnic_cmd_buffer { 372struct qlcnic_cmd_buffer {
369 struct sk_buff *skb; 373 struct sk_buff *skb;
370 struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; 374 struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
371 u32 frag_count; 375 u32 frag_count;
372}; 376};
373 377
@@ -377,7 +381,6 @@ struct qlcnic_rx_buffer {
377 struct sk_buff *skb; 381 struct sk_buff *skb;
378 u64 dma; 382 u64 dma;
379 u16 ref_handle; 383 u16 ref_handle;
380 u16 state;
381}; 384};
382 385
383/* Board types */ 386/* Board types */
@@ -419,7 +422,6 @@ struct qlcnic_adapter_stats {
419 u64 xmit_on; 422 u64 xmit_on;
420 u64 xmit_off; 423 u64 xmit_off;
421 u64 skb_alloc_failure; 424 u64 skb_alloc_failure;
422 u64 null_skb;
423 u64 null_rxbuf; 425 u64 null_rxbuf;
424 u64 rx_dma_map_error; 426 u64 rx_dma_map_error;
425 u64 tx_dma_map_error; 427 u64 tx_dma_map_error;
@@ -542,7 +544,17 @@ struct qlcnic_recv_context {
542#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c 544#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
543#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d 545#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
544#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e 546#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
545#define QLCNIC_CDRP_CMD_MAX 0x0000001f 547#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
548
549#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
550#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
551#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
552#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023
553#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
554#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
555#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
556#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
557#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
546 558
547#define QLCNIC_RCODE_SUCCESS 0 559#define QLCNIC_RCODE_SUCCESS 0
548#define QLCNIC_RCODE_TIMEOUT 17 560#define QLCNIC_RCODE_TIMEOUT 17
@@ -556,12 +568,12 @@ struct qlcnic_recv_context {
556#define QLCNIC_CAP0_LSO (1 << 6) 568#define QLCNIC_CAP0_LSO (1 << 6)
557#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) 569#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
558#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) 570#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
571#define QLCNIC_CAP0_VALIDOFF (1 << 11)
559 572
560/* 573/*
561 * Context state 574 * Context state
562 */ 575 */
563#define QLCHAL_VERSION 1 576#define QLCNIC_HOST_CTX_STATE_FREED 0
564
565#define QLCNIC_HOST_CTX_STATE_ACTIVE 2 577#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
566 578
567/* 579/*
@@ -592,9 +604,10 @@ struct qlcnic_hostrq_rx_ctx {
592 __le32 sds_ring_offset; /* Offset to SDS config */ 604 __le32 sds_ring_offset; /* Offset to SDS config */
593 __le16 num_rds_rings; /* Count of RDS rings */ 605 __le16 num_rds_rings; /* Count of RDS rings */
594 __le16 num_sds_rings; /* Count of SDS rings */ 606 __le16 num_sds_rings; /* Count of SDS rings */
595 __le16 rsvd1; /* Padding */ 607 __le16 valid_field_offset;
596 __le16 rsvd2; /* Padding */ 608 u8 txrx_sds_binding;
597 u8 reserved[128]; /* reserve space for future expansion*/ 609 u8 msix_handler;
610 u8 reserved[128]; /* reserve space for future expansion*/
598 /* MUST BE 64-bit aligned. 611 /* MUST BE 64-bit aligned.
599 The following is packed: 612 The following is packed:
600 - N hostrq_rds_rings 613 - N hostrq_rds_rings
@@ -808,9 +821,10 @@ struct qlcnic_nic_intr_coalesce {
808#define QLCNIC_LRO_REQUEST_CLEANUP 4 821#define QLCNIC_LRO_REQUEST_CLEANUP 4
809 822
810/* Capabilites received */ 823/* Capabilites received */
811#define QLCNIC_FW_CAPABILITY_BDG (1 << 8) 824#define QLCNIC_FW_CAPABILITY_TSO BIT_1
812#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9) 825#define QLCNIC_FW_CAPABILITY_BDG BIT_8
813#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10) 826#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
827#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
814 828
815/* module types */ 829/* module types */
816#define LINKEVENT_MODULE_NOT_PRESENT 1 830#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -881,12 +895,14 @@ struct qlcnic_mac_req {
881#define QLCNIC_LRO_ENABLED 0x08 895#define QLCNIC_LRO_ENABLED 0x08
882#define QLCNIC_BRIDGE_ENABLED 0X10 896#define QLCNIC_BRIDGE_ENABLED 0X10
883#define QLCNIC_DIAG_ENABLED 0x20 897#define QLCNIC_DIAG_ENABLED 0x20
898#define QLCNIC_ESWITCH_ENABLED 0x40
884#define QLCNIC_IS_MSI_FAMILY(adapter) \ 899#define QLCNIC_IS_MSI_FAMILY(adapter) \
885 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 900 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
886 901
887#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS 902#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
888#define QLCNIC_MSIX_TBL_SPACE 8192 903#define QLCNIC_MSIX_TBL_SPACE 8192
889#define QLCNIC_PCI_REG_MSIX_TBL 0x44 904#define QLCNIC_PCI_REG_MSIX_TBL 0x44
905#define QLCNIC_MSIX_TBL_PGSIZE 4096
890 906
891#define QLCNIC_NETDEV_WEIGHT 128 907#define QLCNIC_NETDEV_WEIGHT 128
892#define QLCNIC_ADAPTER_UP_MAGIC 777 908#define QLCNIC_ADAPTER_UP_MAGIC 777
@@ -895,6 +911,7 @@ struct qlcnic_mac_req {
895#define __QLCNIC_DEV_UP 1 911#define __QLCNIC_DEV_UP 1
896#define __QLCNIC_RESETTING 2 912#define __QLCNIC_RESETTING 2
897#define __QLCNIC_START_FW 4 913#define __QLCNIC_START_FW 4
914#define __QLCNIC_AER 5
898 915
899#define QLCNIC_INTERRUPT_TEST 1 916#define QLCNIC_INTERRUPT_TEST 1
900#define QLCNIC_LOOPBACK_TEST 2 917#define QLCNIC_LOOPBACK_TEST 2
@@ -919,11 +936,11 @@ struct qlcnic_adapter {
919 u8 rx_csum; 936 u8 rx_csum;
920 u8 portnum; 937 u8 portnum;
921 u8 physical_port; 938 u8 physical_port;
939 u8 reset_context;
922 940
923 u8 mc_enabled; 941 u8 mc_enabled;
924 u8 max_mc_count; 942 u8 max_mc_count;
925 u8 rss_supported; 943 u8 rss_supported;
926 u8 rsrvd1;
927 u8 fw_wait_cnt; 944 u8 fw_wait_cnt;
928 u8 fw_fail_cnt; 945 u8 fw_fail_cnt;
929 u8 tx_timeo_cnt; 946 u8 tx_timeo_cnt;
@@ -932,7 +949,6 @@ struct qlcnic_adapter {
932 u8 has_link_events; 949 u8 has_link_events;
933 u8 fw_type; 950 u8 fw_type;
934 u16 tx_context_id; 951 u16 tx_context_id;
935 u16 mtu;
936 u16 is_up; 952 u16 is_up;
937 953
938 u16 link_speed; 954 u16 link_speed;
@@ -940,6 +956,13 @@ struct qlcnic_adapter {
940 u16 link_autoneg; 956 u16 link_autoneg;
941 u16 module_type; 957 u16 module_type;
942 958
959 u16 op_mode;
960 u16 switch_mode;
961 u16 max_tx_ques;
962 u16 max_rx_ques;
963 u16 max_mtu;
964
965 u32 fw_hal_version;
943 u32 capabilities; 966 u32 capabilities;
944 u32 flags; 967 u32 flags;
945 u32 irq; 968 u32 irq;
@@ -948,18 +971,22 @@ struct qlcnic_adapter {
948 u32 int_vec_bit; 971 u32 int_vec_bit;
949 u32 heartbit; 972 u32 heartbit;
950 973
974 u8 max_mac_filters;
951 u8 dev_state; 975 u8 dev_state;
952 u8 diag_test; 976 u8 diag_test;
953 u8 diag_cnt; 977 u8 diag_cnt;
954 u8 reset_ack_timeo; 978 u8 reset_ack_timeo;
955 u8 dev_init_timeo; 979 u8 dev_init_timeo;
956 u8 rsrd1;
957 u16 msg_enable; 980 u16 msg_enable;
958 981
959 u8 mac_addr[ETH_ALEN]; 982 u8 mac_addr[ETH_ALEN];
960 983
961 u64 dev_rst_time; 984 u64 dev_rst_time;
962 985
986 struct qlcnic_npar_info *npars;
987 struct qlcnic_eswitch *eswitch;
988 struct qlcnic_nic_template *nic_ops;
989
963 struct qlcnic_adapter_stats stats; 990 struct qlcnic_adapter_stats stats;
964 991
965 struct qlcnic_recv_context recv_ctx; 992 struct qlcnic_recv_context recv_ctx;
@@ -974,8 +1001,6 @@ struct qlcnic_adapter {
974 1001
975 struct delayed_work fw_work; 1002 struct delayed_work fw_work;
976 1003
977 struct work_struct tx_timeout_task;
978
979 struct qlcnic_nic_intr_coalesce coal; 1004 struct qlcnic_nic_intr_coalesce coal;
980 1005
981 unsigned long state; 1006 unsigned long state;
@@ -984,6 +1009,123 @@ struct qlcnic_adapter {
984 const struct firmware *fw; 1009 const struct firmware *fw;
985}; 1010};
986 1011
1012struct qlcnic_info {
1013 __le16 pci_func;
1014 __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
1015 __le16 phys_port;
1016 __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
1017
1018 __le32 capabilities;
1019 u8 max_mac_filters;
1020 u8 reserved1;
1021 __le16 max_mtu;
1022
1023 __le16 max_tx_ques;
1024 __le16 max_rx_ques;
1025 __le16 min_tx_bw;
1026 __le16 max_tx_bw;
1027 u8 reserved2[104];
1028};
1029
1030struct qlcnic_pci_info {
1031 __le16 id; /* pci function id */
1032 __le16 active; /* 1 = Enabled */
1033 __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
1034 __le16 default_port; /* default port number */
1035
1036 __le16 tx_min_bw; /* Multiple of 100mbpc */
1037 __le16 tx_max_bw;
1038 __le16 reserved1[2];
1039
1040 u8 mac[ETH_ALEN];
1041 u8 reserved2[106];
1042};
1043
1044struct qlcnic_npar_info {
1045 u16 vlan_id;
1046 u16 min_bw;
1047 u16 max_bw;
1048 u8 phy_port;
1049 u8 type;
1050 u8 active;
1051 u8 enable_pm;
1052 u8 dest_npar;
1053 u8 host_vlan_tag;
1054 u8 promisc_mode;
1055 u8 discard_tagged;
1056 u8 mac_learning;
1057};
1058struct qlcnic_eswitch {
1059 u8 port;
1060 u8 active_vports;
1061 u8 active_vlans;
1062 u8 active_ucast_filters;
1063 u8 max_ucast_filters;
1064 u8 max_active_vlans;
1065
1066 u32 flags;
1067#define QLCNIC_SWITCH_ENABLE BIT_1
1068#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
1069#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
1070#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
1071};
1072
1073
1074/* Return codes for Error handling */
1075#define QL_STATUS_INVALID_PARAM -1
1076
1077#define MAX_BW 100
1078#define MIN_BW 1
1079#define MAX_VLAN_ID 4095
1080#define MIN_VLAN_ID 2
1081#define MAX_TX_QUEUES 1
1082#define MAX_RX_QUEUES 4
1083#define DEFAULT_MAC_LEARN 1
1084
1085#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
1086#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
1087#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1088#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1089#define IS_VALID_MODE(mode) (mode == 0 || mode == 1)
1090
1091struct qlcnic_pci_func_cfg {
1092 u16 func_type;
1093 u16 min_bw;
1094 u16 max_bw;
1095 u16 port_num;
1096 u8 pci_func;
1097 u8 func_state;
1098 u8 def_mac_addr[6];
1099};
1100
1101struct qlcnic_npar_func_cfg {
1102 u32 fw_capab;
1103 u16 port_num;
1104 u16 min_bw;
1105 u16 max_bw;
1106 u16 max_tx_queues;
1107 u16 max_rx_queues;
1108 u8 pci_func;
1109 u8 op_mode;
1110};
1111
1112struct qlcnic_pm_func_cfg {
1113 u8 pci_func;
1114 u8 action;
1115 u8 dest_npar;
1116 u8 reserved[5];
1117};
1118
1119struct qlcnic_esw_func_cfg {
1120 u16 vlan_id;
1121 u8 pci_func;
1122 u8 host_vlan_tag;
1123 u8 promisc_mode;
1124 u8 discard_tagged;
1125 u8 mac_learning;
1126 u8 reserved;
1127};
1128
987int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); 1129int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
988int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val); 1130int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
989 1131
@@ -1031,13 +1173,13 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1031int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); 1173int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1032 1174
1033/* Functions from qlcnic_init.c */ 1175/* Functions from qlcnic_init.c */
1034int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
1035int qlcnic_load_firmware(struct qlcnic_adapter *adapter); 1176int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
1036int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); 1177int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1037void qlcnic_request_firmware(struct qlcnic_adapter *adapter); 1178void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1038void qlcnic_release_firmware(struct qlcnic_adapter *adapter); 1179void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1039int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); 1180int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1040int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); 1181int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
1182int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
1041 1183
1042int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp); 1184int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1043int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, 1185int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1050,6 +1192,10 @@ void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
1050int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); 1192int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1051void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); 1193void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
1052 1194
1195int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
1196void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
1197
1198void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
1053void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); 1199void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1054void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); 1200void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1055 1201
@@ -1070,13 +1216,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1070int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); 1216int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1071int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); 1217int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1072int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); 1218int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1073int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable); 1219int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1074int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1220int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1075void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1221void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1076 struct qlcnic_host_tx_ring *tx_ring); 1222 struct qlcnic_host_tx_ring *tx_ring);
1077int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac); 1223int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
1078void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter); 1224void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1079int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter); 1225int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1226void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1080 1227
1081/* Functions from qlcnic_main.c */ 1228/* Functions from qlcnic_main.c */
1082int qlcnic_reset_context(struct qlcnic_adapter *); 1229int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1088,6 +1235,25 @@ int qlcnic_check_loopback_buff(unsigned char *data);
1088netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1235netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1089void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); 1236void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1090 1237
1238/* Management functions */
1239int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*);
1240int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
1241int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
1242int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
1243int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
1244int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
1245
1246/* eSwitch management functions */
1247int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
1248 struct qlcnic_eswitch *);
1249int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
1250 struct qlcnic_eswitch *);
1251int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
1252int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8,
1253 u8, u8, u16);
1254int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
1255extern int qlcnic_config_tso;
1256
1091/* 1257/*
1092 * QLOGIC Board information 1258 * QLOGIC Board information
1093 */ 1259 */
@@ -1131,6 +1297,13 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1131 1297
1132extern const struct ethtool_ops qlcnic_ethtool_ops; 1298extern const struct ethtool_ops qlcnic_ethtool_ops;
1133 1299
1300struct qlcnic_nic_template {
1301 int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
1302 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1303 int (*config_led) (struct qlcnic_adapter *, u32, u32);
1304 int (*start_firmware) (struct qlcnic_adapter *);
1305};
1306
1134#define QLCDB(adapter, lvl, _fmt, _args...) do { \ 1307#define QLCDB(adapter, lvl, _fmt, _args...) do { \
1135 if (NETIF_MSG_##lvl & adapter->msg_enable) \ 1308 if (NETIF_MSG_##lvl & adapter->msg_enable) \
1136 printk(KERN_INFO "%s: %s: " _fmt, \ 1309 printk(KERN_INFO "%s: %s: " _fmt, \
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index c2c1f5cc16c6..cc5d861d9a12 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -88,12 +88,12 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
88 88
89 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { 89 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
90 if (qlcnic_issue_cmd(adapter, 90 if (qlcnic_issue_cmd(adapter,
91 adapter->ahw.pci_func, 91 adapter->ahw.pci_func,
92 QLCHAL_VERSION, 92 adapter->fw_hal_version,
93 recv_ctx->context_id, 93 recv_ctx->context_id,
94 mtu, 94 mtu,
95 0, 95 0,
96 QLCNIC_CDRP_CMD_SET_MTU)) { 96 QLCNIC_CDRP_CMD_SET_MTU)) {
97 97
98 dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); 98 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
99 return -EIO; 99 return -EIO;
@@ -121,7 +121,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
121 121
122 int i, nrds_rings, nsds_rings; 122 int i, nrds_rings, nsds_rings;
123 size_t rq_size, rsp_size; 123 size_t rq_size, rsp_size;
124 u32 cap, reg, val; 124 u32 cap, reg, val, reg2;
125 int err; 125 int err;
126 126
127 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 127 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -152,9 +152,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
152 152
153 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); 153 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
154 154
155 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN); 155 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
156 | QLCNIC_CAP0_VALIDOFF);
156 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); 157 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
157 158
159 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
160 msix_handler);
161 prq->txrx_sds_binding = nsds_rings - 1;
162
158 prq->capabilities[0] = cpu_to_le32(cap); 163 prq->capabilities[0] = cpu_to_le32(cap);
159 prq->host_int_crb_mode = 164 prq->host_int_crb_mode =
160 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); 165 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
@@ -175,6 +180,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
175 for (i = 0; i < nrds_rings; i++) { 180 for (i = 0; i < nrds_rings; i++) {
176 181
177 rds_ring = &recv_ctx->rds_rings[i]; 182 rds_ring = &recv_ctx->rds_rings[i];
183 rds_ring->producer = 0;
178 184
179 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); 185 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
180 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); 186 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
@@ -188,6 +194,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
188 for (i = 0; i < nsds_rings; i++) { 194 for (i = 0; i < nsds_rings; i++) {
189 195
190 sds_ring = &recv_ctx->sds_rings[i]; 196 sds_ring = &recv_ctx->sds_rings[i];
197 sds_ring->consumer = 0;
198 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
191 199
192 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); 200 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
193 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); 201 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
@@ -197,7 +205,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
197 phys_addr = hostrq_phys_addr; 205 phys_addr = hostrq_phys_addr;
198 err = qlcnic_issue_cmd(adapter, 206 err = qlcnic_issue_cmd(adapter,
199 adapter->ahw.pci_func, 207 adapter->ahw.pci_func,
200 QLCHAL_VERSION, 208 adapter->fw_hal_version,
201 (u32)(phys_addr >> 32), 209 (u32)(phys_addr >> 32),
202 (u32)(phys_addr & 0xffffffff), 210 (u32)(phys_addr & 0xffffffff),
203 rq_size, 211 rq_size,
@@ -216,8 +224,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
216 rds_ring = &recv_ctx->rds_rings[i]; 224 rds_ring = &recv_ctx->rds_rings[i];
217 225
218 reg = le32_to_cpu(prsp_rds[i].host_producer_crb); 226 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
219 rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter, 227 rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg;
220 QLCNIC_REG(reg - 0x200));
221 } 228 }
222 229
223 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) 230 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -227,12 +234,10 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
227 sds_ring = &recv_ctx->sds_rings[i]; 234 sds_ring = &recv_ctx->sds_rings[i];
228 235
229 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); 236 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
230 sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter, 237 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
231 QLCNIC_REG(reg - 0x200));
232 238
233 reg = le32_to_cpu(prsp_sds[i].interrupt_crb); 239 sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg;
234 sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter, 240 sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
235 QLCNIC_REG(reg - 0x200));
236 } 241 }
237 242
238 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); 243 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -253,7 +258,7 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
253 258
254 if (qlcnic_issue_cmd(adapter, 259 if (qlcnic_issue_cmd(adapter,
255 adapter->ahw.pci_func, 260 adapter->ahw.pci_func,
256 QLCHAL_VERSION, 261 adapter->fw_hal_version,
257 recv_ctx->context_id, 262 recv_ctx->context_id,
258 QLCNIC_DESTROY_CTX_RESET, 263 QLCNIC_DESTROY_CTX_RESET,
259 0, 264 0,
@@ -262,6 +267,8 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
262 dev_err(&adapter->pdev->dev, 267 dev_err(&adapter->pdev->dev,
263 "Failed to destroy rx ctx in firmware\n"); 268 "Failed to destroy rx ctx in firmware\n");
264 } 269 }
270
271 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
265} 272}
266 273
267static int 274static int
@@ -278,6 +285,11 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
278 dma_addr_t rq_phys_addr, rsp_phys_addr; 285 dma_addr_t rq_phys_addr, rsp_phys_addr;
279 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; 286 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
280 287
288 /* reset host resources */
289 tx_ring->producer = 0;
290 tx_ring->sw_consumer = 0;
291 *(tx_ring->hw_consumer) = 0;
292
281 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 293 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
282 rq_addr = pci_alloc_consistent(adapter->pdev, 294 rq_addr = pci_alloc_consistent(adapter->pdev,
283 rq_size, &rq_phys_addr); 295 rq_size, &rq_phys_addr);
@@ -319,7 +331,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
319 phys_addr = rq_phys_addr; 331 phys_addr = rq_phys_addr;
320 err = qlcnic_issue_cmd(adapter, 332 err = qlcnic_issue_cmd(adapter,
321 adapter->ahw.pci_func, 333 adapter->ahw.pci_func,
322 QLCHAL_VERSION, 334 adapter->fw_hal_version,
323 (u32)(phys_addr >> 32), 335 (u32)(phys_addr >> 32),
324 ((u32)phys_addr & 0xffffffff), 336 ((u32)phys_addr & 0xffffffff),
325 rq_size, 337 rq_size,
@@ -327,8 +339,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
327 339
328 if (err == QLCNIC_RCODE_SUCCESS) { 340 if (err == QLCNIC_RCODE_SUCCESS) {
329 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 341 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
330 tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter, 342 tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp;
331 QLCNIC_REG(temp - 0x200));
332 343
333 adapter->tx_context_id = 344 adapter->tx_context_id =
334 le16_to_cpu(prsp->context_id); 345 le16_to_cpu(prsp->context_id);
@@ -351,7 +362,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
351{ 362{
352 if (qlcnic_issue_cmd(adapter, 363 if (qlcnic_issue_cmd(adapter,
353 adapter->ahw.pci_func, 364 adapter->ahw.pci_func,
354 QLCHAL_VERSION, 365 adapter->fw_hal_version,
355 adapter->tx_context_id, 366 adapter->tx_context_id,
356 QLCNIC_DESTROY_CTX_RESET, 367 QLCNIC_DESTROY_CTX_RESET,
357 0, 368 0,
@@ -368,7 +379,7 @@ qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
368 379
369 if (qlcnic_issue_cmd(adapter, 380 if (qlcnic_issue_cmd(adapter,
370 adapter->ahw.pci_func, 381 adapter->ahw.pci_func,
371 QLCHAL_VERSION, 382 adapter->fw_hal_version,
372 reg, 383 reg,
373 0, 384 0,
374 0, 385 0,
@@ -385,7 +396,7 @@ qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
385{ 396{
386 return qlcnic_issue_cmd(adapter, 397 return qlcnic_issue_cmd(adapter,
387 adapter->ahw.pci_func, 398 adapter->ahw.pci_func,
388 QLCHAL_VERSION, 399 adapter->fw_hal_version,
389 reg, 400 reg,
390 val, 401 val,
391 0, 402 0,
@@ -457,15 +468,6 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
457 sds_ring->desc_head = (struct status_desc *)addr; 468 sds_ring->desc_head = (struct status_desc *)addr;
458 } 469 }
459 470
460
461 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
462 if (err)
463 goto err_out_free;
464 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
465 if (err)
466 goto err_out_free;
467
468 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
469 return 0; 471 return 0;
470 472
471err_out_free: 473err_out_free:
@@ -473,15 +475,27 @@ err_out_free:
473 return err; 475 return err;
474} 476}
475 477
476void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) 478
479int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
477{ 480{
478 struct qlcnic_recv_context *recv_ctx; 481 int err;
479 struct qlcnic_host_rds_ring *rds_ring;
480 struct qlcnic_host_sds_ring *sds_ring;
481 struct qlcnic_host_tx_ring *tx_ring;
482 int ring;
483 482
483 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
484 if (err)
485 return err;
484 486
487 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
488 if (err) {
489 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
490 return err;
491 }
492
493 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
494 return 0;
495}
496
497void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
498{
485 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { 499 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
486 qlcnic_fw_cmd_destroy_rx_ctx(adapter); 500 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
487 qlcnic_fw_cmd_destroy_tx_ctx(adapter); 501 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
@@ -489,6 +503,15 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
489 /* Allow dma queues to drain after context reset */ 503 /* Allow dma queues to drain after context reset */
490 msleep(20); 504 msleep(20);
491 } 505 }
506}
507
508void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
509{
510 struct qlcnic_recv_context *recv_ctx;
511 struct qlcnic_host_rds_ring *rds_ring;
512 struct qlcnic_host_sds_ring *sds_ring;
513 struct qlcnic_host_tx_ring *tx_ring;
514 int ring;
492 515
493 recv_ctx = &adapter->recv_ctx; 516 recv_ctx = &adapter->recv_ctx;
494 517
@@ -533,3 +556,430 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
533 } 556 }
534} 557}
535 558
559/* Set MAC address of a NIC partition */
560int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
561{
562 int err = 0;
563 u32 arg1, arg2, arg3;
564
565 arg1 = adapter->ahw.pci_func | BIT_9;
566 arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
567 arg3 = mac[4] | (mac[5] << 16);
568
569 err = qlcnic_issue_cmd(adapter,
570 adapter->ahw.pci_func,
571 adapter->fw_hal_version,
572 arg1,
573 arg2,
574 arg3,
575 QLCNIC_CDRP_CMD_MAC_ADDRESS);
576
577 if (err != QLCNIC_RCODE_SUCCESS) {
578 dev_err(&adapter->pdev->dev,
579 "Failed to set mac address%d\n", err);
580 err = -EIO;
581 }
582
583 return err;
584}
585
586/* Get MAC address of a NIC partition */
587int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
588{
589 int err;
590 u32 arg1;
591
592 arg1 = adapter->ahw.pci_func | BIT_8;
593 err = qlcnic_issue_cmd(adapter,
594 adapter->ahw.pci_func,
595 adapter->fw_hal_version,
596 arg1,
597 0,
598 0,
599 QLCNIC_CDRP_CMD_MAC_ADDRESS);
600
601 if (err == QLCNIC_RCODE_SUCCESS)
602 qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
603 QLCNIC_ARG2_CRB_OFFSET, 0, mac);
604 else {
605 dev_err(&adapter->pdev->dev,
606 "Failed to get mac address%d\n", err);
607 err = -EIO;
608 }
609
610 return err;
611}
612
613/* Get info of a NIC partition */
614int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
615 struct qlcnic_info *npar_info, u8 func_id)
616{
617 int err;
618 dma_addr_t nic_dma_t;
619 struct qlcnic_info *nic_info;
620 void *nic_info_addr;
621 size_t nic_size = sizeof(struct qlcnic_info);
622
623 nic_info_addr = pci_alloc_consistent(adapter->pdev,
624 nic_size, &nic_dma_t);
625 if (!nic_info_addr)
626 return -ENOMEM;
627 memset(nic_info_addr, 0, nic_size);
628
629 nic_info = (struct qlcnic_info *) nic_info_addr;
630 err = qlcnic_issue_cmd(adapter,
631 adapter->ahw.pci_func,
632 adapter->fw_hal_version,
633 MSD(nic_dma_t),
634 LSD(nic_dma_t),
635 (func_id << 16 | nic_size),
636 QLCNIC_CDRP_CMD_GET_NIC_INFO);
637
638 if (err == QLCNIC_RCODE_SUCCESS) {
639 npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
640 npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
641 npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
642 npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
643 npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
644 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
645 npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
646 npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
647 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
648 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
649
650 dev_info(&adapter->pdev->dev,
651 "phy port: %d switch_mode: %d,\n"
652 "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
653 "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
654 npar_info->phys_port, npar_info->switch_mode,
655 npar_info->max_tx_ques, npar_info->max_rx_ques,
656 npar_info->min_tx_bw, npar_info->max_tx_bw,
657 npar_info->max_mtu, npar_info->capabilities);
658 } else {
659 dev_err(&adapter->pdev->dev,
660 "Failed to get nic info%d\n", err);
661 err = -EIO;
662 }
663
664 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
665 return err;
666}
667
668/* Configure a NIC partition */
669int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
670{
671 int err = -EIO;
672 dma_addr_t nic_dma_t;
673 void *nic_info_addr;
674 struct qlcnic_info *nic_info;
675 size_t nic_size = sizeof(struct qlcnic_info);
676
677 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
678 return err;
679
680 nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
681 &nic_dma_t);
682 if (!nic_info_addr)
683 return -ENOMEM;
684
685 memset(nic_info_addr, 0, nic_size);
686 nic_info = (struct qlcnic_info *)nic_info_addr;
687
688 nic_info->pci_func = cpu_to_le16(nic->pci_func);
689 nic_info->op_mode = cpu_to_le16(nic->op_mode);
690 nic_info->phys_port = cpu_to_le16(nic->phys_port);
691 nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
692 nic_info->capabilities = cpu_to_le32(nic->capabilities);
693 nic_info->max_mac_filters = nic->max_mac_filters;
694 nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
695 nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
696 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
697 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
698
699 err = qlcnic_issue_cmd(adapter,
700 adapter->ahw.pci_func,
701 adapter->fw_hal_version,
702 MSD(nic_dma_t),
703 LSD(nic_dma_t),
704 ((nic->pci_func << 16) | nic_size),
705 QLCNIC_CDRP_CMD_SET_NIC_INFO);
706
707 if (err != QLCNIC_RCODE_SUCCESS) {
708 dev_err(&adapter->pdev->dev,
709 "Failed to set nic info%d\n", err);
710 err = -EIO;
711 }
712
713 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
714 return err;
715}
716
717/* Get PCI Info of a partition */
718int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
719 struct qlcnic_pci_info *pci_info)
720{
721 int err = 0, i;
722 dma_addr_t pci_info_dma_t;
723 struct qlcnic_pci_info *npar;
724 void *pci_info_addr;
725 size_t npar_size = sizeof(struct qlcnic_pci_info);
726 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
727
728 pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
729 &pci_info_dma_t);
730 if (!pci_info_addr)
731 return -ENOMEM;
732 memset(pci_info_addr, 0, pci_size);
733
734 npar = (struct qlcnic_pci_info *) pci_info_addr;
735 err = qlcnic_issue_cmd(adapter,
736 adapter->ahw.pci_func,
737 adapter->fw_hal_version,
738 MSD(pci_info_dma_t),
739 LSD(pci_info_dma_t),
740 pci_size,
741 QLCNIC_CDRP_CMD_GET_PCI_INFO);
742
743 if (err == QLCNIC_RCODE_SUCCESS) {
744 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
745 pci_info->id = le32_to_cpu(npar->id);
746 pci_info->active = le32_to_cpu(npar->active);
747 pci_info->type = le32_to_cpu(npar->type);
748 pci_info->default_port =
749 le32_to_cpu(npar->default_port);
750 pci_info->tx_min_bw =
751 le32_to_cpu(npar->tx_min_bw);
752 pci_info->tx_max_bw =
753 le32_to_cpu(npar->tx_max_bw);
754 memcpy(pci_info->mac, npar->mac, ETH_ALEN);
755 }
756 } else {
757 dev_err(&adapter->pdev->dev,
758 "Failed to get PCI Info%d\n", err);
759 err = -EIO;
760 }
761
762 pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
763 pci_info_dma_t);
764 return err;
765}
766
767/* Reset a NIC partition */
768
769int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
770{
771 int err = -EIO;
772
773 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
774 return err;
775
776 err = qlcnic_issue_cmd(adapter,
777 adapter->ahw.pci_func,
778 adapter->fw_hal_version,
779 func_no,
780 0,
781 0,
782 QLCNIC_CDRP_CMD_RESET_NPAR);
783
784 if (err != QLCNIC_RCODE_SUCCESS) {
785 dev_err(&adapter->pdev->dev,
786 "Failed to issue reset partition%d\n", err);
787 err = -EIO;
788 }
789
790 return err;
791}
792
793/* Get eSwitch Capabilities */
794int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
795 struct qlcnic_eswitch *eswitch)
796{
797 int err = -EIO;
798 u32 arg1, arg2;
799
800 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
801 return err;
802
803 err = qlcnic_issue_cmd(adapter,
804 adapter->ahw.pci_func,
805 adapter->fw_hal_version,
806 port,
807 0,
808 0,
809 QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
810
811 if (err == QLCNIC_RCODE_SUCCESS) {
812 arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
813 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
814
815 eswitch->port = arg1 & 0xf;
816 eswitch->active_vports = LSB(arg2);
817 eswitch->max_ucast_filters = MSB(arg2);
818 eswitch->max_active_vlans = LSB(MSW(arg2));
819 if (arg1 & BIT_6)
820 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
821 if (arg1 & BIT_7)
822 eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
823 if (arg1 & BIT_8)
824 eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
825 } else {
826 dev_err(&adapter->pdev->dev,
827 "Failed to get eswitch capabilities%d\n", err);
828 }
829
830 return err;
831}
832
833/* Get current status of eswitch */
834int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
835 struct qlcnic_eswitch *eswitch)
836{
837 int err = -EIO;
838 u32 arg1, arg2;
839
840 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
841 return err;
842
843 err = qlcnic_issue_cmd(adapter,
844 adapter->ahw.pci_func,
845 adapter->fw_hal_version,
846 port,
847 0,
848 0,
849 QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
850
851 if (err == QLCNIC_RCODE_SUCCESS) {
852 arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
853 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
854
855 eswitch->port = arg1 & 0xf;
856 eswitch->active_vports = LSB(arg2);
857 eswitch->active_ucast_filters = MSB(arg2);
858 eswitch->active_vlans = LSB(MSW(arg2));
859 if (arg1 & BIT_6)
860 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
861 if (arg1 & BIT_8)
862 eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
863
864 } else {
865 dev_err(&adapter->pdev->dev,
866 "Failed to get eswitch status%d\n", err);
867 }
868
869 return err;
870}
871
872/* Enable/Disable eSwitch */
873int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
874{
875 int err = -EIO;
876 u32 arg1, arg2;
877 struct qlcnic_eswitch *eswitch;
878
879 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
880 return err;
881
882 eswitch = &adapter->eswitch[id];
883 if (!eswitch)
884 return err;
885
886 arg1 = eswitch->port | (enable ? BIT_4 : 0);
887 arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
888 (eswitch->max_active_vlans << 16);
889 err = qlcnic_issue_cmd(adapter,
890 adapter->ahw.pci_func,
891 adapter->fw_hal_version,
892 arg1,
893 arg2,
894 0,
895 QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
896
897 if (err != QLCNIC_RCODE_SUCCESS) {
898 dev_err(&adapter->pdev->dev,
899 "Failed to enable eswitch%d\n", eswitch->port);
900 eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
901 err = -EIO;
902 } else {
903 eswitch->flags |= QLCNIC_SWITCH_ENABLE;
904 dev_info(&adapter->pdev->dev,
905 "Enabled eSwitch for port %d\n", eswitch->port);
906 }
907
908 return err;
909}
910
911/* Configure eSwitch for port mirroring */
912int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
913 u8 enable_mirroring, u8 pci_func)
914{
915 int err = -EIO;
916 u32 arg1;
917
918 if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
919 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
920 return err;
921
922 arg1 = id | (enable_mirroring ? BIT_4 : 0);
923 arg1 |= pci_func << 8;
924
925 err = qlcnic_issue_cmd(adapter,
926 adapter->ahw.pci_func,
927 adapter->fw_hal_version,
928 arg1,
929 0,
930 0,
931 QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
932
933 if (err != QLCNIC_RCODE_SUCCESS) {
934 dev_err(&adapter->pdev->dev,
935 "Failed to configure port mirroring%d on eswitch:%d\n",
936 pci_func, id);
937 } else {
938 dev_info(&adapter->pdev->dev,
939 "Configured eSwitch %d for port mirroring:%d\n",
940 id, pci_func);
941 }
942
943 return err;
944}
945
946/* Configure eSwitch port */
947int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
948 int vlan_tagging, u8 discard_tagged, u8 promsc_mode,
949 u8 mac_learn, u8 pci_func, u16 vlan_id)
950{
951 int err = -EIO;
952 u32 arg1;
953 struct qlcnic_eswitch *eswitch;
954
955 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
956 return err;
957
958 eswitch = &adapter->eswitch[id];
959 if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
960 return err;
961
962 arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
963 arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
964 arg1 |= pci_func << 8;
965 if (vlan_tagging)
966 arg1 |= BIT_5 | (vlan_id << 16);
967
968 err = qlcnic_issue_cmd(adapter,
969 adapter->ahw.pci_func,
970 adapter->fw_hal_version,
971 arg1,
972 0,
973 0,
974 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
975
976 if (err != QLCNIC_RCODE_SUCCESS) {
977 dev_err(&adapter->pdev->dev,
978 "Failed to configure eswitch port%d\n", eswitch->port);
979 } else {
980 dev_info(&adapter->pdev->dev,
981 "Configured eSwitch for port %d\n", eswitch->port);
982 }
983
984 return err;
985}
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 3bd514ec7e8f..9328d59e21e0 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,8 +69,6 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
69 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, 69 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
70 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), 70 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
71 QLC_OFF(stats.skb_alloc_failure)}, 71 QLC_OFF(stats.skb_alloc_failure)},
72 {"null skb",
73 QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
74 {"null rxbuf", 72 {"null rxbuf",
75 QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, 73 QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
76 {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), 74 {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
@@ -350,7 +348,7 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
350 for (i = 0; diag_registers[i] != -1; i++) 348 for (i = 0; diag_registers[i] != -1; i++)
351 regs_buff[i] = QLCRD32(adapter, diag_registers[i]); 349 regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
352 350
353 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 351 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
354 return; 352 return;
355 353
356 regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/ 354 regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
@@ -580,8 +578,12 @@ qlcnic_set_pauseparam(struct net_device *netdev,
580 } 578 }
581 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); 579 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
582 } else if (adapter->ahw.port_type == QLCNIC_XGBE) { 580 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
581 if (!pause->rx_pause || pause->autoneg)
582 return -EOPNOTSUPP;
583
583 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) 584 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
584 return -EIO; 585 return -EIO;
586
585 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); 587 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
586 if (port == 0) { 588 if (port == 0) {
587 if (pause->tx_pause) 589 if (pause->tx_pause)
@@ -676,6 +678,12 @@ static int qlcnic_loopback_test(struct net_device *netdev)
676 int max_sds_rings = adapter->max_sds_rings; 678 int max_sds_rings = adapter->max_sds_rings;
677 int ret; 679 int ret;
678 680
681 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
682 dev_warn(&adapter->pdev->dev, "Loopback test not supported"
683 "for non privilege function\n");
684 return 0;
685 }
686
679 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 687 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
680 return -EIO; 688 return -EIO;
681 689
@@ -715,7 +723,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
715 723
716 adapter->diag_cnt = 0; 724 adapter->diag_cnt = 0;
717 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, 725 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
718 QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011); 726 adapter->fw_hal_version, adapter->portnum,
727 0, 0, 0x00000011);
719 if (ret) 728 if (ret)
720 goto done; 729 goto done;
721 730
@@ -821,6 +830,9 @@ static u32 qlcnic_get_tso(struct net_device *dev)
821 830
822static int qlcnic_set_tso(struct net_device *dev, u32 data) 831static int qlcnic_set_tso(struct net_device *dev, u32 data)
823{ 832{
833 struct qlcnic_adapter *adapter = netdev_priv(dev);
834 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO))
835 return -EOPNOTSUPP;
824 if (data) 836 if (data)
825 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); 837 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
826 else 838 else
@@ -834,7 +846,10 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
834 struct qlcnic_adapter *adapter = netdev_priv(dev); 846 struct qlcnic_adapter *adapter = netdev_priv(dev);
835 int ret; 847 int ret;
836 848
837 ret = qlcnic_config_led(adapter, 1, 0xf); 849 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
850 return -EIO;
851
852 ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
838 if (ret) { 853 if (ret) {
839 dev_err(&adapter->pdev->dev, 854 dev_err(&adapter->pdev->dev,
840 "Failed to set LED blink state.\n"); 855 "Failed to set LED blink state.\n");
@@ -843,7 +858,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
843 858
844 msleep_interruptible(val * 1000); 859 msleep_interruptible(val * 1000);
845 860
846 ret = qlcnic_config_led(adapter, 0, 0xf); 861 ret = adapter->nic_ops->config_led(adapter, 0, 0xf);
847 if (ret) { 862 if (ret) {
848 dev_err(&adapter->pdev->dev, 863 dev_err(&adapter->pdev->dev,
849 "Failed to reset LED blink state.\n"); 864 "Failed to reset LED blink state.\n");
@@ -905,7 +920,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
905{ 920{
906 struct qlcnic_adapter *adapter = netdev_priv(netdev); 921 struct qlcnic_adapter *adapter = netdev_priv(netdev);
907 922
908 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 923 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
909 return -EINVAL; 924 return -EINVAL;
910 925
911 /* 926 /*
@@ -981,12 +996,19 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
981 struct qlcnic_adapter *adapter = netdev_priv(netdev); 996 struct qlcnic_adapter *adapter = netdev_priv(netdev);
982 int hw_lro; 997 int hw_lro;
983 998
984 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) 999 if (data & ~ETH_FLAG_LRO)
985 return -EINVAL; 1000 return -EINVAL;
986 1001
987 ethtool_op_set_flags(netdev, data); 1002 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
1003 return -EINVAL;
988 1004
989 hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0; 1005 if (data & ETH_FLAG_LRO) {
1006 hw_lro = QLCNIC_LRO_ENABLED;
1007 netdev->features |= NETIF_F_LRO;
1008 } else {
1009 hw_lro = 0;
1010 netdev->features &= ~NETIF_F_LRO;
1011 }
990 1012
991 if (qlcnic_config_hw_lro(adapter, hw_lro)) 1013 if (qlcnic_config_hw_lro(adapter, hw_lro))
992 return -EIO; 1014 return -EIO;
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index ad9d167723c4..15fc32070be3 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -208,6 +208,39 @@ enum {
208 QLCNIC_HW_PX_MAP_CRB_PGR0 208 QLCNIC_HW_PX_MAP_CRB_PGR0
209}; 209};
210 210
211#define BIT_0 0x1
212#define BIT_1 0x2
213#define BIT_2 0x4
214#define BIT_3 0x8
215#define BIT_4 0x10
216#define BIT_5 0x20
217#define BIT_6 0x40
218#define BIT_7 0x80
219#define BIT_8 0x100
220#define BIT_9 0x200
221#define BIT_10 0x400
222#define BIT_11 0x800
223#define BIT_12 0x1000
224#define BIT_13 0x2000
225#define BIT_14 0x4000
226#define BIT_15 0x8000
227#define BIT_16 0x10000
228#define BIT_17 0x20000
229#define BIT_18 0x40000
230#define BIT_19 0x80000
231#define BIT_20 0x100000
232#define BIT_21 0x200000
233#define BIT_22 0x400000
234#define BIT_23 0x800000
235#define BIT_24 0x1000000
236#define BIT_25 0x2000000
237#define BIT_26 0x4000000
238#define BIT_27 0x8000000
239#define BIT_28 0x10000000
240#define BIT_29 0x20000000
241#define BIT_30 0x40000000
242#define BIT_31 0x80000000
243
211/* This field defines CRB adr [31:20] of the agents */ 244/* This field defines CRB adr [31:20] of the agents */
212 245
213#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ 246#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
@@ -668,10 +701,11 @@ enum {
668#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138)) 701#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
669#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) 702#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
670 703
671#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) 704#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
672#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) 705#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
673#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) 706#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
674#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) 707#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
708#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
675#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) 709#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
676#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) 710#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
677 711
@@ -684,15 +718,26 @@ enum {
684#define QLCNIC_DEV_FAILED 0x6 718#define QLCNIC_DEV_FAILED 0x6
685#define QLCNIC_DEV_QUISCENT 0x7 719#define QLCNIC_DEV_QUISCENT 0x7
686 720
721#define QLCNIC_DEV_NPAR_NOT_RDY 0
722#define QLCNIC_DEV_NPAR_RDY 1
723
724#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
687#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 725#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
688#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) 726#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
689#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) 727#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
690#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) 728#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
691#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) 729#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
692 730
731#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
732#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
733
734#define QLCNIC_TYPE_NIC 1
735#define QLCNIC_TYPE_FCOE 2
736#define QLCNIC_TYPE_ISCSI 3
737
693#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 738#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
694#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000 739#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
695#define QLCNIC_RCODE_FATAL_ERROR 0x80000000 740#define QLCNIC_RCODE_FATAL_ERROR BIT_31
696#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) 741#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
697#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) 742#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
698 743
@@ -721,6 +766,29 @@ struct qlcnic_legacy_intr_set {
721 u32 pci_int_reg; 766 u32 pci_int_reg;
722}; 767};
723 768
769#define QLCNIC_FW_API 0x1b216c
770#define QLCNIC_DRV_OP_MODE 0x1b2170
771#define QLCNIC_MSIX_BASE 0x132110
772#define QLCNIC_MAX_PCI_FUNC 8
773
774/* PCI function operational mode */
775enum {
776 QLCNIC_MGMT_FUNC = 0,
777 QLCNIC_PRIV_FUNC = 1,
778 QLCNIC_NON_PRIV_FUNC = 2
779};
780
781#define QLC_DEV_DRV_DEFAULT 0x11111111
782
783#define LSB(x) ((uint8_t)(x))
784#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
785
786#define LSW(x) ((uint16_t)((uint32_t)(x)))
787#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
788
789#define LSD(x) ((uint32_t)((uint64_t)(x)))
790#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
791
724#define QLCNIC_LEGACY_INTR_CONFIG \ 792#define QLCNIC_LEGACY_INTR_CONFIG \
725{ \ 793{ \
726 { \ 794 { \
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 0c2e1f08f459..e08c8b0556a4 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -327,7 +327,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
327 327
328 i = 0; 328 i = 0;
329 329
330 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 330 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
331 return -EIO; 331 return -EIO;
332 332
333 tx_ring = adapter->tx_ring; 333 tx_ring = adapter->tx_ring;
@@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
338 338
339 if (nr_desc >= qlcnic_tx_avail(tx_ring)) { 339 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
340 netif_tx_stop_queue(tx_ring->txq); 340 netif_tx_stop_queue(tx_ring->txq);
341 __netif_tx_unlock_bh(tx_ring->txq); 341 smp_mb();
342 adapter->stats.xmit_off++; 342 if (qlcnic_tx_avail(tx_ring) > nr_desc) {
343 return -EBUSY; 343 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
344 netif_tx_wake_queue(tx_ring->txq);
345 } else {
346 adapter->stats.xmit_off++;
347 __netif_tx_unlock_bh(tx_ring->txq);
348 return -EBUSY;
349 }
344 } 350 }
345 351
346 do { 352 do {
@@ -407,10 +413,15 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
407 return -ENOMEM; 413 return -ENOMEM;
408 } 414 }
409 memcpy(cur->mac_addr, addr, ETH_ALEN); 415 memcpy(cur->mac_addr, addr, ETH_ALEN);
410 list_add_tail(&cur->list, &adapter->mac_list);
411 416
412 return qlcnic_sre_macaddr_change(adapter, 417 if (qlcnic_sre_macaddr_change(adapter,
413 cur->mac_addr, QLCNIC_MAC_ADD); 418 cur->mac_addr, QLCNIC_MAC_ADD)) {
419 kfree(cur);
420 return -EIO;
421 }
422
423 list_add_tail(&cur->list, &adapter->mac_list);
424 return 0;
414} 425}
415 426
416void qlcnic_set_multi(struct net_device *netdev) 427void qlcnic_set_multi(struct net_device *netdev)
@@ -420,7 +431,7 @@ void qlcnic_set_multi(struct net_device *netdev)
420 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 431 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
421 u32 mode = VPORT_MISS_MODE_DROP; 432 u32 mode = VPORT_MISS_MODE_DROP;
422 433
423 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 434 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
424 return; 435 return;
425 436
426 qlcnic_nic_add_mac(adapter, adapter->mac_addr); 437 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
@@ -538,7 +549,7 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
538 return rv; 549 return rv;
539} 550}
540 551
541int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable) 552int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
542{ 553{
543 struct qlcnic_nic_req req; 554 struct qlcnic_nic_req req;
544 u64 word; 555 u64 word;
@@ -704,21 +715,15 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
704 return rc; 715 return rc;
705} 716}
706 717
707int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac) 718int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
708{ 719{
709 u32 crbaddr, mac_hi, mac_lo; 720 u32 crbaddr;
710 int pci_func = adapter->ahw.pci_func; 721 int pci_func = adapter->ahw.pci_func;
711 722
712 crbaddr = CRB_MAC_BLOCK_START + 723 crbaddr = CRB_MAC_BLOCK_START +
713 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); 724 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
714 725
715 mac_lo = QLCRD32(adapter, crbaddr); 726 qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
716 mac_hi = QLCRD32(adapter, crbaddr+4);
717
718 if (pci_func & 1)
719 *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
720 else
721 *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
722 727
723 return 0; 728 return 0;
724} 729}
@@ -766,7 +771,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
766 * Out: 'off' is 2M pci map addr 771 * Out: 'off' is 2M pci map addr
767 * side effect: lock crb window 772 * side effect: lock crb window
768 */ 773 */
769static void 774static int
770qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) 775qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
771{ 776{
772 u32 window; 777 u32 window;
@@ -775,6 +780,10 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
775 off -= QLCNIC_PCI_CRBSPACE; 780 off -= QLCNIC_PCI_CRBSPACE;
776 781
777 window = CRB_HI(off); 782 window = CRB_HI(off);
783 if (window == 0) {
784 dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
785 return -EIO;
786 }
778 787
779 writel(window, addr); 788 writel(window, addr);
780 if (readl(addr) != window) { 789 if (readl(addr) != window) {
@@ -782,7 +791,9 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
782 dev_warn(&adapter->pdev->dev, 791 dev_warn(&adapter->pdev->dev,
783 "failed to set CRB window to %d off 0x%lx\n", 792 "failed to set CRB window to %d off 0x%lx\n",
784 window, off); 793 window, off);
794 return -EIO;
785 } 795 }
796 return 0;
786} 797}
787 798
788int 799int
@@ -803,11 +814,12 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
803 /* indirect access */ 814 /* indirect access */
804 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 815 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
805 crb_win_lock(adapter); 816 crb_win_lock(adapter);
806 qlcnic_pci_set_crbwindow_2M(adapter, off); 817 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
807 writel(data, addr); 818 if (!rv)
819 writel(data, addr);
808 crb_win_unlock(adapter); 820 crb_win_unlock(adapter);
809 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 821 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
810 return 0; 822 return rv;
811 } 823 }
812 824
813 dev_err(&adapter->pdev->dev, 825 dev_err(&adapter->pdev->dev,
@@ -821,7 +833,7 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
821{ 833{
822 unsigned long flags; 834 unsigned long flags;
823 int rv; 835 int rv;
824 u32 data; 836 u32 data = -1;
825 void __iomem *addr = NULL; 837 void __iomem *addr = NULL;
826 838
827 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); 839 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
@@ -833,8 +845,8 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
833 /* indirect access */ 845 /* indirect access */
834 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 846 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
835 crb_win_lock(adapter); 847 crb_win_lock(adapter);
836 qlcnic_pci_set_crbwindow_2M(adapter, off); 848 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
837 data = readl(addr); 849 data = readl(addr);
838 crb_win_unlock(adapter); 850 crb_win_unlock(adapter);
839 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 851 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
840 return data; 852 return data;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 71a4e664ad76..75ba744b173c 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -112,18 +112,45 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
112 rds_ring = &recv_ctx->rds_rings[ring]; 112 rds_ring = &recv_ctx->rds_rings[ring];
113 for (i = 0; i < rds_ring->num_desc; ++i) { 113 for (i = 0; i < rds_ring->num_desc; ++i) {
114 rx_buf = &(rds_ring->rx_buf_arr[i]); 114 rx_buf = &(rds_ring->rx_buf_arr[i]);
115 if (rx_buf->state == QLCNIC_BUFFER_FREE) 115 if (rx_buf->skb == NULL)
116 continue; 116 continue;
117
117 pci_unmap_single(adapter->pdev, 118 pci_unmap_single(adapter->pdev,
118 rx_buf->dma, 119 rx_buf->dma,
119 rds_ring->dma_size, 120 rds_ring->dma_size,
120 PCI_DMA_FROMDEVICE); 121 PCI_DMA_FROMDEVICE);
121 if (rx_buf->skb != NULL) 122
122 dev_kfree_skb_any(rx_buf->skb); 123 dev_kfree_skb_any(rx_buf->skb);
123 } 124 }
124 } 125 }
125} 126}
126 127
128void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
129{
130 struct qlcnic_recv_context *recv_ctx;
131 struct qlcnic_host_rds_ring *rds_ring;
132 struct qlcnic_rx_buffer *rx_buf;
133 int i, ring;
134
135 recv_ctx = &adapter->recv_ctx;
136 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
137 rds_ring = &recv_ctx->rds_rings[ring];
138
139 spin_lock(&rds_ring->lock);
140
141 INIT_LIST_HEAD(&rds_ring->free_list);
142
143 rx_buf = rds_ring->rx_buf_arr;
144 for (i = 0; i < rds_ring->num_desc; i++) {
145 list_add_tail(&rx_buf->list,
146 &rds_ring->free_list);
147 rx_buf++;
148 }
149
150 spin_unlock(&rds_ring->lock);
151 }
152}
153
127void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) 154void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
128{ 155{
129 struct qlcnic_cmd_buffer *cmd_buf; 156 struct qlcnic_cmd_buffer *cmd_buf;
@@ -181,7 +208,9 @@ skip_rds:
181 208
182 tx_ring = adapter->tx_ring; 209 tx_ring = adapter->tx_ring;
183 vfree(tx_ring->cmd_buf_arr); 210 vfree(tx_ring->cmd_buf_arr);
211 tx_ring->cmd_buf_arr = NULL;
184 kfree(adapter->tx_ring); 212 kfree(adapter->tx_ring);
213 adapter->tx_ring = NULL;
185} 214}
186 215
187int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) 216int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -264,7 +293,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
264 list_add_tail(&rx_buf->list, 293 list_add_tail(&rx_buf->list,
265 &rds_ring->free_list); 294 &rds_ring->free_list);
266 rx_buf->ref_handle = i; 295 rx_buf->ref_handle = i;
267 rx_buf->state = QLCNIC_BUFFER_FREE;
268 rx_buf++; 296 rx_buf++;
269 } 297 }
270 spin_lock_init(&rds_ring->lock); 298 spin_lock_init(&rds_ring->lock);
@@ -413,7 +441,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
413 441
414 /* resetall */ 442 /* resetall */
415 qlcnic_rom_lock(adapter); 443 qlcnic_rom_lock(adapter);
416 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff); 444 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
417 qlcnic_rom_unlock(adapter); 445 qlcnic_rom_unlock(adapter);
418 446
419 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || 447 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
@@ -521,16 +549,13 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
521 u32 val; 549 u32 val;
522 550
523 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); 551 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
524 val = (val >> (adapter->portnum * 4)) & 0xf; 552 val = QLC_DEV_GET_DRV(val, adapter->portnum);
525 553 if ((val & 0x3) != QLCNIC_TYPE_NIC) {
526 if ((val & 0x3) != 1) { 554 dev_err(&adapter->pdev->dev,
527 dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n", 555 "Not an Ethernet NIC func=%u\n", val);
528 val);
529 return -EIO; 556 return -EIO;
530 } 557 }
531
532 adapter->physical_port = (val >> 2); 558 adapter->physical_port = (val >> 2);
533
534 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) 559 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
535 timeo = 30; 560 timeo = 30;
536 561
@@ -544,16 +569,34 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
544 return 0; 569 return 0;
545} 570}
546 571
572int
573qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
574{
575 u32 ver = -1, min_ver;
576
577 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
578
579 ver = QLCNIC_DECODE_VERSION(ver);
580 min_ver = QLCNIC_MIN_FW_VERSION;
581
582 if (ver < min_ver) {
583 dev_err(&adapter->pdev->dev,
584 "firmware version %d.%d.%d unsupported."
585 "Min supported version %d.%d.%d\n",
586 _major(ver), _minor(ver), _build(ver),
587 _major(min_ver), _minor(min_ver), _build(min_ver));
588 return -EINVAL;
589 }
590
591 return 0;
592}
593
547static int 594static int
548qlcnic_has_mn(struct qlcnic_adapter *adapter) 595qlcnic_has_mn(struct qlcnic_adapter *adapter)
549{ 596{
550 u32 capability, flashed_ver; 597 u32 capability;
551 capability = 0; 598 capability = 0;
552 599
553 qlcnic_rom_fast_read(adapter,
554 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
555 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
556
557 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); 600 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
558 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) 601 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
559 return 1; 602 return 1;
@@ -1007,7 +1050,7 @@ static int
1007qlcnic_validate_firmware(struct qlcnic_adapter *adapter) 1050qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1008{ 1051{
1009 __le32 val; 1052 __le32 val;
1010 u32 ver, min_ver, bios, min_size; 1053 u32 ver, bios, min_size;
1011 struct pci_dev *pdev = adapter->pdev; 1054 struct pci_dev *pdev = adapter->pdev;
1012 const struct firmware *fw = adapter->fw; 1055 const struct firmware *fw = adapter->fw;
1013 u8 fw_type = adapter->fw_type; 1056 u8 fw_type = adapter->fw_type;
@@ -1029,12 +1072,9 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1029 return -EINVAL; 1072 return -EINVAL;
1030 1073
1031 val = qlcnic_get_fw_version(adapter); 1074 val = qlcnic_get_fw_version(adapter);
1032
1033 min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
1034
1035 ver = QLCNIC_DECODE_VERSION(val); 1075 ver = QLCNIC_DECODE_VERSION(val);
1036 1076
1037 if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) { 1077 if (ver < QLCNIC_MIN_FW_VERSION) {
1038 dev_err(&pdev->dev, 1078 dev_err(&pdev->dev,
1039 "%s: firmware version %d.%d.%d unsupported\n", 1079 "%s: firmware version %d.%d.%d unsupported\n",
1040 fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); 1080 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
@@ -1122,7 +1162,7 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1122 adapter->fw = NULL; 1162 adapter->fw = NULL;
1123} 1163}
1124 1164
1125int qlcnic_phantom_init(struct qlcnic_adapter *adapter) 1165static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
1126{ 1166{
1127 u32 val; 1167 u32 val;
1128 int retries = 60; 1168 int retries = 60;
@@ -1147,7 +1187,8 @@ int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
1147 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); 1187 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1148 1188
1149out_err: 1189out_err:
1150 dev_err(&adapter->pdev->dev, "firmware init failed\n"); 1190 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
1191 "complete, state: 0x%x.\n", val);
1151 return -EIO; 1192 return -EIO;
1152} 1193}
1153 1194
@@ -1180,6 +1221,10 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
1180{ 1221{
1181 int err; 1222 int err;
1182 1223
1224 err = qlcnic_cmd_peg_ready(adapter);
1225 if (err)
1226 return err;
1227
1183 err = qlcnic_receive_peg_ready(adapter); 1228 err = qlcnic_receive_peg_ready(adapter);
1184 if (err) 1229 if (err)
1185 return err; 1230 return err;
@@ -1265,14 +1310,12 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1265 dma_addr_t dma; 1310 dma_addr_t dma;
1266 struct pci_dev *pdev = adapter->pdev; 1311 struct pci_dev *pdev = adapter->pdev;
1267 1312
1268 buffer->skb = dev_alloc_skb(rds_ring->skb_size); 1313 skb = dev_alloc_skb(rds_ring->skb_size);
1269 if (!buffer->skb) { 1314 if (!skb) {
1270 adapter->stats.skb_alloc_failure++; 1315 adapter->stats.skb_alloc_failure++;
1271 return -ENOMEM; 1316 return -ENOMEM;
1272 } 1317 }
1273 1318
1274 skb = buffer->skb;
1275
1276 skb_reserve(skb, 2); 1319 skb_reserve(skb, 2);
1277 1320
1278 dma = pci_map_single(pdev, skb->data, 1321 dma = pci_map_single(pdev, skb->data,
@@ -1281,13 +1324,11 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1281 if (pci_dma_mapping_error(pdev, dma)) { 1324 if (pci_dma_mapping_error(pdev, dma)) {
1282 adapter->stats.rx_dma_map_error++; 1325 adapter->stats.rx_dma_map_error++;
1283 dev_kfree_skb_any(skb); 1326 dev_kfree_skb_any(skb);
1284 buffer->skb = NULL;
1285 return -ENOMEM; 1327 return -ENOMEM;
1286 } 1328 }
1287 1329
1288 buffer->skb = skb; 1330 buffer->skb = skb;
1289 buffer->dma = dma; 1331 buffer->dma = dma;
1290 buffer->state = QLCNIC_BUFFER_BUSY;
1291 1332
1292 return 0; 1333 return 0;
1293} 1334}
@@ -1300,14 +1341,15 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1300 1341
1301 buffer = &rds_ring->rx_buf_arr[index]; 1342 buffer = &rds_ring->rx_buf_arr[index];
1302 1343
1344 if (unlikely(buffer->skb == NULL)) {
1345 WARN_ON(1);
1346 return NULL;
1347 }
1348
1303 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, 1349 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
1304 PCI_DMA_FROMDEVICE); 1350 PCI_DMA_FROMDEVICE);
1305 1351
1306 skb = buffer->skb; 1352 skb = buffer->skb;
1307 if (!skb) {
1308 adapter->stats.null_skb++;
1309 goto no_skb;
1310 }
1311 1353
1312 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { 1354 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
1313 adapter->stats.csummed++; 1355 adapter->stats.csummed++;
@@ -1319,8 +1361,7 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1319 skb->dev = adapter->netdev; 1361 skb->dev = adapter->netdev;
1320 1362
1321 buffer->skb = NULL; 1363 buffer->skb = NULL;
1322no_skb: 1364
1323 buffer->state = QLCNIC_BUFFER_FREE;
1324 return skb; 1365 return skb;
1325} 1366}
1326 1367
@@ -1495,7 +1536,7 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1495 1536
1496 WARN_ON(desc_cnt > 1); 1537 WARN_ON(desc_cnt > 1);
1497 1538
1498 if (rxbuf) 1539 if (likely(rxbuf))
1499 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); 1540 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1500 else 1541 else
1501 adapter->stats.null_rxbuf++; 1542 adapter->stats.null_rxbuf++;
@@ -1701,3 +1742,24 @@ qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1701 sds_ring->consumer = consumer; 1742 sds_ring->consumer = consumer;
1702 writel(consumer, sds_ring->crb_sts_consumer); 1743 writel(consumer, sds_ring->crb_sts_consumer);
1703} 1744}
1745
1746void
1747qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1748 u8 alt_mac, u8 *mac)
1749{
1750 u32 mac_low, mac_high;
1751 int i;
1752
1753 mac_low = QLCRD32(adapter, off1);
1754 mac_high = QLCRD32(adapter, off2);
1755
1756 if (alt_mac) {
1757 mac_low |= (mac_low >> 16) | (mac_high << 16);
1758 mac_high >>= 16;
1759 }
1760
1761 for (i = 0; i < 2; i++)
1762 mac[i] = (u8)(mac_high >> ((1 - i) * 8));
1763 for (i = 2; i < 6; i++)
1764 mac[i] = (u8)(mac_low >> ((5 - i) * 8));
1765}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 23ea9caa5261..b9615bd745ea 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -34,15 +34,16 @@
34#include <linux/ipv6.h> 34#include <linux/ipv6.h>
35#include <linux/inetdevice.h> 35#include <linux/inetdevice.h>
36#include <linux/sysfs.h> 36#include <linux/sysfs.h>
37#include <linux/aer.h>
37 38
38MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver"); 39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
39MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
40MODULE_VERSION(QLCNIC_LINUX_VERSIONID); 41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME); 42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
42 43
43char qlcnic_driver_name[] = "qlcnic"; 44char qlcnic_driver_name[] = "qlcnic";
44static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v" 45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
45 QLCNIC_LINUX_VERSIONID; 46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
46 47
47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG; 48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
48 49
@@ -65,13 +66,16 @@ static int load_fw_file;
65module_param(load_fw_file, int, 0644); 66module_param(load_fw_file, int, 0644);
66MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 67MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
67 68
69static int qlcnic_config_npars;
70module_param(qlcnic_config_npars, int, 0644);
71MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
72
68static int __devinit qlcnic_probe(struct pci_dev *pdev, 73static int __devinit qlcnic_probe(struct pci_dev *pdev,
69 const struct pci_device_id *ent); 74 const struct pci_device_id *ent);
70static void __devexit qlcnic_remove(struct pci_dev *pdev); 75static void __devexit qlcnic_remove(struct pci_dev *pdev);
71static int qlcnic_open(struct net_device *netdev); 76static int qlcnic_open(struct net_device *netdev);
72static int qlcnic_close(struct net_device *netdev); 77static int qlcnic_close(struct net_device *netdev);
73static void qlcnic_tx_timeout(struct net_device *netdev); 78static void qlcnic_tx_timeout(struct net_device *netdev);
74static void qlcnic_tx_timeout_task(struct work_struct *work);
75static void qlcnic_attach_work(struct work_struct *work); 79static void qlcnic_attach_work(struct work_struct *work);
76static void qlcnic_fwinit_work(struct work_struct *work); 80static void qlcnic_fwinit_work(struct work_struct *work);
77static void qlcnic_fw_poll_work(struct work_struct *work); 81static void qlcnic_fw_poll_work(struct work_struct *work);
@@ -79,6 +83,7 @@ static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
79 work_func_t func, int delay); 83 work_func_t func, int delay);
80static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); 84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
81static int qlcnic_poll(struct napi_struct *napi, int budget); 85static int qlcnic_poll(struct napi_struct *napi, int budget);
86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
82#ifdef CONFIG_NET_POLL_CONTROLLER 87#ifdef CONFIG_NET_POLL_CONTROLLER
83static void qlcnic_poll_controller(struct net_device *netdev); 88static void qlcnic_poll_controller(struct net_device *netdev);
84#endif 89#endif
@@ -99,7 +104,12 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data);
99 104
100static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); 105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
101static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); 106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *);
102 108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
103/* PCI Device ID Table */ 113/* PCI Device ID Table */
104#define ENTRY(device) \ 114#define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ 115 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -120,12 +130,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring) 130 struct qlcnic_host_tx_ring *tx_ring)
121{ 131{
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer); 132 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
123
124 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
125 netif_stop_queue(adapter->netdev);
126 smp_mb();
127 adapter->stats.xmit_off++;
128 }
129} 133}
130 134
131static const u32 msi_tgt_status[8] = { 135static const u32 msi_tgt_status[8] = {
@@ -184,8 +188,13 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
184 188
185 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 189 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
186 sds_ring = &recv_ctx->sds_rings[ring]; 190 sds_ring = &recv_ctx->sds_rings[ring];
187 netif_napi_add(netdev, &sds_ring->napi, 191
188 qlcnic_poll, QLCNIC_NETDEV_WEIGHT); 192 if (ring == adapter->max_sds_rings - 1)
193 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
194 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
195 else
196 netif_napi_add(netdev, &sds_ring->napi,
197 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
189 } 198 }
190 199
191 return 0; 200 return 0;
@@ -307,19 +316,14 @@ static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
307static int 316static int
308qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) 317qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
309{ 318{
310 int i; 319 u8 mac_addr[ETH_ALEN];
311 unsigned char *p;
312 u64 mac_addr;
313 struct net_device *netdev = adapter->netdev; 320 struct net_device *netdev = adapter->netdev;
314 struct pci_dev *pdev = adapter->pdev; 321 struct pci_dev *pdev = adapter->pdev;
315 322
316 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0) 323 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
317 return -EIO; 324 return -EIO;
318 325
319 p = (unsigned char *)&mac_addr; 326 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
320 for (i = 0; i < 6; i++)
321 netdev->dev_addr[i] = *(p + 5 - i);
322
323 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 327 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
324 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); 328 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
325 329
@@ -340,7 +344,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
340 if (!is_valid_ether_addr(addr->sa_data)) 344 if (!is_valid_ether_addr(addr->sa_data))
341 return -EINVAL; 345 return -EINVAL;
342 346
343 if (netif_running(netdev)) { 347 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
344 netif_device_detach(netdev); 348 netif_device_detach(netdev);
345 qlcnic_napi_disable(adapter); 349 qlcnic_napi_disable(adapter);
346 } 350 }
@@ -349,7 +353,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
349 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 353 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
350 qlcnic_set_multi(adapter->netdev); 354 qlcnic_set_multi(adapter->netdev);
351 355
352 if (netif_running(netdev)) { 356 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
353 netif_device_attach(netdev); 357 netif_device_attach(netdev);
354 qlcnic_napi_enable(adapter); 358 qlcnic_napi_enable(adapter);
355 } 359 }
@@ -371,6 +375,20 @@ static const struct net_device_ops qlcnic_netdev_ops = {
371#endif 375#endif
372}; 376};
373 377
378static struct qlcnic_nic_template qlcnic_ops = {
379 .get_mac_addr = qlcnic_get_mac_address,
380 .config_bridged_mode = qlcnic_config_bridged_mode,
381 .config_led = qlcnic_config_led,
382 .start_firmware = qlcnic_start_firmware
383};
384
385static struct qlcnic_nic_template qlcnic_vf_ops = {
386 .get_mac_addr = qlcnic_get_mac_address,
387 .config_bridged_mode = qlcnicvf_config_bridged_mode,
388 .config_led = qlcnicvf_config_led,
389 .start_firmware = qlcnicvf_start_firmware
390};
391
374static void 392static void
375qlcnic_setup_intr(struct qlcnic_adapter *adapter) 393qlcnic_setup_intr(struct qlcnic_adapter *adapter)
376{ 394{
@@ -453,6 +471,169 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
453} 471}
454 472
455static int 473static int
474qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
475{
476 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
477 int i, ret = 0, err;
478 u8 pfn;
479
480 if (!adapter->npars)
481 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
482 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
483 if (!adapter->npars)
484 return -ENOMEM;
485
486 if (!adapter->eswitch)
487 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
488 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
489 if (!adapter->eswitch) {
490 err = -ENOMEM;
491 goto err_eswitch;
492 }
493
494 ret = qlcnic_get_pci_info(adapter, pci_info);
495 if (!ret) {
496 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
497 pfn = pci_info[i].id;
498 if (pfn > QLCNIC_MAX_PCI_FUNC)
499 return QL_STATUS_INVALID_PARAM;
500 adapter->npars[pfn].active = pci_info[i].active;
501 adapter->npars[pfn].type = pci_info[i].type;
502 adapter->npars[pfn].phy_port = pci_info[i].default_port;
503 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
504 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
505 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
506 }
507
508 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
509 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
510
511 return ret;
512 }
513
514 kfree(adapter->eswitch);
515 adapter->eswitch = NULL;
516err_eswitch:
517 kfree(adapter->npars);
518
519 return ret;
520}
521
522static int
523qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
524{
525 u8 id;
526 u32 ref_count;
527 int i, ret = 1;
528 u32 data = QLCNIC_MGMT_FUNC;
529 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
530
531 /* If other drivers are not in use set their privilege level */
532 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
533 ret = qlcnic_api_lock(adapter);
534 if (ret)
535 goto err_lock;
536 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
537 goto err_npar;
538
539 if (qlcnic_config_npars) {
540 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
541 id = i;
542 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
543 id == adapter->ahw.pci_func)
544 continue;
545 data |= (qlcnic_config_npars &
546 QLC_DEV_SET_DRV(0xf, id));
547 }
548 } else {
549 data = readl(priv_op);
550 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
551 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
552 adapter->ahw.pci_func));
553 }
554 writel(data, priv_op);
555err_npar:
556 qlcnic_api_unlock(adapter);
557err_lock:
558 return ret;
559}
560
561static u32
562qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
563{
564 void __iomem *msix_base_addr;
565 void __iomem *priv_op;
566 struct qlcnic_info nic_info;
567 u32 func;
568 u32 msix_base;
569 u32 op_mode, priv_level;
570
571 /* Determine FW API version */
572 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
573
574 /* Find PCI function number */
575 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
576 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
577 msix_base = readl(msix_base_addr);
578 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
579 adapter->ahw.pci_func = func;
580
581 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
582 adapter->capabilities = nic_info.capabilities;
583
584 if (adapter->capabilities & BIT_6)
585 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
586 else
587 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
588 }
589
590 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
591 adapter->nic_ops = &qlcnic_ops;
592 return adapter->fw_hal_version;
593 }
594
595 /* Determine function privilege level */
596 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
597 op_mode = readl(priv_op);
598 if (op_mode == QLC_DEV_DRV_DEFAULT)
599 priv_level = QLCNIC_MGMT_FUNC;
600 else
601 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
602
603 switch (priv_level) {
604 case QLCNIC_MGMT_FUNC:
605 adapter->op_mode = QLCNIC_MGMT_FUNC;
606 adapter->nic_ops = &qlcnic_ops;
607 qlcnic_init_pci_info(adapter);
608 /* Set privilege level for other functions */
609 qlcnic_set_function_modes(adapter);
610 dev_info(&adapter->pdev->dev,
611 "HAL Version: %d, Management function\n",
612 adapter->fw_hal_version);
613 break;
614 case QLCNIC_PRIV_FUNC:
615 adapter->op_mode = QLCNIC_PRIV_FUNC;
616 dev_info(&adapter->pdev->dev,
617 "HAL Version: %d, Privileged function\n",
618 adapter->fw_hal_version);
619 adapter->nic_ops = &qlcnic_ops;
620 break;
621 case QLCNIC_NON_PRIV_FUNC:
622 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
623 dev_info(&adapter->pdev->dev,
624 "HAL Version: %d Non Privileged function\n",
625 adapter->fw_hal_version);
626 adapter->nic_ops = &qlcnic_vf_ops;
627 break;
628 default:
629 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
630 priv_level);
631 return 0;
632 }
633 return adapter->fw_hal_version;
634}
635
636static int
456qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) 637qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
457{ 638{
458 void __iomem *mem_ptr0 = NULL; 639 void __iomem *mem_ptr0 = NULL;
@@ -460,7 +641,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
460 unsigned long mem_len, pci_len0 = 0; 641 unsigned long mem_len, pci_len0 = 0;
461 642
462 struct pci_dev *pdev = adapter->pdev; 643 struct pci_dev *pdev = adapter->pdev;
463 int pci_func = adapter->ahw.pci_func;
464 644
465 /* remap phys address */ 645 /* remap phys address */
466 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 646 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
@@ -483,8 +663,13 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
483 adapter->ahw.pci_base0 = mem_ptr0; 663 adapter->ahw.pci_base0 = mem_ptr0;
484 adapter->ahw.pci_len0 = pci_len0; 664 adapter->ahw.pci_len0 = pci_len0;
485 665
666 if (!qlcnic_get_driver_mode(adapter)) {
667 iounmap(adapter->ahw.pci_base0);
668 return -EIO;
669 }
670
486 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, 671 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
487 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); 672 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
488 673
489 return 0; 674 return 0;
490} 675}
@@ -509,7 +694,7 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
509 } 694 }
510 695
511 if (!found) 696 if (!found)
512 name = "Unknown"; 697 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
513} 698}
514 699
515static void 700static void
@@ -521,7 +706,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
521 int i, offset, val; 706 int i, offset, val;
522 int *ptr32; 707 int *ptr32;
523 struct pci_dev *pdev = adapter->pdev; 708 struct pci_dev *pdev = adapter->pdev;
524 709 struct qlcnic_info nic_info;
525 adapter->driver_mismatch = 0; 710 adapter->driver_mismatch = 0;
526 711
527 ptr32 = (int *)&serial_num; 712 ptr32 = (int *)&serial_num;
@@ -553,8 +738,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
553 dev_info(&pdev->dev, "firmware v%d.%d.%d\n", 738 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
554 fw_major, fw_minor, fw_build); 739 fw_major, fw_minor, fw_build);
555 740
556 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
557
558 adapter->flags &= ~QLCNIC_LRO_ENABLED; 741 adapter->flags &= ~QLCNIC_LRO_ENABLED;
559 742
560 if (adapter->ahw.port_type == QLCNIC_XGBE) { 743 if (adapter->ahw.port_type == QLCNIC_XGBE) {
@@ -565,6 +748,16 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
565 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 748 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
566 } 749 }
567 750
751 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
752 adapter->physical_port = nic_info.phys_port;
753 adapter->switch_mode = nic_info.switch_mode;
754 adapter->max_tx_ques = nic_info.max_tx_ques;
755 adapter->max_rx_ques = nic_info.max_rx_ques;
756 adapter->capabilities = nic_info.capabilities;
757 adapter->max_mac_filters = nic_info.max_mac_filters;
758 adapter->max_mtu = nic_info.max_mtu;
759 }
760
568 adapter->msix_supported = !!use_msi_x; 761 adapter->msix_supported = !!use_msi_x;
569 adapter->rss_supported = !!use_msi_x; 762 adapter->rss_supported = !!use_msi_x;
570 763
@@ -574,6 +767,50 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
574} 767}
575 768
576static int 769static int
770qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
771{
772 int i, err = 0;
773 struct qlcnic_npar_info *npar;
774 struct qlcnic_info nic_info;
775
776 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
777 !adapter->need_fw_reset)
778 return 0;
779
780 if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
781 /* Set the NPAR config data after FW reset */
782 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
783 npar = &adapter->npars[i];
784 if (npar->type != QLCNIC_TYPE_NIC)
785 continue;
786 err = qlcnic_get_nic_info(adapter, &nic_info, i);
787 if (err)
788 goto err_out;
789 nic_info.min_tx_bw = npar->min_bw;
790 nic_info.max_tx_bw = npar->max_bw;
791 err = qlcnic_set_nic_info(adapter, &nic_info);
792 if (err)
793 goto err_out;
794
795 if (npar->enable_pm) {
796 err = qlcnic_config_port_mirroring(adapter,
797 npar->dest_npar, 1, i);
798 if (err)
799 goto err_out;
800
801 }
802 npar->mac_learning = DEFAULT_MAC_LEARN;
803 npar->host_vlan_tag = 0;
804 npar->promisc_mode = 0;
805 npar->discard_tagged = 0;
806 npar->vlan_id = 0;
807 }
808 }
809err_out:
810 return err;
811}
812
813static int
577qlcnic_start_firmware(struct qlcnic_adapter *adapter) 814qlcnic_start_firmware(struct qlcnic_adapter *adapter)
578{ 815{
579 int val, err, first_boot; 816 int val, err, first_boot;
@@ -591,8 +828,12 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
591 828
592 if (load_fw_file) 829 if (load_fw_file)
593 qlcnic_request_firmware(adapter); 830 qlcnic_request_firmware(adapter);
594 else 831 else {
832 if (qlcnic_check_flash_fw_ver(adapter))
833 goto err_out;
834
595 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; 835 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
836 }
596 837
597 err = qlcnic_need_fw_reset(adapter); 838 err = qlcnic_need_fw_reset(adapter);
598 if (err < 0) 839 if (err < 0)
@@ -602,6 +843,7 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
602 843
603 if (first_boot != 0x55555555) { 844 if (first_boot != 0x55555555) {
604 QLCWR32(adapter, CRB_CMDPEG_STATE, 0); 845 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
846 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
605 qlcnic_pinit_from_rom(adapter); 847 qlcnic_pinit_from_rom(adapter);
606 msleep(1); 848 msleep(1);
607 } 849 }
@@ -624,7 +866,7 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
624 866
625wait_init: 867wait_init:
626 /* Handshake with the card before we register the devices. */ 868 /* Handshake with the card before we register the devices. */
627 err = qlcnic_phantom_init(adapter); 869 err = qlcnic_init_firmware(adapter);
628 if (err) 870 if (err)
629 goto err_out; 871 goto err_out;
630 872
@@ -632,6 +874,9 @@ wait_init:
632 qlcnic_idc_debug_info(adapter, 1); 874 qlcnic_idc_debug_info(adapter, 1);
633 875
634 qlcnic_check_options(adapter); 876 qlcnic_check_options(adapter);
877 if (qlcnic_reset_npar_config(adapter))
878 goto err_out;
879 qlcnic_dev_set_npar_ready(adapter);
635 880
636 adapter->need_fw_reset = 0; 881 adapter->need_fw_reset = 0;
637 882
@@ -716,9 +961,23 @@ qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
716static int 961static int
717__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) 962__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
718{ 963{
964 int ring;
965 struct qlcnic_host_rds_ring *rds_ring;
966
719 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 967 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
720 return -EIO; 968 return -EIO;
721 969
970 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
971 return 0;
972
973 if (qlcnic_fw_create_ctx(adapter))
974 return -EIO;
975
976 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
977 rds_ring = &adapter->recv_ctx.rds_rings[ring];
978 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
979 }
980
722 qlcnic_set_multi(netdev); 981 qlcnic_set_multi(netdev);
723 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); 982 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
724 983
@@ -736,6 +995,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
736 995
737 qlcnic_linkevent_request(adapter, 1); 996 qlcnic_linkevent_request(adapter, 1);
738 997
998 adapter->reset_context = 0;
739 set_bit(__QLCNIC_DEV_UP, &adapter->state); 999 set_bit(__QLCNIC_DEV_UP, &adapter->state);
740 return 0; 1000 return 0;
741} 1001}
@@ -775,6 +1035,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
775 1035
776 qlcnic_napi_disable(adapter); 1036 qlcnic_napi_disable(adapter);
777 1037
1038 qlcnic_fw_destroy_ctx(adapter);
1039
1040 qlcnic_reset_rx_buffers_list(adapter);
778 qlcnic_release_tx_buffers(adapter); 1041 qlcnic_release_tx_buffers(adapter);
779 spin_unlock(&adapter->tx_clean_lock); 1042 spin_unlock(&adapter->tx_clean_lock);
780} 1043}
@@ -796,16 +1059,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
796{ 1059{
797 struct net_device *netdev = adapter->netdev; 1060 struct net_device *netdev = adapter->netdev;
798 struct pci_dev *pdev = adapter->pdev; 1061 struct pci_dev *pdev = adapter->pdev;
799 int err, ring; 1062 int err;
800 struct qlcnic_host_rds_ring *rds_ring;
801 1063
802 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) 1064 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
803 return 0; 1065 return 0;
804 1066
805 err = qlcnic_init_firmware(adapter);
806 if (err)
807 return err;
808
809 err = qlcnic_napi_add(adapter, netdev); 1067 err = qlcnic_napi_add(adapter, netdev);
810 if (err) 1068 if (err)
811 return err; 1069 return err;
@@ -813,7 +1071,7 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
813 err = qlcnic_alloc_sw_resources(adapter); 1071 err = qlcnic_alloc_sw_resources(adapter);
814 if (err) { 1072 if (err) {
815 dev_err(&pdev->dev, "Error in setting sw resources\n"); 1073 dev_err(&pdev->dev, "Error in setting sw resources\n");
816 return err; 1074 goto err_out_napi_del;
817 } 1075 }
818 1076
819 err = qlcnic_alloc_hw_resources(adapter); 1077 err = qlcnic_alloc_hw_resources(adapter);
@@ -822,16 +1080,10 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
822 goto err_out_free_sw; 1080 goto err_out_free_sw;
823 } 1081 }
824 1082
825
826 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
827 rds_ring = &adapter->recv_ctx.rds_rings[ring];
828 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
829 }
830
831 err = qlcnic_request_irq(adapter); 1083 err = qlcnic_request_irq(adapter);
832 if (err) { 1084 if (err) {
833 dev_err(&pdev->dev, "failed to setup interrupt\n"); 1085 dev_err(&pdev->dev, "failed to setup interrupt\n");
834 goto err_out_free_rxbuf; 1086 goto err_out_free_hw;
835 } 1087 }
836 1088
837 qlcnic_init_coalesce_defaults(adapter); 1089 qlcnic_init_coalesce_defaults(adapter);
@@ -841,11 +1093,12 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
841 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; 1093 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
842 return 0; 1094 return 0;
843 1095
844err_out_free_rxbuf: 1096err_out_free_hw:
845 qlcnic_release_rx_buffers(adapter);
846 qlcnic_free_hw_resources(adapter); 1097 qlcnic_free_hw_resources(adapter);
847err_out_free_sw: 1098err_out_free_sw:
848 qlcnic_free_sw_resources(adapter); 1099 qlcnic_free_sw_resources(adapter);
1100err_out_napi_del:
1101 qlcnic_napi_del(adapter);
849 return err; 1102 return err;
850} 1103}
851 1104
@@ -880,6 +1133,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
880 } 1133 }
881 } 1134 }
882 1135
1136 qlcnic_fw_destroy_ctx(adapter);
1137
883 qlcnic_detach(adapter); 1138 qlcnic_detach(adapter);
884 1139
885 adapter->diag_test = 0; 1140 adapter->diag_test = 0;
@@ -898,6 +1153,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
898{ 1153{
899 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1154 struct qlcnic_adapter *adapter = netdev_priv(netdev);
900 struct qlcnic_host_sds_ring *sds_ring; 1155 struct qlcnic_host_sds_ring *sds_ring;
1156 struct qlcnic_host_rds_ring *rds_ring;
901 int ring; 1157 int ring;
902 int ret; 1158 int ret;
903 1159
@@ -917,6 +1173,18 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
917 return ret; 1173 return ret;
918 } 1174 }
919 1175
1176 ret = qlcnic_fw_create_ctx(adapter);
1177 if (ret) {
1178 qlcnic_detach(adapter);
1179 netif_device_attach(netdev);
1180 return ret;
1181 }
1182
1183 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1184 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1185 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1186 }
1187
920 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1188 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
921 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1189 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
922 sds_ring = &adapter->recv_ctx.sds_rings[ring]; 1190 sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -928,6 +1196,27 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
928 return 0; 1196 return 0;
929} 1197}
930 1198
1199/* Reset context in hardware only */
1200static int
1201qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1202{
1203 struct net_device *netdev = adapter->netdev;
1204
1205 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1206 return -EBUSY;
1207
1208 netif_device_detach(netdev);
1209
1210 qlcnic_down(adapter, netdev);
1211
1212 qlcnic_up(adapter, netdev);
1213
1214 netif_device_attach(netdev);
1215
1216 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1217 return 0;
1218}
1219
931int 1220int
932qlcnic_reset_context(struct qlcnic_adapter *adapter) 1221qlcnic_reset_context(struct qlcnic_adapter *adapter)
933{ 1222{
@@ -971,18 +1260,21 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
971 adapter->max_mc_count = 38; 1260 adapter->max_mc_count = 38;
972 1261
973 netdev->netdev_ops = &qlcnic_netdev_ops; 1262 netdev->netdev_ops = &qlcnic_netdev_ops;
974 netdev->watchdog_timeo = 2*HZ; 1263 netdev->watchdog_timeo = 5*HZ;
975 1264
976 qlcnic_change_mtu(netdev, netdev->mtu); 1265 qlcnic_change_mtu(netdev, netdev->mtu);
977 1266
978 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); 1267 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
979 1268
980 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 1269 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
981 netdev->features |= (NETIF_F_GRO); 1270 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
982 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 1271 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1272 NETIF_F_IPV6_CSUM);
983 1273
984 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1274 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
985 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1275 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1276 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1277 }
986 1278
987 if (pci_using_dac) { 1279 if (pci_using_dac) {
988 netdev->features |= NETIF_F_HIGHDMA; 1280 netdev->features |= NETIF_F_HIGHDMA;
@@ -997,8 +1289,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
997 1289
998 netdev->irq = adapter->msix_entries[0].vector; 1290 netdev->irq = adapter->msix_entries[0].vector;
999 1291
1000 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1001
1002 if (qlcnic_read_mac_addr(adapter)) 1292 if (qlcnic_read_mac_addr(adapter))
1003 dev_warn(&pdev->dev, "failed to read mac addr\n"); 1293 dev_warn(&pdev->dev, "failed to read mac addr\n");
1004 1294
@@ -1036,7 +1326,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1036 struct net_device *netdev = NULL; 1326 struct net_device *netdev = NULL;
1037 struct qlcnic_adapter *adapter = NULL; 1327 struct qlcnic_adapter *adapter = NULL;
1038 int err; 1328 int err;
1039 int pci_func_id = PCI_FUNC(pdev->devfn);
1040 uint8_t revision_id; 1329 uint8_t revision_id;
1041 uint8_t pci_using_dac; 1330 uint8_t pci_using_dac;
1042 1331
@@ -1058,6 +1347,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1058 goto err_out_disable_pdev; 1347 goto err_out_disable_pdev;
1059 1348
1060 pci_set_master(pdev); 1349 pci_set_master(pdev);
1350 pci_enable_pcie_error_reporting(pdev);
1061 1351
1062 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); 1352 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1063 if (!netdev) { 1353 if (!netdev) {
@@ -1072,7 +1362,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1072 adapter->netdev = netdev; 1362 adapter->netdev = netdev;
1073 adapter->pdev = pdev; 1363 adapter->pdev = pdev;
1074 adapter->dev_rst_time = jiffies; 1364 adapter->dev_rst_time = jiffies;
1075 adapter->ahw.pci_func = pci_func_id;
1076 1365
1077 revision_id = pdev->revision; 1366 revision_id = pdev->revision;
1078 adapter->ahw.revision_id = revision_id; 1367 adapter->ahw.revision_id = revision_id;
@@ -1088,7 +1377,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1088 goto err_out_free_netdev; 1377 goto err_out_free_netdev;
1089 1378
1090 /* This will be reset for mezz cards */ 1379 /* This will be reset for mezz cards */
1091 adapter->portnum = pci_func_id; 1380 adapter->portnum = adapter->ahw.pci_func;
1092 1381
1093 err = qlcnic_get_board_info(adapter); 1382 err = qlcnic_get_board_info(adapter);
1094 if (err) { 1383 if (err) {
@@ -1102,7 +1391,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1102 if (qlcnic_setup_idc_param(adapter)) 1391 if (qlcnic_setup_idc_param(adapter))
1103 goto err_out_iounmap; 1392 goto err_out_iounmap;
1104 1393
1105 err = qlcnic_start_firmware(adapter); 1394 err = adapter->nic_ops->start_firmware(adapter);
1106 if (err) { 1395 if (err) {
1107 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 1396 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
1108 goto err_out_decr_ref; 1397 goto err_out_decr_ref;
@@ -1171,10 +1460,13 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1171 1460
1172 unregister_netdev(netdev); 1461 unregister_netdev(netdev);
1173 1462
1174 cancel_work_sync(&adapter->tx_timeout_task);
1175
1176 qlcnic_detach(adapter); 1463 qlcnic_detach(adapter);
1177 1464
1465 if (adapter->npars != NULL)
1466 kfree(adapter->npars);
1467 if (adapter->eswitch != NULL)
1468 kfree(adapter->eswitch);
1469
1178 qlcnic_clr_all_drv_state(adapter); 1470 qlcnic_clr_all_drv_state(adapter);
1179 1471
1180 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1472 clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1187,6 +1479,7 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1187 1479
1188 qlcnic_release_firmware(adapter); 1480 qlcnic_release_firmware(adapter);
1189 1481
1482 pci_disable_pcie_error_reporting(pdev);
1190 pci_release_regions(pdev); 1483 pci_release_regions(pdev);
1191 pci_disable_device(pdev); 1484 pci_disable_device(pdev);
1192 pci_set_drvdata(pdev, NULL); 1485 pci_set_drvdata(pdev, NULL);
@@ -1206,10 +1499,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
1206 if (netif_running(netdev)) 1499 if (netif_running(netdev))
1207 qlcnic_down(adapter, netdev); 1500 qlcnic_down(adapter, netdev);
1208 1501
1209 cancel_work_sync(&adapter->tx_timeout_task);
1210
1211 qlcnic_detach(adapter);
1212
1213 qlcnic_clr_all_drv_state(adapter); 1502 qlcnic_clr_all_drv_state(adapter);
1214 1503
1215 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1504 clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1263,35 +1552,23 @@ qlcnic_resume(struct pci_dev *pdev)
1263 pci_set_master(pdev); 1552 pci_set_master(pdev);
1264 pci_restore_state(pdev); 1553 pci_restore_state(pdev);
1265 1554
1266 err = qlcnic_start_firmware(adapter); 1555 err = adapter->nic_ops->start_firmware(adapter);
1267 if (err) { 1556 if (err) {
1268 dev_err(&pdev->dev, "failed to start firmware\n"); 1557 dev_err(&pdev->dev, "failed to start firmware\n");
1269 return err; 1558 return err;
1270 } 1559 }
1271 1560
1272 if (netif_running(netdev)) { 1561 if (netif_running(netdev)) {
1273 err = qlcnic_attach(adapter);
1274 if (err)
1275 goto err_out;
1276
1277 err = qlcnic_up(adapter, netdev); 1562 err = qlcnic_up(adapter, netdev);
1278 if (err) 1563 if (err)
1279 goto err_out_detach; 1564 goto done;
1280
1281 1565
1282 qlcnic_config_indev_addr(netdev, NETDEV_UP); 1566 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1283 } 1567 }
1284 1568done:
1285 netif_device_attach(netdev); 1569 netif_device_attach(netdev);
1286 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 1570 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1287 return 0; 1571 return 0;
1288
1289err_out_detach:
1290 qlcnic_detach(adapter);
1291err_out:
1292 qlcnic_clr_all_drv_state(adapter);
1293 netif_device_attach(netdev);
1294 return err;
1295} 1572}
1296#endif 1573#endif
1297 1574
@@ -1340,11 +1617,11 @@ qlcnic_tso_check(struct net_device *netdev,
1340 u8 opcode = TX_ETHER_PKT; 1617 u8 opcode = TX_ETHER_PKT;
1341 __be16 protocol = skb->protocol; 1618 __be16 protocol = skb->protocol;
1342 u16 flags = 0, vid = 0; 1619 u16 flags = 0, vid = 0;
1343 u32 producer;
1344 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; 1620 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1345 struct cmd_desc_type0 *hwdesc; 1621 struct cmd_desc_type0 *hwdesc;
1346 struct vlan_ethhdr *vh; 1622 struct vlan_ethhdr *vh;
1347 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1623 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1624 u32 producer = tx_ring->producer;
1348 1625
1349 if (protocol == cpu_to_be16(ETH_P_8021Q)) { 1626 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1350 1627
@@ -1360,6 +1637,11 @@ qlcnic_tso_check(struct net_device *netdev,
1360 vlan_oob = 1; 1637 vlan_oob = 1;
1361 } 1638 }
1362 1639
1640 if (*(skb->data) & BIT_0) {
1641 flags |= BIT_0;
1642 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1643 }
1644
1363 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1645 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1364 skb_shinfo(skb)->gso_size > 0) { 1646 skb_shinfo(skb)->gso_size > 0) {
1365 1647
@@ -1409,7 +1691,6 @@ qlcnic_tso_check(struct net_device *netdev,
1409 /* For LSO, we need to copy the MAC/IP/TCP headers into 1691 /* For LSO, we need to copy the MAC/IP/TCP headers into
1410 * the descriptor ring 1692 * the descriptor ring
1411 */ 1693 */
1412 producer = tx_ring->producer;
1413 copied = 0; 1694 copied = 0;
1414 offset = 2; 1695 offset = 2;
1415 1696
@@ -1537,10 +1818,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1537 /* 4 fragments per cmd des */ 1818 /* 4 fragments per cmd des */
1538 no_of_desc = (frag_count + 3) >> 2; 1819 no_of_desc = (frag_count + 3) >> 2;
1539 1820
1540 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) { 1821 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1541 netif_stop_queue(netdev); 1822 netif_stop_queue(netdev);
1542 adapter->stats.xmit_off++; 1823 smp_mb();
1543 return NETDEV_TX_BUSY; 1824 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1825 netif_start_queue(netdev);
1826 else {
1827 adapter->stats.xmit_off++;
1828 return NETDEV_TX_BUSY;
1829 }
1544 } 1830 }
1545 1831
1546 producer = tx_ring->producer; 1832 producer = tx_ring->producer;
@@ -1675,35 +1961,11 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
1675 return; 1961 return;
1676 1962
1677 dev_err(&netdev->dev, "transmit timeout, resetting.\n"); 1963 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1678 schedule_work(&adapter->tx_timeout_task);
1679}
1680
1681static void qlcnic_tx_timeout_task(struct work_struct *work)
1682{
1683 struct qlcnic_adapter *adapter =
1684 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1685
1686 if (!netif_running(adapter->netdev))
1687 return;
1688
1689 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1690 return;
1691 1964
1692 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) 1965 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1693 goto request_reset; 1966 adapter->need_fw_reset = 1;
1694 1967 else
1695 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1968 adapter->reset_context = 1;
1696 if (!qlcnic_reset_context(adapter)) {
1697 adapter->netdev->trans_start = jiffies;
1698 return;
1699
1700 /* context reset failed, fall through for fw reset */
1701 }
1702
1703request_reset:
1704 adapter->need_fw_reset = 1;
1705 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1706 QLCDB(adapter, DRV, "Resetting adapter\n");
1707} 1969}
1708 1970
1709static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) 1971static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1846,14 +2108,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1846 smp_mb(); 2108 smp_mb();
1847 2109
1848 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { 2110 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1849 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1850 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { 2111 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1851 netif_wake_queue(netdev); 2112 netif_wake_queue(netdev);
1852 adapter->tx_timeo_cnt = 0;
1853 adapter->stats.xmit_on++; 2113 adapter->stats.xmit_on++;
1854 } 2114 }
1855 __netif_tx_unlock(tx_ring->txq);
1856 } 2115 }
2116 adapter->tx_timeo_cnt = 0;
1857 } 2117 }
1858 /* 2118 /*
1859 * If everything is freed up to consumer then check if the ring is full 2119 * If everything is freed up to consumer then check if the ring is full
@@ -1898,6 +2158,25 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
1898 return work_done; 2158 return work_done;
1899} 2159}
1900 2160
2161static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2162{
2163 struct qlcnic_host_sds_ring *sds_ring =
2164 container_of(napi, struct qlcnic_host_sds_ring, napi);
2165
2166 struct qlcnic_adapter *adapter = sds_ring->adapter;
2167 int work_done;
2168
2169 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2170
2171 if (work_done < budget) {
2172 napi_complete(&sds_ring->napi);
2173 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2174 qlcnic_enable_int(sds_ring);
2175 }
2176
2177 return work_done;
2178}
2179
1901#ifdef CONFIG_NET_POLL_CONTROLLER 2180#ifdef CONFIG_NET_POLL_CONTROLLER
1902static void qlcnic_poll_controller(struct net_device *netdev) 2181static void qlcnic_poll_controller(struct net_device *netdev)
1903{ 2182{
@@ -2109,7 +2388,7 @@ qlcnic_fwinit_work(struct work_struct *work)
2109{ 2388{
2110 struct qlcnic_adapter *adapter = container_of(work, 2389 struct qlcnic_adapter *adapter = container_of(work,
2111 struct qlcnic_adapter, fw_work.work); 2390 struct qlcnic_adapter, fw_work.work);
2112 u32 dev_state = 0xf; 2391 u32 dev_state = 0xf, npar_state;
2113 2392
2114 if (qlcnic_api_lock(adapter)) 2393 if (qlcnic_api_lock(adapter))
2115 goto err_ret; 2394 goto err_ret;
@@ -2122,6 +2401,19 @@ qlcnic_fwinit_work(struct work_struct *work)
2122 return; 2401 return;
2123 } 2402 }
2124 2403
2404 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2405 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2406 if (npar_state == QLCNIC_DEV_NPAR_RDY) {
2407 qlcnic_api_unlock(adapter);
2408 goto wait_npar;
2409 } else {
2410 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2411 FW_POLL_DELAY);
2412 qlcnic_api_unlock(adapter);
2413 return;
2414 }
2415 }
2416
2125 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { 2417 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2126 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", 2418 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2127 adapter->reset_ack_timeo); 2419 adapter->reset_ack_timeo);
@@ -2154,7 +2446,7 @@ skip_ack_check:
2154 2446
2155 qlcnic_api_unlock(adapter); 2447 qlcnic_api_unlock(adapter);
2156 2448
2157 if (!qlcnic_start_firmware(adapter)) { 2449 if (!adapter->nic_ops->start_firmware(adapter)) {
2158 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2450 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2159 return; 2451 return;
2160 } 2452 }
@@ -2163,6 +2455,7 @@ skip_ack_check:
2163 2455
2164 qlcnic_api_unlock(adapter); 2456 qlcnic_api_unlock(adapter);
2165 2457
2458wait_npar:
2166 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2459 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2167 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); 2460 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2168 2461
@@ -2177,7 +2470,7 @@ skip_ack_check:
2177 break; 2470 break;
2178 2471
2179 default: 2472 default:
2180 if (!qlcnic_start_firmware(adapter)) { 2473 if (!adapter->nic_ops->start_firmware(adapter)) {
2181 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2474 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2182 return; 2475 return;
2183 } 2476 }
@@ -2202,10 +2495,6 @@ qlcnic_detach_work(struct work_struct *work)
2202 2495
2203 qlcnic_down(adapter, netdev); 2496 qlcnic_down(adapter, netdev);
2204 2497
2205 rtnl_lock();
2206 qlcnic_detach(adapter);
2207 rtnl_unlock();
2208
2209 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); 2498 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2210 2499
2211 if (status & QLCNIC_RCODE_FATAL_ERROR) 2500 if (status & QLCNIC_RCODE_FATAL_ERROR)
@@ -2237,6 +2526,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2237{ 2526{
2238 u32 state; 2527 u32 state;
2239 2528
2529 adapter->need_fw_reset = 1;
2240 if (qlcnic_api_lock(adapter)) 2530 if (qlcnic_api_lock(adapter))
2241 return; 2531 return;
2242 2532
@@ -2251,10 +2541,36 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2251 qlcnic_api_unlock(adapter); 2541 qlcnic_api_unlock(adapter);
2252} 2542}
2253 2543
2544/* Transit to NPAR READY state from NPAR NOT READY state */
2545static void
2546qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2547{
2548 u32 state;
2549
2550 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2551 adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
2552 return;
2553 if (qlcnic_api_lock(adapter))
2554 return;
2555
2556 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2557
2558 if (state != QLCNIC_DEV_NPAR_RDY) {
2559 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2560 QLCNIC_DEV_NPAR_RDY);
2561 QLCDB(adapter, DRV, "NPAR READY state set\n");
2562 }
2563
2564 qlcnic_api_unlock(adapter);
2565}
2566
2254static void 2567static void
2255qlcnic_schedule_work(struct qlcnic_adapter *adapter, 2568qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2256 work_func_t func, int delay) 2569 work_func_t func, int delay)
2257{ 2570{
2571 if (test_bit(__QLCNIC_AER, &adapter->state))
2572 return;
2573
2258 INIT_DELAYED_WORK(&adapter->fw_work, func); 2574 INIT_DELAYED_WORK(&adapter->fw_work, func);
2259 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay)); 2575 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2260} 2576}
@@ -2274,18 +2590,10 @@ qlcnic_attach_work(struct work_struct *work)
2274 struct qlcnic_adapter *adapter = container_of(work, 2590 struct qlcnic_adapter *adapter = container_of(work,
2275 struct qlcnic_adapter, fw_work.work); 2591 struct qlcnic_adapter, fw_work.work);
2276 struct net_device *netdev = adapter->netdev; 2592 struct net_device *netdev = adapter->netdev;
2277 int err;
2278 2593
2279 if (netif_running(netdev)) { 2594 if (netif_running(netdev)) {
2280 err = qlcnic_attach(adapter); 2595 if (qlcnic_up(adapter, netdev))
2281 if (err)
2282 goto done;
2283
2284 err = qlcnic_up(adapter, netdev);
2285 if (err) {
2286 qlcnic_detach(adapter);
2287 goto done; 2596 goto done;
2288 }
2289 2597
2290 qlcnic_config_indev_addr(netdev, NETDEV_UP); 2598 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2291 } 2599 }
@@ -2322,6 +2630,13 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2322 adapter->fw_fail_cnt = 0; 2630 adapter->fw_fail_cnt = 0;
2323 if (adapter->need_fw_reset) 2631 if (adapter->need_fw_reset)
2324 goto detach; 2632 goto detach;
2633
2634 if (adapter->reset_context &&
2635 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2636 qlcnic_reset_hw_context(adapter);
2637 adapter->netdev->trans_start = jiffies;
2638 }
2639
2325 return 0; 2640 return 0;
2326 } 2641 }
2327 2642
@@ -2330,7 +2645,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2330 2645
2331 qlcnic_dev_request_reset(adapter); 2646 qlcnic_dev_request_reset(adapter);
2332 2647
2333 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); 2648 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2649 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2334 2650
2335 dev_info(&netdev->dev, "firmware hang detected\n"); 2651 dev_info(&netdev->dev, "firmware hang detected\n");
2336 2652
@@ -2365,6 +2681,161 @@ reschedule:
2365 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 2681 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2366} 2682}
2367 2683
2684static int qlcnic_is_first_func(struct pci_dev *pdev)
2685{
2686 struct pci_dev *oth_pdev;
2687 int val = pdev->devfn;
2688
2689 while (val-- > 0) {
2690 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2691 (pdev->bus), pdev->bus->number,
2692 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
2693 if (!oth_pdev)
2694 continue;
2695
2696 if (oth_pdev->current_state != PCI_D3cold) {
2697 pci_dev_put(oth_pdev);
2698 return 0;
2699 }
2700 pci_dev_put(oth_pdev);
2701 }
2702 return 1;
2703}
2704
2705static int qlcnic_attach_func(struct pci_dev *pdev)
2706{
2707 int err, first_func;
2708 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2709 struct net_device *netdev = adapter->netdev;
2710
2711 pdev->error_state = pci_channel_io_normal;
2712
2713 err = pci_enable_device(pdev);
2714 if (err)
2715 return err;
2716
2717 pci_set_power_state(pdev, PCI_D0);
2718 pci_set_master(pdev);
2719 pci_restore_state(pdev);
2720
2721 first_func = qlcnic_is_first_func(pdev);
2722
2723 if (qlcnic_api_lock(adapter))
2724 return -EINVAL;
2725
2726 if (first_func) {
2727 adapter->need_fw_reset = 1;
2728 set_bit(__QLCNIC_START_FW, &adapter->state);
2729 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2730 QLCDB(adapter, DRV, "Restarting fw\n");
2731 }
2732 qlcnic_api_unlock(adapter);
2733
2734 err = adapter->nic_ops->start_firmware(adapter);
2735 if (err)
2736 return err;
2737
2738 qlcnic_clr_drv_state(adapter);
2739 qlcnic_setup_intr(adapter);
2740
2741 if (netif_running(netdev)) {
2742 err = qlcnic_attach(adapter);
2743 if (err) {
2744 qlcnic_clr_all_drv_state(adapter);
2745 clear_bit(__QLCNIC_AER, &adapter->state);
2746 netif_device_attach(netdev);
2747 return err;
2748 }
2749
2750 err = qlcnic_up(adapter, netdev);
2751 if (err)
2752 goto done;
2753
2754 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2755 }
2756 done:
2757 netif_device_attach(netdev);
2758 return err;
2759}
2760
2761static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2762 pci_channel_state_t state)
2763{
2764 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2765 struct net_device *netdev = adapter->netdev;
2766
2767 if (state == pci_channel_io_perm_failure)
2768 return PCI_ERS_RESULT_DISCONNECT;
2769
2770 if (state == pci_channel_io_normal)
2771 return PCI_ERS_RESULT_RECOVERED;
2772
2773 set_bit(__QLCNIC_AER, &adapter->state);
2774 netif_device_detach(netdev);
2775
2776 cancel_delayed_work_sync(&adapter->fw_work);
2777
2778 if (netif_running(netdev))
2779 qlcnic_down(adapter, netdev);
2780
2781 qlcnic_detach(adapter);
2782 qlcnic_teardown_intr(adapter);
2783
2784 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2785
2786 pci_save_state(pdev);
2787 pci_disable_device(pdev);
2788
2789 return PCI_ERS_RESULT_NEED_RESET;
2790}
2791
2792static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2793{
2794 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2795 PCI_ERS_RESULT_RECOVERED;
2796}
2797
2798static void qlcnic_io_resume(struct pci_dev *pdev)
2799{
2800 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2801
2802 pci_cleanup_aer_uncorrect_error_status(pdev);
2803
2804 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2805 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2806 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2807 FW_POLL_DELAY);
2808}
2809
2810
2811static int
2812qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2813{
2814 int err;
2815
2816 err = qlcnic_can_start_firmware(adapter);
2817 if (err)
2818 return err;
2819
2820 qlcnic_check_options(adapter);
2821
2822 adapter->need_fw_reset = 0;
2823
2824 return err;
2825}
2826
2827static int
2828qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2829{
2830 return -EOPNOTSUPP;
2831}
2832
2833static int
2834qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2835{
2836 return -EOPNOTSUPP;
2837}
2838
2368static ssize_t 2839static ssize_t
2369qlcnic_store_bridged_mode(struct device *dev, 2840qlcnic_store_bridged_mode(struct device *dev,
2370 struct device_attribute *attr, const char *buf, size_t len) 2841 struct device_attribute *attr, const char *buf, size_t len)
@@ -2376,13 +2847,13 @@ qlcnic_store_bridged_mode(struct device *dev,
2376 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)) 2847 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2377 goto err_out; 2848 goto err_out;
2378 2849
2379 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 2850 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2380 goto err_out; 2851 goto err_out;
2381 2852
2382 if (strict_strtoul(buf, 2, &new)) 2853 if (strict_strtoul(buf, 2, &new))
2383 goto err_out; 2854 goto err_out;
2384 2855
2385 if (!qlcnic_config_bridged_mode(adapter, !!new)) 2856 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
2386 ret = len; 2857 ret = len;
2387 2858
2388err_out: 2859err_out:
@@ -2585,6 +3056,361 @@ static struct bin_attribute bin_attr_mem = {
2585 .write = qlcnic_sysfs_write_mem, 3056 .write = qlcnic_sysfs_write_mem,
2586}; 3057};
2587 3058
3059static int
3060validate_pm_config(struct qlcnic_adapter *adapter,
3061 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3062{
3063
3064 u8 src_pci_func, s_esw_id, d_esw_id;
3065 u8 dest_pci_func;
3066 int i;
3067
3068 for (i = 0; i < count; i++) {
3069 src_pci_func = pm_cfg[i].pci_func;
3070 dest_pci_func = pm_cfg[i].dest_npar;
3071 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3072 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3073 return QL_STATUS_INVALID_PARAM;
3074
3075 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3076 return QL_STATUS_INVALID_PARAM;
3077
3078 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3079 return QL_STATUS_INVALID_PARAM;
3080
3081 if (!IS_VALID_MODE(pm_cfg[i].action))
3082 return QL_STATUS_INVALID_PARAM;
3083
3084 s_esw_id = adapter->npars[src_pci_func].phy_port;
3085 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3086
3087 if (s_esw_id != d_esw_id)
3088 return QL_STATUS_INVALID_PARAM;
3089
3090 }
3091 return 0;
3092
3093}
3094
3095static ssize_t
3096qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3097 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3098{
3099 struct device *dev = container_of(kobj, struct device, kobj);
3100 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3101 struct qlcnic_pm_func_cfg *pm_cfg;
3102 u32 id, action, pci_func;
3103 int count, rem, i, ret;
3104
3105 count = size / sizeof(struct qlcnic_pm_func_cfg);
3106 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3107 if (rem)
3108 return QL_STATUS_INVALID_PARAM;
3109
3110 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3111
3112 ret = validate_pm_config(adapter, pm_cfg, count);
3113 if (ret)
3114 return ret;
3115 for (i = 0; i < count; i++) {
3116 pci_func = pm_cfg[i].pci_func;
3117 action = pm_cfg[i].action;
3118 id = adapter->npars[pci_func].phy_port;
3119 ret = qlcnic_config_port_mirroring(adapter, id,
3120 action, pci_func);
3121 if (ret)
3122 return ret;
3123 }
3124
3125 for (i = 0; i < count; i++) {
3126 pci_func = pm_cfg[i].pci_func;
3127 id = adapter->npars[pci_func].phy_port;
3128 adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
3129 adapter->npars[pci_func].dest_npar = id;
3130 }
3131 return size;
3132}
3133
3134static ssize_t
3135qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3136 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3137{
3138 struct device *dev = container_of(kobj, struct device, kobj);
3139 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3140 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3141 int i;
3142
3143 if (size != sizeof(pm_cfg))
3144 return QL_STATUS_INVALID_PARAM;
3145
3146 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3147 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3148 continue;
3149 pm_cfg[i].action = adapter->npars[i].enable_pm;
3150 pm_cfg[i].dest_npar = 0;
3151 pm_cfg[i].pci_func = i;
3152 }
3153 memcpy(buf, &pm_cfg, size);
3154
3155 return size;
3156}
3157
3158static int
3159validate_esw_config(struct qlcnic_adapter *adapter,
3160 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3161{
3162 u8 pci_func;
3163 int i;
3164
3165 for (i = 0; i < count; i++) {
3166 pci_func = esw_cfg[i].pci_func;
3167 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3168 return QL_STATUS_INVALID_PARAM;
3169
3170 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3171 return QL_STATUS_INVALID_PARAM;
3172
3173 if (esw_cfg->host_vlan_tag == 1)
3174 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3175 return QL_STATUS_INVALID_PARAM;
3176
3177 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
3178 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
3179 || !IS_VALID_MODE(esw_cfg[i].mac_learning)
3180 || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
3181 return QL_STATUS_INVALID_PARAM;
3182 }
3183
3184 return 0;
3185}
3186
3187static ssize_t
3188qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3189 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3190{
3191 struct device *dev = container_of(kobj, struct device, kobj);
3192 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3193 struct qlcnic_esw_func_cfg *esw_cfg;
3194 int count, rem, i, ret;
3195 u8 id, pci_func;
3196
3197 count = size / sizeof(struct qlcnic_esw_func_cfg);
3198 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3199 if (rem)
3200 return QL_STATUS_INVALID_PARAM;
3201
3202 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3203 ret = validate_esw_config(adapter, esw_cfg, count);
3204 if (ret)
3205 return ret;
3206
3207 for (i = 0; i < count; i++) {
3208 pci_func = esw_cfg[i].pci_func;
3209 id = adapter->npars[pci_func].phy_port;
3210 ret = qlcnic_config_switch_port(adapter, id,
3211 esw_cfg[i].host_vlan_tag,
3212 esw_cfg[i].discard_tagged,
3213 esw_cfg[i].promisc_mode,
3214 esw_cfg[i].mac_learning,
3215 esw_cfg[i].pci_func,
3216 esw_cfg[i].vlan_id);
3217 if (ret)
3218 return ret;
3219 }
3220
3221 for (i = 0; i < count; i++) {
3222 pci_func = esw_cfg[i].pci_func;
3223 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
3224 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
3225 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
3226 adapter->npars[pci_func].discard_tagged =
3227 esw_cfg[i].discard_tagged;
3228 adapter->npars[pci_func].host_vlan_tag =
3229 esw_cfg[i].host_vlan_tag;
3230 }
3231
3232 return size;
3233}
3234
3235static ssize_t
3236qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3237 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3238{
3239 struct device *dev = container_of(kobj, struct device, kobj);
3240 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3241 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3242 int i;
3243
3244 if (size != sizeof(esw_cfg))
3245 return QL_STATUS_INVALID_PARAM;
3246
3247 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3248 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3249 continue;
3250
3251 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
3252 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
3253 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3254 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3255 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3256 }
3257 memcpy(buf, &esw_cfg, size);
3258
3259 return size;
3260}
3261
3262static int
3263validate_npar_config(struct qlcnic_adapter *adapter,
3264 struct qlcnic_npar_func_cfg *np_cfg, int count)
3265{
3266 u8 pci_func, i;
3267
3268 for (i = 0; i < count; i++) {
3269 pci_func = np_cfg[i].pci_func;
3270 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3271 return QL_STATUS_INVALID_PARAM;
3272
3273 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3274 return QL_STATUS_INVALID_PARAM;
3275
3276 if (!IS_VALID_BW(np_cfg[i].min_bw)
3277 || !IS_VALID_BW(np_cfg[i].max_bw)
3278 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3279 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3280 return QL_STATUS_INVALID_PARAM;
3281 }
3282 return 0;
3283}
3284
3285static ssize_t
3286qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3287 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3288{
3289 struct device *dev = container_of(kobj, struct device, kobj);
3290 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3291 struct qlcnic_info nic_info;
3292 struct qlcnic_npar_func_cfg *np_cfg;
3293 int i, count, rem, ret;
3294 u8 pci_func;
3295
3296 count = size / sizeof(struct qlcnic_npar_func_cfg);
3297 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3298 if (rem)
3299 return QL_STATUS_INVALID_PARAM;
3300
3301 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3302 ret = validate_npar_config(adapter, np_cfg, count);
3303 if (ret)
3304 return ret;
3305
3306 for (i = 0; i < count ; i++) {
3307 pci_func = np_cfg[i].pci_func;
3308 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3309 if (ret)
3310 return ret;
3311 nic_info.pci_func = pci_func;
3312 nic_info.min_tx_bw = np_cfg[i].min_bw;
3313 nic_info.max_tx_bw = np_cfg[i].max_bw;
3314 ret = qlcnic_set_nic_info(adapter, &nic_info);
3315 if (ret)
3316 return ret;
3317 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3318 adapter->npars[i].max_bw = nic_info.max_tx_bw;
3319 }
3320
3321 return size;
3322
3323}
3324static ssize_t
3325qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3326 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3327{
3328 struct device *dev = container_of(kobj, struct device, kobj);
3329 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3330 struct qlcnic_info nic_info;
3331 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3332 int i, ret;
3333
3334 if (size != sizeof(np_cfg))
3335 return QL_STATUS_INVALID_PARAM;
3336
3337 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3338 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3339 continue;
3340 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3341 if (ret)
3342 return ret;
3343
3344 np_cfg[i].pci_func = i;
3345 np_cfg[i].op_mode = nic_info.op_mode;
3346 np_cfg[i].port_num = nic_info.phys_port;
3347 np_cfg[i].fw_capab = nic_info.capabilities;
3348 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3349 np_cfg[i].max_bw = nic_info.max_tx_bw;
3350 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3351 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3352 }
3353 memcpy(buf, &np_cfg, size);
3354 return size;
3355}
3356
3357static ssize_t
3358qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3359 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3360{
3361 struct device *dev = container_of(kobj, struct device, kobj);
3362 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3363 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3364 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
3365 int i, ret;
3366
3367 if (size != sizeof(pci_cfg))
3368 return QL_STATUS_INVALID_PARAM;
3369
3370 ret = qlcnic_get_pci_info(adapter, pci_info);
3371 if (ret)
3372 return ret;
3373
3374 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3375 pci_cfg[i].pci_func = pci_info[i].id;
3376 pci_cfg[i].func_type = pci_info[i].type;
3377 pci_cfg[i].port_num = pci_info[i].default_port;
3378 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3379 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3380 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3381 }
3382 memcpy(buf, &pci_cfg, size);
3383 return size;
3384
3385}
3386static struct bin_attribute bin_attr_npar_config = {
3387 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3388 .size = 0,
3389 .read = qlcnic_sysfs_read_npar_config,
3390 .write = qlcnic_sysfs_write_npar_config,
3391};
3392
3393static struct bin_attribute bin_attr_pci_config = {
3394 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3395 .size = 0,
3396 .read = qlcnic_sysfs_read_pci_config,
3397 .write = NULL,
3398};
3399
3400static struct bin_attribute bin_attr_esw_config = {
3401 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3402 .size = 0,
3403 .read = qlcnic_sysfs_read_esw_config,
3404 .write = qlcnic_sysfs_write_esw_config,
3405};
3406
3407static struct bin_attribute bin_attr_pm_config = {
3408 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3409 .size = 0,
3410 .read = qlcnic_sysfs_read_pm_config,
3411 .write = qlcnic_sysfs_write_pm_config,
3412};
3413
2588static void 3414static void
2589qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) 3415qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2590{ 3416{
@@ -2610,23 +3436,45 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2610{ 3436{
2611 struct device *dev = &adapter->pdev->dev; 3437 struct device *dev = &adapter->pdev->dev;
2612 3438
3439 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3440 return;
2613 if (device_create_file(dev, &dev_attr_diag_mode)) 3441 if (device_create_file(dev, &dev_attr_diag_mode))
2614 dev_info(dev, "failed to create diag_mode sysfs entry\n"); 3442 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2615 if (device_create_bin_file(dev, &bin_attr_crb)) 3443 if (device_create_bin_file(dev, &bin_attr_crb))
2616 dev_info(dev, "failed to create crb sysfs entry\n"); 3444 dev_info(dev, "failed to create crb sysfs entry\n");
2617 if (device_create_bin_file(dev, &bin_attr_mem)) 3445 if (device_create_bin_file(dev, &bin_attr_mem))
2618 dev_info(dev, "failed to create mem sysfs entry\n"); 3446 dev_info(dev, "failed to create mem sysfs entry\n");
2619} 3447 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3448 adapter->op_mode != QLCNIC_MGMT_FUNC)
3449 return;
3450 if (device_create_bin_file(dev, &bin_attr_pci_config))
3451 dev_info(dev, "failed to create pci config sysfs entry");
3452 if (device_create_bin_file(dev, &bin_attr_npar_config))
3453 dev_info(dev, "failed to create npar config sysfs entry");
3454 if (device_create_bin_file(dev, &bin_attr_esw_config))
3455 dev_info(dev, "failed to create esw config sysfs entry");
3456 if (device_create_bin_file(dev, &bin_attr_pm_config))
3457 dev_info(dev, "failed to create pm config sysfs entry");
2620 3458
3459}
2621 3460
2622static void 3461static void
2623qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 3462qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2624{ 3463{
2625 struct device *dev = &adapter->pdev->dev; 3464 struct device *dev = &adapter->pdev->dev;
2626 3465
3466 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3467 return;
2627 device_remove_file(dev, &dev_attr_diag_mode); 3468 device_remove_file(dev, &dev_attr_diag_mode);
2628 device_remove_bin_file(dev, &bin_attr_crb); 3469 device_remove_bin_file(dev, &bin_attr_crb);
2629 device_remove_bin_file(dev, &bin_attr_mem); 3470 device_remove_bin_file(dev, &bin_attr_mem);
3471 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3472 adapter->op_mode != QLCNIC_MGMT_FUNC)
3473 return;
3474 device_remove_bin_file(dev, &bin_attr_pci_config);
3475 device_remove_bin_file(dev, &bin_attr_npar_config);
3476 device_remove_bin_file(dev, &bin_attr_esw_config);
3477 device_remove_bin_file(dev, &bin_attr_pm_config);
2630} 3478}
2631 3479
2632#ifdef CONFIG_INET 3480#ifdef CONFIG_INET
@@ -2684,7 +3532,7 @@ recheck:
2684 if (!adapter) 3532 if (!adapter)
2685 goto done; 3533 goto done;
2686 3534
2687 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 3535 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2688 goto done; 3536 goto done;
2689 3537
2690 qlcnic_config_indev_addr(dev, event); 3538 qlcnic_config_indev_addr(dev, event);
@@ -2720,7 +3568,7 @@ recheck:
2720 if (!adapter) 3568 if (!adapter)
2721 goto done; 3569 goto done;
2722 3570
2723 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 3571 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2724 goto done; 3572 goto done;
2725 3573
2726 switch (event) { 3574 switch (event) {
@@ -2750,6 +3598,11 @@ static void
2750qlcnic_config_indev_addr(struct net_device *dev, unsigned long event) 3598qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2751{ } 3599{ }
2752#endif 3600#endif
3601static struct pci_error_handlers qlcnic_err_handler = {
3602 .error_detected = qlcnic_io_error_detected,
3603 .slot_reset = qlcnic_io_slot_reset,
3604 .resume = qlcnic_io_resume,
3605};
2753 3606
2754static struct pci_driver qlcnic_driver = { 3607static struct pci_driver qlcnic_driver = {
2755 .name = qlcnic_driver_name, 3608 .name = qlcnic_driver_name,
@@ -2760,11 +3613,14 @@ static struct pci_driver qlcnic_driver = {
2760 .suspend = qlcnic_suspend, 3613 .suspend = qlcnic_suspend,
2761 .resume = qlcnic_resume, 3614 .resume = qlcnic_resume,
2762#endif 3615#endif
2763 .shutdown = qlcnic_shutdown 3616 .shutdown = qlcnic_shutdown,
3617 .err_handler = &qlcnic_err_handler
3618
2764}; 3619};
2765 3620
2766static int __init qlcnic_init_module(void) 3621static int __init qlcnic_init_module(void)
2767{ 3622{
3623 int ret;
2768 3624
2769 printk(KERN_INFO "%s\n", qlcnic_driver_string); 3625 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2770 3626
@@ -2773,8 +3629,15 @@ static int __init qlcnic_init_module(void)
2773 register_inetaddr_notifier(&qlcnic_inetaddr_cb); 3629 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2774#endif 3630#endif
2775 3631
3632 ret = pci_register_driver(&qlcnic_driver);
3633 if (ret) {
3634#ifdef CONFIG_INET
3635 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3636 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3637#endif
3638 }
2776 3639
2777 return pci_register_driver(&qlcnic_driver); 3640 return ret;
2778} 3641}
2779 3642
2780module_init(qlcnic_init_module); 3643module_init(qlcnic_init_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 20624ba44a37..a478786840a6 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,9 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00.23.00.00-01" 19#define DRV_VERSION "v1.00.00.25.00.00-01"
20
21#define PFX "qlge: "
22 20
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 22
@@ -1062,7 +1060,7 @@ struct tx_buf_desc {
1062#define TX_DESC_LEN_MASK 0x000fffff 1060#define TX_DESC_LEN_MASK 0x000fffff
1063#define TX_DESC_C 0x40000000 1061#define TX_DESC_C 0x40000000
1064#define TX_DESC_E 0x80000000 1062#define TX_DESC_E 0x80000000
1065} __attribute((packed)); 1063} __packed;
1066 1064
1067/* 1065/*
1068 * IOCB Definitions... 1066 * IOCB Definitions...
@@ -1095,7 +1093,7 @@ struct ob_mac_iocb_req {
1095 __le16 vlan_tci; 1093 __le16 vlan_tci;
1096 __le16 reserved4; 1094 __le16 reserved4;
1097 struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; 1095 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
1098} __attribute((packed)); 1096} __packed;
1099 1097
1100struct ob_mac_iocb_rsp { 1098struct ob_mac_iocb_rsp {
1101 u8 opcode; /* */ 1099 u8 opcode; /* */
@@ -1112,7 +1110,7 @@ struct ob_mac_iocb_rsp {
1112 u32 tid; 1110 u32 tid;
1113 u32 txq_idx; 1111 u32 txq_idx;
1114 __le32 reserved[13]; 1112 __le32 reserved[13];
1115} __attribute((packed)); 1113} __packed;
1116 1114
1117struct ob_mac_tso_iocb_req { 1115struct ob_mac_tso_iocb_req {
1118 u8 opcode; 1116 u8 opcode;
@@ -1140,7 +1138,7 @@ struct ob_mac_tso_iocb_req {
1140 __le16 vlan_tci; 1138 __le16 vlan_tci;
1141 __le16 mss; 1139 __le16 mss;
1142 struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; 1140 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
1143} __attribute((packed)); 1141} __packed;
1144 1142
1145struct ob_mac_tso_iocb_rsp { 1143struct ob_mac_tso_iocb_rsp {
1146 u8 opcode; 1144 u8 opcode;
@@ -1157,7 +1155,7 @@ struct ob_mac_tso_iocb_rsp {
1157 u32 tid; 1155 u32 tid;
1158 u32 txq_idx; 1156 u32 txq_idx;
1159 __le32 reserved2[13]; 1157 __le32 reserved2[13];
1160} __attribute((packed)); 1158} __packed;
1161 1159
1162struct ib_mac_iocb_rsp { 1160struct ib_mac_iocb_rsp {
1163 u8 opcode; /* 0x20 */ 1161 u8 opcode; /* 0x20 */
@@ -1216,7 +1214,7 @@ struct ib_mac_iocb_rsp {
1216#define IB_MAC_IOCB_RSP_HL 0x80 1214#define IB_MAC_IOCB_RSP_HL 0x80
1217 __le32 hdr_len; /* */ 1215 __le32 hdr_len; /* */
1218 __le64 hdr_addr; /* */ 1216 __le64 hdr_addr; /* */
1219} __attribute((packed)); 1217} __packed;
1220 1218
1221struct ib_ae_iocb_rsp { 1219struct ib_ae_iocb_rsp {
1222 u8 opcode; 1220 u8 opcode;
@@ -1237,7 +1235,7 @@ struct ib_ae_iocb_rsp {
1237#define PCI_ERR_ANON_BUF_RD 0x40 1235#define PCI_ERR_ANON_BUF_RD 0x40
1238 u8 q_id; 1236 u8 q_id;
1239 __le32 reserved[15]; 1237 __le32 reserved[15];
1240} __attribute((packed)); 1238} __packed;
1241 1239
1242/* 1240/*
1243 * These three structures are for generic 1241 * These three structures are for generic
@@ -1249,7 +1247,7 @@ struct ql_net_rsp_iocb {
1249 __le16 length; 1247 __le16 length;
1250 __le32 tid; 1248 __le32 tid;
1251 __le32 reserved[14]; 1249 __le32 reserved[14];
1252} __attribute((packed)); 1250} __packed;
1253 1251
1254struct net_req_iocb { 1252struct net_req_iocb {
1255 u8 opcode; 1253 u8 opcode;
@@ -1257,7 +1255,7 @@ struct net_req_iocb {
1257 __le16 flags1; 1255 __le16 flags1;
1258 __le32 tid; 1256 __le32 tid;
1259 __le32 reserved1[30]; 1257 __le32 reserved1[30];
1260} __attribute((packed)); 1258} __packed;
1261 1259
1262/* 1260/*
1263 * tx ring initialization control block for chip. 1261 * tx ring initialization control block for chip.
@@ -1283,7 +1281,7 @@ struct wqicb {
1283 __le16 rid; 1281 __le16 rid;
1284 __le64 addr; 1282 __le64 addr;
1285 __le64 cnsmr_idx_addr; 1283 __le64 cnsmr_idx_addr;
1286} __attribute((packed)); 1284} __packed;
1287 1285
1288/* 1286/*
1289 * rx ring initialization control block for chip. 1287 * rx ring initialization control block for chip.
@@ -1317,7 +1315,7 @@ struct cqicb {
1317 __le64 sbq_addr; 1315 __le64 sbq_addr;
1318 __le16 sbq_buf_size; 1316 __le16 sbq_buf_size;
1319 __le16 sbq_len; /* entry count */ 1317 __le16 sbq_len; /* entry count */
1320} __attribute((packed)); 1318} __packed;
1321 1319
1322struct ricb { 1320struct ricb {
1323 u8 base_cq; 1321 u8 base_cq;
@@ -1335,7 +1333,7 @@ struct ricb {
1335 u8 hash_cq_id[1024]; 1333 u8 hash_cq_id[1024];
1336 __le32 ipv6_hash_key[10]; 1334 __le32 ipv6_hash_key[10];
1337 __le32 ipv4_hash_key[4]; 1335 __le32 ipv4_hash_key[4];
1338} __attribute((packed)); 1336} __packed;
1339 1337
1340/* SOFTWARE/DRIVER DATA STRUCTURES. */ 1338/* SOFTWARE/DRIVER DATA STRUCTURES. */
1341 1339
@@ -2227,7 +2225,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2227 u32 ram_addr, int word_count); 2225 u32 ram_addr, int word_count);
2228int ql_core_dump(struct ql_adapter *qdev, 2226int ql_core_dump(struct ql_adapter *qdev,
2229 struct ql_mpi_coredump *mpi_coredump); 2227 struct ql_mpi_coredump *mpi_coredump);
2230int ql_mb_sys_err(struct ql_adapter *qdev);
2231int ql_mb_about_fw(struct ql_adapter *qdev); 2228int ql_mb_about_fw(struct ql_adapter *qdev);
2232int ql_wol(struct ql_adapter *qdev); 2229int ql_wol(struct ql_adapter *qdev);
2233int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2230int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -2246,6 +2243,7 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
2246void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); 2243void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2247int ql_own_firmware(struct ql_adapter *qdev); 2244int ql_own_firmware(struct ql_adapter *qdev);
2248int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2245int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
2246void qlge_set_multicast_list(struct net_device *ndev);
2249 2247
2250#if 1 2248#if 1
2251#define QL_ALL_DUMP 2249#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 68a1c9b91e74..4747492935ef 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/slab.h> 3#include <linux/slab.h>
2 4
3#include "qlge.h" 5#include "qlge.h"
@@ -446,7 +448,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
446 MAC_ADDR_TYPE_CAM_MAC, i, value); 448 MAC_ADDR_TYPE_CAM_MAC, i, value);
447 if (status) { 449 if (status) {
448 netif_err(qdev, drv, qdev->ndev, 450 netif_err(qdev, drv, qdev->ndev,
449 "Failed read of mac index register.\n"); 451 "Failed read of mac index register\n");
450 goto err; 452 goto err;
451 } 453 }
452 *buf++ = value[0]; /* lower MAC address */ 454 *buf++ = value[0]; /* lower MAC address */
@@ -458,7 +460,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
458 MAC_ADDR_TYPE_MULTI_MAC, i, value); 460 MAC_ADDR_TYPE_MULTI_MAC, i, value);
459 if (status) { 461 if (status) {
460 netif_err(qdev, drv, qdev->ndev, 462 netif_err(qdev, drv, qdev->ndev,
461 "Failed read of mac index register.\n"); 463 "Failed read of mac index register\n");
462 goto err; 464 goto err;
463 } 465 }
464 *buf++ = value[0]; /* lower Mcast address */ 466 *buf++ = value[0]; /* lower Mcast address */
@@ -482,7 +484,7 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
482 status = ql_get_routing_reg(qdev, i, &value); 484 status = ql_get_routing_reg(qdev, i, &value);
483 if (status) { 485 if (status) {
484 netif_err(qdev, drv, qdev->ndev, 486 netif_err(qdev, drv, qdev->ndev,
485 "Failed read of routing index register.\n"); 487 "Failed read of routing index register\n");
486 goto err; 488 goto err;
487 } else { 489 } else {
488 *buf++ = value; 490 *buf++ = value;
@@ -668,7 +670,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
668 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; 670 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
669 break; 671 break;
670 default: 672 default:
671 printk(KERN_ERR"Bad type!!! 0x%08x\n", type); 673 pr_err("Bad type!!! 0x%08x\n", type);
672 max_index = 0; 674 max_index = 0;
673 max_offset = 0; 675 max_offset = 0;
674 break; 676 break;
@@ -738,7 +740,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
738 int i; 740 int i;
739 741
740 if (!mpi_coredump) { 742 if (!mpi_coredump) {
741 netif_err(qdev, drv, qdev->ndev, "No memory available.\n"); 743 netif_err(qdev, drv, qdev->ndev, "No memory available\n");
742 return -ENOMEM; 744 return -ENOMEM;
743 } 745 }
744 746
@@ -1234,15 +1236,10 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
1234 1236
1235 if (!netif_running(qdev->ndev)) { 1237 if (!netif_running(qdev->ndev)) {
1236 netif_err(qdev, ifup, qdev->ndev, 1238 netif_err(qdev, ifup, qdev->ndev,
1237 "Force Coredump can only be done from interface that is up.\n"); 1239 "Force Coredump can only be done from interface that is up\n");
1238 return;
1239 }
1240
1241 if (ql_mb_sys_err(qdev)) {
1242 netif_err(qdev, ifup, qdev->ndev,
1243 "Fail force coredump with ql_mb_sys_err().\n");
1244 return; 1240 return;
1245 } 1241 }
1242 ql_queue_fw_error(qdev);
1246} 1243}
1247 1244
1248void ql_gen_reg_dump(struct ql_adapter *qdev, 1245void ql_gen_reg_dump(struct ql_adapter *qdev,
@@ -1339,7 +1336,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
1339 "Core is dumping to log file!\n"); 1336 "Core is dumping to log file!\n");
1340 1337
1341 for (i = 0; i < count; i += 8) { 1338 for (i = 0; i < count; i += 8) {
1342 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x " 1339 pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
1343 "%.08x %.08x %.08x\n", i, 1340 "%.08x %.08x %.08x\n", i,
1344 tmp[i + 0], 1341 tmp[i + 0],
1345 tmp[i + 1], 1342 tmp[i + 1],
@@ -1361,71 +1358,43 @@ static void ql_dump_intr_states(struct ql_adapter *qdev)
1361 for (i = 0; i < qdev->intr_count; i++) { 1358 for (i = 0; i < qdev->intr_count; i++) {
1362 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); 1359 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1363 value = ql_read32(qdev, INTR_EN); 1360 value = ql_read32(qdev, INTR_EN);
1364 printk(KERN_ERR PFX 1361 pr_err("%s: Interrupt %d is %s\n",
1365 "%s: Interrupt %d is %s.\n",
1366 qdev->ndev->name, i, 1362 qdev->ndev->name, i,
1367 (value & INTR_EN_EN ? "enabled" : "disabled")); 1363 (value & INTR_EN_EN ? "enabled" : "disabled"));
1368 } 1364 }
1369} 1365}
1370 1366
1367#define DUMP_XGMAC(qdev, reg) \
1368do { \
1369 u32 data; \
1370 ql_read_xgmac_reg(qdev, reg, &data); \
1371 pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
1372} while (0)
1373
1371void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) 1374void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
1372{ 1375{
1373 u32 data;
1374 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { 1376 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
1375 printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__); 1377 pr_err("%s: Couldn't get xgmac sem\n", __func__);
1376 return; 1378 return;
1377 } 1379 }
1378 ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data); 1380 DUMP_XGMAC(qdev, PAUSE_SRC_LO);
1379 printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name, 1381 DUMP_XGMAC(qdev, PAUSE_SRC_HI);
1380 data); 1382 DUMP_XGMAC(qdev, GLOBAL_CFG);
1381 ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data); 1383 DUMP_XGMAC(qdev, TX_CFG);
1382 printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name, 1384 DUMP_XGMAC(qdev, RX_CFG);
1383 data); 1385 DUMP_XGMAC(qdev, FLOW_CTL);
1384 ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); 1386 DUMP_XGMAC(qdev, PAUSE_OPCODE);
1385 printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name, 1387 DUMP_XGMAC(qdev, PAUSE_TIMER);
1386 data); 1388 DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
1387 ql_read_xgmac_reg(qdev, TX_CFG, &data); 1389 DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
1388 printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data); 1390 DUMP_XGMAC(qdev, MAC_TX_PARAMS);
1389 ql_read_xgmac_reg(qdev, RX_CFG, &data); 1391 DUMP_XGMAC(qdev, MAC_RX_PARAMS);
1390 printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data); 1392 DUMP_XGMAC(qdev, MAC_SYS_INT);
1391 ql_read_xgmac_reg(qdev, FLOW_CTL, &data); 1393 DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
1392 printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name, 1394 DUMP_XGMAC(qdev, MAC_MGMT_INT);
1393 data); 1395 DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
1394 ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data); 1396 DUMP_XGMAC(qdev, EXT_ARB_MODE);
1395 printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
1396 data);
1397 ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
1398 printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
1399 data);
1400 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
1401 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
1402 qdev->ndev->name, data);
1403 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
1404 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
1405 qdev->ndev->name, data);
1406 ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
1407 printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
1408 data);
1409 ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
1410 printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
1411 data);
1412 ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
1413 printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
1414 data);
1415 ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
1416 printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
1417 qdev->ndev->name, data);
1418 ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
1419 printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
1420 data);
1421 ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
1422 printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
1423 qdev->ndev->name, data);
1424 ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
1425 printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
1426 data);
1427 ql_sem_unlock(qdev, qdev->xg_sem_mask); 1397 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1428
1429} 1398}
1430 1399
1431static void ql_dump_ets_regs(struct ql_adapter *qdev) 1400static void ql_dump_ets_regs(struct ql_adapter *qdev)
@@ -1442,14 +1411,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
1442 return; 1411 return;
1443 for (i = 0; i < 4; i++) { 1412 for (i = 0; i < 4; i++) {
1444 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { 1413 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1445 printk(KERN_ERR PFX 1414 pr_err("%s: Failed read of mac index register\n",
1446 "%s: Failed read of mac index register.\n",
1447 __func__); 1415 __func__);
1448 return; 1416 return;
1449 } else { 1417 } else {
1450 if (value[0]) 1418 if (value[0])
1451 printk(KERN_ERR PFX 1419 pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1452 "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
1453 qdev->ndev->name, i, value[1], value[0], 1420 qdev->ndev->name, i, value[1], value[0],
1454 value[2]); 1421 value[2]);
1455 } 1422 }
@@ -1457,14 +1424,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
1457 for (i = 0; i < 32; i++) { 1424 for (i = 0; i < 32; i++) {
1458 if (ql_get_mac_addr_reg 1425 if (ql_get_mac_addr_reg
1459 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { 1426 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1460 printk(KERN_ERR PFX 1427 pr_err("%s: Failed read of mac index register\n",
1461 "%s: Failed read of mac index register.\n",
1462 __func__); 1428 __func__);
1463 return; 1429 return;
1464 } else { 1430 } else {
1465 if (value[0]) 1431 if (value[0])
1466 printk(KERN_ERR PFX 1432 pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1467 "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
1468 qdev->ndev->name, i, value[1], value[0]); 1433 qdev->ndev->name, i, value[1], value[0]);
1469 } 1434 }
1470 } 1435 }
@@ -1481,129 +1446,77 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
1481 for (i = 0; i < 16; i++) { 1446 for (i = 0; i < 16; i++) {
1482 value = 0; 1447 value = 0;
1483 if (ql_get_routing_reg(qdev, i, &value)) { 1448 if (ql_get_routing_reg(qdev, i, &value)) {
1484 printk(KERN_ERR PFX 1449 pr_err("%s: Failed read of routing index register\n",
1485 "%s: Failed read of routing index register.\n",
1486 __func__); 1450 __func__);
1487 return; 1451 return;
1488 } else { 1452 } else {
1489 if (value) 1453 if (value)
1490 printk(KERN_ERR PFX 1454 pr_err("%s: Routing Mask %d = 0x%.08x\n",
1491 "%s: Routing Mask %d = 0x%.08x.\n",
1492 qdev->ndev->name, i, value); 1455 qdev->ndev->name, i, value);
1493 } 1456 }
1494 } 1457 }
1495 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 1458 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
1496} 1459}
1497 1460
1461#define DUMP_REG(qdev, reg) \
1462 pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
1463
1498void ql_dump_regs(struct ql_adapter *qdev) 1464void ql_dump_regs(struct ql_adapter *qdev)
1499{ 1465{
1500 printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func); 1466 pr_err("reg dump for function #%d\n", qdev->func);
1501 printk(KERN_ERR PFX "SYS = 0x%x.\n", 1467 DUMP_REG(qdev, SYS);
1502 ql_read32(qdev, SYS)); 1468 DUMP_REG(qdev, RST_FO);
1503 printk(KERN_ERR PFX "RST_FO = 0x%x.\n", 1469 DUMP_REG(qdev, FSC);
1504 ql_read32(qdev, RST_FO)); 1470 DUMP_REG(qdev, CSR);
1505 printk(KERN_ERR PFX "FSC = 0x%x.\n", 1471 DUMP_REG(qdev, ICB_RID);
1506 ql_read32(qdev, FSC)); 1472 DUMP_REG(qdev, ICB_L);
1507 printk(KERN_ERR PFX "CSR = 0x%x.\n", 1473 DUMP_REG(qdev, ICB_H);
1508 ql_read32(qdev, CSR)); 1474 DUMP_REG(qdev, CFG);
1509 printk(KERN_ERR PFX "ICB_RID = 0x%x.\n", 1475 DUMP_REG(qdev, BIOS_ADDR);
1510 ql_read32(qdev, ICB_RID)); 1476 DUMP_REG(qdev, STS);
1511 printk(KERN_ERR PFX "ICB_L = 0x%x.\n", 1477 DUMP_REG(qdev, INTR_EN);
1512 ql_read32(qdev, ICB_L)); 1478 DUMP_REG(qdev, INTR_MASK);
1513 printk(KERN_ERR PFX "ICB_H = 0x%x.\n", 1479 DUMP_REG(qdev, ISR1);
1514 ql_read32(qdev, ICB_H)); 1480 DUMP_REG(qdev, ISR2);
1515 printk(KERN_ERR PFX "CFG = 0x%x.\n", 1481 DUMP_REG(qdev, ISR3);
1516 ql_read32(qdev, CFG)); 1482 DUMP_REG(qdev, ISR4);
1517 printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n", 1483 DUMP_REG(qdev, REV_ID);
1518 ql_read32(qdev, BIOS_ADDR)); 1484 DUMP_REG(qdev, FRC_ECC_ERR);
1519 printk(KERN_ERR PFX "STS = 0x%x.\n", 1485 DUMP_REG(qdev, ERR_STS);
1520 ql_read32(qdev, STS)); 1486 DUMP_REG(qdev, RAM_DBG_ADDR);
1521 printk(KERN_ERR PFX "INTR_EN = 0x%x.\n", 1487 DUMP_REG(qdev, RAM_DBG_DATA);
1522 ql_read32(qdev, INTR_EN)); 1488 DUMP_REG(qdev, ECC_ERR_CNT);
1523 printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n", 1489 DUMP_REG(qdev, SEM);
1524 ql_read32(qdev, INTR_MASK)); 1490 DUMP_REG(qdev, GPIO_1);
1525 printk(KERN_ERR PFX "ISR1 = 0x%x.\n", 1491 DUMP_REG(qdev, GPIO_2);
1526 ql_read32(qdev, ISR1)); 1492 DUMP_REG(qdev, GPIO_3);
1527 printk(KERN_ERR PFX "ISR2 = 0x%x.\n", 1493 DUMP_REG(qdev, XGMAC_ADDR);
1528 ql_read32(qdev, ISR2)); 1494 DUMP_REG(qdev, XGMAC_DATA);
1529 printk(KERN_ERR PFX "ISR3 = 0x%x.\n", 1495 DUMP_REG(qdev, NIC_ETS);
1530 ql_read32(qdev, ISR3)); 1496 DUMP_REG(qdev, CNA_ETS);
1531 printk(KERN_ERR PFX "ISR4 = 0x%x.\n", 1497 DUMP_REG(qdev, FLASH_ADDR);
1532 ql_read32(qdev, ISR4)); 1498 DUMP_REG(qdev, FLASH_DATA);
1533 printk(KERN_ERR PFX "REV_ID = 0x%x.\n", 1499 DUMP_REG(qdev, CQ_STOP);
1534 ql_read32(qdev, REV_ID)); 1500 DUMP_REG(qdev, PAGE_TBL_RID);
1535 printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n", 1501 DUMP_REG(qdev, WQ_PAGE_TBL_LO);
1536 ql_read32(qdev, FRC_ECC_ERR)); 1502 DUMP_REG(qdev, WQ_PAGE_TBL_HI);
1537 printk(KERN_ERR PFX "ERR_STS = 0x%x.\n", 1503 DUMP_REG(qdev, CQ_PAGE_TBL_LO);
1538 ql_read32(qdev, ERR_STS)); 1504 DUMP_REG(qdev, CQ_PAGE_TBL_HI);
1539 printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n", 1505 DUMP_REG(qdev, COS_DFLT_CQ1);
1540 ql_read32(qdev, RAM_DBG_ADDR)); 1506 DUMP_REG(qdev, COS_DFLT_CQ2);
1541 printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n", 1507 DUMP_REG(qdev, SPLT_HDR);
1542 ql_read32(qdev, RAM_DBG_DATA)); 1508 DUMP_REG(qdev, FC_PAUSE_THRES);
1543 printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n", 1509 DUMP_REG(qdev, NIC_PAUSE_THRES);
1544 ql_read32(qdev, ECC_ERR_CNT)); 1510 DUMP_REG(qdev, FC_ETHERTYPE);
1545 printk(KERN_ERR PFX "SEM = 0x%x.\n", 1511 DUMP_REG(qdev, FC_RCV_CFG);
1546 ql_read32(qdev, SEM)); 1512 DUMP_REG(qdev, NIC_RCV_CFG);
1547 printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n", 1513 DUMP_REG(qdev, FC_COS_TAGS);
1548 ql_read32(qdev, GPIO_1)); 1514 DUMP_REG(qdev, NIC_COS_TAGS);
1549 printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n", 1515 DUMP_REG(qdev, MGMT_RCV_CFG);
1550 ql_read32(qdev, GPIO_2)); 1516 DUMP_REG(qdev, XG_SERDES_ADDR);
1551 printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n", 1517 DUMP_REG(qdev, XG_SERDES_DATA);
1552 ql_read32(qdev, GPIO_3)); 1518 DUMP_REG(qdev, PRB_MX_ADDR);
1553 printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n", 1519 DUMP_REG(qdev, PRB_MX_DATA);
1554 ql_read32(qdev, XGMAC_ADDR));
1555 printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
1556 ql_read32(qdev, XGMAC_DATA));
1557 printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
1558 ql_read32(qdev, NIC_ETS));
1559 printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
1560 ql_read32(qdev, CNA_ETS));
1561 printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
1562 ql_read32(qdev, FLASH_ADDR));
1563 printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
1564 ql_read32(qdev, FLASH_DATA));
1565 printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
1566 ql_read32(qdev, CQ_STOP));
1567 printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
1568 ql_read32(qdev, PAGE_TBL_RID));
1569 printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
1570 ql_read32(qdev, WQ_PAGE_TBL_LO));
1571 printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
1572 ql_read32(qdev, WQ_PAGE_TBL_HI));
1573 printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
1574 ql_read32(qdev, CQ_PAGE_TBL_LO));
1575 printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
1576 ql_read32(qdev, CQ_PAGE_TBL_HI));
1577 printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
1578 ql_read32(qdev, COS_DFLT_CQ1));
1579 printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
1580 ql_read32(qdev, COS_DFLT_CQ2));
1581 printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
1582 ql_read32(qdev, SPLT_HDR));
1583 printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
1584 ql_read32(qdev, FC_PAUSE_THRES));
1585 printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
1586 ql_read32(qdev, NIC_PAUSE_THRES));
1587 printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
1588 ql_read32(qdev, FC_ETHERTYPE));
1589 printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
1590 ql_read32(qdev, FC_RCV_CFG));
1591 printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
1592 ql_read32(qdev, NIC_RCV_CFG));
1593 printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
1594 ql_read32(qdev, FC_COS_TAGS));
1595 printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
1596 ql_read32(qdev, NIC_COS_TAGS));
1597 printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
1598 ql_read32(qdev, MGMT_RCV_CFG));
1599 printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
1600 ql_read32(qdev, XG_SERDES_ADDR));
1601 printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
1602 ql_read32(qdev, XG_SERDES_DATA));
1603 printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
1604 ql_read32(qdev, PRB_MX_ADDR));
1605 printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
1606 ql_read32(qdev, PRB_MX_DATA));
1607 ql_dump_intr_states(qdev); 1520 ql_dump_intr_states(qdev);
1608 ql_dump_xgmac_control_regs(qdev); 1521 ql_dump_xgmac_control_regs(qdev);
1609 ql_dump_ets_regs(qdev); 1522 ql_dump_ets_regs(qdev);
@@ -1613,191 +1526,124 @@ void ql_dump_regs(struct ql_adapter *qdev)
1613#endif 1526#endif
1614 1527
1615#ifdef QL_STAT_DUMP 1528#ifdef QL_STAT_DUMP
1529
1530#define DUMP_STAT(qdev, stat) \
1531 pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
1532
1616void ql_dump_stat(struct ql_adapter *qdev) 1533void ql_dump_stat(struct ql_adapter *qdev)
1617{ 1534{
1618 printk(KERN_ERR "%s: Enter.\n", __func__); 1535 pr_err("%s: Enter\n", __func__);
1619 printk(KERN_ERR "tx_pkts = %ld\n", 1536 DUMP_STAT(qdev, tx_pkts);
1620 (unsigned long)qdev->nic_stats.tx_pkts); 1537 DUMP_STAT(qdev, tx_bytes);
1621 printk(KERN_ERR "tx_bytes = %ld\n", 1538 DUMP_STAT(qdev, tx_mcast_pkts);
1622 (unsigned long)qdev->nic_stats.tx_bytes); 1539 DUMP_STAT(qdev, tx_bcast_pkts);
1623 printk(KERN_ERR "tx_mcast_pkts = %ld.\n", 1540 DUMP_STAT(qdev, tx_ucast_pkts);
1624 (unsigned long)qdev->nic_stats.tx_mcast_pkts); 1541 DUMP_STAT(qdev, tx_ctl_pkts);
1625 printk(KERN_ERR "tx_bcast_pkts = %ld.\n", 1542 DUMP_STAT(qdev, tx_pause_pkts);
1626 (unsigned long)qdev->nic_stats.tx_bcast_pkts); 1543 DUMP_STAT(qdev, tx_64_pkt);
1627 printk(KERN_ERR "tx_ucast_pkts = %ld.\n", 1544 DUMP_STAT(qdev, tx_65_to_127_pkt);
1628 (unsigned long)qdev->nic_stats.tx_ucast_pkts); 1545 DUMP_STAT(qdev, tx_128_to_255_pkt);
1629 printk(KERN_ERR "tx_ctl_pkts = %ld.\n", 1546 DUMP_STAT(qdev, tx_256_511_pkt);
1630 (unsigned long)qdev->nic_stats.tx_ctl_pkts); 1547 DUMP_STAT(qdev, tx_512_to_1023_pkt);
1631 printk(KERN_ERR "tx_pause_pkts = %ld.\n", 1548 DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1632 (unsigned long)qdev->nic_stats.tx_pause_pkts); 1549 DUMP_STAT(qdev, tx_1519_to_max_pkt);
1633 printk(KERN_ERR "tx_64_pkt = %ld.\n", 1550 DUMP_STAT(qdev, tx_undersize_pkt);
1634 (unsigned long)qdev->nic_stats.tx_64_pkt); 1551 DUMP_STAT(qdev, tx_oversize_pkt);
1635 printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n", 1552 DUMP_STAT(qdev, rx_bytes);
1636 (unsigned long)qdev->nic_stats.tx_65_to_127_pkt); 1553 DUMP_STAT(qdev, rx_bytes_ok);
1637 printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n", 1554 DUMP_STAT(qdev, rx_pkts);
1638 (unsigned long)qdev->nic_stats.tx_128_to_255_pkt); 1555 DUMP_STAT(qdev, rx_pkts_ok);
1639 printk(KERN_ERR "tx_256_511_pkt = %ld.\n", 1556 DUMP_STAT(qdev, rx_bcast_pkts);
1640 (unsigned long)qdev->nic_stats.tx_256_511_pkt); 1557 DUMP_STAT(qdev, rx_mcast_pkts);
1641 printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n", 1558 DUMP_STAT(qdev, rx_ucast_pkts);
1642 (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt); 1559 DUMP_STAT(qdev, rx_undersize_pkts);
1643 printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n", 1560 DUMP_STAT(qdev, rx_oversize_pkts);
1644 (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt); 1561 DUMP_STAT(qdev, rx_jabber_pkts);
1645 printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n", 1562 DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1646 (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt); 1563 DUMP_STAT(qdev, rx_drop_events);
1647 printk(KERN_ERR "tx_undersize_pkt = %ld.\n", 1564 DUMP_STAT(qdev, rx_fcerr_pkts);
1648 (unsigned long)qdev->nic_stats.tx_undersize_pkt); 1565 DUMP_STAT(qdev, rx_align_err);
1649 printk(KERN_ERR "tx_oversize_pkt = %ld.\n", 1566 DUMP_STAT(qdev, rx_symbol_err);
1650 (unsigned long)qdev->nic_stats.tx_oversize_pkt); 1567 DUMP_STAT(qdev, rx_mac_err);
1651 printk(KERN_ERR "rx_bytes = %ld.\n", 1568 DUMP_STAT(qdev, rx_ctl_pkts);
1652 (unsigned long)qdev->nic_stats.rx_bytes); 1569 DUMP_STAT(qdev, rx_pause_pkts);
1653 printk(KERN_ERR "rx_bytes_ok = %ld.\n", 1570 DUMP_STAT(qdev, rx_64_pkts);
1654 (unsigned long)qdev->nic_stats.rx_bytes_ok); 1571 DUMP_STAT(qdev, rx_65_to_127_pkts);
1655 printk(KERN_ERR "rx_pkts = %ld.\n", 1572 DUMP_STAT(qdev, rx_128_255_pkts);
1656 (unsigned long)qdev->nic_stats.rx_pkts); 1573 DUMP_STAT(qdev, rx_256_511_pkts);
1657 printk(KERN_ERR "rx_pkts_ok = %ld.\n", 1574 DUMP_STAT(qdev, rx_512_to_1023_pkts);
1658 (unsigned long)qdev->nic_stats.rx_pkts_ok); 1575 DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1659 printk(KERN_ERR "rx_bcast_pkts = %ld.\n", 1576 DUMP_STAT(qdev, rx_1519_to_max_pkts);
1660 (unsigned long)qdev->nic_stats.rx_bcast_pkts); 1577 DUMP_STAT(qdev, rx_len_err_pkts);
1661 printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
1662 (unsigned long)qdev->nic_stats.rx_mcast_pkts);
1663 printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
1664 (unsigned long)qdev->nic_stats.rx_ucast_pkts);
1665 printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
1666 (unsigned long)qdev->nic_stats.rx_undersize_pkts);
1667 printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
1668 (unsigned long)qdev->nic_stats.rx_oversize_pkts);
1669 printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
1670 (unsigned long)qdev->nic_stats.rx_jabber_pkts);
1671 printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
1672 (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
1673 printk(KERN_ERR "rx_drop_events = %ld.\n",
1674 (unsigned long)qdev->nic_stats.rx_drop_events);
1675 printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
1676 (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
1677 printk(KERN_ERR "rx_align_err = %ld.\n",
1678 (unsigned long)qdev->nic_stats.rx_align_err);
1679 printk(KERN_ERR "rx_symbol_err = %ld.\n",
1680 (unsigned long)qdev->nic_stats.rx_symbol_err);
1681 printk(KERN_ERR "rx_mac_err = %ld.\n",
1682 (unsigned long)qdev->nic_stats.rx_mac_err);
1683 printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
1684 (unsigned long)qdev->nic_stats.rx_ctl_pkts);
1685 printk(KERN_ERR "rx_pause_pkts = %ld.\n",
1686 (unsigned long)qdev->nic_stats.rx_pause_pkts);
1687 printk(KERN_ERR "rx_64_pkts = %ld.\n",
1688 (unsigned long)qdev->nic_stats.rx_64_pkts);
1689 printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
1690 (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
1691 printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
1692 (unsigned long)qdev->nic_stats.rx_128_255_pkts);
1693 printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
1694 (unsigned long)qdev->nic_stats.rx_256_511_pkts);
1695 printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
1696 (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
1697 printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
1698 (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
1699 printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
1700 (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
1701 printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
1702 (unsigned long)qdev->nic_stats.rx_len_err_pkts);
1703}; 1578};
1704#endif 1579#endif
1705 1580
1706#ifdef QL_DEV_DUMP 1581#ifdef QL_DEV_DUMP
1582
1583#define DUMP_QDEV_FIELD(qdev, type, field) \
1584 pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
1585#define DUMP_QDEV_DMA_FIELD(qdev, field) \
1586 pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
1587#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
1588 pr_err("%s[%d].%s = " type "\n", \
1589 #array, index, #field, qdev->array[index].field);
1707void ql_dump_qdev(struct ql_adapter *qdev) 1590void ql_dump_qdev(struct ql_adapter *qdev)
1708{ 1591{
1709 int i; 1592 int i;
1710 printk(KERN_ERR PFX "qdev->flags = %lx.\n", 1593 DUMP_QDEV_FIELD(qdev, "%lx", flags);
1711 qdev->flags); 1594 DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
1712 printk(KERN_ERR PFX "qdev->vlgrp = %p.\n", 1595 DUMP_QDEV_FIELD(qdev, "%p", pdev);
1713 qdev->vlgrp); 1596 DUMP_QDEV_FIELD(qdev, "%p", ndev);
1714 printk(KERN_ERR PFX "qdev->pdev = %p.\n", 1597 DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
1715 qdev->pdev); 1598 DUMP_QDEV_FIELD(qdev, "%p", reg_base);
1716 printk(KERN_ERR PFX "qdev->ndev = %p.\n", 1599 DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
1717 qdev->ndev); 1600 DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
1718 printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n", 1601 DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
1719 qdev->chip_rev_id); 1602 DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
1720 printk(KERN_ERR PFX "qdev->reg_base = %p.\n", 1603 DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
1721 qdev->reg_base); 1604 DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
1722 printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n", 1605 DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
1723 qdev->doorbell_area); 1606 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
1724 printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
1725 qdev->doorbell_area_size);
1726 printk(KERN_ERR PFX "msg_enable = %x.\n",
1727 qdev->msg_enable);
1728 printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
1729 qdev->rx_ring_shadow_reg_area);
1730 printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n",
1731 (unsigned long long) qdev->rx_ring_shadow_reg_dma);
1732 printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
1733 qdev->tx_ring_shadow_reg_area);
1734 printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n",
1735 (unsigned long long) qdev->tx_ring_shadow_reg_dma);
1736 printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
1737 qdev->intr_count);
1738 if (qdev->msi_x_entry) 1607 if (qdev->msi_x_entry)
1739 for (i = 0; i < qdev->intr_count; i++) { 1608 for (i = 0; i < qdev->intr_count; i++) {
1740 printk(KERN_ERR PFX 1609 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
1741 "msi_x_entry.[%d]vector = %d.\n", i, 1610 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
1742 qdev->msi_x_entry[i].vector);
1743 printk(KERN_ERR PFX
1744 "msi_x_entry.[%d]entry = %d.\n", i,
1745 qdev->msi_x_entry[i].entry);
1746 } 1611 }
1747 for (i = 0; i < qdev->intr_count; i++) { 1612 for (i = 0; i < qdev->intr_count; i++) {
1748 printk(KERN_ERR PFX 1613 DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
1749 "intr_context[%d].qdev = %p.\n", i, 1614 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
1750 qdev->intr_context[i].qdev); 1615 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
1751 printk(KERN_ERR PFX 1616 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
1752 "intr_context[%d].intr = %d.\n", i, 1617 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
1753 qdev->intr_context[i].intr); 1618 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
1754 printk(KERN_ERR PFX
1755 "intr_context[%d].hooked = %d.\n", i,
1756 qdev->intr_context[i].hooked);
1757 printk(KERN_ERR PFX
1758 "intr_context[%d].intr_en_mask = 0x%08x.\n", i,
1759 qdev->intr_context[i].intr_en_mask);
1760 printk(KERN_ERR PFX
1761 "intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
1762 qdev->intr_context[i].intr_dis_mask);
1763 printk(KERN_ERR PFX
1764 "intr_context[%d].intr_read_mask = 0x%08x.\n", i,
1765 qdev->intr_context[i].intr_read_mask);
1766 } 1619 }
1767 printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count); 1620 DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
1768 printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count); 1621 DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
1769 printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size); 1622 DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
1770 printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem); 1623 DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
1771 printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count); 1624 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
1772 printk(KERN_ERR PFX "qdev->tx_ring = %p.\n", 1625 DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
1773 qdev->tx_ring); 1626 DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
1774 printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n", 1627 DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
1775 qdev->rss_ring_count); 1628 DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
1776 printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring); 1629 DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
1777 printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n", 1630 DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
1778 qdev->default_rx_queue); 1631 DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
1779 printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
1780 qdev->xg_sem_mask);
1781 printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
1782 qdev->port_link_up);
1783 printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
1784 qdev->port_init);
1785
1786} 1632}
1787#endif 1633#endif
1788 1634
1789#ifdef QL_CB_DUMP 1635#ifdef QL_CB_DUMP
1790void ql_dump_wqicb(struct wqicb *wqicb) 1636void ql_dump_wqicb(struct wqicb *wqicb)
1791{ 1637{
1792 printk(KERN_ERR PFX "Dumping wqicb stuff...\n"); 1638 pr_err("Dumping wqicb stuff...\n");
1793 printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len)); 1639 pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
1794 printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags)); 1640 pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
1795 printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n", 1641 pr_err("wqicb->cq_id_rss = %d\n",
1796 le16_to_cpu(wqicb->cq_id_rss)); 1642 le16_to_cpu(wqicb->cq_id_rss));
1797 printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid)); 1643 pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
1798 printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n", 1644 pr_err("wqicb->wq_addr = 0x%llx\n",
1799 (unsigned long long) le64_to_cpu(wqicb->addr)); 1645 (unsigned long long) le64_to_cpu(wqicb->addr));
1800 printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n", 1646 pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
1801 (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); 1647 (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
1802} 1648}
1803 1649
@@ -1805,40 +1651,34 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
1805{ 1651{
1806 if (tx_ring == NULL) 1652 if (tx_ring == NULL)
1807 return; 1653 return;
1808 printk(KERN_ERR PFX 1654 pr_err("===================== Dumping tx_ring %d ===============\n",
1809 "===================== Dumping tx_ring %d ===============.\n",
1810 tx_ring->wq_id); 1655 tx_ring->wq_id);
1811 printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base); 1656 pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
1812 printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n", 1657 pr_err("tx_ring->base_dma = 0x%llx\n",
1813 (unsigned long long) tx_ring->wq_base_dma); 1658 (unsigned long long) tx_ring->wq_base_dma);
1814 printk(KERN_ERR PFX 1659 pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
1815 "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
1816 tx_ring->cnsmr_idx_sh_reg, 1660 tx_ring->cnsmr_idx_sh_reg,
1817 tx_ring->cnsmr_idx_sh_reg 1661 tx_ring->cnsmr_idx_sh_reg
1818 ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); 1662 ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
1819 printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size); 1663 pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
1820 printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len); 1664 pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
1821 printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n", 1665 pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
1822 tx_ring->prod_idx_db_reg); 1666 pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
1823 printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n", 1667 pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
1824 tx_ring->valid_db_reg); 1668 pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
1825 printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx); 1669 pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
1826 printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id); 1670 pr_err("tx_ring->q = %p\n", tx_ring->q);
1827 printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id); 1671 pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
1828 printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
1829 printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
1830 atomic_read(&tx_ring->tx_count));
1831} 1672}
1832 1673
1833void ql_dump_ricb(struct ricb *ricb) 1674void ql_dump_ricb(struct ricb *ricb)
1834{ 1675{
1835 int i; 1676 int i;
1836 printk(KERN_ERR PFX 1677 pr_err("===================== Dumping ricb ===============\n");
1837 "===================== Dumping ricb ===============.\n"); 1678 pr_err("Dumping ricb stuff...\n");
1838 printk(KERN_ERR PFX "Dumping ricb stuff...\n");
1839 1679
1840 printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f); 1680 pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
1841 printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n", 1681 pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
1842 ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", 1682 ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
1843 ricb->flags & RSS_L6K ? "RSS_L6K " : "", 1683 ricb->flags & RSS_L6K ? "RSS_L6K " : "",
1844 ricb->flags & RSS_LI ? "RSS_LI " : "", 1684 ricb->flags & RSS_LI ? "RSS_LI " : "",
@@ -1848,44 +1688,44 @@ void ql_dump_ricb(struct ricb *ricb)
1848 ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", 1688 ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
1849 ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", 1689 ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
1850 ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); 1690 ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
1851 printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask)); 1691 pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
1852 for (i = 0; i < 16; i++) 1692 for (i = 0; i < 16; i++)
1853 printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i, 1693 pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
1854 le32_to_cpu(ricb->hash_cq_id[i])); 1694 le32_to_cpu(ricb->hash_cq_id[i]));
1855 for (i = 0; i < 10; i++) 1695 for (i = 0; i < 10; i++)
1856 printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i, 1696 pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
1857 le32_to_cpu(ricb->ipv6_hash_key[i])); 1697 le32_to_cpu(ricb->ipv6_hash_key[i]));
1858 for (i = 0; i < 4; i++) 1698 for (i = 0; i < 4; i++)
1859 printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i, 1699 pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
1860 le32_to_cpu(ricb->ipv4_hash_key[i])); 1700 le32_to_cpu(ricb->ipv4_hash_key[i]));
1861} 1701}
1862 1702
1863void ql_dump_cqicb(struct cqicb *cqicb) 1703void ql_dump_cqicb(struct cqicb *cqicb)
1864{ 1704{
1865 printk(KERN_ERR PFX "Dumping cqicb stuff...\n"); 1705 pr_err("Dumping cqicb stuff...\n");
1866 1706
1867 printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect); 1707 pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
1868 printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags); 1708 pr_err("cqicb->flags = %x\n", cqicb->flags);
1869 printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len)); 1709 pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
1870 printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n", 1710 pr_err("cqicb->addr = 0x%llx\n",
1871 (unsigned long long) le64_to_cpu(cqicb->addr)); 1711 (unsigned long long) le64_to_cpu(cqicb->addr));
1872 printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n", 1712 pr_err("cqicb->prod_idx_addr = 0x%llx\n",
1873 (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); 1713 (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
1874 printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n", 1714 pr_err("cqicb->pkt_delay = 0x%.04x\n",
1875 le16_to_cpu(cqicb->pkt_delay)); 1715 le16_to_cpu(cqicb->pkt_delay));
1876 printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n", 1716 pr_err("cqicb->irq_delay = 0x%.04x\n",
1877 le16_to_cpu(cqicb->irq_delay)); 1717 le16_to_cpu(cqicb->irq_delay));
1878 printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n", 1718 pr_err("cqicb->lbq_addr = 0x%llx\n",
1879 (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); 1719 (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
1880 printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n", 1720 pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
1881 le16_to_cpu(cqicb->lbq_buf_size)); 1721 le16_to_cpu(cqicb->lbq_buf_size));
1882 printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n", 1722 pr_err("cqicb->lbq_len = 0x%.04x\n",
1883 le16_to_cpu(cqicb->lbq_len)); 1723 le16_to_cpu(cqicb->lbq_len));
1884 printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n", 1724 pr_err("cqicb->sbq_addr = 0x%llx\n",
1885 (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); 1725 (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
1886 printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n", 1726 pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
1887 le16_to_cpu(cqicb->sbq_buf_size)); 1727 le16_to_cpu(cqicb->sbq_buf_size));
1888 printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n", 1728 pr_err("cqicb->sbq_len = 0x%.04x\n",
1889 le16_to_cpu(cqicb->sbq_len)); 1729 le16_to_cpu(cqicb->sbq_len));
1890} 1730}
1891 1731
@@ -1893,100 +1733,85 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
1893{ 1733{
1894 if (rx_ring == NULL) 1734 if (rx_ring == NULL)
1895 return; 1735 return;
1896 printk(KERN_ERR PFX 1736 pr_err("===================== Dumping rx_ring %d ===============\n",
1897 "===================== Dumping rx_ring %d ===============.\n",
1898 rx_ring->cq_id); 1737 rx_ring->cq_id);
1899 printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n", 1738 pr_err("Dumping rx_ring %d, type = %s%s%s\n",
1900 rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", 1739 rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
1901 rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", 1740 rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
1902 rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); 1741 rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
1903 printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb); 1742 pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
1904 printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base); 1743 pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
1905 printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n", 1744 pr_err("rx_ring->cq_base_dma = %llx\n",
1906 (unsigned long long) rx_ring->cq_base_dma); 1745 (unsigned long long) rx_ring->cq_base_dma);
1907 printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size); 1746 pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
1908 printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len); 1747 pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
1909 printk(KERN_ERR PFX 1748 pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
1910 "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
1911 rx_ring->prod_idx_sh_reg, 1749 rx_ring->prod_idx_sh_reg,
1912 rx_ring->prod_idx_sh_reg 1750 rx_ring->prod_idx_sh_reg
1913 ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); 1751 ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
1914 printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n", 1752 pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
1915 (unsigned long long) rx_ring->prod_idx_sh_reg_dma); 1753 (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
1916 printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n", 1754 pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
1917 rx_ring->cnsmr_idx_db_reg); 1755 rx_ring->cnsmr_idx_db_reg);
1918 printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx); 1756 pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
1919 printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry); 1757 pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
1920 printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n", 1758 pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
1921 rx_ring->valid_db_reg);
1922 1759
1923 printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base); 1760 pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
1924 printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n", 1761 pr_err("rx_ring->lbq_base_dma = %llx\n",
1925 (unsigned long long) rx_ring->lbq_base_dma); 1762 (unsigned long long) rx_ring->lbq_base_dma);
1926 printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n", 1763 pr_err("rx_ring->lbq_base_indirect = %p\n",
1927 rx_ring->lbq_base_indirect); 1764 rx_ring->lbq_base_indirect);
1928 printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n", 1765 pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
1929 (unsigned long long) rx_ring->lbq_base_indirect_dma); 1766 (unsigned long long) rx_ring->lbq_base_indirect_dma);
1930 printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq); 1767 pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
1931 printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len); 1768 pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
1932 printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size); 1769 pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
1933 printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n", 1770 pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
1934 rx_ring->lbq_prod_idx_db_reg); 1771 rx_ring->lbq_prod_idx_db_reg);
1935 printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n", 1772 pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
1936 rx_ring->lbq_prod_idx); 1773 pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
1937 printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n", 1774 pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
1938 rx_ring->lbq_curr_idx); 1775 pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
1939 printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n", 1776 pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
1940 rx_ring->lbq_clean_idx); 1777
1941 printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n", 1778 pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
1942 rx_ring->lbq_free_cnt); 1779 pr_err("rx_ring->sbq_base_dma = %llx\n",
1943 printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
1944 rx_ring->lbq_buf_size);
1945
1946 printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
1947 printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
1948 (unsigned long long) rx_ring->sbq_base_dma); 1780 (unsigned long long) rx_ring->sbq_base_dma);
1949 printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n", 1781 pr_err("rx_ring->sbq_base_indirect = %p\n",
1950 rx_ring->sbq_base_indirect); 1782 rx_ring->sbq_base_indirect);
1951 printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n", 1783 pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
1952 (unsigned long long) rx_ring->sbq_base_indirect_dma); 1784 (unsigned long long) rx_ring->sbq_base_indirect_dma);
1953 printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq); 1785 pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
1954 printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len); 1786 pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
1955 printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size); 1787 pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
1956 printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n", 1788 pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
1957 rx_ring->sbq_prod_idx_db_reg); 1789 rx_ring->sbq_prod_idx_db_reg);
1958 printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n", 1790 pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
1959 rx_ring->sbq_prod_idx); 1791 pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
1960 printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n", 1792 pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
1961 rx_ring->sbq_curr_idx); 1793 pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
1962 printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n", 1794 pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
1963 rx_ring->sbq_clean_idx); 1795 pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
1964 printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n", 1796 pr_err("rx_ring->irq = %d\n", rx_ring->irq);
1965 rx_ring->sbq_free_cnt); 1797 pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
1966 printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n", 1798 pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
1967 rx_ring->sbq_buf_size);
1968 printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
1969 printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
1970 printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
1971 printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
1972} 1799}
1973 1800
1974void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) 1801void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
1975{ 1802{
1976 void *ptr; 1803 void *ptr;
1977 1804
1978 printk(KERN_ERR PFX "%s: Enter.\n", __func__); 1805 pr_err("%s: Enter\n", __func__);
1979 1806
1980 ptr = kmalloc(size, GFP_ATOMIC); 1807 ptr = kmalloc(size, GFP_ATOMIC);
1981 if (ptr == NULL) { 1808 if (ptr == NULL) {
1982 printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n", 1809 pr_err("%s: Couldn't allocate a buffer\n", __func__);
1983 __func__);
1984 return; 1810 return;
1985 } 1811 }
1986 1812
1987 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { 1813 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
1988 printk(KERN_ERR "%s: Failed to upload control block!\n", 1814 pr_err("%s: Failed to upload control block!\n", __func__);
1989 __func__);
1990 goto fail_it; 1815 goto fail_it;
1991 } 1816 }
1992 switch (bit) { 1817 switch (bit) {
@@ -2000,8 +1825,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
2000 ql_dump_ricb((struct ricb *)ptr); 1825 ql_dump_ricb((struct ricb *)ptr);
2001 break; 1826 break;
2002 default: 1827 default:
2003 printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n", 1828 pr_err("%s: Invalid bit value = %x\n", __func__, bit);
2004 __func__, bit);
2005 break; 1829 break;
2006 } 1830 }
2007fail_it: 1831fail_it:
@@ -2012,27 +1836,27 @@ fail_it:
2012#ifdef QL_OB_DUMP 1836#ifdef QL_OB_DUMP
2013void ql_dump_tx_desc(struct tx_buf_desc *tbd) 1837void ql_dump_tx_desc(struct tx_buf_desc *tbd)
2014{ 1838{
2015 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", 1839 pr_err("tbd->addr = 0x%llx\n",
2016 le64_to_cpu((u64) tbd->addr)); 1840 le64_to_cpu((u64) tbd->addr));
2017 printk(KERN_ERR PFX "tbd->len = %d\n", 1841 pr_err("tbd->len = %d\n",
2018 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); 1842 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
2019 printk(KERN_ERR PFX "tbd->flags = %s %s\n", 1843 pr_err("tbd->flags = %s %s\n",
2020 tbd->len & TX_DESC_C ? "C" : ".", 1844 tbd->len & TX_DESC_C ? "C" : ".",
2021 tbd->len & TX_DESC_E ? "E" : "."); 1845 tbd->len & TX_DESC_E ? "E" : ".");
2022 tbd++; 1846 tbd++;
2023 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", 1847 pr_err("tbd->addr = 0x%llx\n",
2024 le64_to_cpu((u64) tbd->addr)); 1848 le64_to_cpu((u64) tbd->addr));
2025 printk(KERN_ERR PFX "tbd->len = %d\n", 1849 pr_err("tbd->len = %d\n",
2026 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); 1850 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
2027 printk(KERN_ERR PFX "tbd->flags = %s %s\n", 1851 pr_err("tbd->flags = %s %s\n",
2028 tbd->len & TX_DESC_C ? "C" : ".", 1852 tbd->len & TX_DESC_C ? "C" : ".",
2029 tbd->len & TX_DESC_E ? "E" : "."); 1853 tbd->len & TX_DESC_E ? "E" : ".");
2030 tbd++; 1854 tbd++;
2031 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", 1855 pr_err("tbd->addr = 0x%llx\n",
2032 le64_to_cpu((u64) tbd->addr)); 1856 le64_to_cpu((u64) tbd->addr));
2033 printk(KERN_ERR PFX "tbd->len = %d\n", 1857 pr_err("tbd->len = %d\n",
2034 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); 1858 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
2035 printk(KERN_ERR PFX "tbd->flags = %s %s\n", 1859 pr_err("tbd->flags = %s %s\n",
2036 tbd->len & TX_DESC_C ? "C" : ".", 1860 tbd->len & TX_DESC_C ? "C" : ".",
2037 tbd->len & TX_DESC_E ? "E" : "."); 1861 tbd->len & TX_DESC_E ? "E" : ".");
2038 1862
@@ -2045,38 +1869,38 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
2045 struct tx_buf_desc *tbd; 1869 struct tx_buf_desc *tbd;
2046 u16 frame_len; 1870 u16 frame_len;
2047 1871
2048 printk(KERN_ERR PFX "%s\n", __func__); 1872 pr_err("%s\n", __func__);
2049 printk(KERN_ERR PFX "opcode = %s\n", 1873 pr_err("opcode = %s\n",
2050 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); 1874 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
2051 printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n", 1875 pr_err("flags1 = %s %s %s %s %s\n",
2052 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", 1876 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
2053 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", 1877 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
2054 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", 1878 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
2055 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", 1879 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
2056 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); 1880 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
2057 printk(KERN_ERR PFX "flags2 = %s %s %s\n", 1881 pr_err("flags2 = %s %s %s\n",
2058 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", 1882 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
2059 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", 1883 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
2060 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); 1884 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
2061 printk(KERN_ERR PFX "flags3 = %s %s %s\n", 1885 pr_err("flags3 = %s %s %s\n",
2062 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", 1886 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
2063 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", 1887 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
2064 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); 1888 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
2065 printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid); 1889 pr_err("tid = %x\n", ob_mac_iocb->tid);
2066 printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx); 1890 pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
2067 printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); 1891 pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
2068 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { 1892 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
2069 printk(KERN_ERR PFX "frame_len = %d\n", 1893 pr_err("frame_len = %d\n",
2070 le32_to_cpu(ob_mac_tso_iocb->frame_len)); 1894 le32_to_cpu(ob_mac_tso_iocb->frame_len));
2071 printk(KERN_ERR PFX "mss = %d\n", 1895 pr_err("mss = %d\n",
2072 le16_to_cpu(ob_mac_tso_iocb->mss)); 1896 le16_to_cpu(ob_mac_tso_iocb->mss));
2073 printk(KERN_ERR PFX "prot_hdr_len = %d\n", 1897 pr_err("prot_hdr_len = %d\n",
2074 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); 1898 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
2075 printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n", 1899 pr_err("hdr_offset = 0x%.04x\n",
2076 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); 1900 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
2077 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); 1901 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
2078 } else { 1902 } else {
2079 printk(KERN_ERR PFX "frame_len = %d\n", 1903 pr_err("frame_len = %d\n",
2080 le16_to_cpu(ob_mac_iocb->frame_len)); 1904 le16_to_cpu(ob_mac_iocb->frame_len));
2081 frame_len = le16_to_cpu(ob_mac_iocb->frame_len); 1905 frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
2082 } 1906 }
@@ -2086,9 +1910,9 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
2086 1910
2087void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) 1911void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
2088{ 1912{
2089 printk(KERN_ERR PFX "%s\n", __func__); 1913 pr_err("%s\n", __func__);
2090 printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode); 1914 pr_err("opcode = %d\n", ob_mac_rsp->opcode);
2091 printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n", 1915 pr_err("flags = %s %s %s %s %s %s %s\n",
2092 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", 1916 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
2093 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", 1917 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
2094 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", 1918 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
@@ -2096,16 +1920,16 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
2096 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", 1920 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
2097 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", 1921 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
2098 ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); 1922 ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
2099 printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid); 1923 pr_err("tid = %x\n", ob_mac_rsp->tid);
2100} 1924}
2101#endif 1925#endif
2102 1926
2103#ifdef QL_IB_DUMP 1927#ifdef QL_IB_DUMP
2104void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) 1928void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
2105{ 1929{
2106 printk(KERN_ERR PFX "%s\n", __func__); 1930 pr_err("%s\n", __func__);
2107 printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode); 1931 pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
2108 printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n", 1932 pr_err("flags1 = %s%s%s%s%s%s\n",
2109 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", 1933 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
2110 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", 1934 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
2111 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", 1935 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
@@ -2114,7 +1938,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
2114 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : ""); 1938 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
2115 1939
2116 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) 1940 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
2117 printk(KERN_ERR PFX "%s%s%s Multicast.\n", 1941 pr_err("%s%s%s Multicast\n",
2118 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1942 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2119 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", 1943 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
2120 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1944 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -2122,7 +1946,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
2122 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1946 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2123 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); 1947 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2124 1948
2125 printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n", 1949 pr_err("flags2 = %s%s%s%s%s\n",
2126 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", 1950 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
2127 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", 1951 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
2128 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", 1952 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
@@ -2130,7 +1954,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
2130 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); 1954 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
2131 1955
2132 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) 1956 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
2133 printk(KERN_ERR PFX "%s%s%s%s%s error.\n", 1957 pr_err("%s%s%s%s%s error\n",
2134 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == 1958 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
2135 IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", 1959 IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
2136 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == 1960 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
@@ -2142,12 +1966,12 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
2142 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == 1966 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
2143 IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); 1967 IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
2144 1968
2145 printk(KERN_ERR PFX "flags3 = %s%s.\n", 1969 pr_err("flags3 = %s%s\n",
2146 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", 1970 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
2147 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); 1971 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
2148 1972
2149 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) 1973 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
2150 printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n", 1974 pr_err("RSS flags = %s%s%s%s\n",
2151 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == 1975 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
2152 IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", 1976 IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
2153 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == 1977 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
@@ -2157,26 +1981,26 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
2157 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == 1981 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
2158 IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); 1982 IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
2159 1983
2160 printk(KERN_ERR PFX "data_len = %d\n", 1984 pr_err("data_len = %d\n",
2161 le32_to_cpu(ib_mac_rsp->data_len)); 1985 le32_to_cpu(ib_mac_rsp->data_len));
2162 printk(KERN_ERR PFX "data_addr = 0x%llx\n", 1986 pr_err("data_addr = 0x%llx\n",
2163 (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); 1987 (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
2164 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) 1988 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
2165 printk(KERN_ERR PFX "rss = %x\n", 1989 pr_err("rss = %x\n",
2166 le32_to_cpu(ib_mac_rsp->rss)); 1990 le32_to_cpu(ib_mac_rsp->rss));
2167 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) 1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
2168 printk(KERN_ERR PFX "vlan_id = %x\n", 1992 pr_err("vlan_id = %x\n",
2169 le16_to_cpu(ib_mac_rsp->vlan_id)); 1993 le16_to_cpu(ib_mac_rsp->vlan_id));
2170 1994
2171 printk(KERN_ERR PFX "flags4 = %s%s%s.\n", 1995 pr_err("flags4 = %s%s%s\n",
2172 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", 1996 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
2173 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", 1997 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
2174 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : ""); 1998 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
2175 1999
2176 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { 2000 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2177 printk(KERN_ERR PFX "hdr length = %d.\n", 2001 pr_err("hdr length = %d\n",
2178 le32_to_cpu(ib_mac_rsp->hdr_len)); 2002 le32_to_cpu(ib_mac_rsp->hdr_len));
2179 printk(KERN_ERR PFX "hdr addr = 0x%llx.\n", 2003 pr_err("hdr addr = 0x%llx\n",
2180 (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); 2004 (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
2181 } 2005 }
2182} 2006}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index d10bcefc0e45..8d63f69b27d9 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -574,6 +574,22 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
574 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ 574 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 break; 575 break;
576 } 576 }
577 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
578 {
579 value = RT_IDX_DST_DFLT_Q | /* dest */
580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_IP_CSUM_ERR_SLOT <<
582 RT_IDX_IDX_SHIFT); /* index */
583 break;
584 }
585 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
586 {
587 value = RT_IDX_DST_DFLT_Q | /* dest */
588 RT_IDX_TYPE_NICQ | /* type */
589 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
590 RT_IDX_IDX_SHIFT); /* index */
591 break;
592 }
577 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ 593 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
578 { 594 {
579 value = RT_IDX_DST_DFLT_Q | /* dest */ 595 value = RT_IDX_DST_DFLT_Q | /* dest */
@@ -1521,7 +1537,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1521 1537
1522 /* Frame error, so drop the packet. */ 1538 /* Frame error, so drop the packet. */
1523 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1539 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1524 netif_err(qdev, drv, qdev->ndev, 1540 netif_info(qdev, drv, qdev->ndev,
1525 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); 1541 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1526 rx_ring->rx_errors++; 1542 rx_ring->rx_errors++;
1527 goto err_out; 1543 goto err_out;
@@ -1618,7 +1634,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1618 1634
1619 /* Frame error, so drop the packet. */ 1635 /* Frame error, so drop the packet. */
1620 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1636 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1621 netif_err(qdev, drv, qdev->ndev, 1637 netif_info(qdev, drv, qdev->ndev,
1622 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); 1638 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1623 dev_kfree_skb_any(skb); 1639 dev_kfree_skb_any(skb);
1624 rx_ring->rx_errors++; 1640 rx_ring->rx_errors++;
@@ -1677,7 +1693,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1677 /* Unfragmented ipv4 UDP frame. */ 1693 /* Unfragmented ipv4 UDP frame. */
1678 struct iphdr *iph = (struct iphdr *) skb->data; 1694 struct iphdr *iph = (struct iphdr *) skb->data;
1679 if (!(iph->frag_off & 1695 if (!(iph->frag_off &
1680 cpu_to_be16(IP_MF|IP_OFFSET))) { 1696 ntohs(IP_MF|IP_OFFSET))) {
1681 skb->ip_summed = CHECKSUM_UNNECESSARY; 1697 skb->ip_summed = CHECKSUM_UNNECESSARY;
1682 netif_printk(qdev, rx_status, KERN_DEBUG, 1698 netif_printk(qdev, rx_status, KERN_DEBUG,
1683 qdev->ndev, 1699 qdev->ndev,
@@ -1939,7 +1955,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1939 1955
1940 /* Frame error, so drop the packet. */ 1956 /* Frame error, so drop the packet. */
1941 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1957 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1942 netif_err(qdev, drv, qdev->ndev, 1958 netif_info(qdev, drv, qdev->ndev,
1943 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); 1959 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1944 dev_kfree_skb_any(skb); 1960 dev_kfree_skb_any(skb);
1945 rx_ring->rx_errors++; 1961 rx_ring->rx_errors++;
@@ -1997,7 +2013,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1997 /* Unfragmented ipv4 UDP frame. */ 2013 /* Unfragmented ipv4 UDP frame. */
1998 struct iphdr *iph = (struct iphdr *) skb->data; 2014 struct iphdr *iph = (struct iphdr *) skb->data;
1999 if (!(iph->frag_off & 2015 if (!(iph->frag_off &
2000 cpu_to_be16(IP_MF|IP_OFFSET))) { 2016 ntohs(IP_MF|IP_OFFSET))) {
2001 skb->ip_summed = CHECKSUM_UNNECESSARY; 2017 skb->ip_summed = CHECKSUM_UNNECESSARY;
2002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 2018 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003 "TCP checksum done!\n"); 2019 "TCP checksum done!\n");
@@ -3587,10 +3603,20 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3587 if (status) 3603 if (status)
3588 return status; 3604 return status;
3589 3605
3590 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3606 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3607 RT_IDX_IP_CSUM_ERR, 1);
3608 if (status) {
3609 netif_err(qdev, ifup, qdev->ndev,
3610 "Failed to init routing register "
3611 "for IP CSUM error packets.\n");
3612 goto exit;
3613 }
3614 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3615 RT_IDX_TU_CSUM_ERR, 1);
3591 if (status) { 3616 if (status) {
3592 netif_err(qdev, ifup, qdev->ndev, 3617 netif_err(qdev, ifup, qdev->ndev,
3593 "Failed to init routing register for error packets.\n"); 3618 "Failed to init routing register "
3619 "for TCP/UDP CSUM error packets.\n");
3594 goto exit; 3620 goto exit;
3595 } 3621 }
3596 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); 3622 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
@@ -3919,6 +3945,11 @@ static int ql_adapter_up(struct ql_adapter *qdev)
3919 if ((ql_read32(qdev, STS) & qdev->port_init) && 3945 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3920 (ql_read32(qdev, STS) & qdev->port_link_up)) 3946 (ql_read32(qdev, STS) & qdev->port_link_up))
3921 ql_link_on(qdev); 3947 ql_link_on(qdev);
3948 /* Restore rx mode. */
3949 clear_bit(QL_ALLMULTI, &qdev->flags);
3950 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3951 qlge_set_multicast_list(qdev->ndev);
3952
3922 ql_enable_interrupts(qdev); 3953 ql_enable_interrupts(qdev);
3923 ql_enable_all_completion_interrupts(qdev); 3954 ql_enable_all_completion_interrupts(qdev);
3924 netif_tx_start_all_queues(qdev->ndev); 3955 netif_tx_start_all_queues(qdev->ndev);
@@ -4204,7 +4235,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
4204 return &ndev->stats; 4235 return &ndev->stats;
4205} 4236}
4206 4237
4207static void qlge_set_multicast_list(struct net_device *ndev) 4238void qlge_set_multicast_list(struct net_device *ndev)
4208{ 4239{
4209 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4240 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4210 struct netdev_hw_addr *ha; 4241 struct netdev_hw_addr *ha;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 3c00462a5d22..f84e8570c7cb 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -606,23 +606,6 @@ end:
606 return status; 606 return status;
607} 607}
608 608
609int ql_mb_sys_err(struct ql_adapter *qdev)
610{
611 struct mbox_params mbc;
612 struct mbox_params *mbcp = &mbc;
613 int status;
614
615 memset(mbcp, 0, sizeof(struct mbox_params));
616
617 mbcp->in_count = 1;
618 mbcp->out_count = 0;
619
620 mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
621
622 status = ql_mailbox_command(qdev, mbcp);
623 return status;
624}
625
626/* Get MPI firmware version. This will be used for 609/* Get MPI firmware version. This will be used for
627 * driver banner and for ethtool info. 610 * driver banner and for ethtool info.
628 * Returns zero on success. 611 * Returns zero on success.
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 9a251acf5ab8..142c381e1d73 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -44,12 +44,13 @@
44#include <linux/io.h> 44#include <linux/io.h>
45#include <linux/irq.h> 45#include <linux/irq.h>
46#include <linux/uaccess.h> 46#include <linux/uaccess.h>
47#include <linux/phy.h>
47 48
48#include <asm/processor.h> 49#include <asm/processor.h>
49 50
50#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
51#define DRV_VERSION "0.25" 52#define DRV_VERSION "0.26"
52#define DRV_RELDATE "20Aug2009" 53#define DRV_RELDATE "30May2010"
53 54
54/* PHY CHIP Address */ 55/* PHY CHIP Address */
55#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -179,7 +180,6 @@ struct r6040_descriptor {
179 180
180struct r6040_private { 181struct r6040_private {
181 spinlock_t lock; /* driver lock */ 182 spinlock_t lock; /* driver lock */
182 struct timer_list timer;
183 struct pci_dev *pdev; 183 struct pci_dev *pdev;
184 struct r6040_descriptor *rx_insert_ptr; 184 struct r6040_descriptor *rx_insert_ptr;
185 struct r6040_descriptor *rx_remove_ptr; 185 struct r6040_descriptor *rx_remove_ptr;
@@ -189,13 +189,15 @@ struct r6040_private {
189 struct r6040_descriptor *tx_ring; 189 struct r6040_descriptor *tx_ring;
190 dma_addr_t rx_ring_dma; 190 dma_addr_t rx_ring_dma;
191 dma_addr_t tx_ring_dma; 191 dma_addr_t tx_ring_dma;
192 u16 tx_free_desc, phy_addr, phy_mode; 192 u16 tx_free_desc, phy_addr;
193 u16 mcr0, mcr1; 193 u16 mcr0, mcr1;
194 u16 switch_sig;
195 struct net_device *dev; 194 struct net_device *dev;
196 struct mii_if_info mii_if; 195 struct mii_bus *mii_bus;
197 struct napi_struct napi; 196 struct napi_struct napi;
198 void __iomem *base; 197 void __iomem *base;
198 struct phy_device *phydev;
199 int old_link;
200 int old_duplex;
199}; 201};
200 202
201static char version[] __devinitdata = KERN_INFO DRV_NAME 203static char version[] __devinitdata = KERN_INFO DRV_NAME
@@ -238,20 +240,30 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
238 } 240 }
239} 241}
240 242
241static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg) 243static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
242{ 244{
245 struct net_device *dev = bus->priv;
243 struct r6040_private *lp = netdev_priv(dev); 246 struct r6040_private *lp = netdev_priv(dev);
244 void __iomem *ioaddr = lp->base; 247 void __iomem *ioaddr = lp->base;
245 248
246 return (r6040_phy_read(ioaddr, lp->phy_addr, reg)); 249 return r6040_phy_read(ioaddr, phy_addr, reg);
247} 250}
248 251
249static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val) 252static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
253 int reg, u16 value)
250{ 254{
255 struct net_device *dev = bus->priv;
251 struct r6040_private *lp = netdev_priv(dev); 256 struct r6040_private *lp = netdev_priv(dev);
252 void __iomem *ioaddr = lp->base; 257 void __iomem *ioaddr = lp->base;
253 258
254 r6040_phy_write(ioaddr, lp->phy_addr, reg, val); 259 r6040_phy_write(ioaddr, phy_addr, reg, value);
260
261 return 0;
262}
263
264static int r6040_mdiobus_reset(struct mii_bus *bus)
265{
266 return 0;
255} 267}
256 268
257static void r6040_free_txbufs(struct net_device *dev) 269static void r6040_free_txbufs(struct net_device *dev)
@@ -408,10 +420,9 @@ static void r6040_tx_timeout(struct net_device *dev)
408 void __iomem *ioaddr = priv->base; 420 void __iomem *ioaddr = priv->base;
409 421
410 netdev_warn(dev, "transmit timed out, int enable %4.4x " 422 netdev_warn(dev, "transmit timed out, int enable %4.4x "
411 "status %4.4x, PHY status %4.4x\n", 423 "status %4.4x\n",
412 ioread16(ioaddr + MIER), 424 ioread16(ioaddr + MIER),
413 ioread16(ioaddr + MISR), 425 ioread16(ioaddr + MISR));
414 r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
415 426
416 dev->stats.tx_errors++; 427 dev->stats.tx_errors++;
417 428
@@ -463,9 +474,6 @@ static int r6040_close(struct net_device *dev)
463 struct r6040_private *lp = netdev_priv(dev); 474 struct r6040_private *lp = netdev_priv(dev);
464 struct pci_dev *pdev = lp->pdev; 475 struct pci_dev *pdev = lp->pdev;
465 476
466 /* deleted timer */
467 del_timer_sync(&lp->timer);
468
469 spin_lock_irq(&lp->lock); 477 spin_lock_irq(&lp->lock);
470 napi_disable(&lp->napi); 478 napi_disable(&lp->napi);
471 netif_stop_queue(dev); 479 netif_stop_queue(dev);
@@ -495,64 +503,14 @@ static int r6040_close(struct net_device *dev)
495 return 0; 503 return 0;
496} 504}
497 505
498/* Status of PHY CHIP */
499static int r6040_phy_mode_chk(struct net_device *dev)
500{
501 struct r6040_private *lp = netdev_priv(dev);
502 void __iomem *ioaddr = lp->base;
503 int phy_dat;
504
505 /* PHY Link Status Check */
506 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
507 if (!(phy_dat & 0x4))
508 phy_dat = 0x8000; /* Link Failed, full duplex */
509
510 /* PHY Chip Auto-Negotiation Status */
511 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
512 if (phy_dat & 0x0020) {
513 /* Auto Negotiation Mode */
514 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5);
515 phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4);
516 if (phy_dat & 0x140)
517 /* Force full duplex */
518 phy_dat = 0x8000;
519 else
520 phy_dat = 0;
521 } else {
522 /* Force Mode */
523 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0);
524 if (phy_dat & 0x100)
525 phy_dat = 0x8000;
526 else
527 phy_dat = 0x0000;
528 }
529
530 return phy_dat;
531};
532
533static void r6040_set_carrier(struct mii_if_info *mii)
534{
535 if (r6040_phy_mode_chk(mii->dev)) {
536 /* autoneg is off: Link is always assumed to be up */
537 if (!netif_carrier_ok(mii->dev))
538 netif_carrier_on(mii->dev);
539 } else
540 r6040_phy_mode_chk(mii->dev);
541}
542
543static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 506static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
544{ 507{
545 struct r6040_private *lp = netdev_priv(dev); 508 struct r6040_private *lp = netdev_priv(dev);
546 struct mii_ioctl_data *data = if_mii(rq);
547 int rc;
548 509
549 if (!netif_running(dev)) 510 if (!lp->phydev)
550 return -EINVAL; 511 return -EINVAL;
551 spin_lock_irq(&lp->lock); 512
552 rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL); 513 return phy_mii_ioctl(lp->phydev, rq, cmd);
553 spin_unlock_irq(&lp->lock);
554 r6040_set_carrier(&lp->mii_if);
555 return rc;
556} 514}
557 515
558static int r6040_rx(struct net_device *dev, int limit) 516static int r6040_rx(struct net_device *dev, int limit)
@@ -751,26 +709,6 @@ static int r6040_up(struct net_device *dev)
751 if (ret) 709 if (ret)
752 return ret; 710 return ret;
753 711
754 /* Read the PHY ID */
755 lp->switch_sig = r6040_phy_read(ioaddr, 0, 2);
756
757 if (lp->switch_sig == ICPLUS_PHY_ID) {
758 r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
759 lp->phy_mode = 0x8000;
760 } else {
761 /* PHY Mode Check */
762 r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
763 r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
764
765 if (PHY_MODE == 0x3100)
766 lp->phy_mode = r6040_phy_mode_chk(dev);
767 else
768 lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
769 }
770
771 /* Set duplex mode */
772 lp->mcr0 |= lp->phy_mode;
773
774 /* improve performance (by RDC guys) */ 712 /* improve performance (by RDC guys) */
775 r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); 713 r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
776 r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); 714 r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
@@ -783,35 +721,6 @@ static int r6040_up(struct net_device *dev)
783 return 0; 721 return 0;
784} 722}
785 723
786/*
787 A periodic timer routine
788 Polling PHY Chip Link Status
789*/
790static void r6040_timer(unsigned long data)
791{
792 struct net_device *dev = (struct net_device *)data;
793 struct r6040_private *lp = netdev_priv(dev);
794 void __iomem *ioaddr = lp->base;
795 u16 phy_mode;
796
797 /* Polling PHY Chip Status */
798 if (PHY_MODE == 0x3100)
799 phy_mode = r6040_phy_mode_chk(dev);
800 else
801 phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
802
803 if (phy_mode != lp->phy_mode) {
804 lp->phy_mode = phy_mode;
805 lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode;
806 iowrite16(lp->mcr0, ioaddr);
807 }
808
809 /* Timer active again */
810 mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
811
812 /* Check media */
813 mii_check_media(&lp->mii_if, 1, 1);
814}
815 724
816/* Read/set MAC address routines */ 725/* Read/set MAC address routines */
817static void r6040_mac_address(struct net_device *dev) 726static void r6040_mac_address(struct net_device *dev)
@@ -873,10 +782,6 @@ static int r6040_open(struct net_device *dev)
873 napi_enable(&lp->napi); 782 napi_enable(&lp->napi);
874 netif_start_queue(dev); 783 netif_start_queue(dev);
875 784
876 /* set and active a timer process */
877 setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
878 if (lp->switch_sig != ICPLUS_PHY_ID)
879 mod_timer(&lp->timer, jiffies + HZ);
880 return 0; 785 return 0;
881} 786}
882 787
@@ -1015,40 +920,22 @@ static void netdev_get_drvinfo(struct net_device *dev,
1015static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 920static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1016{ 921{
1017 struct r6040_private *rp = netdev_priv(dev); 922 struct r6040_private *rp = netdev_priv(dev);
1018 int rc;
1019
1020 spin_lock_irq(&rp->lock);
1021 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1022 spin_unlock_irq(&rp->lock);
1023 923
1024 return rc; 924 return phy_ethtool_gset(rp->phydev, cmd);
1025} 925}
1026 926
1027static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 927static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1028{ 928{
1029 struct r6040_private *rp = netdev_priv(dev); 929 struct r6040_private *rp = netdev_priv(dev);
1030 int rc;
1031
1032 spin_lock_irq(&rp->lock);
1033 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1034 spin_unlock_irq(&rp->lock);
1035 r6040_set_carrier(&rp->mii_if);
1036
1037 return rc;
1038}
1039
1040static u32 netdev_get_link(struct net_device *dev)
1041{
1042 struct r6040_private *rp = netdev_priv(dev);
1043 930
1044 return mii_link_ok(&rp->mii_if); 931 return phy_ethtool_sset(rp->phydev, cmd);
1045} 932}
1046 933
1047static const struct ethtool_ops netdev_ethtool_ops = { 934static const struct ethtool_ops netdev_ethtool_ops = {
1048 .get_drvinfo = netdev_get_drvinfo, 935 .get_drvinfo = netdev_get_drvinfo,
1049 .get_settings = netdev_get_settings, 936 .get_settings = netdev_get_settings,
1050 .set_settings = netdev_set_settings, 937 .set_settings = netdev_set_settings,
1051 .get_link = netdev_get_link, 938 .get_link = ethtool_op_get_link,
1052}; 939};
1053 940
1054static const struct net_device_ops r6040_netdev_ops = { 941static const struct net_device_ops r6040_netdev_ops = {
@@ -1067,6 +954,79 @@ static const struct net_device_ops r6040_netdev_ops = {
1067#endif 954#endif
1068}; 955};
1069 956
957static void r6040_adjust_link(struct net_device *dev)
958{
959 struct r6040_private *lp = netdev_priv(dev);
960 struct phy_device *phydev = lp->phydev;
961 int status_changed = 0;
962 void __iomem *ioaddr = lp->base;
963
964 BUG_ON(!phydev);
965
966 if (lp->old_link != phydev->link) {
967 status_changed = 1;
968 lp->old_link = phydev->link;
969 }
970
971 /* reflect duplex change */
972 if (phydev->link && (lp->old_duplex != phydev->duplex)) {
973 lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
974 iowrite16(lp->mcr0, ioaddr);
975
976 status_changed = 1;
977 lp->old_duplex = phydev->duplex;
978 }
979
980 if (status_changed) {
981 pr_info("%s: link %s", dev->name, phydev->link ?
982 "UP" : "DOWN");
983 if (phydev->link)
984 pr_cont(" - %d/%s", phydev->speed,
985 DUPLEX_FULL == phydev->duplex ? "full" : "half");
986 pr_cont("\n");
987 }
988}
989
990static int r6040_mii_probe(struct net_device *dev)
991{
992 struct r6040_private *lp = netdev_priv(dev);
993 struct phy_device *phydev = NULL;
994
995 phydev = phy_find_first(lp->mii_bus);
996 if (!phydev) {
997 dev_err(&lp->pdev->dev, "no PHY found\n");
998 return -ENODEV;
999 }
1000
1001 phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
1002 0, PHY_INTERFACE_MODE_MII);
1003
1004 if (IS_ERR(phydev)) {
1005 dev_err(&lp->pdev->dev, "could not attach to PHY\n");
1006 return PTR_ERR(phydev);
1007 }
1008
1009 /* mask with MAC supported features */
1010 phydev->supported &= (SUPPORTED_10baseT_Half
1011 | SUPPORTED_10baseT_Full
1012 | SUPPORTED_100baseT_Half
1013 | SUPPORTED_100baseT_Full
1014 | SUPPORTED_Autoneg
1015 | SUPPORTED_MII
1016 | SUPPORTED_TP);
1017
1018 phydev->advertising = phydev->supported;
1019 lp->phydev = phydev;
1020 lp->old_link = 0;
1021 lp->old_duplex = -1;
1022
1023 dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
1024 "(mii_bus:phy_addr=%s)\n",
1025 phydev->drv->name, dev_name(&phydev->dev));
1026
1027 return 0;
1028}
1029
1070static int __devinit r6040_init_one(struct pci_dev *pdev, 1030static int __devinit r6040_init_one(struct pci_dev *pdev,
1071 const struct pci_device_id *ent) 1031 const struct pci_device_id *ent)
1072{ 1032{
@@ -1077,6 +1037,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1077 static int card_idx = -1; 1037 static int card_idx = -1;
1078 int bar = 0; 1038 int bar = 0;
1079 u16 *adrp; 1039 u16 *adrp;
1040 int i;
1080 1041
1081 printk("%s\n", version); 1042 printk("%s\n", version);
1082 1043
@@ -1163,7 +1124,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1163 /* Init RDC private data */ 1124 /* Init RDC private data */
1164 lp->mcr0 = 0x1002; 1125 lp->mcr0 = 0x1002;
1165 lp->phy_addr = phy_table[card_idx]; 1126 lp->phy_addr = phy_table[card_idx];
1166 lp->switch_sig = 0;
1167 1127
1168 /* The RDC-specific entries in the device structure. */ 1128 /* The RDC-specific entries in the device structure. */
1169 dev->netdev_ops = &r6040_netdev_ops; 1129 dev->netdev_ops = &r6040_netdev_ops;
@@ -1171,28 +1131,54 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1171 dev->watchdog_timeo = TX_TIMEOUT; 1131 dev->watchdog_timeo = TX_TIMEOUT;
1172 1132
1173 netif_napi_add(dev, &lp->napi, r6040_poll, 64); 1133 netif_napi_add(dev, &lp->napi, r6040_poll, 64);
1174 lp->mii_if.dev = dev; 1134
1175 lp->mii_if.mdio_read = r6040_mdio_read; 1135 lp->mii_bus = mdiobus_alloc();
1176 lp->mii_if.mdio_write = r6040_mdio_write; 1136 if (!lp->mii_bus) {
1177 lp->mii_if.phy_id = lp->phy_addr; 1137 dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
1178 lp->mii_if.phy_id_mask = 0x1f;
1179 lp->mii_if.reg_num_mask = 0x1f;
1180
1181 /* Check the vendor ID on the PHY, if 0xffff assume none attached */
1182 if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
1183 dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
1184 err = -ENODEV;
1185 goto err_out_unmap; 1138 goto err_out_unmap;
1186 } 1139 }
1187 1140
1141 lp->mii_bus->priv = dev;
1142 lp->mii_bus->read = r6040_mdiobus_read;
1143 lp->mii_bus->write = r6040_mdiobus_write;
1144 lp->mii_bus->reset = r6040_mdiobus_reset;
1145 lp->mii_bus->name = "r6040_eth_mii";
1146 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
1147 lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1148 if (!lp->mii_bus->irq) {
1149 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
1150 goto err_out_mdio;
1151 }
1152
1153 for (i = 0; i < PHY_MAX_ADDR; i++)
1154 lp->mii_bus->irq[i] = PHY_POLL;
1155
1156 err = mdiobus_register(lp->mii_bus);
1157 if (err) {
1158 dev_err(&pdev->dev, "failed to register MII bus\n");
1159 goto err_out_mdio_irq;
1160 }
1161
1162 err = r6040_mii_probe(dev);
1163 if (err) {
1164 dev_err(&pdev->dev, "failed to probe MII bus\n");
1165 goto err_out_mdio_unregister;
1166 }
1167
1188 /* Register net device. After this dev->name assign */ 1168 /* Register net device. After this dev->name assign */
1189 err = register_netdev(dev); 1169 err = register_netdev(dev);
1190 if (err) { 1170 if (err) {
1191 dev_err(&pdev->dev, "Failed to register net device\n"); 1171 dev_err(&pdev->dev, "Failed to register net device\n");
1192 goto err_out_unmap; 1172 goto err_out_mdio_unregister;
1193 } 1173 }
1194 return 0; 1174 return 0;
1195 1175
1176err_out_mdio_unregister:
1177 mdiobus_unregister(lp->mii_bus);
1178err_out_mdio_irq:
1179 kfree(lp->mii_bus->irq);
1180err_out_mdio:
1181 mdiobus_free(lp->mii_bus);
1196err_out_unmap: 1182err_out_unmap:
1197 pci_iounmap(pdev, ioaddr); 1183 pci_iounmap(pdev, ioaddr);
1198err_out_free_res: 1184err_out_free_res:
@@ -1206,8 +1192,12 @@ err_out:
1206static void __devexit r6040_remove_one(struct pci_dev *pdev) 1192static void __devexit r6040_remove_one(struct pci_dev *pdev)
1207{ 1193{
1208 struct net_device *dev = pci_get_drvdata(pdev); 1194 struct net_device *dev = pci_get_drvdata(pdev);
1195 struct r6040_private *lp = netdev_priv(dev);
1209 1196
1210 unregister_netdev(dev); 1197 unregister_netdev(dev);
1198 mdiobus_unregister(lp->mii_bus);
1199 kfree(lp->mii_bus->irq);
1200 mdiobus_free(lp->mii_bus);
1211 pci_release_regions(pdev); 1201 pci_release_regions(pdev);
1212 free_netdev(dev); 1202 free_netdev(dev);
1213 pci_disable_device(pdev); 1203 pci_disable_device(pdev);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index cdc6a5c2e70d..35540411990d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -88,7 +88,7 @@ static const int multicast_filter_limit = 32;
88#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) 88#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
89#define RTL_R8(reg) readb (ioaddr + (reg)) 89#define RTL_R8(reg) readb (ioaddr + (reg))
90#define RTL_R16(reg) readw (ioaddr + (reg)) 90#define RTL_R16(reg) readw (ioaddr + (reg))
91#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg))) 91#define RTL_R32(reg) readl (ioaddr + (reg))
92 92
93enum mac_version { 93enum mac_version {
94 RTL_GIGA_MAC_NONE = 0x00, 94 RTL_GIGA_MAC_NONE = 0x00,
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 416669fd68c6..3688325c11f5 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -1,6 +1,6 @@
1/************************************************************************ 1/************************************************************************
2 * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC 2 * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc. 3 * Copyright(c) 2002-2010 Exar Corp.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference. 6 * the GNU General Public License (GPL), incorporated herein by reference.
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 1d37f0c310ca..18bc5b718bbb 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1,6 +1,6 @@
1/************************************************************************ 1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc. 3 * Copyright(c) 2002-2010 Exar Corp.
4 * 4 *
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference. 6 * the GNU General Public License (GPL), incorporated herein by reference.
@@ -38,7 +38,7 @@
38 * Tx descriptors that can be associated with each corresponding FIFO. 38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA), 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)' 40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not. 41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0' 42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be 43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet 44 * aggregated as a single large packet
@@ -90,7 +90,7 @@
90#include "s2io.h" 90#include "s2io.h"
91#include "s2io-regs.h" 91#include "s2io-regs.h"
92 92
93#define DRV_VERSION "2.0.26.25" 93#define DRV_VERSION "2.0.26.26"
94 94
95/* S2io Driver name & version. */ 95/* S2io Driver name & version. */
96static char s2io_driver_name[] = "Neterion"; 96static char s2io_driver_name[] = "Neterion";
@@ -496,7 +496,7 @@ S2IO_PARM_INT(rxsync_frequency, 3);
496/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ 496/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
497S2IO_PARM_INT(intr_type, 2); 497S2IO_PARM_INT(intr_type, 2);
498/* Large receive offload feature */ 498/* Large receive offload feature */
499static unsigned int lro_enable; 499static unsigned int lro_enable = 1;
500module_param_named(lro, lro_enable, uint, 0); 500module_param_named(lro, lro_enable, uint, 0);
501 501
502/* Max pkts to be aggregated by LRO at one time. If not specified, 502/* Max pkts to be aggregated by LRO at one time. If not specified,
@@ -795,7 +795,6 @@ static int init_shared_mem(struct s2io_nic *nic)
795 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1; 795 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
796 ring->nic = nic; 796 ring->nic = nic;
797 ring->ring_no = i; 797 ring->ring_no = i;
798 ring->lro = lro_enable;
799 798
800 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1); 799 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
801 /* Allocating all the Rx blocks */ 800 /* Allocating all the Rx blocks */
@@ -5797,7 +5796,7 @@ static void s2io_vpd_read(struct s2io_nic *nic)
5797{ 5796{
5798 u8 *vpd_data; 5797 u8 *vpd_data;
5799 u8 data; 5798 u8 data;
5800 int i = 0, cnt, fail = 0; 5799 int i = 0, cnt, len, fail = 0;
5801 int vpd_addr = 0x80; 5800 int vpd_addr = 0x80;
5802 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; 5801 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5803 5802
@@ -5838,20 +5837,28 @@ static void s2io_vpd_read(struct s2io_nic *nic)
5838 5837
5839 if (!fail) { 5838 if (!fail) {
5840 /* read serial number of adapter */ 5839 /* read serial number of adapter */
5841 for (cnt = 0; cnt < 256; cnt++) { 5840 for (cnt = 0; cnt < 252; cnt++) {
5842 if ((vpd_data[cnt] == 'S') && 5841 if ((vpd_data[cnt] == 'S') &&
5843 (vpd_data[cnt+1] == 'N') && 5842 (vpd_data[cnt+1] == 'N')) {
5844 (vpd_data[cnt+2] < VPD_STRING_LEN)) { 5843 len = vpd_data[cnt+2];
5845 memset(nic->serial_num, 0, VPD_STRING_LEN); 5844 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5846 memcpy(nic->serial_num, &vpd_data[cnt + 3], 5845 memcpy(nic->serial_num,
5847 vpd_data[cnt+2]); 5846 &vpd_data[cnt + 3],
5848 break; 5847 len);
5848 memset(nic->serial_num+len,
5849 0,
5850 VPD_STRING_LEN-len);
5851 break;
5852 }
5849 } 5853 }
5850 } 5854 }
5851 } 5855 }
5852 5856
5853 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) 5857 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5854 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 5858 len = vpd_data[1];
5859 memcpy(nic->product_name, &vpd_data[3], len);
5860 nic->product_name[len] = 0;
5861 }
5855 kfree(vpd_data); 5862 kfree(vpd_data);
5856 swstats->mem_freed += 256; 5863 swstats->mem_freed += 256;
5857} 5864}
@@ -6707,6 +6714,7 @@ static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6707{ 6714{
6708 return (dev->features & NETIF_F_TSO) != 0; 6715 return (dev->features & NETIF_F_TSO) != 0;
6709} 6716}
6717
6710static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) 6718static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6711{ 6719{
6712 if (data) 6720 if (data)
@@ -6717,6 +6725,42 @@ static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6717 return 0; 6725 return 0;
6718} 6726}
6719 6727
6728static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6729{
6730 struct s2io_nic *sp = netdev_priv(dev);
6731 int rc = 0;
6732 int changed = 0;
6733
6734 if (data & ~ETH_FLAG_LRO)
6735 return -EINVAL;
6736
6737 if (data & ETH_FLAG_LRO) {
6738 if (lro_enable) {
6739 if (!(dev->features & NETIF_F_LRO)) {
6740 dev->features |= NETIF_F_LRO;
6741 changed = 1;
6742 }
6743 } else
6744 rc = -EINVAL;
6745 } else if (dev->features & NETIF_F_LRO) {
6746 dev->features &= ~NETIF_F_LRO;
6747 changed = 1;
6748 }
6749
6750 if (changed && netif_running(dev)) {
6751 s2io_stop_all_tx_queue(sp);
6752 s2io_card_down(sp);
6753 sp->lro = !!(dev->features & NETIF_F_LRO);
6754 rc = s2io_card_up(sp);
6755 if (rc)
6756 s2io_reset(sp);
6757 else
6758 s2io_start_all_tx_queue(sp);
6759 }
6760
6761 return rc;
6762}
6763
6720static const struct ethtool_ops netdev_ethtool_ops = { 6764static const struct ethtool_ops netdev_ethtool_ops = {
6721 .get_settings = s2io_ethtool_gset, 6765 .get_settings = s2io_ethtool_gset,
6722 .set_settings = s2io_ethtool_sset, 6766 .set_settings = s2io_ethtool_sset,
@@ -6733,6 +6777,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
6733 .get_rx_csum = s2io_ethtool_get_rx_csum, 6777 .get_rx_csum = s2io_ethtool_get_rx_csum,
6734 .set_rx_csum = s2io_ethtool_set_rx_csum, 6778 .set_rx_csum = s2io_ethtool_set_rx_csum,
6735 .set_tx_csum = s2io_ethtool_op_set_tx_csum, 6779 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6780 .set_flags = s2io_ethtool_set_flags,
6781 .get_flags = ethtool_op_get_flags,
6736 .set_sg = ethtool_op_set_sg, 6782 .set_sg = ethtool_op_set_sg,
6737 .get_tso = s2io_ethtool_op_get_tso, 6783 .get_tso = s2io_ethtool_op_get_tso,
6738 .set_tso = s2io_ethtool_op_set_tso, 6784 .set_tso = s2io_ethtool_op_set_tso,
@@ -7261,6 +7307,7 @@ static int s2io_card_up(struct s2io_nic *sp)
7261 struct ring_info *ring = &mac_control->rings[i]; 7307 struct ring_info *ring = &mac_control->rings[i];
7262 7308
7263 ring->mtu = dev->mtu; 7309 ring->mtu = dev->mtu;
7310 ring->lro = sp->lro;
7264 ret = fill_rx_buffers(sp, ring, 1); 7311 ret = fill_rx_buffers(sp, ring, 1);
7265 if (ret) { 7312 if (ret) {
7266 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7313 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
@@ -7847,7 +7894,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7847 7894
7848 /* Private member variable initialized to s2io NIC structure */ 7895 /* Private member variable initialized to s2io NIC structure */
7849 sp = netdev_priv(dev); 7896 sp = netdev_priv(dev);
7850 memset(sp, 0, sizeof(struct s2io_nic));
7851 sp->dev = dev; 7897 sp->dev = dev;
7852 sp->pdev = pdev; 7898 sp->pdev = pdev;
7853 sp->high_dma_flag = dma_flag; 7899 sp->high_dma_flag = dma_flag;
@@ -8001,7 +8047,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8001 dev->netdev_ops = &s2io_netdev_ops; 8047 dev->netdev_ops = &s2io_netdev_ops;
8002 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 8048 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8003 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8049 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8004 8050 if (lro_enable)
8051 dev->features |= NETIF_F_LRO;
8005 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 8052 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8006 if (sp->high_dma_flag == true) 8053 if (sp->high_dma_flag == true)
8007 dev->features |= NETIF_F_HIGHDMA; 8054 dev->features |= NETIF_F_HIGHDMA;
@@ -8159,7 +8206,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8159 goto register_failed; 8206 goto register_failed;
8160 } 8207 }
8161 s2io_vpd_read(sp); 8208 s2io_vpd_read(sp);
8162 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n"); 8209 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8163 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name, 8210 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8164 sp->product_name, pdev->revision); 8211 sp->product_name, pdev->revision);
8165 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 8212 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 7f3a53dcc6ef..0af033533905 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1,6 +1,6 @@
1/************************************************************************ 1/************************************************************************
2 * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC 2 * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc. 3 * Copyright(c) 2002-2010 Exar Corp.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference. 6 * the GNU General Public License (GPL), incorporated herein by reference.
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 79eee3062083..8e6bd45b9f31 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2532,7 +2532,7 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2532 if (!netif_running(dev) || !sc->phy_dev) 2532 if (!netif_running(dev) || !sc->phy_dev)
2533 return -EINVAL; 2533 return -EINVAL;
2534 2534
2535 return phy_mii_ioctl(sc->phy_dev, if_mii(rq), cmd); 2535 return phy_mii_ioctl(sc->phy_dev, rq, cmd);
2536} 2536}
2537 2537
2538static int sbmac_close(struct net_device *dev) 2538static int sbmac_close(struct net_device *dev)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 156460527231..ba674c5ca29e 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -27,6 +27,7 @@
27#include "nic.h" 27#include "nic.h"
28 28
29#include "mcdi.h" 29#include "mcdi.h"
30#include "workarounds.h"
30 31
31/************************************************************************** 32/**************************************************************************
32 * 33 *
@@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = {
92 93
93#define EFX_MAX_MTU (9 * 1024) 94#define EFX_MAX_MTU (9 * 1024)
94 95
95/* RX slow fill workqueue. If memory allocation fails in the fast path,
96 * a work item is pushed onto this work queue to retry the allocation later,
97 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
98 * workqueue, there is nothing to be gained in making it per NIC
99 */
100static struct workqueue_struct *refill_workqueue;
101
102/* Reset workqueue. If any NIC has a hardware failure then a reset will be 96/* Reset workqueue. If any NIC has a hardware failure then a reset will be
103 * queued onto this work queue. This is not a per-nic work queue, because 97 * queued onto this work queue. This is not a per-nic work queue, because
104 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 98 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -195,6 +189,13 @@ module_param(irq_adapt_high_thresh, uint, 0644);
195MODULE_PARM_DESC(irq_adapt_high_thresh, 189MODULE_PARM_DESC(irq_adapt_high_thresh,
196 "Threshold score for increasing IRQ moderation"); 190 "Threshold score for increasing IRQ moderation");
197 191
192static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
193 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
194 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
195 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
196module_param(debug, uint, 0);
197MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
198
198/************************************************************************** 199/**************************************************************************
199 * 200 *
200 * Utility functions and prototypes 201 * Utility functions and prototypes
@@ -278,16 +279,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
278{ 279{
279 struct efx_channel *channel = 280 struct efx_channel *channel =
280 container_of(napi, struct efx_channel, napi_str); 281 container_of(napi, struct efx_channel, napi_str);
282 struct efx_nic *efx = channel->efx;
281 int spent; 283 int spent;
282 284
283 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", 285 netif_vdbg(efx, intr, efx->net_dev,
284 channel->channel, raw_smp_processor_id()); 286 "channel %d NAPI poll executing on CPU %d\n",
287 channel->channel, raw_smp_processor_id());
285 288
286 spent = efx_process_channel(channel, budget); 289 spent = efx_process_channel(channel, budget);
287 290
288 if (spent < budget) { 291 if (spent < budget) {
289 struct efx_nic *efx = channel->efx;
290
291 if (channel->channel < efx->n_rx_channels && 292 if (channel->channel < efx->n_rx_channels &&
292 efx->irq_rx_adaptive && 293 efx->irq_rx_adaptive &&
293 unlikely(++channel->irq_count == 1000)) { 294 unlikely(++channel->irq_count == 1000)) {
@@ -363,7 +364,8 @@ void efx_process_channel_now(struct efx_channel *channel)
363 */ 364 */
364static int efx_probe_eventq(struct efx_channel *channel) 365static int efx_probe_eventq(struct efx_channel *channel)
365{ 366{
366 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); 367 netif_dbg(channel->efx, probe, channel->efx->net_dev,
368 "chan %d create event queue\n", channel->channel);
367 369
368 return efx_nic_probe_eventq(channel); 370 return efx_nic_probe_eventq(channel);
369} 371}
@@ -371,7 +373,8 @@ static int efx_probe_eventq(struct efx_channel *channel)
371/* Prepare channel's event queue */ 373/* Prepare channel's event queue */
372static void efx_init_eventq(struct efx_channel *channel) 374static void efx_init_eventq(struct efx_channel *channel)
373{ 375{
374 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel); 376 netif_dbg(channel->efx, drv, channel->efx->net_dev,
377 "chan %d init event queue\n", channel->channel);
375 378
376 channel->eventq_read_ptr = 0; 379 channel->eventq_read_ptr = 0;
377 380
@@ -380,14 +383,16 @@ static void efx_init_eventq(struct efx_channel *channel)
380 383
381static void efx_fini_eventq(struct efx_channel *channel) 384static void efx_fini_eventq(struct efx_channel *channel)
382{ 385{
383 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); 386 netif_dbg(channel->efx, drv, channel->efx->net_dev,
387 "chan %d fini event queue\n", channel->channel);
384 388
385 efx_nic_fini_eventq(channel); 389 efx_nic_fini_eventq(channel);
386} 390}
387 391
388static void efx_remove_eventq(struct efx_channel *channel) 392static void efx_remove_eventq(struct efx_channel *channel)
389{ 393{
390 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); 394 netif_dbg(channel->efx, drv, channel->efx->net_dev,
395 "chan %d remove event queue\n", channel->channel);
391 396
392 efx_nic_remove_eventq(channel); 397 efx_nic_remove_eventq(channel);
393} 398}
@@ -404,7 +409,8 @@ static int efx_probe_channel(struct efx_channel *channel)
404 struct efx_rx_queue *rx_queue; 409 struct efx_rx_queue *rx_queue;
405 int rc; 410 int rc;
406 411
407 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel); 412 netif_dbg(channel->efx, probe, channel->efx->net_dev,
413 "creating channel %d\n", channel->channel);
408 414
409 rc = efx_probe_eventq(channel); 415 rc = efx_probe_eventq(channel);
410 if (rc) 416 if (rc)
@@ -474,12 +480,15 @@ static void efx_init_channels(struct efx_nic *efx)
474 */ 480 */
475 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 481 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
476 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 482 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
483 efx->type->rx_buffer_hash_size +
477 efx->type->rx_buffer_padding); 484 efx->type->rx_buffer_padding);
478 efx->rx_buffer_order = get_order(efx->rx_buffer_len); 485 efx->rx_buffer_order = get_order(efx->rx_buffer_len +
486 sizeof(struct efx_rx_page_state));
479 487
480 /* Initialise the channels */ 488 /* Initialise the channels */
481 efx_for_each_channel(channel, efx) { 489 efx_for_each_channel(channel, efx) {
482 EFX_LOG(channel->efx, "init chan %d\n", channel->channel); 490 netif_dbg(channel->efx, drv, channel->efx->net_dev,
491 "init chan %d\n", channel->channel);
483 492
484 efx_init_eventq(channel); 493 efx_init_eventq(channel);
485 494
@@ -506,7 +515,8 @@ static void efx_start_channel(struct efx_channel *channel)
506{ 515{
507 struct efx_rx_queue *rx_queue; 516 struct efx_rx_queue *rx_queue;
508 517
509 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel); 518 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
519 "starting chan %d\n", channel->channel);
510 520
511 /* The interrupt handler for this channel may set work_pending 521 /* The interrupt handler for this channel may set work_pending
512 * as soon as we enable it. Make sure it's cleared before 522 * as soon as we enable it. Make sure it's cleared before
@@ -515,11 +525,11 @@ static void efx_start_channel(struct efx_channel *channel)
515 channel->enabled = true; 525 channel->enabled = true;
516 smp_wmb(); 526 smp_wmb();
517 527
518 napi_enable(&channel->napi_str); 528 /* Fill the queues before enabling NAPI */
519
520 /* Load up RX descriptors */
521 efx_for_each_channel_rx_queue(rx_queue, channel) 529 efx_for_each_channel_rx_queue(rx_queue, channel)
522 efx_fast_push_rx_descriptors(rx_queue); 530 efx_fast_push_rx_descriptors(rx_queue);
531
532 napi_enable(&channel->napi_str);
523} 533}
524 534
525/* This disables event queue processing and packet transmission. 535/* This disables event queue processing and packet transmission.
@@ -528,21 +538,14 @@ static void efx_start_channel(struct efx_channel *channel)
528 */ 538 */
529static void efx_stop_channel(struct efx_channel *channel) 539static void efx_stop_channel(struct efx_channel *channel)
530{ 540{
531 struct efx_rx_queue *rx_queue;
532
533 if (!channel->enabled) 541 if (!channel->enabled)
534 return; 542 return;
535 543
536 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel); 544 netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
545 "stop chan %d\n", channel->channel);
537 546
538 channel->enabled = false; 547 channel->enabled = false;
539 napi_disable(&channel->napi_str); 548 napi_disable(&channel->napi_str);
540
541 /* Ensure that any worker threads have exited or will be no-ops */
542 efx_for_each_channel_rx_queue(rx_queue, channel) {
543 spin_lock_bh(&rx_queue->add_lock);
544 spin_unlock_bh(&rx_queue->add_lock);
545 }
546} 549}
547 550
548static void efx_fini_channels(struct efx_nic *efx) 551static void efx_fini_channels(struct efx_nic *efx)
@@ -556,13 +559,24 @@ static void efx_fini_channels(struct efx_nic *efx)
556 BUG_ON(efx->port_enabled); 559 BUG_ON(efx->port_enabled);
557 560
558 rc = efx_nic_flush_queues(efx); 561 rc = efx_nic_flush_queues(efx);
559 if (rc) 562 if (rc && EFX_WORKAROUND_7803(efx)) {
560 EFX_ERR(efx, "failed to flush queues\n"); 563 /* Schedule a reset to recover from the flush failure. The
561 else 564 * descriptor caches reference memory we're about to free,
562 EFX_LOG(efx, "successfully flushed all queues\n"); 565 * but falcon_reconfigure_mac_wrapper() won't reconnect
566 * the MACs because of the pending reset. */
567 netif_err(efx, drv, efx->net_dev,
568 "Resetting to recover from flush failure\n");
569 efx_schedule_reset(efx, RESET_TYPE_ALL);
570 } else if (rc) {
571 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
572 } else {
573 netif_dbg(efx, drv, efx->net_dev,
574 "successfully flushed all queues\n");
575 }
563 576
564 efx_for_each_channel(channel, efx) { 577 efx_for_each_channel(channel, efx) {
565 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); 578 netif_dbg(channel->efx, drv, channel->efx->net_dev,
579 "shut down chan %d\n", channel->channel);
566 580
567 efx_for_each_channel_rx_queue(rx_queue, channel) 581 efx_for_each_channel_rx_queue(rx_queue, channel)
568 efx_fini_rx_queue(rx_queue); 582 efx_fini_rx_queue(rx_queue);
@@ -577,7 +591,8 @@ static void efx_remove_channel(struct efx_channel *channel)
577 struct efx_tx_queue *tx_queue; 591 struct efx_tx_queue *tx_queue;
578 struct efx_rx_queue *rx_queue; 592 struct efx_rx_queue *rx_queue;
579 593
580 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel); 594 netif_dbg(channel->efx, drv, channel->efx->net_dev,
595 "destroy chan %d\n", channel->channel);
581 596
582 efx_for_each_channel_rx_queue(rx_queue, channel) 597 efx_for_each_channel_rx_queue(rx_queue, channel)
583 efx_remove_rx_queue(rx_queue); 598 efx_remove_rx_queue(rx_queue);
@@ -586,9 +601,9 @@ static void efx_remove_channel(struct efx_channel *channel)
586 efx_remove_eventq(channel); 601 efx_remove_eventq(channel);
587} 602}
588 603
589void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) 604void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
590{ 605{
591 queue_delayed_work(refill_workqueue, &rx_queue->work, delay); 606 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
592} 607}
593 608
594/************************************************************************** 609/**************************************************************************
@@ -628,12 +643,13 @@ void efx_link_status_changed(struct efx_nic *efx)
628 643
629 /* Status message for kernel log */ 644 /* Status message for kernel log */
630 if (link_state->up) { 645 if (link_state->up) {
631 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", 646 netif_info(efx, link, efx->net_dev,
632 link_state->speed, link_state->fd ? "full" : "half", 647 "link up at %uMbps %s-duplex (MTU %d)%s\n",
633 efx->net_dev->mtu, 648 link_state->speed, link_state->fd ? "full" : "half",
634 (efx->promiscuous ? " [PROMISC]" : "")); 649 efx->net_dev->mtu,
650 (efx->promiscuous ? " [PROMISC]" : ""));
635 } else { 651 } else {
636 EFX_INFO(efx, "link down\n"); 652 netif_info(efx, link, efx->net_dev, "link down\n");
637 } 653 }
638 654
639} 655}
@@ -737,7 +753,7 @@ static int efx_probe_port(struct efx_nic *efx)
737{ 753{
738 int rc; 754 int rc;
739 755
740 EFX_LOG(efx, "create port\n"); 756 netif_dbg(efx, probe, efx->net_dev, "create port\n");
741 757
742 if (phy_flash_cfg) 758 if (phy_flash_cfg)
743 efx->phy_mode = PHY_MODE_SPECIAL; 759 efx->phy_mode = PHY_MODE_SPECIAL;
@@ -751,15 +767,16 @@ static int efx_probe_port(struct efx_nic *efx)
751 if (is_valid_ether_addr(efx->mac_address)) { 767 if (is_valid_ether_addr(efx->mac_address)) {
752 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 768 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
753 } else { 769 } else {
754 EFX_ERR(efx, "invalid MAC address %pM\n", 770 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
755 efx->mac_address); 771 efx->mac_address);
756 if (!allow_bad_hwaddr) { 772 if (!allow_bad_hwaddr) {
757 rc = -EINVAL; 773 rc = -EINVAL;
758 goto err; 774 goto err;
759 } 775 }
760 random_ether_addr(efx->net_dev->dev_addr); 776 random_ether_addr(efx->net_dev->dev_addr);
761 EFX_INFO(efx, "using locally-generated MAC %pM\n", 777 netif_info(efx, probe, efx->net_dev,
762 efx->net_dev->dev_addr); 778 "using locally-generated MAC %pM\n",
779 efx->net_dev->dev_addr);
763 } 780 }
764 781
765 return 0; 782 return 0;
@@ -773,7 +790,7 @@ static int efx_init_port(struct efx_nic *efx)
773{ 790{
774 int rc; 791 int rc;
775 792
776 EFX_LOG(efx, "init port\n"); 793 netif_dbg(efx, drv, efx->net_dev, "init port\n");
777 794
778 mutex_lock(&efx->mac_lock); 795 mutex_lock(&efx->mac_lock);
779 796
@@ -804,7 +821,7 @@ fail1:
804 821
805static void efx_start_port(struct efx_nic *efx) 822static void efx_start_port(struct efx_nic *efx)
806{ 823{
807 EFX_LOG(efx, "start port\n"); 824 netif_dbg(efx, ifup, efx->net_dev, "start port\n");
808 BUG_ON(efx->port_enabled); 825 BUG_ON(efx->port_enabled);
809 826
810 mutex_lock(&efx->mac_lock); 827 mutex_lock(&efx->mac_lock);
@@ -821,7 +838,7 @@ static void efx_start_port(struct efx_nic *efx)
821/* Prevent efx_mac_work() and efx_monitor() from working */ 838/* Prevent efx_mac_work() and efx_monitor() from working */
822static void efx_stop_port(struct efx_nic *efx) 839static void efx_stop_port(struct efx_nic *efx)
823{ 840{
824 EFX_LOG(efx, "stop port\n"); 841 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
825 842
826 mutex_lock(&efx->mac_lock); 843 mutex_lock(&efx->mac_lock);
827 efx->port_enabled = false; 844 efx->port_enabled = false;
@@ -836,7 +853,7 @@ static void efx_stop_port(struct efx_nic *efx)
836 853
837static void efx_fini_port(struct efx_nic *efx) 854static void efx_fini_port(struct efx_nic *efx)
838{ 855{
839 EFX_LOG(efx, "shut down port\n"); 856 netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
840 857
841 if (!efx->port_initialized) 858 if (!efx->port_initialized)
842 return; 859 return;
@@ -850,7 +867,7 @@ static void efx_fini_port(struct efx_nic *efx)
850 867
851static void efx_remove_port(struct efx_nic *efx) 868static void efx_remove_port(struct efx_nic *efx)
852{ 869{
853 EFX_LOG(efx, "destroying port\n"); 870 netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
854 871
855 efx->type->remove_port(efx); 872 efx->type->remove_port(efx);
856} 873}
@@ -868,11 +885,12 @@ static int efx_init_io(struct efx_nic *efx)
868 dma_addr_t dma_mask = efx->type->max_dma_mask; 885 dma_addr_t dma_mask = efx->type->max_dma_mask;
869 int rc; 886 int rc;
870 887
871 EFX_LOG(efx, "initialising I/O\n"); 888 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
872 889
873 rc = pci_enable_device(pci_dev); 890 rc = pci_enable_device(pci_dev);
874 if (rc) { 891 if (rc) {
875 EFX_ERR(efx, "failed to enable PCI device\n"); 892 netif_err(efx, probe, efx->net_dev,
893 "failed to enable PCI device\n");
876 goto fail1; 894 goto fail1;
877 } 895 }
878 896
@@ -890,39 +908,45 @@ static int efx_init_io(struct efx_nic *efx)
890 dma_mask >>= 1; 908 dma_mask >>= 1;
891 } 909 }
892 if (rc) { 910 if (rc) {
893 EFX_ERR(efx, "could not find a suitable DMA mask\n"); 911 netif_err(efx, probe, efx->net_dev,
912 "could not find a suitable DMA mask\n");
894 goto fail2; 913 goto fail2;
895 } 914 }
896 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask); 915 netif_dbg(efx, probe, efx->net_dev,
916 "using DMA mask %llx\n", (unsigned long long) dma_mask);
897 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); 917 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
898 if (rc) { 918 if (rc) {
899 /* pci_set_consistent_dma_mask() is not *allowed* to 919 /* pci_set_consistent_dma_mask() is not *allowed* to
900 * fail with a mask that pci_set_dma_mask() accepted, 920 * fail with a mask that pci_set_dma_mask() accepted,
901 * but just in case... 921 * but just in case...
902 */ 922 */
903 EFX_ERR(efx, "failed to set consistent DMA mask\n"); 923 netif_err(efx, probe, efx->net_dev,
924 "failed to set consistent DMA mask\n");
904 goto fail2; 925 goto fail2;
905 } 926 }
906 927
907 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); 928 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
908 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); 929 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
909 if (rc) { 930 if (rc) {
910 EFX_ERR(efx, "request for memory BAR failed\n"); 931 netif_err(efx, probe, efx->net_dev,
932 "request for memory BAR failed\n");
911 rc = -EIO; 933 rc = -EIO;
912 goto fail3; 934 goto fail3;
913 } 935 }
914 efx->membase = ioremap_nocache(efx->membase_phys, 936 efx->membase = ioremap_nocache(efx->membase_phys,
915 efx->type->mem_map_size); 937 efx->type->mem_map_size);
916 if (!efx->membase) { 938 if (!efx->membase) {
917 EFX_ERR(efx, "could not map memory BAR at %llx+%x\n", 939 netif_err(efx, probe, efx->net_dev,
918 (unsigned long long)efx->membase_phys, 940 "could not map memory BAR at %llx+%x\n",
919 efx->type->mem_map_size); 941 (unsigned long long)efx->membase_phys,
942 efx->type->mem_map_size);
920 rc = -ENOMEM; 943 rc = -ENOMEM;
921 goto fail4; 944 goto fail4;
922 } 945 }
923 EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n", 946 netif_dbg(efx, probe, efx->net_dev,
924 (unsigned long long)efx->membase_phys, 947 "memory BAR at %llx+%x (virtual %p)\n",
925 efx->type->mem_map_size, efx->membase); 948 (unsigned long long)efx->membase_phys,
949 efx->type->mem_map_size, efx->membase);
926 950
927 return 0; 951 return 0;
928 952
@@ -938,7 +962,7 @@ static int efx_init_io(struct efx_nic *efx)
938 962
939static void efx_fini_io(struct efx_nic *efx) 963static void efx_fini_io(struct efx_nic *efx)
940{ 964{
941 EFX_LOG(efx, "shutting down I/O\n"); 965 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
942 966
943 if (efx->membase) { 967 if (efx->membase) {
944 iounmap(efx->membase); 968 iounmap(efx->membase);
@@ -1002,9 +1026,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1002 xentries[i].entry = i; 1026 xentries[i].entry = i;
1003 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); 1027 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
1004 if (rc > 0) { 1028 if (rc > 0) {
1005 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors" 1029 netif_err(efx, drv, efx->net_dev,
1006 " available (%d < %d).\n", rc, n_channels); 1030 "WARNING: Insufficient MSI-X vectors"
1007 EFX_ERR(efx, "WARNING: Performance may be reduced.\n"); 1031 " available (%d < %d).\n", rc, n_channels);
1032 netif_err(efx, drv, efx->net_dev,
1033 "WARNING: Performance may be reduced.\n");
1008 EFX_BUG_ON_PARANOID(rc >= n_channels); 1034 EFX_BUG_ON_PARANOID(rc >= n_channels);
1009 n_channels = rc; 1035 n_channels = rc;
1010 rc = pci_enable_msix(efx->pci_dev, xentries, 1036 rc = pci_enable_msix(efx->pci_dev, xentries,
@@ -1028,7 +1054,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1028 } else { 1054 } else {
1029 /* Fall back to single channel MSI */ 1055 /* Fall back to single channel MSI */
1030 efx->interrupt_mode = EFX_INT_MODE_MSI; 1056 efx->interrupt_mode = EFX_INT_MODE_MSI;
1031 EFX_ERR(efx, "could not enable MSI-X\n"); 1057 netif_err(efx, drv, efx->net_dev,
1058 "could not enable MSI-X\n");
1032 } 1059 }
1033 } 1060 }
1034 1061
@@ -1041,7 +1068,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1041 if (rc == 0) { 1068 if (rc == 0) {
1042 efx->channel[0].irq = efx->pci_dev->irq; 1069 efx->channel[0].irq = efx->pci_dev->irq;
1043 } else { 1070 } else {
1044 EFX_ERR(efx, "could not enable MSI\n"); 1071 netif_err(efx, drv, efx->net_dev,
1072 "could not enable MSI\n");
1045 efx->interrupt_mode = EFX_INT_MODE_LEGACY; 1073 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
1046 } 1074 }
1047 } 1075 }
@@ -1093,9 +1121,10 @@ static void efx_set_channels(struct efx_nic *efx)
1093 1121
1094static int efx_probe_nic(struct efx_nic *efx) 1122static int efx_probe_nic(struct efx_nic *efx)
1095{ 1123{
1124 size_t i;
1096 int rc; 1125 int rc;
1097 1126
1098 EFX_LOG(efx, "creating NIC\n"); 1127 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1099 1128
1100 /* Carry out hardware-type specific initialisation */ 1129 /* Carry out hardware-type specific initialisation */
1101 rc = efx->type->probe(efx); 1130 rc = efx->type->probe(efx);
@@ -1106,6 +1135,11 @@ static int efx_probe_nic(struct efx_nic *efx)
1106 * in MSI-X interrupts. */ 1135 * in MSI-X interrupts. */
1107 efx_probe_interrupts(efx); 1136 efx_probe_interrupts(efx);
1108 1137
1138 if (efx->n_channels > 1)
1139 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
1140 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1141 efx->rx_indir_table[i] = i % efx->n_rx_channels;
1142
1109 efx_set_channels(efx); 1143 efx_set_channels(efx);
1110 efx->net_dev->real_num_tx_queues = efx->n_tx_channels; 1144 efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
1111 1145
@@ -1117,7 +1151,7 @@ static int efx_probe_nic(struct efx_nic *efx)
1117 1151
1118static void efx_remove_nic(struct efx_nic *efx) 1152static void efx_remove_nic(struct efx_nic *efx)
1119{ 1153{
1120 EFX_LOG(efx, "destroying NIC\n"); 1154 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1121 1155
1122 efx_remove_interrupts(efx); 1156 efx_remove_interrupts(efx);
1123 efx->type->remove(efx); 1157 efx->type->remove(efx);
@@ -1137,14 +1171,14 @@ static int efx_probe_all(struct efx_nic *efx)
1137 /* Create NIC */ 1171 /* Create NIC */
1138 rc = efx_probe_nic(efx); 1172 rc = efx_probe_nic(efx);
1139 if (rc) { 1173 if (rc) {
1140 EFX_ERR(efx, "failed to create NIC\n"); 1174 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1141 goto fail1; 1175 goto fail1;
1142 } 1176 }
1143 1177
1144 /* Create port */ 1178 /* Create port */
1145 rc = efx_probe_port(efx); 1179 rc = efx_probe_port(efx);
1146 if (rc) { 1180 if (rc) {
1147 EFX_ERR(efx, "failed to create port\n"); 1181 netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1148 goto fail2; 1182 goto fail2;
1149 } 1183 }
1150 1184
@@ -1152,8 +1186,9 @@ static int efx_probe_all(struct efx_nic *efx)
1152 efx_for_each_channel(channel, efx) { 1186 efx_for_each_channel(channel, efx) {
1153 rc = efx_probe_channel(channel); 1187 rc = efx_probe_channel(channel);
1154 if (rc) { 1188 if (rc) {
1155 EFX_ERR(efx, "failed to create channel %d\n", 1189 netif_err(efx, probe, efx->net_dev,
1156 channel->channel); 1190 "failed to create channel %d\n",
1191 channel->channel);
1157 goto fail3; 1192 goto fail3;
1158 } 1193 }
1159 } 1194 }
@@ -1233,15 +1268,8 @@ static void efx_start_all(struct efx_nic *efx)
1233 * since we're holding the rtnl_lock at this point. */ 1268 * since we're holding the rtnl_lock at this point. */
1234static void efx_flush_all(struct efx_nic *efx) 1269static void efx_flush_all(struct efx_nic *efx)
1235{ 1270{
1236 struct efx_rx_queue *rx_queue;
1237
1238 /* Make sure the hardware monitor is stopped */ 1271 /* Make sure the hardware monitor is stopped */
1239 cancel_delayed_work_sync(&efx->monitor_work); 1272 cancel_delayed_work_sync(&efx->monitor_work);
1240
1241 /* Ensure that all RX slow refills are complete. */
1242 efx_for_each_rx_queue(rx_queue, efx)
1243 cancel_delayed_work_sync(&rx_queue->work);
1244
1245 /* Stop scheduled port reconfigurations */ 1273 /* Stop scheduled port reconfigurations */
1246 cancel_work_sync(&efx->mac_work); 1274 cancel_work_sync(&efx->mac_work);
1247} 1275}
@@ -1356,8 +1384,9 @@ static void efx_monitor(struct work_struct *data)
1356 struct efx_nic *efx = container_of(data, struct efx_nic, 1384 struct efx_nic *efx = container_of(data, struct efx_nic,
1357 monitor_work.work); 1385 monitor_work.work);
1358 1386
1359 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", 1387 netif_vdbg(efx, timer, efx->net_dev,
1360 raw_smp_processor_id()); 1388 "hardware monitor executing on CPU %d\n",
1389 raw_smp_processor_id());
1361 BUG_ON(efx->type->monitor == NULL); 1390 BUG_ON(efx->type->monitor == NULL);
1362 1391
1363 /* If the mac_lock is already held then it is likely a port 1392 /* If the mac_lock is already held then it is likely a port
@@ -1464,8 +1493,8 @@ static int efx_net_open(struct net_device *net_dev)
1464 struct efx_nic *efx = netdev_priv(net_dev); 1493 struct efx_nic *efx = netdev_priv(net_dev);
1465 EFX_ASSERT_RESET_SERIALISED(efx); 1494 EFX_ASSERT_RESET_SERIALISED(efx);
1466 1495
1467 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, 1496 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
1468 raw_smp_processor_id()); 1497 raw_smp_processor_id());
1469 1498
1470 if (efx->state == STATE_DISABLED) 1499 if (efx->state == STATE_DISABLED)
1471 return -EIO; 1500 return -EIO;
@@ -1490,8 +1519,8 @@ static int efx_net_stop(struct net_device *net_dev)
1490{ 1519{
1491 struct efx_nic *efx = netdev_priv(net_dev); 1520 struct efx_nic *efx = netdev_priv(net_dev);
1492 1521
1493 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, 1522 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
1494 raw_smp_processor_id()); 1523 raw_smp_processor_id());
1495 1524
1496 if (efx->state != STATE_DISABLED) { 1525 if (efx->state != STATE_DISABLED) {
1497 /* Stop the device and flush all the channels */ 1526 /* Stop the device and flush all the channels */
@@ -1504,11 +1533,10 @@ static int efx_net_stop(struct net_device *net_dev)
1504} 1533}
1505 1534
1506/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1535/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1507static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1536static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
1508{ 1537{
1509 struct efx_nic *efx = netdev_priv(net_dev); 1538 struct efx_nic *efx = netdev_priv(net_dev);
1510 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1539 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1511 struct net_device_stats *stats = &net_dev->stats;
1512 1540
1513 spin_lock_bh(&efx->stats_lock); 1541 spin_lock_bh(&efx->stats_lock);
1514 efx->type->update_stats(efx); 1542 efx->type->update_stats(efx);
@@ -1530,11 +1558,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1530 stats->tx_window_errors = mac_stats->tx_late_collision; 1558 stats->tx_window_errors = mac_stats->tx_late_collision;
1531 1559
1532 stats->rx_errors = (stats->rx_length_errors + 1560 stats->rx_errors = (stats->rx_length_errors +
1533 stats->rx_over_errors +
1534 stats->rx_crc_errors + 1561 stats->rx_crc_errors +
1535 stats->rx_frame_errors + 1562 stats->rx_frame_errors +
1536 stats->rx_fifo_errors +
1537 stats->rx_missed_errors +
1538 mac_stats->rx_symbol_error); 1563 mac_stats->rx_symbol_error);
1539 stats->tx_errors = (stats->tx_window_errors + 1564 stats->tx_errors = (stats->tx_window_errors +
1540 mac_stats->tx_bad); 1565 mac_stats->tx_bad);
@@ -1547,8 +1572,9 @@ static void efx_watchdog(struct net_device *net_dev)
1547{ 1572{
1548 struct efx_nic *efx = netdev_priv(net_dev); 1573 struct efx_nic *efx = netdev_priv(net_dev);
1549 1574
1550 EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n", 1575 netif_err(efx, tx_err, efx->net_dev,
1551 efx->port_enabled); 1576 "TX stuck with port_enabled=%d: resetting channels\n",
1577 efx->port_enabled);
1552 1578
1553 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 1579 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
1554} 1580}
@@ -1567,7 +1593,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1567 1593
1568 efx_stop_all(efx); 1594 efx_stop_all(efx);
1569 1595
1570 EFX_LOG(efx, "changing MTU to %d\n", new_mtu); 1596 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1571 1597
1572 efx_fini_channels(efx); 1598 efx_fini_channels(efx);
1573 1599
@@ -1593,8 +1619,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1593 EFX_ASSERT_RESET_SERIALISED(efx); 1619 EFX_ASSERT_RESET_SERIALISED(efx);
1594 1620
1595 if (!is_valid_ether_addr(new_addr)) { 1621 if (!is_valid_ether_addr(new_addr)) {
1596 EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n", 1622 netif_err(efx, drv, efx->net_dev,
1597 new_addr); 1623 "invalid ethernet MAC address requested: %pM\n",
1624 new_addr);
1598 return -EINVAL; 1625 return -EINVAL;
1599 } 1626 }
1600 1627
@@ -1645,7 +1672,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1645static const struct net_device_ops efx_netdev_ops = { 1672static const struct net_device_ops efx_netdev_ops = {
1646 .ndo_open = efx_net_open, 1673 .ndo_open = efx_net_open,
1647 .ndo_stop = efx_net_stop, 1674 .ndo_stop = efx_net_stop,
1648 .ndo_get_stats = efx_net_stats, 1675 .ndo_get_stats64 = efx_net_stats,
1649 .ndo_tx_timeout = efx_watchdog, 1676 .ndo_tx_timeout = efx_watchdog,
1650 .ndo_start_xmit = efx_hard_start_xmit, 1677 .ndo_start_xmit = efx_hard_start_xmit,
1651 .ndo_validate_addr = eth_validate_addr, 1678 .ndo_validate_addr = eth_validate_addr,
@@ -1697,7 +1724,6 @@ static int efx_register_netdev(struct efx_nic *efx)
1697 net_dev->watchdog_timeo = 5 * HZ; 1724 net_dev->watchdog_timeo = 5 * HZ;
1698 net_dev->irq = efx->pci_dev->irq; 1725 net_dev->irq = efx->pci_dev->irq;
1699 net_dev->netdev_ops = &efx_netdev_ops; 1726 net_dev->netdev_ops = &efx_netdev_ops;
1700 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1701 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 1727 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1702 1728
1703 /* Clear MAC statistics */ 1729 /* Clear MAC statistics */
@@ -1722,7 +1748,8 @@ static int efx_register_netdev(struct efx_nic *efx)
1722 1748
1723 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 1749 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
1724 if (rc) { 1750 if (rc) {
1725 EFX_ERR(efx, "failed to init net dev attributes\n"); 1751 netif_err(efx, drv, efx->net_dev,
1752 "failed to init net dev attributes\n");
1726 goto fail_registered; 1753 goto fail_registered;
1727 } 1754 }
1728 1755
@@ -1730,7 +1757,7 @@ static int efx_register_netdev(struct efx_nic *efx)
1730 1757
1731fail_locked: 1758fail_locked:
1732 rtnl_unlock(); 1759 rtnl_unlock();
1733 EFX_ERR(efx, "could not register net dev\n"); 1760 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
1734 return rc; 1761 return rc;
1735 1762
1736fail_registered: 1763fail_registered:
@@ -1795,7 +1822,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
1795 1822
1796 rc = efx->type->init(efx); 1823 rc = efx->type->init(efx);
1797 if (rc) { 1824 if (rc) {
1798 EFX_ERR(efx, "failed to initialise NIC\n"); 1825 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
1799 goto fail; 1826 goto fail;
1800 } 1827 }
1801 1828
@@ -1807,7 +1834,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
1807 if (rc) 1834 if (rc)
1808 goto fail; 1835 goto fail;
1809 if (efx->phy_op->reconfigure(efx)) 1836 if (efx->phy_op->reconfigure(efx))
1810 EFX_ERR(efx, "could not restore PHY settings\n"); 1837 netif_err(efx, drv, efx->net_dev,
1838 "could not restore PHY settings\n");
1811 } 1839 }
1812 1840
1813 efx->mac_op->reconfigure(efx); 1841 efx->mac_op->reconfigure(efx);
@@ -1840,13 +1868,14 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
1840 int rc, rc2; 1868 int rc, rc2;
1841 bool disabled; 1869 bool disabled;
1842 1870
1843 EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method)); 1871 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
1872 RESET_TYPE(method));
1844 1873
1845 efx_reset_down(efx, method); 1874 efx_reset_down(efx, method);
1846 1875
1847 rc = efx->type->reset(efx, method); 1876 rc = efx->type->reset(efx, method);
1848 if (rc) { 1877 if (rc) {
1849 EFX_ERR(efx, "failed to reset hardware\n"); 1878 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
1850 goto out; 1879 goto out;
1851 } 1880 }
1852 1881
@@ -1871,10 +1900,10 @@ out:
1871 1900
1872 if (disabled) { 1901 if (disabled) {
1873 dev_close(efx->net_dev); 1902 dev_close(efx->net_dev);
1874 EFX_ERR(efx, "has been disabled\n"); 1903 netif_err(efx, drv, efx->net_dev, "has been disabled\n");
1875 efx->state = STATE_DISABLED; 1904 efx->state = STATE_DISABLED;
1876 } else { 1905 } else {
1877 EFX_LOG(efx, "reset complete\n"); 1906 netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
1878 } 1907 }
1879 return rc; 1908 return rc;
1880} 1909}
@@ -1886,10 +1915,14 @@ static void efx_reset_work(struct work_struct *data)
1886{ 1915{
1887 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 1916 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
1888 1917
1918 if (efx->reset_pending == RESET_TYPE_NONE)
1919 return;
1920
1889 /* If we're not RUNNING then don't reset. Leave the reset_pending 1921 /* If we're not RUNNING then don't reset. Leave the reset_pending
1890 * flag set so that efx_pci_probe_main will be retried */ 1922 * flag set so that efx_pci_probe_main will be retried */
1891 if (efx->state != STATE_RUNNING) { 1923 if (efx->state != STATE_RUNNING) {
1892 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); 1924 netif_info(efx, drv, efx->net_dev,
1925 "scheduled reset quenched. NIC not RUNNING\n");
1893 return; 1926 return;
1894 } 1927 }
1895 1928
@@ -1903,7 +1936,8 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1903 enum reset_type method; 1936 enum reset_type method;
1904 1937
1905 if (efx->reset_pending != RESET_TYPE_NONE) { 1938 if (efx->reset_pending != RESET_TYPE_NONE) {
1906 EFX_INFO(efx, "quenching already scheduled reset\n"); 1939 netif_info(efx, drv, efx->net_dev,
1940 "quenching already scheduled reset\n");
1907 return; 1941 return;
1908 } 1942 }
1909 1943
@@ -1927,10 +1961,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1927 } 1961 }
1928 1962
1929 if (method != type) 1963 if (method != type)
1930 EFX_LOG(efx, "scheduling %s reset for %s\n", 1964 netif_dbg(efx, drv, efx->net_dev,
1931 RESET_TYPE(method), RESET_TYPE(type)); 1965 "scheduling %s reset for %s\n",
1966 RESET_TYPE(method), RESET_TYPE(type));
1932 else 1967 else
1933 EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method)); 1968 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
1969 RESET_TYPE(method));
1934 1970
1935 efx->reset_pending = method; 1971 efx->reset_pending = method;
1936 1972
@@ -2017,6 +2053,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2017 INIT_WORK(&efx->reset_work, efx_reset_work); 2053 INIT_WORK(&efx->reset_work, efx_reset_work);
2018 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 2054 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
2019 efx->pci_dev = pci_dev; 2055 efx->pci_dev = pci_dev;
2056 efx->msg_enable = debug;
2020 efx->state = STATE_INIT; 2057 efx->state = STATE_INIT;
2021 efx->reset_pending = RESET_TYPE_NONE; 2058 efx->reset_pending = RESET_TYPE_NONE;
2022 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2059 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
@@ -2052,8 +2089,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2052 rx_queue->queue = i; 2089 rx_queue->queue = i;
2053 rx_queue->channel = &efx->channel[0]; /* for safety */ 2090 rx_queue->channel = &efx->channel[0]; /* for safety */
2054 rx_queue->buffer = NULL; 2091 rx_queue->buffer = NULL;
2055 spin_lock_init(&rx_queue->add_lock); 2092 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
2056 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work); 2093 (unsigned long)rx_queue);
2057 } 2094 }
2058 2095
2059 efx->type = type; 2096 efx->type = type;
@@ -2136,7 +2173,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2136 efx_pci_remove_main(efx); 2173 efx_pci_remove_main(efx);
2137 2174
2138 efx_fini_io(efx); 2175 efx_fini_io(efx);
2139 EFX_LOG(efx, "shutdown successful\n"); 2176 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2140 2177
2141 pci_set_drvdata(pci_dev, NULL); 2178 pci_set_drvdata(pci_dev, NULL);
2142 efx_fini_struct(efx); 2179 efx_fini_struct(efx);
@@ -2161,13 +2198,15 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2161 2198
2162 rc = efx->type->init(efx); 2199 rc = efx->type->init(efx);
2163 if (rc) { 2200 if (rc) {
2164 EFX_ERR(efx, "failed to initialise NIC\n"); 2201 netif_err(efx, probe, efx->net_dev,
2202 "failed to initialise NIC\n");
2165 goto fail3; 2203 goto fail3;
2166 } 2204 }
2167 2205
2168 rc = efx_init_port(efx); 2206 rc = efx_init_port(efx);
2169 if (rc) { 2207 if (rc) {
2170 EFX_ERR(efx, "failed to initialise port\n"); 2208 netif_err(efx, probe, efx->net_dev,
2209 "failed to initialise port\n");
2171 goto fail4; 2210 goto fail4;
2172 } 2211 }
2173 2212
@@ -2223,11 +2262,13 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2223 NETIF_F_HIGHDMA | NETIF_F_TSO); 2262 NETIF_F_HIGHDMA | NETIF_F_TSO);
2224 efx = netdev_priv(net_dev); 2263 efx = netdev_priv(net_dev);
2225 pci_set_drvdata(pci_dev, efx); 2264 pci_set_drvdata(pci_dev, efx);
2265 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2226 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2266 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2227 if (rc) 2267 if (rc)
2228 goto fail1; 2268 goto fail1;
2229 2269
2230 EFX_INFO(efx, "Solarflare Communications NIC detected\n"); 2270 netif_info(efx, probe, efx->net_dev,
2271 "Solarflare Communications NIC detected\n");
2231 2272
2232 /* Set up basic I/O (BAR mappings etc) */ 2273 /* Set up basic I/O (BAR mappings etc) */
2233 rc = efx_init_io(efx); 2274 rc = efx_init_io(efx);
@@ -2265,7 +2306,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2265 } 2306 }
2266 2307
2267 if (rc) { 2308 if (rc) {
2268 EFX_ERR(efx, "Could not reset NIC\n"); 2309 netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
2269 goto fail4; 2310 goto fail4;
2270 } 2311 }
2271 2312
@@ -2277,7 +2318,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2277 if (rc) 2318 if (rc)
2278 goto fail5; 2319 goto fail5;
2279 2320
2280 EFX_LOG(efx, "initialisation successful\n"); 2321 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2281 2322
2282 rtnl_lock(); 2323 rtnl_lock();
2283 efx_mtd_probe(efx); /* allowed to fail */ 2324 efx_mtd_probe(efx); /* allowed to fail */
@@ -2293,7 +2334,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2293 efx_fini_struct(efx); 2334 efx_fini_struct(efx);
2294 fail1: 2335 fail1:
2295 WARN_ON(rc > 0); 2336 WARN_ON(rc > 0);
2296 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); 2337 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
2297 free_netdev(net_dev); 2338 free_netdev(net_dev);
2298 return rc; 2339 return rc;
2299} 2340}
@@ -2332,6 +2373,9 @@ static int efx_pm_thaw(struct device *dev)
2332 2373
2333 efx->type->resume_wol(efx); 2374 efx->type->resume_wol(efx);
2334 2375
2376 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
2377 queue_work(reset_workqueue, &efx->reset_work);
2378
2335 return 0; 2379 return 0;
2336} 2380}
2337 2381
@@ -2394,7 +2438,7 @@ static struct dev_pm_ops efx_pm_ops = {
2394}; 2438};
2395 2439
2396static struct pci_driver efx_pci_driver = { 2440static struct pci_driver efx_pci_driver = {
2397 .name = EFX_DRIVER_NAME, 2441 .name = KBUILD_MODNAME,
2398 .id_table = efx_pci_table, 2442 .id_table = efx_pci_table,
2399 .probe = efx_pci_probe, 2443 .probe = efx_pci_probe,
2400 .remove = efx_pci_remove, 2444 .remove = efx_pci_remove,
@@ -2421,11 +2465,6 @@ static int __init efx_init_module(void)
2421 if (rc) 2465 if (rc)
2422 goto err_notifier; 2466 goto err_notifier;
2423 2467
2424 refill_workqueue = create_workqueue("sfc_refill");
2425 if (!refill_workqueue) {
2426 rc = -ENOMEM;
2427 goto err_refill;
2428 }
2429 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 2468 reset_workqueue = create_singlethread_workqueue("sfc_reset");
2430 if (!reset_workqueue) { 2469 if (!reset_workqueue) {
2431 rc = -ENOMEM; 2470 rc = -ENOMEM;
@@ -2441,8 +2480,6 @@ static int __init efx_init_module(void)
2441 err_pci: 2480 err_pci:
2442 destroy_workqueue(reset_workqueue); 2481 destroy_workqueue(reset_workqueue);
2443 err_reset: 2482 err_reset:
2444 destroy_workqueue(refill_workqueue);
2445 err_refill:
2446 unregister_netdevice_notifier(&efx_netdev_notifier); 2483 unregister_netdevice_notifier(&efx_netdev_notifier);
2447 err_notifier: 2484 err_notifier:
2448 return rc; 2485 return rc;
@@ -2454,7 +2491,6 @@ static void __exit efx_exit_module(void)
2454 2491
2455 pci_unregister_driver(&efx_pci_driver); 2492 pci_unregister_driver(&efx_pci_driver);
2456 destroy_workqueue(reset_workqueue); 2493 destroy_workqueue(reset_workqueue);
2457 destroy_workqueue(refill_workqueue);
2458 unregister_netdevice_notifier(&efx_netdev_notifier); 2494 unregister_netdevice_notifier(&efx_netdev_notifier);
2459 2495
2460} 2496}
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index ffd708c5304a..060dc952a0fd 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
47extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 47extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
48extern void efx_rx_strategy(struct efx_channel *channel); 48extern void efx_rx_strategy(struct efx_channel *channel);
49extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 49extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
50extern void efx_rx_work(struct work_struct *data); 50extern void efx_rx_slow_fill(unsigned long context);
51extern void __efx_rx_packet(struct efx_channel *channel, 51extern void __efx_rx_packet(struct efx_channel *channel,
52 struct efx_rx_buffer *rx_buf, bool checksummed); 52 struct efx_rx_buffer *rx_buf, bool checksummed);
53extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 53extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
54 unsigned int len, bool checksummed, bool discard); 54 unsigned int len, bool checksummed, bool discard);
55extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 55extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
56#define EFX_RXQ_SIZE 1024 56#define EFX_RXQ_SIZE 1024
57#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) 57#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
58 58
@@ -106,8 +106,9 @@ extern unsigned int efx_monitor_interval;
106 106
107static inline void efx_schedule_channel(struct efx_channel *channel) 107static inline void efx_schedule_channel(struct efx_channel *channel)
108{ 108{
109 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n", 109 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
110 channel->channel, raw_smp_processor_id()); 110 "channel %d scheduling NAPI poll on CPU%d\n",
111 channel->channel, raw_smp_processor_id());
111 channel->work_pending = true; 112 channel->work_pending = true;
112 113
113 napi_schedule(&channel->napi_str); 114 napi_schedule(&channel->napi_str);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 22026bfbc4c1..fd19d6ab97a2 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -218,8 +218,8 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
218 218
219 /* GMAC does not support 1000Mbps HD */ 219 /* GMAC does not support 1000Mbps HD */
220 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { 220 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
221 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" 221 netif_dbg(efx, drv, efx->net_dev,
222 " setting\n"); 222 "rejecting unsupported 1000Mbps HD setting\n");
223 return -EINVAL; 223 return -EINVAL;
224 } 224 }
225 225
@@ -234,7 +234,7 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
234{ 234{
235 struct efx_nic *efx = netdev_priv(net_dev); 235 struct efx_nic *efx = netdev_priv(net_dev);
236 236
237 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); 237 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
238 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 238 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
239 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 239 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
240 siena_print_fwver(efx, info->fw_version, 240 siena_print_fwver(efx, info->fw_version,
@@ -242,6 +242,32 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
242 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 242 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
243} 243}
244 244
245static int efx_ethtool_get_regs_len(struct net_device *net_dev)
246{
247 return efx_nic_get_regs_len(netdev_priv(net_dev));
248}
249
250static void efx_ethtool_get_regs(struct net_device *net_dev,
251 struct ethtool_regs *regs, void *buf)
252{
253 struct efx_nic *efx = netdev_priv(net_dev);
254
255 regs->version = efx->type->revision;
256 efx_nic_get_regs(efx, buf);
257}
258
259static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
260{
261 struct efx_nic *efx = netdev_priv(net_dev);
262 return efx->msg_enable;
263}
264
265static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
266{
267 struct efx_nic *efx = netdev_priv(net_dev);
268 efx->msg_enable = msg_enable;
269}
270
245/** 271/**
246 * efx_fill_test - fill in an individual self-test entry 272 * efx_fill_test - fill in an individual self-test entry
247 * @test_index: Index of the test 273 * @test_index: Index of the test
@@ -443,12 +469,13 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
443 struct efx_mac_stats *mac_stats = &efx->mac_stats; 469 struct efx_mac_stats *mac_stats = &efx->mac_stats;
444 struct efx_ethtool_stat *stat; 470 struct efx_ethtool_stat *stat;
445 struct efx_channel *channel; 471 struct efx_channel *channel;
472 struct rtnl_link_stats64 temp;
446 int i; 473 int i;
447 474
448 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); 475 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
449 476
450 /* Update MAC and NIC statistics */ 477 /* Update MAC and NIC statistics */
451 dev_get_stats(net_dev); 478 dev_get_stats(net_dev, &temp);
452 479
453 /* Fill detailed statistics buffer */ 480 /* Fill detailed statistics buffer */
454 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { 481 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
@@ -520,6 +547,14 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
520 return efx->rx_checksum_enabled; 547 return efx->rx_checksum_enabled;
521} 548}
522 549
550static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
551{
552 struct efx_nic *efx = netdev_priv(net_dev);
553 u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
554
555 return ethtool_op_set_flags(net_dev, data, supported);
556}
557
523static void efx_ethtool_self_test(struct net_device *net_dev, 558static void efx_ethtool_self_test(struct net_device *net_dev,
524 struct ethtool_test *test, u64 *data) 559 struct ethtool_test *test, u64 *data)
525{ 560{
@@ -539,7 +574,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
539 if (!already_up) { 574 if (!already_up) {
540 rc = dev_open(efx->net_dev); 575 rc = dev_open(efx->net_dev);
541 if (rc) { 576 if (rc) {
542 EFX_ERR(efx, "failed opening device.\n"); 577 netif_err(efx, drv, efx->net_dev,
578 "failed opening device.\n");
543 goto fail2; 579 goto fail2;
544 } 580 }
545 } 581 }
@@ -551,9 +587,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
551 if (!already_up) 587 if (!already_up)
552 dev_close(efx->net_dev); 588 dev_close(efx->net_dev);
553 589
554 EFX_LOG(efx, "%s %sline self-tests\n", 590 netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n",
555 rc == 0 ? "passed" : "failed", 591 rc == 0 ? "passed" : "failed",
556 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 592 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
557 593
558 fail2: 594 fail2:
559 fail1: 595 fail1:
@@ -679,8 +715,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
679 return -EOPNOTSUPP; 715 return -EOPNOTSUPP;
680 716
681 if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { 717 if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
682 EFX_ERR(efx, "invalid coalescing setting. " 718 netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. "
683 "Only rx/tx_coalesce_usecs_irq are supported\n"); 719 "Only rx/tx_coalesce_usecs_irq are supported\n");
684 return -EOPNOTSUPP; 720 return -EOPNOTSUPP;
685 } 721 }
686 722
@@ -692,8 +728,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
692 efx_for_each_tx_queue(tx_queue, efx) { 728 efx_for_each_tx_queue(tx_queue, efx) {
693 if ((tx_queue->channel->channel < efx->n_rx_channels) && 729 if ((tx_queue->channel->channel < efx->n_rx_channels) &&
694 tx_usecs) { 730 tx_usecs) {
695 EFX_ERR(efx, "Channel is shared. " 731 netif_err(efx, drv, efx->net_dev, "Channel is shared. "
696 "Only RX coalescing may be set\n"); 732 "Only RX coalescing may be set\n");
697 return -EOPNOTSUPP; 733 return -EOPNOTSUPP;
698 } 734 }
699 } 735 }
@@ -721,13 +757,15 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
721 (pause->autoneg ? EFX_FC_AUTO : 0)); 757 (pause->autoneg ? EFX_FC_AUTO : 0));
722 758
723 if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { 759 if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
724 EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n"); 760 netif_dbg(efx, drv, efx->net_dev,
761 "Flow control unsupported: tx ON rx OFF\n");
725 rc = -EINVAL; 762 rc = -EINVAL;
726 goto out; 763 goto out;
727 } 764 }
728 765
729 if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { 766 if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
730 EFX_LOG(efx, "Autonegotiation is disabled\n"); 767 netif_dbg(efx, drv, efx->net_dev,
768 "Autonegotiation is disabled\n");
731 rc = -EINVAL; 769 rc = -EINVAL;
732 goto out; 770 goto out;
733 } 771 }
@@ -758,8 +796,9 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
758 (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { 796 (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
759 rc = efx->phy_op->reconfigure(efx); 797 rc = efx->phy_op->reconfigure(efx);
760 if (rc) { 798 if (rc) {
761 EFX_ERR(efx, "Unable to advertise requested flow " 799 netif_err(efx, drv, efx->net_dev,
762 "control setting\n"); 800 "Unable to advertise requested flow "
801 "control setting\n");
763 goto out; 802 goto out;
764 } 803 }
765 } 804 }
@@ -830,10 +869,101 @@ extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
830 return efx_reset(efx, method); 869 return efx_reset(efx, method);
831} 870}
832 871
872static int
873efx_ethtool_get_rxnfc(struct net_device *net_dev,
874 struct ethtool_rxnfc *info, void *rules __always_unused)
875{
876 struct efx_nic *efx = netdev_priv(net_dev);
877
878 switch (info->cmd) {
879 case ETHTOOL_GRXRINGS:
880 info->data = efx->n_rx_channels;
881 return 0;
882
883 case ETHTOOL_GRXFH: {
884 unsigned min_revision = 0;
885
886 info->data = 0;
887 switch (info->flow_type) {
888 case TCP_V4_FLOW:
889 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
890 /* fall through */
891 case UDP_V4_FLOW:
892 case SCTP_V4_FLOW:
893 case AH_ESP_V4_FLOW:
894 case IPV4_FLOW:
895 info->data |= RXH_IP_SRC | RXH_IP_DST;
896 min_revision = EFX_REV_FALCON_B0;
897 break;
898 case TCP_V6_FLOW:
899 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
900 /* fall through */
901 case UDP_V6_FLOW:
902 case SCTP_V6_FLOW:
903 case AH_ESP_V6_FLOW:
904 case IPV6_FLOW:
905 info->data |= RXH_IP_SRC | RXH_IP_DST;
906 min_revision = EFX_REV_SIENA_A0;
907 break;
908 default:
909 break;
910 }
911 if (efx_nic_rev(efx) < min_revision)
912 info->data = 0;
913 return 0;
914 }
915
916 default:
917 return -EOPNOTSUPP;
918 }
919}
920
921static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
922 struct ethtool_rxfh_indir *indir)
923{
924 struct efx_nic *efx = netdev_priv(net_dev);
925 size_t copy_size =
926 min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
927
928 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
929 return -EOPNOTSUPP;
930
931 indir->size = ARRAY_SIZE(efx->rx_indir_table);
932 memcpy(indir->ring_index, efx->rx_indir_table,
933 copy_size * sizeof(indir->ring_index[0]));
934 return 0;
935}
936
937static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
938 const struct ethtool_rxfh_indir *indir)
939{
940 struct efx_nic *efx = netdev_priv(net_dev);
941 size_t i;
942
943 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
944 return -EOPNOTSUPP;
945
946 /* Validate size and indices */
947 if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
948 return -EINVAL;
949 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
950 if (indir->ring_index[i] >= efx->n_rx_channels)
951 return -EINVAL;
952
953 memcpy(efx->rx_indir_table, indir->ring_index,
954 sizeof(efx->rx_indir_table));
955 efx_nic_push_rx_indir_table(efx);
956 return 0;
957}
958
833const struct ethtool_ops efx_ethtool_ops = { 959const struct ethtool_ops efx_ethtool_ops = {
834 .get_settings = efx_ethtool_get_settings, 960 .get_settings = efx_ethtool_get_settings,
835 .set_settings = efx_ethtool_set_settings, 961 .set_settings = efx_ethtool_set_settings,
836 .get_drvinfo = efx_ethtool_get_drvinfo, 962 .get_drvinfo = efx_ethtool_get_drvinfo,
963 .get_regs_len = efx_ethtool_get_regs_len,
964 .get_regs = efx_ethtool_get_regs,
965 .get_msglevel = efx_ethtool_get_msglevel,
966 .set_msglevel = efx_ethtool_set_msglevel,
837 .nway_reset = efx_ethtool_nway_reset, 967 .nway_reset = efx_ethtool_nway_reset,
838 .get_link = efx_ethtool_get_link, 968 .get_link = efx_ethtool_get_link,
839 .get_eeprom_len = efx_ethtool_get_eeprom_len, 969 .get_eeprom_len = efx_ethtool_get_eeprom_len,
@@ -854,7 +984,7 @@ const struct ethtool_ops efx_ethtool_ops = {
854 /* Need to enable/disable TSO-IPv6 too */ 984 /* Need to enable/disable TSO-IPv6 too */
855 .set_tso = efx_ethtool_set_tso, 985 .set_tso = efx_ethtool_set_tso,
856 .get_flags = ethtool_op_get_flags, 986 .get_flags = ethtool_op_get_flags,
857 .set_flags = ethtool_op_set_flags, 987 .set_flags = efx_ethtool_set_flags,
858 .get_sset_count = efx_ethtool_get_sset_count, 988 .get_sset_count = efx_ethtool_get_sset_count,
859 .self_test = efx_ethtool_self_test, 989 .self_test = efx_ethtool_self_test,
860 .get_strings = efx_ethtool_get_strings, 990 .get_strings = efx_ethtool_get_strings,
@@ -863,4 +993,7 @@ const struct ethtool_ops efx_ethtool_ops = {
863 .get_wol = efx_ethtool_get_wol, 993 .get_wol = efx_ethtool_get_wol,
864 .set_wol = efx_ethtool_set_wol, 994 .set_wol = efx_ethtool_set_wol,
865 .reset = efx_ethtool_reset, 995 .reset = efx_ethtool_reset,
996 .get_rxnfc = efx_ethtool_get_rxnfc,
997 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
998 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
866}; 999};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 655b697b45b2..4f9d33f3cca1 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -167,13 +167,15 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
167 * exit without having touched the hardware. 167 * exit without having touched the hardware.
168 */ 168 */
169 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { 169 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
170 EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq, 170 netif_vdbg(efx, intr, efx->net_dev,
171 raw_smp_processor_id()); 171 "IRQ %d on CPU %d not for me\n", irq,
172 raw_smp_processor_id());
172 return IRQ_NONE; 173 return IRQ_NONE;
173 } 174 }
174 efx->last_irq_cpu = raw_smp_processor_id(); 175 efx->last_irq_cpu = raw_smp_processor_id();
175 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 176 netif_vdbg(efx, intr, efx->net_dev,
176 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 177 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
178 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
177 179
178 /* Determine interrupting queues, clear interrupt status 180 /* Determine interrupting queues, clear interrupt status
179 * register and acknowledge the device interrupt. 181 * register and acknowledge the device interrupt.
@@ -239,7 +241,8 @@ static int falcon_spi_wait(struct efx_nic *efx)
239 if (!falcon_spi_poll(efx)) 241 if (!falcon_spi_poll(efx))
240 return 0; 242 return 0;
241 if (time_after_eq(jiffies, timeout)) { 243 if (time_after_eq(jiffies, timeout)) {
242 EFX_ERR(efx, "timed out waiting for SPI\n"); 244 netif_err(efx, hw, efx->net_dev,
245 "timed out waiting for SPI\n");
243 return -ETIMEDOUT; 246 return -ETIMEDOUT;
244 } 247 }
245 schedule_timeout_uninterruptible(1); 248 schedule_timeout_uninterruptible(1);
@@ -333,9 +336,10 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
333 if (!(status & SPI_STATUS_NRDY)) 336 if (!(status & SPI_STATUS_NRDY))
334 return 0; 337 return 0;
335 if (time_after_eq(jiffies, timeout)) { 338 if (time_after_eq(jiffies, timeout)) {
336 EFX_ERR(efx, "SPI write timeout on device %d" 339 netif_err(efx, hw, efx->net_dev,
337 " last status=0x%02x\n", 340 "SPI write timeout on device %d"
338 spi->device_id, status); 341 " last status=0x%02x\n",
342 spi->device_id, status);
339 return -ETIMEDOUT; 343 return -ETIMEDOUT;
340 } 344 }
341 schedule_timeout_uninterruptible(1); 345 schedule_timeout_uninterruptible(1);
@@ -469,7 +473,8 @@ static void falcon_reset_macs(struct efx_nic *efx)
469 udelay(10); 473 udelay(10);
470 } 474 }
471 475
472 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 476 netif_err(efx, hw, efx->net_dev,
477 "timed out waiting for XMAC core reset\n");
473 } 478 }
474 } 479 }
475 480
@@ -492,12 +497,13 @@ static void falcon_reset_macs(struct efx_nic *efx)
492 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && 497 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
493 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && 498 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
494 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { 499 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
495 EFX_LOG(efx, "Completed MAC reset after %d loops\n", 500 netif_dbg(efx, hw, efx->net_dev,
496 count); 501 "Completed MAC reset after %d loops\n",
502 count);
497 break; 503 break;
498 } 504 }
499 if (count > 20) { 505 if (count > 20) {
500 EFX_ERR(efx, "MAC reset failed\n"); 506 netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
501 break; 507 break;
502 } 508 }
503 count++; 509 count++;
@@ -548,7 +554,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
548{ 554{
549 struct efx_link_state *link_state = &efx->link_state; 555 struct efx_link_state *link_state = &efx->link_state;
550 efx_oword_t reg; 556 efx_oword_t reg;
551 int link_speed; 557 int link_speed, isolate;
558
559 isolate = (efx->reset_pending != RESET_TYPE_NONE);
552 560
553 switch (link_state->speed) { 561 switch (link_state->speed) {
554 case 10000: link_speed = 3; break; 562 case 10000: link_speed = 3; break;
@@ -570,7 +578,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
570 * discarded. */ 578 * discarded. */
571 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 579 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
572 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 580 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
573 !link_state->up); 581 !link_state->up || isolate);
574 } 582 }
575 583
576 efx_writeo(efx, &reg, FR_AB_MAC_CTRL); 584 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
@@ -584,7 +592,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
584 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); 592 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
585 /* Unisolate the MAC -> RX */ 593 /* Unisolate the MAC -> RX */
586 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 594 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
587 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); 595 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
588 efx_writeo(efx, &reg, FR_AZ_RX_CFG); 596 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
589} 597}
590 598
@@ -625,7 +633,8 @@ static void falcon_stats_complete(struct efx_nic *efx)
625 rmb(); /* read the done flag before the stats */ 633 rmb(); /* read the done flag before the stats */
626 efx->mac_op->update_stats(efx); 634 efx->mac_op->update_stats(efx);
627 } else { 635 } else {
628 EFX_ERR(efx, "timed out waiting for statistics\n"); 636 netif_err(efx, hw, efx->net_dev,
637 "timed out waiting for statistics\n");
629 } 638 }
630} 639}
631 640
@@ -715,16 +724,17 @@ static int falcon_gmii_wait(struct efx_nic *efx)
715 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { 724 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
716 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || 725 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
717 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { 726 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
718 EFX_ERR(efx, "error from GMII access " 727 netif_err(efx, hw, efx->net_dev,
719 EFX_OWORD_FMT"\n", 728 "error from GMII access "
720 EFX_OWORD_VAL(md_stat)); 729 EFX_OWORD_FMT"\n",
730 EFX_OWORD_VAL(md_stat));
721 return -EIO; 731 return -EIO;
722 } 732 }
723 return 0; 733 return 0;
724 } 734 }
725 udelay(10); 735 udelay(10);
726 } 736 }
727 EFX_ERR(efx, "timed out waiting for GMII\n"); 737 netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
728 return -ETIMEDOUT; 738 return -ETIMEDOUT;
729} 739}
730 740
@@ -736,7 +746,8 @@ static int falcon_mdio_write(struct net_device *net_dev,
736 efx_oword_t reg; 746 efx_oword_t reg;
737 int rc; 747 int rc;
738 748
739 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n", 749 netif_vdbg(efx, hw, efx->net_dev,
750 "writing MDIO %d register %d.%d with 0x%04x\n",
740 prtad, devad, addr, value); 751 prtad, devad, addr, value);
741 752
742 mutex_lock(&efx->mdio_lock); 753 mutex_lock(&efx->mdio_lock);
@@ -810,8 +821,9 @@ static int falcon_mdio_read(struct net_device *net_dev,
810 if (rc == 0) { 821 if (rc == 0) {
811 efx_reado(efx, &reg, FR_AB_MD_RXD); 822 efx_reado(efx, &reg, FR_AB_MD_RXD);
812 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); 823 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
813 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", 824 netif_vdbg(efx, hw, efx->net_dev,
814 prtad, devad, addr, rc); 825 "read from MDIO %d register %d.%d, got %04x\n",
826 prtad, devad, addr, rc);
815 } else { 827 } else {
816 /* Abort the read operation */ 828 /* Abort the read operation */
817 EFX_POPULATE_OWORD_2(reg, 829 EFX_POPULATE_OWORD_2(reg,
@@ -819,8 +831,9 @@ static int falcon_mdio_read(struct net_device *net_dev,
819 FRF_AB_MD_GC, 1); 831 FRF_AB_MD_GC, 1);
820 efx_writeo(efx, &reg, FR_AB_MD_CS); 832 efx_writeo(efx, &reg, FR_AB_MD_CS);
821 833
822 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", 834 netif_dbg(efx, hw, efx->net_dev,
823 prtad, devad, addr, rc); 835 "read from MDIO %d register %d.%d, got error %d\n",
836 prtad, devad, addr, rc);
824 } 837 }
825 838
826out: 839out:
@@ -871,7 +884,8 @@ static void falcon_switch_mac(struct efx_nic *efx)
871 884
872 falcon_clock_mac(efx); 885 falcon_clock_mac(efx);
873 886
874 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); 887 netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
888 EFX_IS10G(efx) ? 'X' : 'G');
875 /* Not all macs support a mac-level link state */ 889 /* Not all macs support a mac-level link state */
876 efx->xmac_poll_required = false; 890 efx->xmac_poll_required = false;
877 falcon_reset_macs(efx); 891 falcon_reset_macs(efx);
@@ -895,8 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
895 efx->phy_op = &falcon_qt202x_phy_ops; 909 efx->phy_op = &falcon_qt202x_phy_ops;
896 break; 910 break;
897 default: 911 default:
898 EFX_ERR(efx, "Unknown PHY type %d\n", 912 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
899 efx->phy_type); 913 efx->phy_type);
900 return -ENODEV; 914 return -ENODEV;
901 } 915 }
902 916
@@ -924,10 +938,11 @@ static int falcon_probe_port(struct efx_nic *efx)
924 FALCON_MAC_STATS_SIZE); 938 FALCON_MAC_STATS_SIZE);
925 if (rc) 939 if (rc)
926 return rc; 940 return rc;
927 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", 941 netif_dbg(efx, probe, efx->net_dev,
928 (u64)efx->stats_buffer.dma_addr, 942 "stats buffer at %llx (virt %p phys %llx)\n",
929 efx->stats_buffer.addr, 943 (u64)efx->stats_buffer.dma_addr,
930 (u64)virt_to_phys(efx->stats_buffer.addr)); 944 efx->stats_buffer.addr,
945 (u64)virt_to_phys(efx->stats_buffer.addr));
931 946
932 return 0; 947 return 0;
933} 948}
@@ -967,8 +982,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
967 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); 982 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
968 mutex_unlock(&efx->spi_lock); 983 mutex_unlock(&efx->spi_lock);
969 if (rc) { 984 if (rc) {
970 EFX_ERR(efx, "Failed to read %s\n", 985 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
971 efx->spi_flash ? "flash" : "EEPROM"); 986 efx->spi_flash ? "flash" : "EEPROM");
972 rc = -EIO; 987 rc = -EIO;
973 goto out; 988 goto out;
974 } 989 }
@@ -978,11 +993,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
978 993
979 rc = -EINVAL; 994 rc = -EINVAL;
980 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { 995 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
981 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); 996 netif_err(efx, hw, efx->net_dev,
997 "NVRAM bad magic 0x%x\n", magic_num);
982 goto out; 998 goto out;
983 } 999 }
984 if (struct_ver < 2) { 1000 if (struct_ver < 2) {
985 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver); 1001 netif_err(efx, hw, efx->net_dev,
1002 "NVRAM has ancient version 0x%x\n", struct_ver);
986 goto out; 1003 goto out;
987 } else if (struct_ver < 4) { 1004 } else if (struct_ver < 4) {
988 word = &nvconfig->board_magic_num; 1005 word = &nvconfig->board_magic_num;
@@ -995,7 +1012,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
995 csum += le16_to_cpu(*word); 1012 csum += le16_to_cpu(*word);
996 1013
997 if (~csum & 0xffff) { 1014 if (~csum & 0xffff) {
998 EFX_ERR(efx, "NVRAM has incorrect checksum\n"); 1015 netif_err(efx, hw, efx->net_dev,
1016 "NVRAM has incorrect checksum\n");
999 goto out; 1017 goto out;
1000 } 1018 }
1001 1019
@@ -1073,22 +1091,25 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1073 efx_oword_t glb_ctl_reg_ker; 1091 efx_oword_t glb_ctl_reg_ker;
1074 int rc; 1092 int rc;
1075 1093
1076 EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method)); 1094 netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
1095 RESET_TYPE(method));
1077 1096
1078 /* Initiate device reset */ 1097 /* Initiate device reset */
1079 if (method == RESET_TYPE_WORLD) { 1098 if (method == RESET_TYPE_WORLD) {
1080 rc = pci_save_state(efx->pci_dev); 1099 rc = pci_save_state(efx->pci_dev);
1081 if (rc) { 1100 if (rc) {
1082 EFX_ERR(efx, "failed to backup PCI state of primary " 1101 netif_err(efx, drv, efx->net_dev,
1083 "function prior to hardware reset\n"); 1102 "failed to backup PCI state of primary "
1103 "function prior to hardware reset\n");
1084 goto fail1; 1104 goto fail1;
1085 } 1105 }
1086 if (efx_nic_is_dual_func(efx)) { 1106 if (efx_nic_is_dual_func(efx)) {
1087 rc = pci_save_state(nic_data->pci_dev2); 1107 rc = pci_save_state(nic_data->pci_dev2);
1088 if (rc) { 1108 if (rc) {
1089 EFX_ERR(efx, "failed to backup PCI state of " 1109 netif_err(efx, drv, efx->net_dev,
1090 "secondary function prior to " 1110 "failed to backup PCI state of "
1091 "hardware reset\n"); 1111 "secondary function prior to "
1112 "hardware reset\n");
1092 goto fail2; 1113 goto fail2;
1093 } 1114 }
1094 } 1115 }
@@ -1113,7 +1134,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1113 } 1134 }
1114 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); 1135 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1115 1136
1116 EFX_LOG(efx, "waiting for hardware reset\n"); 1137 netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
1117 schedule_timeout_uninterruptible(HZ / 20); 1138 schedule_timeout_uninterruptible(HZ / 20);
1118 1139
1119 /* Restore PCI configuration if needed */ 1140 /* Restore PCI configuration if needed */
@@ -1121,28 +1142,32 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1121 if (efx_nic_is_dual_func(efx)) { 1142 if (efx_nic_is_dual_func(efx)) {
1122 rc = pci_restore_state(nic_data->pci_dev2); 1143 rc = pci_restore_state(nic_data->pci_dev2);
1123 if (rc) { 1144 if (rc) {
1124 EFX_ERR(efx, "failed to restore PCI config for " 1145 netif_err(efx, drv, efx->net_dev,
1125 "the secondary function\n"); 1146 "failed to restore PCI config for "
1147 "the secondary function\n");
1126 goto fail3; 1148 goto fail3;
1127 } 1149 }
1128 } 1150 }
1129 rc = pci_restore_state(efx->pci_dev); 1151 rc = pci_restore_state(efx->pci_dev);
1130 if (rc) { 1152 if (rc) {
1131 EFX_ERR(efx, "failed to restore PCI config for the " 1153 netif_err(efx, drv, efx->net_dev,
1132 "primary function\n"); 1154 "failed to restore PCI config for the "
1155 "primary function\n");
1133 goto fail4; 1156 goto fail4;
1134 } 1157 }
1135 EFX_LOG(efx, "successfully restored PCI config\n"); 1158 netif_dbg(efx, drv, efx->net_dev,
1159 "successfully restored PCI config\n");
1136 } 1160 }
1137 1161
1138 /* Assert that reset complete */ 1162 /* Assert that reset complete */
1139 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); 1163 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1140 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { 1164 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
1141 rc = -ETIMEDOUT; 1165 rc = -ETIMEDOUT;
1142 EFX_ERR(efx, "timed out waiting for hardware reset\n"); 1166 netif_err(efx, hw, efx->net_dev,
1167 "timed out waiting for hardware reset\n");
1143 goto fail5; 1168 goto fail5;
1144 } 1169 }
1145 EFX_LOG(efx, "hardware reset complete\n"); 1170 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
1146 1171
1147 return 0; 1172 return 0;
1148 1173
@@ -1165,8 +1190,9 @@ static void falcon_monitor(struct efx_nic *efx)
1165 1190
1166 rc = falcon_board(efx)->type->monitor(efx); 1191 rc = falcon_board(efx)->type->monitor(efx);
1167 if (rc) { 1192 if (rc) {
1168 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n", 1193 netif_err(efx, hw, efx->net_dev,
1169 (rc == -ERANGE) ? "reported fault" : "failed"); 1194 "Board sensor %s; shutting down PHY\n",
1195 (rc == -ERANGE) ? "reported fault" : "failed");
1170 efx->phy_mode |= PHY_MODE_LOW_POWER; 1196 efx->phy_mode |= PHY_MODE_LOW_POWER;
1171 rc = __efx_reconfigure_port(efx); 1197 rc = __efx_reconfigure_port(efx);
1172 WARN_ON(rc); 1198 WARN_ON(rc);
@@ -1217,7 +1243,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
1217 /* Wait for SRAM reset to complete */ 1243 /* Wait for SRAM reset to complete */
1218 count = 0; 1244 count = 0;
1219 do { 1245 do {
1220 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count); 1246 netif_dbg(efx, hw, efx->net_dev,
1247 "waiting for SRAM reset (attempt %d)...\n", count);
1221 1248
1222 /* SRAM reset is slow; expect around 16ms */ 1249 /* SRAM reset is slow; expect around 16ms */
1223 schedule_timeout_uninterruptible(HZ / 50); 1250 schedule_timeout_uninterruptible(HZ / 50);
@@ -1225,13 +1252,14 @@ static int falcon_reset_sram(struct efx_nic *efx)
1225 /* Check for reset complete */ 1252 /* Check for reset complete */
1226 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); 1253 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1227 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { 1254 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
1228 EFX_LOG(efx, "SRAM reset complete\n"); 1255 netif_dbg(efx, hw, efx->net_dev,
1256 "SRAM reset complete\n");
1229 1257
1230 return 0; 1258 return 0;
1231 } 1259 }
1232 } while (++count < 20); /* wait upto 0.4 sec */ 1260 } while (++count < 20); /* wait upto 0.4 sec */
1233 1261
1234 EFX_ERR(efx, "timed out waiting for SRAM reset\n"); 1262 netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
1235 return -ETIMEDOUT; 1263 return -ETIMEDOUT;
1236} 1264}
1237 1265
@@ -1290,7 +1318,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1290 1318
1291 rc = falcon_read_nvram(efx, nvconfig); 1319 rc = falcon_read_nvram(efx, nvconfig);
1292 if (rc == -EINVAL) { 1320 if (rc == -EINVAL) {
1293 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); 1321 netif_err(efx, probe, efx->net_dev,
1322 "NVRAM is invalid therefore using defaults\n");
1294 efx->phy_type = PHY_TYPE_NONE; 1323 efx->phy_type = PHY_TYPE_NONE;
1295 efx->mdio.prtad = MDIO_PRTAD_NONE; 1324 efx->mdio.prtad = MDIO_PRTAD_NONE;
1296 board_rev = 0; 1325 board_rev = 0;
@@ -1324,7 +1353,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1324 /* Read the MAC addresses */ 1353 /* Read the MAC addresses */
1325 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); 1354 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
1326 1355
1327 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 1356 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1357 efx->phy_type, efx->mdio.prtad);
1328 1358
1329 rc = falcon_probe_board(efx, board_rev); 1359 rc = falcon_probe_board(efx, board_rev);
1330 if (rc) 1360 if (rc)
@@ -1353,14 +1383,16 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
1353 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { 1383 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
1354 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? 1384 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
1355 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); 1385 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
1356 EFX_LOG(efx, "Booted from %s\n", 1386 netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
1357 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM"); 1387 boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
1388 "flash" : "EEPROM");
1358 } else { 1389 } else {
1359 /* Disable VPD and set clock dividers to safe 1390 /* Disable VPD and set clock dividers to safe
1360 * values for initial programming. */ 1391 * values for initial programming. */
1361 boot_dev = -1; 1392 boot_dev = -1;
1362 EFX_LOG(efx, "Booted from internal ASIC settings;" 1393 netif_dbg(efx, probe, efx->net_dev,
1363 " setting SPI config\n"); 1394 "Booted from internal ASIC settings;"
1395 " setting SPI config\n");
1364 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, 1396 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
1365 /* 125 MHz / 7 ~= 20 MHz */ 1397 /* 125 MHz / 7 ~= 20 MHz */
1366 FRF_AB_EE_SF_CLOCK_DIV, 7, 1398 FRF_AB_EE_SF_CLOCK_DIV, 7,
@@ -1394,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1394 rc = -ENODEV; 1426 rc = -ENODEV;
1395 1427
1396 if (efx_nic_fpga_ver(efx) != 0) { 1428 if (efx_nic_fpga_ver(efx) != 0) {
1397 EFX_ERR(efx, "Falcon FPGA not supported\n"); 1429 netif_err(efx, probe, efx->net_dev,
1430 "Falcon FPGA not supported\n");
1398 goto fail1; 1431 goto fail1;
1399 } 1432 }
1400 1433
@@ -1404,16 +1437,19 @@ static int falcon_probe_nic(struct efx_nic *efx)
1404 u8 pci_rev = efx->pci_dev->revision; 1437 u8 pci_rev = efx->pci_dev->revision;
1405 1438
1406 if ((pci_rev == 0xff) || (pci_rev == 0)) { 1439 if ((pci_rev == 0xff) || (pci_rev == 0)) {
1407 EFX_ERR(efx, "Falcon rev A0 not supported\n"); 1440 netif_err(efx, probe, efx->net_dev,
1441 "Falcon rev A0 not supported\n");
1408 goto fail1; 1442 goto fail1;
1409 } 1443 }
1410 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); 1444 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1411 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { 1445 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
1412 EFX_ERR(efx, "Falcon rev A1 1G not supported\n"); 1446 netif_err(efx, probe, efx->net_dev,
1447 "Falcon rev A1 1G not supported\n");
1413 goto fail1; 1448 goto fail1;
1414 } 1449 }
1415 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { 1450 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
1416 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); 1451 netif_err(efx, probe, efx->net_dev,
1452 "Falcon rev A1 PCI-X not supported\n");
1417 goto fail1; 1453 goto fail1;
1418 } 1454 }
1419 1455
@@ -1427,7 +1463,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1427 } 1463 }
1428 } 1464 }
1429 if (!nic_data->pci_dev2) { 1465 if (!nic_data->pci_dev2) {
1430 EFX_ERR(efx, "failed to find secondary function\n"); 1466 netif_err(efx, probe, efx->net_dev,
1467 "failed to find secondary function\n");
1431 rc = -ENODEV; 1468 rc = -ENODEV;
1432 goto fail2; 1469 goto fail2;
1433 } 1470 }
@@ -1436,7 +1473,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
1436 /* Now we can reset the NIC */ 1473 /* Now we can reset the NIC */
1437 rc = falcon_reset_hw(efx, RESET_TYPE_ALL); 1474 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
1438 if (rc) { 1475 if (rc) {
1439 EFX_ERR(efx, "failed to reset NIC\n"); 1476 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
1440 goto fail3; 1477 goto fail3;
1441 } 1478 }
1442 1479
@@ -1446,9 +1483,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
1446 goto fail4; 1483 goto fail4;
1447 BUG_ON(efx->irq_status.dma_addr & 0x0f); 1484 BUG_ON(efx->irq_status.dma_addr & 0x0f);
1448 1485
1449 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", 1486 netif_dbg(efx, probe, efx->net_dev,
1450 (u64)efx->irq_status.dma_addr, 1487 "INT_KER at %llx (virt %p phys %llx)\n",
1451 efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr)); 1488 (u64)efx->irq_status.dma_addr,
1489 efx->irq_status.addr,
1490 (u64)virt_to_phys(efx->irq_status.addr));
1452 1491
1453 falcon_probe_spi_devices(efx); 1492 falcon_probe_spi_devices(efx);
1454 1493
@@ -1472,7 +1511,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1472 1511
1473 rc = falcon_board(efx)->type->init(efx); 1512 rc = falcon_board(efx)->type->init(efx);
1474 if (rc) { 1513 if (rc) {
1475 EFX_ERR(efx, "failed to initialise board\n"); 1514 netif_err(efx, probe, efx->net_dev,
1515 "failed to initialise board\n");
1476 goto fail6; 1516 goto fail6;
1477 } 1517 }
1478 1518
@@ -1542,6 +1582,13 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
1542 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); 1582 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1543 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); 1583 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1544 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); 1584 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
1585
1586 /* Enable hash insertion. This is broken for the
1587 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
1588 * IPv4 hashes. */
1589 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
1590 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
1591 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
1545 } 1592 }
1546 /* Always enable XOFF signal from RX FIFO. We enable 1593 /* Always enable XOFF signal from RX FIFO. We enable
1547 * or disable transmission of pause frames at the MAC. */ 1594 * or disable transmission of pause frames at the MAC. */
@@ -1615,8 +1662,12 @@ static int falcon_init_nic(struct efx_nic *efx)
1615 1662
1616 falcon_init_rx_cfg(efx); 1663 falcon_init_rx_cfg(efx);
1617 1664
1618 /* Set destination of both TX and RX Flush events */
1619 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 1665 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1666 /* Set hash key for IPv4 */
1667 memcpy(&temp, efx->rx_hash_key, sizeof(temp));
1668 efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
1669
1670 /* Set destination of both TX and RX Flush events */
1620 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); 1671 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
1621 efx_writeo(efx, &temp, FR_BZ_DP_CTRL); 1672 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
1622 } 1673 }
@@ -1821,6 +1872,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1821 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, 1872 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
1822 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, 1873 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
1823 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 1874 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1875 .rx_buffer_hash_size = 0x10,
1824 .rx_buffer_padding = 0, 1876 .rx_buffer_padding = 0,
1825 .max_interrupt_mode = EFX_INT_MODE_MSIX, 1877 .max_interrupt_mode = EFX_INT_MODE_MSIX,
1826 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 1878 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -1828,7 +1880,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1828 * channels */ 1880 * channels */
1829 .tx_dc_base = 0x130000, 1881 .tx_dc_base = 0x130000,
1830 .rx_dc_base = 0x100000, 1882 .rx_dc_base = 0x100000,
1831 .offload_features = NETIF_F_IP_CSUM, 1883 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH,
1832 .reset_world_flags = ETH_RESET_IRQ, 1884 .reset_world_flags = ETH_RESET_IRQ,
1833}; 1885};
1834 1886
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index c7a933a3292e..3d950c2cf205 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -106,12 +106,17 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
106 alarms1 &= mask; 106 alarms1 &= mask;
107 alarms2 &= mask >> 8; 107 alarms2 &= mask >> 8;
108 if (alarms1 || alarms2) { 108 if (alarms1 || alarms2) {
109 EFX_ERR(efx, 109 netif_err(efx, hw, efx->net_dev,
110 "LM87 detected a hardware failure (status %02x:%02x)" 110 "LM87 detected a hardware failure (status %02x:%02x)"
111 "%s%s\n", 111 "%s%s%s\n",
112 alarms1, alarms2, 112 alarms1, alarms2,
113 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "", 113 (alarms1 & LM87_ALARM_TEMP_INT) ?
114 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : ""); 114 "; board is overheating" : "",
115 (alarms1 & LM87_ALARM_TEMP_EXT1) ?
116 "; controller is overheating" : "",
117 (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1)
118 || alarms2) ?
119 "; electrical fault" : "");
115 return -ERANGE; 120 return -ERANGE;
116 } 121 }
117 122
@@ -243,7 +248,7 @@ static int sfe4001_poweron(struct efx_nic *efx)
243 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | 248 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
244 (0 << P0_EN_1V0X_LBN)); 249 (0 << P0_EN_1V0X_LBN));
245 if (rc != out) { 250 if (rc != out) {
246 EFX_INFO(efx, "power-cycling PHY\n"); 251 netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
247 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); 252 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
248 if (rc) 253 if (rc)
249 goto fail_on; 254 goto fail_on;
@@ -269,7 +274,8 @@ static int sfe4001_poweron(struct efx_nic *efx)
269 if (rc) 274 if (rc)
270 goto fail_on; 275 goto fail_on;
271 276
272 EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i); 277 netif_info(efx, hw, efx->net_dev,
278 "waiting for DSP boot (attempt %d)...\n", i);
273 279
274 /* In flash config mode, DSP does not turn on AFE, so 280 /* In flash config mode, DSP does not turn on AFE, so
275 * just wait 1 second. 281 * just wait 1 second.
@@ -291,7 +297,7 @@ static int sfe4001_poweron(struct efx_nic *efx)
291 } 297 }
292 } 298 }
293 299
294 EFX_INFO(efx, "timed out waiting for DSP boot\n"); 300 netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
295 rc = -ETIMEDOUT; 301 rc = -ETIMEDOUT;
296fail_on: 302fail_on:
297 sfe4001_poweroff(efx); 303 sfe4001_poweroff(efx);
@@ -377,7 +383,7 @@ static void sfe4001_fini(struct efx_nic *efx)
377{ 383{
378 struct falcon_board *board = falcon_board(efx); 384 struct falcon_board *board = falcon_board(efx);
379 385
380 EFX_INFO(efx, "%s\n", __func__); 386 netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
381 387
382 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); 388 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
383 sfe4001_poweroff(efx); 389 sfe4001_poweroff(efx);
@@ -461,7 +467,7 @@ static int sfe4001_init(struct efx_nic *efx)
461 if (rc) 467 if (rc)
462 goto fail_on; 468 goto fail_on;
463 469
464 EFX_INFO(efx, "PHY is powered on\n"); 470 netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
465 return 0; 471 return 0;
466 472
467fail_on: 473fail_on:
@@ -493,7 +499,7 @@ static int sfn4111t_check_hw(struct efx_nic *efx)
493 499
494static void sfn4111t_fini(struct efx_nic *efx) 500static void sfn4111t_fini(struct efx_nic *efx)
495{ 501{
496 EFX_INFO(efx, "%s\n", __func__); 502 netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
497 503
498 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); 504 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
499 i2c_unregister_device(falcon_board(efx)->hwmon_client); 505 i2c_unregister_device(falcon_board(efx)->hwmon_client);
@@ -742,13 +748,14 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
742 board->type = &board_types[i]; 748 board->type = &board_types[i];
743 749
744 if (board->type) { 750 if (board->type) {
745 EFX_INFO(efx, "board is %s rev %c%d\n", 751 netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
746 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 752 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
747 ? board->type->ref_model : board->type->gen_type, 753 ? board->type->ref_model : board->type->gen_type,
748 'A' + board->major, board->minor); 754 'A' + board->major, board->minor);
749 return 0; 755 return 0;
750 } else { 756 } else {
751 EFX_ERR(efx, "unknown board type %d\n", type_id); 757 netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
758 type_id);
752 return -ENODEV; 759 return -ENODEV;
753 } 760 }
754} 761}
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index c84a2ce2ccbb..bae656dd2c4e 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -81,7 +81,8 @@ int falcon_reset_xaui(struct efx_nic *efx)
81 } 81 }
82 udelay(10); 82 udelay(10);
83 } 83 }
84 EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); 84 netif_err(efx, hw, efx->net_dev,
85 "timed out waiting for XAUI/XGXS reset\n");
85 return -ETIMEDOUT; 86 return -ETIMEDOUT;
86} 87}
87 88
@@ -256,7 +257,7 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
256 falcon_stop_nic_stats(efx); 257 falcon_stop_nic_stats(efx);
257 258
258 while (!mac_up && tries) { 259 while (!mac_up && tries) {
259 EFX_LOG(efx, "bashing xaui\n"); 260 netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
260 falcon_reset_xaui(efx); 261 falcon_reset_xaui(efx);
261 udelay(200); 262 udelay(200);
262 263
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index b89177c27f4a..85a99fe87437 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -78,8 +78,9 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
78{ 78{
79 unsigned long flags __attribute__ ((unused)); 79 unsigned long flags __attribute__ ((unused));
80 80
81 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg, 81 netif_vdbg(efx, hw, efx->net_dev,
82 EFX_OWORD_VAL(*value)); 82 "writing register %x with " EFX_OWORD_FMT "\n", reg,
83 EFX_OWORD_VAL(*value));
83 84
84 spin_lock_irqsave(&efx->biu_lock, flags); 85 spin_lock_irqsave(&efx->biu_lock, flags);
85#ifdef EFX_USE_QWORD_IO 86#ifdef EFX_USE_QWORD_IO
@@ -105,8 +106,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
105 unsigned int addr = index * sizeof(*value); 106 unsigned int addr = index * sizeof(*value);
106 unsigned long flags __attribute__ ((unused)); 107 unsigned long flags __attribute__ ((unused));
107 108
108 EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n", 109 netif_vdbg(efx, hw, efx->net_dev,
109 addr, EFX_QWORD_VAL(*value)); 110 "writing SRAM address %x with " EFX_QWORD_FMT "\n",
111 addr, EFX_QWORD_VAL(*value));
110 112
111 spin_lock_irqsave(&efx->biu_lock, flags); 113 spin_lock_irqsave(&efx->biu_lock, flags);
112#ifdef EFX_USE_QWORD_IO 114#ifdef EFX_USE_QWORD_IO
@@ -129,8 +131,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
129static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, 131static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
130 unsigned int reg) 132 unsigned int reg)
131{ 133{
132 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n", 134 netif_vdbg(efx, hw, efx->net_dev,
133 reg, EFX_DWORD_VAL(*value)); 135 "writing partial register %x with "EFX_DWORD_FMT"\n",
136 reg, EFX_DWORD_VAL(*value));
134 137
135 /* No lock required */ 138 /* No lock required */
136 _efx_writed(efx, value->u32[0], reg); 139 _efx_writed(efx, value->u32[0], reg);
@@ -155,8 +158,9 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
155 value->u32[3] = _efx_readd(efx, reg + 12); 158 value->u32[3] = _efx_readd(efx, reg + 12);
156 spin_unlock_irqrestore(&efx->biu_lock, flags); 159 spin_unlock_irqrestore(&efx->biu_lock, flags);
157 160
158 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg, 161 netif_vdbg(efx, hw, efx->net_dev,
159 EFX_OWORD_VAL(*value)); 162 "read from register %x, got " EFX_OWORD_FMT "\n", reg,
163 EFX_OWORD_VAL(*value));
160} 164}
161 165
162/* Read an 8-byte SRAM entry through supplied mapping, 166/* Read an 8-byte SRAM entry through supplied mapping,
@@ -177,8 +181,9 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
177#endif 181#endif
178 spin_unlock_irqrestore(&efx->biu_lock, flags); 182 spin_unlock_irqrestore(&efx->biu_lock, flags);
179 183
180 EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n", 184 netif_vdbg(efx, hw, efx->net_dev,
181 addr, EFX_QWORD_VAL(*value)); 185 "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
186 addr, EFX_QWORD_VAL(*value));
182} 187}
183 188
184/* Read dword from register that allows partial writes (sic) */ 189/* Read dword from register that allows partial writes (sic) */
@@ -186,8 +191,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
186 unsigned int reg) 191 unsigned int reg)
187{ 192{
188 value->u32[0] = _efx_readd(efx, reg); 193 value->u32[0] = _efx_readd(efx, reg);
189 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n", 194 netif_vdbg(efx, hw, efx->net_dev,
190 reg, EFX_DWORD_VAL(*value)); 195 "read from register %x, got "EFX_DWORD_FMT"\n",
196 reg, EFX_DWORD_VAL(*value));
191} 197}
192 198
193/* Write to a register forming part of a table */ 199/* Write to a register forming part of a table */
@@ -211,6 +217,13 @@ static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); 217 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
212} 218}
213 219
220/* Read from a dword register forming part of a table */
221static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
222 unsigned int reg, unsigned int index)
223{
224 efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
225}
226
214/* Page-mapped register block size */ 227/* Page-mapped register block size */
215#define EFX_PAGE_BLOCK_SIZE 0x2000 228#define EFX_PAGE_BLOCK_SIZE 0x2000
216 229
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 93cc3c1b9450..3912b8fed912 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -168,11 +168,12 @@ static int efx_mcdi_poll(struct efx_nic *efx)
168 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); 168 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
169 169
170 if (error && mcdi->resplen == 0) { 170 if (error && mcdi->resplen == 0) {
171 EFX_ERR(efx, "MC rebooted\n"); 171 netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
172 rc = EIO; 172 rc = EIO;
173 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { 173 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
174 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", 174 netif_err(efx, hw, efx->net_dev,
175 respseq, mcdi->seqno); 175 "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
176 respseq, mcdi->seqno);
176 rc = EIO; 177 rc = EIO;
177 } else if (error) { 178 } else if (error) {
178 efx_readd(efx, &reg, pdu + 4); 179 efx_readd(efx, &reg, pdu + 4);
@@ -303,8 +304,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
303 /* The request has been cancelled */ 304 /* The request has been cancelled */
304 --mcdi->credits; 305 --mcdi->credits;
305 else 306 else
306 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx " 307 netif_err(efx, hw, efx->net_dev,
307 "seq 0x%x\n", seqno, mcdi->seqno); 308 "MC response mismatch tx seq 0x%x rx "
309 "seq 0x%x\n", seqno, mcdi->seqno);
308 } else { 310 } else {
309 mcdi->resprc = errno; 311 mcdi->resprc = errno;
310 mcdi->resplen = datalen; 312 mcdi->resplen = datalen;
@@ -352,8 +354,9 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
352 ++mcdi->credits; 354 ++mcdi->credits;
353 spin_unlock_bh(&mcdi->iface_lock); 355 spin_unlock_bh(&mcdi->iface_lock);
354 356
355 EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n", 357 netif_err(efx, hw, efx->net_dev,
356 cmd, (int)inlen, mcdi->mode); 358 "MC command 0x%x inlen %d mode %d timed out\n",
359 cmd, (int)inlen, mcdi->mode);
357 } else { 360 } else {
358 size_t resplen; 361 size_t resplen;
359 362
@@ -374,11 +377,13 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
374 } else if (cmd == MC_CMD_REBOOT && rc == -EIO) 377 } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
375 ; /* Don't reset if MC_CMD_REBOOT returns EIO */ 378 ; /* Don't reset if MC_CMD_REBOOT returns EIO */
376 else if (rc == -EIO || rc == -EINTR) { 379 else if (rc == -EIO || rc == -EINTR) {
377 EFX_ERR(efx, "MC fatal error %d\n", -rc); 380 netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
381 -rc);
378 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
379 } else 383 } else
380 EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n", 384 netif_err(efx, hw, efx->net_dev,
381 cmd, (int)inlen, -rc); 385 "MC command 0x%x inlen %d failed rc=%d\n",
386 cmd, (int)inlen, -rc);
382 } 387 }
383 388
384 efx_mcdi_release(mcdi); 389 efx_mcdi_release(mcdi);
@@ -534,8 +539,9 @@ static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
534 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); 539 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
535 state_txt = sensor_status_names[state]; 540 state_txt = sensor_status_names[state];
536 541
537 EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n", 542 netif_err(efx, hw, efx->net_dev,
538 monitor, name, state_txt, value); 543 "Sensor %d (%s) reports condition '%s' for raw value %d\n",
544 monitor, name, state_txt, value);
539} 545}
540 546
541/* Called from falcon_process_eventq for MCDI events */ 547/* Called from falcon_process_eventq for MCDI events */
@@ -548,12 +554,13 @@ void efx_mcdi_process_event(struct efx_channel *channel,
548 554
549 switch (code) { 555 switch (code) {
550 case MCDI_EVENT_CODE_BADSSERT: 556 case MCDI_EVENT_CODE_BADSSERT:
551 EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data); 557 netif_err(efx, hw, efx->net_dev,
558 "MC watchdog or assertion failure at 0x%x\n", data);
552 efx_mcdi_ev_death(efx, EINTR); 559 efx_mcdi_ev_death(efx, EINTR);
553 break; 560 break;
554 561
555 case MCDI_EVENT_CODE_PMNOTICE: 562 case MCDI_EVENT_CODE_PMNOTICE:
556 EFX_INFO(efx, "MCDI PM event.\n"); 563 netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
557 break; 564 break;
558 565
559 case MCDI_EVENT_CODE_CMDDONE: 566 case MCDI_EVENT_CODE_CMDDONE:
@@ -570,10 +577,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
570 efx_mcdi_sensor_event(efx, event); 577 efx_mcdi_sensor_event(efx, event);
571 break; 578 break;
572 case MCDI_EVENT_CODE_SCHEDERR: 579 case MCDI_EVENT_CODE_SCHEDERR:
573 EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data); 580 netif_info(efx, hw, efx->net_dev,
581 "MC Scheduler error address=0x%x\n", data);
574 break; 582 break;
575 case MCDI_EVENT_CODE_REBOOT: 583 case MCDI_EVENT_CODE_REBOOT:
576 EFX_INFO(efx, "MC Reboot\n"); 584 netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
577 efx_mcdi_ev_death(efx, EIO); 585 efx_mcdi_ev_death(efx, EIO);
578 break; 586 break;
579 case MCDI_EVENT_CODE_MAC_STATS_DMA: 587 case MCDI_EVENT_CODE_MAC_STATS_DMA:
@@ -581,7 +589,8 @@ void efx_mcdi_process_event(struct efx_channel *channel,
581 break; 589 break;
582 590
583 default: 591 default:
584 EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code); 592 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
593 code);
585 } 594 }
586} 595}
587 596
@@ -627,7 +636,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
627 return 0; 636 return 0;
628 637
629fail: 638fail:
630 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 639 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
631 return rc; 640 return rc;
632} 641}
633 642
@@ -657,7 +666,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
657 return 0; 666 return 0;
658 667
659fail: 668fail:
660 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 669 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
661 return rc; 670 return rc;
662} 671}
663 672
@@ -695,7 +704,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
695 return 0; 704 return 0;
696 705
697fail: 706fail:
698 EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen); 707 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
708 __func__, rc, (int)outlen);
699 709
700 return rc; 710 return rc;
701} 711}
@@ -724,7 +734,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
724 return 0; 734 return 0;
725 735
726fail: 736fail:
727 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 737 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
728 return rc; 738 return rc;
729} 739}
730 740
@@ -749,8 +759,8 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
749 return 0; 759 return 0;
750 760
751fail: 761fail:
752 EFX_ERR(efx, "%s: failed rc=%d\n", 762 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
753 __func__, rc); 763 __func__, rc);
754 return rc; 764 return rc;
755} 765}
756 766
@@ -781,7 +791,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
781 return 0; 791 return 0;
782 792
783fail: 793fail:
784 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 794 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
785 return rc; 795 return rc;
786} 796}
787 797
@@ -802,7 +812,7 @@ int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
802 return 0; 812 return 0;
803 813
804fail: 814fail:
805 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 815 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
806 return rc; 816 return rc;
807} 817}
808 818
@@ -827,7 +837,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
827 return 0; 837 return 0;
828 838
829fail: 839fail:
830 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 840 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
831 return rc; 841 return rc;
832} 842}
833 843
@@ -853,7 +863,7 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
853 return 0; 863 return 0;
854 864
855fail: 865fail:
856 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 866 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
857 return rc; 867 return rc;
858} 868}
859 869
@@ -877,7 +887,7 @@ int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
877 return 0; 887 return 0;
878 888
879fail: 889fail:
880 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 890 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
881 return rc; 891 return rc;
882} 892}
883 893
@@ -898,7 +908,7 @@ int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
898 return 0; 908 return 0;
899 909
900fail: 910fail:
901 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 911 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
902 return rc; 912 return rc;
903} 913}
904 914
@@ -948,9 +958,10 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
948 return 0; 958 return 0;
949 959
950fail2: 960fail2:
951 EFX_ERR(efx, "%s: failed type=%u\n", __func__, type); 961 netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
962 __func__, type);
952fail1: 963fail1:
953 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 964 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
954 return rc; 965 return rc;
955} 966}
956 967
@@ -994,14 +1005,15 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
994 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) 1005 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
995 ? "watchdog reset" 1006 ? "watchdog reset"
996 : "unknown assertion"; 1007 : "unknown assertion";
997 EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, 1008 netif_err(efx, hw, efx->net_dev,
998 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), 1009 "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
999 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); 1010 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
1011 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
1000 1012
1001 /* Print out the registers */ 1013 /* Print out the registers */
1002 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 1014 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
1003 for (index = 1; index < 32; index++) { 1015 for (index = 1; index < 32; index++) {
1004 EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index, 1016 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
1005 MCDI_DWORD2(outbuf, ofst)); 1017 MCDI_DWORD2(outbuf, ofst));
1006 ofst += sizeof(efx_dword_t); 1018 ofst += sizeof(efx_dword_t);
1007 } 1019 }
@@ -1050,14 +1062,16 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1050 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), 1062 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
1051 NULL, 0, NULL); 1063 NULL, 0, NULL);
1052 if (rc) 1064 if (rc)
1053 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 1065 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1066 __func__, rc);
1054} 1067}
1055 1068
1056int efx_mcdi_reset_port(struct efx_nic *efx) 1069int efx_mcdi_reset_port(struct efx_nic *efx)
1057{ 1070{
1058 int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); 1071 int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
1059 if (rc) 1072 if (rc)
1060 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 1073 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1074 __func__, rc);
1061 return rc; 1075 return rc;
1062} 1076}
1063 1077
@@ -1075,7 +1089,7 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
1075 return 0; 1089 return 0;
1076 if (rc == 0) 1090 if (rc == 0)
1077 rc = -EIO; 1091 rc = -EIO;
1078 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 1092 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1079 return rc; 1093 return rc;
1080} 1094}
1081 1095
@@ -1108,7 +1122,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1108 1122
1109fail: 1123fail:
1110 *id_out = -1; 1124 *id_out = -1;
1111 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 1125 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1112 return rc; 1126 return rc;
1113 1127
1114} 1128}
@@ -1143,7 +1157,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1143 1157
1144fail: 1158fail:
1145 *id_out = -1; 1159 *id_out = -1;
1146 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 1160 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1147 return rc; 1161 return rc;
1148} 1162}
1149 1163
@@ -1163,7 +1177,7 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1163 return 0; 1177 return 0;
1164 1178
1165fail: 1179fail:
1166 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 1180 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1167 return rc; 1181 return rc;
1168} 1182}
1169 1183
@@ -1179,7 +1193,7 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1179 return 0; 1193 return 0;
1180 1194
1181fail: 1195fail:
1182 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 1196 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1183 return rc; 1197 return rc;
1184} 1198}
1185 1199
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 39182631ac92..f88f4bf986ff 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -69,8 +69,8 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
69 return 0; 69 return 0;
70 70
71fail: 71fail:
72 EFX_ERR(efx, "%s: failed rc=%d\n", 72 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
73 __func__, rc); 73 __func__, rc);
74 return rc; 74 return rc;
75} 75}
76 76
@@ -110,8 +110,8 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
110 return 0; 110 return 0;
111 111
112fail: 112fail:
113 EFX_ERR(efx, "%s: %s failed rc=%d\n", 113 netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
114 __func__, enable ? "enable" : "disable", rc); 114 __func__, enable ? "enable" : "disable", rc);
115 return rc; 115 return rc;
116} 116}
117 117
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 6032c0e1f1f8..0121e71702bf 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -20,7 +20,7 @@
20#include "nic.h" 20#include "nic.h"
21#include "selftest.h" 21#include "selftest.h"
22 22
23struct efx_mcdi_phy_cfg { 23struct efx_mcdi_phy_data {
24 u32 flags; 24 u32 flags;
25 u32 type; 25 u32 type;
26 u32 supported_cap; 26 u32 supported_cap;
@@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg {
35}; 35};
36 36
37static int 37static int
38efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg) 38efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
39{ 39{
40 u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; 40 u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
41 size_t outlen; 41 size_t outlen;
@@ -71,7 +71,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
71 return 0; 71 return 0;
72 72
73fail: 73fail:
74 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 74 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
75 return rc; 75 return rc;
76} 76}
77 77
@@ -97,7 +97,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
97 return 0; 97 return 0;
98 98
99fail: 99fail:
100 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 100 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
101 return rc; 101 return rc;
102} 102}
103 103
@@ -122,7 +122,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
122 return 0; 122 return 0;
123 123
124fail: 124fail:
125 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 125 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
126 return rc; 126 return rc;
127} 127}
128 128
@@ -150,7 +150,7 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
150 return 0; 150 return 0;
151 151
152fail: 152fail:
153 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 153 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
154 return rc; 154 return rc;
155} 155}
156 156
@@ -178,7 +178,7 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
178 return 0; 178 return 0;
179 179
180fail: 180fail:
181 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 181 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
182 return rc; 182 return rc;
183} 183}
184 184
@@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
259 259
260static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) 260static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
261{ 261{
262 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 262 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
263 enum efx_phy_mode mode, supported; 263 enum efx_phy_mode mode, supported;
264 u32 flags; 264 u32 flags;
265 265
@@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
307 307
308static int efx_mcdi_phy_probe(struct efx_nic *efx) 308static int efx_mcdi_phy_probe(struct efx_nic *efx)
309{ 309{
310 struct efx_mcdi_phy_cfg *phy_data; 310 struct efx_mcdi_phy_data *phy_data;
311 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 311 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
312 u32 caps; 312 u32 caps;
313 int rc; 313 int rc;
@@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
395 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; 395 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
396 if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) 396 if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
397 efx->wanted_fc |= EFX_FC_AUTO; 397 efx->wanted_fc |= EFX_FC_AUTO;
398 efx_link_set_wanted_fc(efx, efx->wanted_fc);
398 399
399 return 0; 400 return 0;
400 401
@@ -405,7 +406,7 @@ fail:
405 406
406int efx_mcdi_phy_reconfigure(struct efx_nic *efx) 407int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
407{ 408{
408 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 409 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
409 u32 caps = (efx->link_advertising ? 410 u32 caps = (efx->link_advertising ?
410 ethtool_to_mcdi_cap(efx->link_advertising) : 411 ethtool_to_mcdi_cap(efx->link_advertising) :
411 phy_cfg->forced_cap); 412 phy_cfg->forced_cap);
@@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
446 */ 447 */
447void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) 448void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
448{ 449{
449 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 450 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
450 u32 rmtadv; 451 u32 rmtadv;
451 452
452 /* The link partner capabilities are only relevent if the 453 /* The link partner capabilities are only relevent if the
@@ -465,8 +466,8 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
465 rmtadv |= ADVERTISED_Asym_Pause; 466 rmtadv |= ADVERTISED_Asym_Pause;
466 467
467 if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) 468 if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
468 EFX_ERR(efx, "warning: link partner doesn't support " 469 netif_err(efx, link, efx->net_dev,
469 "pause frames"); 470 "warning: link partner doesn't support pause frames");
470} 471}
471 472
472static bool efx_mcdi_phy_poll(struct efx_nic *efx) 473static bool efx_mcdi_phy_poll(struct efx_nic *efx)
@@ -482,7 +483,8 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
482 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, 483 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
483 outbuf, sizeof(outbuf), NULL); 484 outbuf, sizeof(outbuf), NULL);
484 if (rc) { 485 if (rc) {
485 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 486 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
487 __func__, rc);
486 efx->link_state.up = false; 488 efx->link_state.up = false;
487 } else { 489 } else {
488 efx_mcdi_phy_decode_link( 490 efx_mcdi_phy_decode_link(
@@ -505,7 +507,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
505 507
506static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 508static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
507{ 509{
508 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 510 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
509 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 511 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
510 int rc; 512 int rc;
511 513
@@ -525,7 +527,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
525 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, 527 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
526 outbuf, sizeof(outbuf), NULL); 528 outbuf, sizeof(outbuf), NULL);
527 if (rc) { 529 if (rc) {
528 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); 530 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
531 __func__, rc);
529 return; 532 return;
530 } 533 }
531 ecmd->lp_advertising = 534 ecmd->lp_advertising =
@@ -535,7 +538,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
535 538
536static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 539static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
537{ 540{
538 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 541 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
539 u32 caps; 542 u32 caps;
540 int rc; 543 int rc;
541 544
@@ -674,7 +677,7 @@ out:
674static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, 677static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
675 unsigned flags) 678 unsigned flags)
676{ 679{
677 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 680 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
678 u32 mode; 681 u32 mode;
679 int rc; 682 int rc;
680 683
@@ -712,7 +715,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
712 715
713const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) 716const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
714{ 717{
715 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
716 719
717 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { 720 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
718 if (index == 0) 721 if (index == 0)
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 0548fcbbdcd0..eeaf0bd64bd3 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -63,7 +63,8 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
63 /* Read MMD STATUS2 to check it is responding. */ 63 /* Read MMD STATUS2 to check it is responding. */
64 status = efx_mdio_read(efx, mmd, MDIO_STAT2); 64 status = efx_mdio_read(efx, mmd, MDIO_STAT2);
65 if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) { 65 if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
66 EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd); 66 netif_err(efx, hw, efx->net_dev,
67 "PHY MMD %d not responding.\n", mmd);
67 return -EIO; 68 return -EIO;
68 } 69 }
69 } 70 }
@@ -72,12 +73,14 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
72 status = efx_mdio_read(efx, mmd, MDIO_STAT1); 73 status = efx_mdio_read(efx, mmd, MDIO_STAT1);
73 if (status & MDIO_STAT1_FAULT) { 74 if (status & MDIO_STAT1_FAULT) {
74 if (fault_fatal) { 75 if (fault_fatal) {
75 EFX_ERR(efx, "PHY MMD %d reporting fatal" 76 netif_err(efx, hw, efx->net_dev,
76 " fault: status %x\n", mmd, status); 77 "PHY MMD %d reporting fatal"
78 " fault: status %x\n", mmd, status);
77 return -EIO; 79 return -EIO;
78 } else { 80 } else {
79 EFX_LOG(efx, "PHY MMD %d reporting status" 81 netif_dbg(efx, hw, efx->net_dev,
80 " %x (expected)\n", mmd, status); 82 "PHY MMD %d reporting status"
83 " %x (expected)\n", mmd, status);
81 } 84 }
82 } 85 }
83 return 0; 86 return 0;
@@ -103,8 +106,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
103 if (mask & 1) { 106 if (mask & 1) {
104 stat = efx_mdio_read(efx, mmd, MDIO_CTRL1); 107 stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
105 if (stat < 0) { 108 if (stat < 0) {
106 EFX_ERR(efx, "failed to read status of" 109 netif_err(efx, hw, efx->net_dev,
107 " MMD %d\n", mmd); 110 "failed to read status of"
111 " MMD %d\n", mmd);
108 return -EIO; 112 return -EIO;
109 } 113 }
110 if (stat & MDIO_CTRL1_RESET) 114 if (stat & MDIO_CTRL1_RESET)
@@ -119,8 +123,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
119 msleep(spintime); 123 msleep(spintime);
120 } 124 }
121 if (in_reset != 0) { 125 if (in_reset != 0) {
122 EFX_ERR(efx, "not all MMDs came out of reset in time." 126 netif_err(efx, hw, efx->net_dev,
123 " MMDs still in reset: %x\n", in_reset); 127 "not all MMDs came out of reset in time."
128 " MMDs still in reset: %x\n", in_reset);
124 rc = -ETIMEDOUT; 129 rc = -ETIMEDOUT;
125 } 130 }
126 return rc; 131 return rc;
@@ -142,16 +147,18 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
142 devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1); 147 devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
143 devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2); 148 devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
144 if (devs1 < 0 || devs2 < 0) { 149 if (devs1 < 0 || devs2 < 0) {
145 EFX_ERR(efx, "failed to read devices present\n"); 150 netif_err(efx, hw, efx->net_dev,
151 "failed to read devices present\n");
146 return -EIO; 152 return -EIO;
147 } 153 }
148 devices = devs1 | (devs2 << 16); 154 devices = devs1 | (devs2 << 16);
149 if ((devices & mmd_mask) != mmd_mask) { 155 if ((devices & mmd_mask) != mmd_mask) {
150 EFX_ERR(efx, "required MMDs not present: got %x, " 156 netif_err(efx, hw, efx->net_dev,
151 "wanted %x\n", devices, mmd_mask); 157 "required MMDs not present: got %x, wanted %x\n",
158 devices, mmd_mask);
152 return -ENODEV; 159 return -ENODEV;
153 } 160 }
154 EFX_TRACE(efx, "Devices present: %x\n", devices); 161 netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
155 162
156 /* Check all required MMDs are responding and happy. */ 163 /* Check all required MMDs are responding and happy. */
157 while (mmd_mask) { 164 while (mmd_mask) {
@@ -219,7 +226,7 @@ static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
219{ 226{
220 int stat = efx_mdio_read(efx, mmd, MDIO_STAT1); 227 int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
221 228
222 EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n", 229 netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
223 mmd, lpower); 230 mmd, lpower);
224 231
225 if (stat & MDIO_STAT1_LPOWERABLE) { 232 if (stat & MDIO_STAT1_LPOWERABLE) {
@@ -349,8 +356,8 @@ int efx_mdio_test_alive(struct efx_nic *efx)
349 356
350 if ((physid1 == 0x0000) || (physid1 == 0xffff) || 357 if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
351 (physid2 == 0x0000) || (physid2 == 0xffff)) { 358 (physid2 == 0x0000) || (physid2 == 0xffff)) {
352 EFX_ERR(efx, "no MDIO PHY present with ID %d\n", 359 netif_err(efx, hw, efx->net_dev,
353 efx->mdio.prtad); 360 "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
354 rc = -EINVAL; 361 rc = -EINVAL;
355 } else { 362 } else {
356 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); 363 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f89e71929603..75791d3d4963 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -51,7 +51,8 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
51 51
52 sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN); 52 sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
53 if (!sync) 53 if (!sync)
54 EFX_LOG(efx, "XGXS lane status: %x\n", lane_status); 54 netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
55 lane_status);
55 return sync; 56 return sync;
56} 57}
57 58
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index f3ac7f30b5e7..02e54b4f701f 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -15,7 +15,6 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17 17
18#define EFX_DRIVER_NAME "sfc_mtd"
19#include "net_driver.h" 18#include "net_driver.h"
20#include "spi.h" 19#include "spi.h"
21#include "efx.h" 20#include "efx.h"
@@ -71,8 +70,10 @@ static int siena_mtd_probe(struct efx_nic *efx);
71 70
72/* SPI utilities */ 71/* SPI utilities */
73 72
74static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) 73static int
74efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
75{ 75{
76 struct efx_mtd *efx_mtd = part->mtd.priv;
76 const struct efx_spi_device *spi = efx_mtd->spi; 77 const struct efx_spi_device *spi = efx_mtd->spi;
77 struct efx_nic *efx = efx_mtd->efx; 78 struct efx_nic *efx = efx_mtd->efx;
78 u8 status; 79 u8 status;
@@ -92,7 +93,7 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
92 if (signal_pending(current)) 93 if (signal_pending(current))
93 return -EINTR; 94 return -EINTR;
94 } 95 }
95 EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name); 96 pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
96 return -ETIMEDOUT; 97 return -ETIMEDOUT;
97} 98}
98 99
@@ -131,8 +132,10 @@ efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
131 return 0; 132 return 0;
132} 133}
133 134
134static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) 135static int
136efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
135{ 137{
138 struct efx_mtd *efx_mtd = part->mtd.priv;
136 const struct efx_spi_device *spi = efx_mtd->spi; 139 const struct efx_spi_device *spi = efx_mtd->spi;
137 struct efx_nic *efx = efx_mtd->efx; 140 struct efx_nic *efx = efx_mtd->efx;
138 unsigned pos, block_len; 141 unsigned pos, block_len;
@@ -156,7 +159,7 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
156 NULL, 0); 159 NULL, 0);
157 if (rc) 160 if (rc)
158 return rc; 161 return rc;
159 rc = efx_spi_slow_wait(efx_mtd, false); 162 rc = efx_spi_slow_wait(part, false);
160 163
161 /* Verify the entire region has been wiped */ 164 /* Verify the entire region has been wiped */
162 memset(empty, 0xff, sizeof(empty)); 165 memset(empty, 0xff, sizeof(empty));
@@ -198,13 +201,14 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
198 201
199static void efx_mtd_sync(struct mtd_info *mtd) 202static void efx_mtd_sync(struct mtd_info *mtd)
200{ 203{
204 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
201 struct efx_mtd *efx_mtd = mtd->priv; 205 struct efx_mtd *efx_mtd = mtd->priv;
202 struct efx_nic *efx = efx_mtd->efx;
203 int rc; 206 int rc;
204 207
205 rc = efx_mtd->ops->sync(mtd); 208 rc = efx_mtd->ops->sync(mtd);
206 if (rc) 209 if (rc)
207 EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc); 210 pr_err("%s: %s sync failed (%d)\n",
211 part->name, efx_mtd->name, rc);
208} 212}
209 213
210static void efx_mtd_remove_partition(struct efx_mtd_partition *part) 214static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -338,7 +342,7 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
338 rc = mutex_lock_interruptible(&efx->spi_lock); 342 rc = mutex_lock_interruptible(&efx->spi_lock);
339 if (rc) 343 if (rc)
340 return rc; 344 return rc;
341 rc = efx_spi_erase(efx_mtd, part->offset + start, len); 345 rc = efx_spi_erase(part, part->offset + start, len);
342 mutex_unlock(&efx->spi_lock); 346 mutex_unlock(&efx->spi_lock);
343 return rc; 347 return rc;
344} 348}
@@ -363,12 +367,13 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
363 367
364static int falcon_mtd_sync(struct mtd_info *mtd) 368static int falcon_mtd_sync(struct mtd_info *mtd)
365{ 369{
370 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
366 struct efx_mtd *efx_mtd = mtd->priv; 371 struct efx_mtd *efx_mtd = mtd->priv;
367 struct efx_nic *efx = efx_mtd->efx; 372 struct efx_nic *efx = efx_mtd->efx;
368 int rc; 373 int rc;
369 374
370 mutex_lock(&efx->spi_lock); 375 mutex_lock(&efx->spi_lock);
371 rc = efx_spi_slow_wait(efx_mtd, true); 376 rc = efx_spi_slow_wait(part, true);
372 mutex_unlock(&efx->spi_lock); 377 mutex_unlock(&efx->spi_lock);
373 return rc; 378 return rc;
374} 379}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 4762c91cb587..64e7caa4bbb5 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -13,11 +13,16 @@
13#ifndef EFX_NET_DRIVER_H 13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H 14#define EFX_NET_DRIVER_H
15 15
16#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
17#define DEBUG
18#endif
19
16#include <linux/version.h> 20#include <linux/version.h>
17#include <linux/netdevice.h> 21#include <linux/netdevice.h>
18#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
19#include <linux/ethtool.h> 23#include <linux/ethtool.h>
20#include <linux/if_vlan.h> 24#include <linux/if_vlan.h>
25#include <linux/timer.h>
21#include <linux/mdio.h> 26#include <linux/mdio.h>
22#include <linux/list.h> 27#include <linux/list.h>
23#include <linux/pci.h> 28#include <linux/pci.h>
@@ -34,9 +39,7 @@
34 * Build definitions 39 * Build definitions
35 * 40 *
36 **************************************************************************/ 41 **************************************************************************/
37#ifndef EFX_DRIVER_NAME 42
38#define EFX_DRIVER_NAME "sfc"
39#endif
40#define EFX_DRIVER_VERSION "3.0" 43#define EFX_DRIVER_VERSION "3.0"
41 44
42#ifdef EFX_ENABLE_DEBUG 45#ifdef EFX_ENABLE_DEBUG
@@ -47,35 +50,6 @@
47#define EFX_WARN_ON_PARANOID(x) do {} while (0) 50#define EFX_WARN_ON_PARANOID(x) do {} while (0)
48#endif 51#endif
49 52
50/* Un-rate-limited logging */
51#define EFX_ERR(efx, fmt, args...) \
52dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
53
54#define EFX_INFO(efx, fmt, args...) \
55dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
56
57#ifdef EFX_ENABLE_DEBUG
58#define EFX_LOG(efx, fmt, args...) \
59dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
60#else
61#define EFX_LOG(efx, fmt, args...) \
62dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
63#endif
64
65#define EFX_TRACE(efx, fmt, args...) do {} while (0)
66
67#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
68
69/* Rate-limited logging */
70#define EFX_ERR_RL(efx, fmt, args...) \
71do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
72
73#define EFX_INFO_RL(efx, fmt, args...) \
74do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
75
76#define EFX_LOG_RL(efx, fmt, args...) \
77do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
78
79/************************************************************************** 53/**************************************************************************
80 * 54 *
81 * Efx data structures 55 * Efx data structures
@@ -221,7 +195,6 @@ struct efx_tx_queue {
221 * If both this and skb are %NULL, the buffer slot is currently free. 195 * If both this and skb are %NULL, the buffer slot is currently free.
222 * @data: Pointer to ethernet header 196 * @data: Pointer to ethernet header
223 * @len: Buffer length, in bytes. 197 * @len: Buffer length, in bytes.
224 * @unmap_addr: DMA address to unmap
225 */ 198 */
226struct efx_rx_buffer { 199struct efx_rx_buffer {
227 dma_addr_t dma_addr; 200 dma_addr_t dma_addr;
@@ -229,7 +202,24 @@ struct efx_rx_buffer {
229 struct page *page; 202 struct page *page;
230 char *data; 203 char *data;
231 unsigned int len; 204 unsigned int len;
232 dma_addr_t unmap_addr; 205};
206
207/**
208 * struct efx_rx_page_state - Page-based rx buffer state
209 *
210 * Inserted at the start of every page allocated for receive buffers.
211 * Used to facilitate sharing dma mappings between recycled rx buffers
212 * and those passed up to the kernel.
213 *
214 * @refcnt: Number of struct efx_rx_buffer's referencing this page.
215 * When refcnt falls to zero, the page is unmapped for dma
216 * @dma_addr: The dma address of this page.
217 */
218struct efx_rx_page_state {
219 unsigned refcnt;
220 dma_addr_t dma_addr;
221
222 unsigned int __pad[0] ____cacheline_aligned;
233}; 223};
234 224
235/** 225/**
@@ -242,10 +232,6 @@ struct efx_rx_buffer {
242 * @added_count: Number of buffers added to the receive queue. 232 * @added_count: Number of buffers added to the receive queue.
243 * @notified_count: Number of buffers given to NIC (<= @added_count). 233 * @notified_count: Number of buffers given to NIC (<= @added_count).
244 * @removed_count: Number of buffers removed from the receive queue. 234 * @removed_count: Number of buffers removed from the receive queue.
245 * @add_lock: Receive queue descriptor add spin lock.
246 * This lock must be held in order to add buffers to the RX
247 * descriptor ring (rxd and buffer) and to update added_count (but
248 * not removed_count).
249 * @max_fill: RX descriptor maximum fill level (<= ring size) 235 * @max_fill: RX descriptor maximum fill level (<= ring size)
250 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill 236 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
251 * (<= @max_fill) 237 * (<= @max_fill)
@@ -259,12 +245,7 @@ struct efx_rx_buffer {
259 * overflow was observed. It should never be set. 245 * overflow was observed. It should never be set.
260 * @alloc_page_count: RX allocation strategy counter. 246 * @alloc_page_count: RX allocation strategy counter.
261 * @alloc_skb_count: RX allocation strategy counter. 247 * @alloc_skb_count: RX allocation strategy counter.
262 * @work: Descriptor push work thread 248 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
263 * @buf_page: Page for next RX buffer.
264 * We can use a single page for multiple RX buffers. This tracks
265 * the remaining space in the allocation.
266 * @buf_dma_addr: Page's DMA address.
267 * @buf_data: Page's host address.
268 * @flushed: Use when handling queue flushing 249 * @flushed: Use when handling queue flushing
269 */ 250 */
270struct efx_rx_queue { 251struct efx_rx_queue {
@@ -277,7 +258,6 @@ struct efx_rx_queue {
277 int added_count; 258 int added_count;
278 int notified_count; 259 int notified_count;
279 int removed_count; 260 int removed_count;
280 spinlock_t add_lock;
281 unsigned int max_fill; 261 unsigned int max_fill;
282 unsigned int fast_fill_trigger; 262 unsigned int fast_fill_trigger;
283 unsigned int fast_fill_limit; 263 unsigned int fast_fill_limit;
@@ -285,12 +265,9 @@ struct efx_rx_queue {
285 unsigned int min_overfill; 265 unsigned int min_overfill;
286 unsigned int alloc_page_count; 266 unsigned int alloc_page_count;
287 unsigned int alloc_skb_count; 267 unsigned int alloc_skb_count;
288 struct delayed_work work; 268 struct timer_list slow_fill;
289 unsigned int slow_fill_count; 269 unsigned int slow_fill_count;
290 270
291 struct page *buf_page;
292 dma_addr_t buf_dma_addr;
293 char *buf_data;
294 enum efx_flush_state flushed; 271 enum efx_flush_state flushed;
295}; 272};
296 273
@@ -336,7 +313,7 @@ enum efx_rx_alloc_method {
336 * @eventq: Event queue buffer 313 * @eventq: Event queue buffer
337 * @eventq_read_ptr: Event queue read pointer 314 * @eventq_read_ptr: Event queue read pointer
338 * @last_eventq_read_ptr: Last event queue read pointer value. 315 * @last_eventq_read_ptr: Last event queue read pointer value.
339 * @eventq_magic: Event queue magic value for driver-generated test events 316 * @magic_count: Event queue test event count
340 * @irq_count: Number of IRQs since last adaptive moderation decision 317 * @irq_count: Number of IRQs since last adaptive moderation decision
341 * @irq_mod_score: IRQ moderation score 318 * @irq_mod_score: IRQ moderation score
342 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 319 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -367,7 +344,7 @@ struct efx_channel {
367 struct efx_special_buffer eventq; 344 struct efx_special_buffer eventq;
368 unsigned int eventq_read_ptr; 345 unsigned int eventq_read_ptr;
369 unsigned int last_eventq_read_ptr; 346 unsigned int last_eventq_read_ptr;
370 unsigned int eventq_magic; 347 unsigned int magic_count;
371 348
372 unsigned int irq_count; 349 unsigned int irq_count;
373 unsigned int irq_mod_score; 350 unsigned int irq_mod_score;
@@ -658,6 +635,7 @@ union efx_multicast_hash {
658 * @interrupt_mode: Interrupt mode 635 * @interrupt_mode: Interrupt mode
659 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 636 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
660 * @irq_rx_moderation: IRQ moderation time for RX event queues 637 * @irq_rx_moderation: IRQ moderation time for RX event queues
638 * @msg_enable: Log message enable flags
661 * @state: Device state flag. Serialised by the rtnl_lock. 639 * @state: Device state flag. Serialised by the rtnl_lock.
662 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) 640 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
663 * @tx_queue: TX DMA queues 641 * @tx_queue: TX DMA queues
@@ -669,6 +647,7 @@ union efx_multicast_hash {
669 * @n_tx_channels: Number of channels used for TX 647 * @n_tx_channels: Number of channels used for TX
670 * @rx_buffer_len: RX buffer length 648 * @rx_buffer_len: RX buffer length
671 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 649 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
650 * @rx_indir_table: Indirection table for RSS
672 * @int_error_count: Number of internal errors seen recently 651 * @int_error_count: Number of internal errors seen recently
673 * @int_error_expire: Time at which error count will be expired 652 * @int_error_expire: Time at which error count will be expired
674 * @irq_status: Interrupt status buffer 653 * @irq_status: Interrupt status buffer
@@ -740,6 +719,7 @@ struct efx_nic {
740 enum efx_int_mode interrupt_mode; 719 enum efx_int_mode interrupt_mode;
741 bool irq_rx_adaptive; 720 bool irq_rx_adaptive;
742 unsigned int irq_rx_moderation; 721 unsigned int irq_rx_moderation;
722 u32 msg_enable;
743 723
744 enum nic_state state; 724 enum nic_state state;
745 enum reset_type reset_pending; 725 enum reset_type reset_pending;
@@ -754,6 +734,8 @@ struct efx_nic {
754 unsigned n_tx_channels; 734 unsigned n_tx_channels;
755 unsigned int rx_buffer_len; 735 unsigned int rx_buffer_len;
756 unsigned int rx_buffer_order; 736 unsigned int rx_buffer_order;
737 u8 rx_hash_key[40];
738 u32 rx_indir_table[128];
757 739
758 unsigned int_error_count; 740 unsigned int_error_count;
759 unsigned long int_error_expire; 741 unsigned long int_error_expire;
@@ -866,7 +848,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
866 * @evq_ptr_tbl_base: Event queue pointer table base address 848 * @evq_ptr_tbl_base: Event queue pointer table base address
867 * @evq_rptr_tbl_base: Event queue read-pointer table base address 849 * @evq_rptr_tbl_base: Event queue read-pointer table base address
868 * @max_dma_mask: Maximum possible DMA mask 850 * @max_dma_mask: Maximum possible DMA mask
869 * @rx_buffer_padding: Padding added to each RX buffer 851 * @rx_buffer_hash_size: Size of hash at start of RX buffer
852 * @rx_buffer_padding: Size of padding at end of RX buffer
870 * @max_interrupt_mode: Highest capability interrupt mode supported 853 * @max_interrupt_mode: Highest capability interrupt mode supported
871 * from &enum efx_init_mode. 854 * from &enum efx_init_mode.
872 * @phys_addr_channels: Number of channels with physically addressed 855 * @phys_addr_channels: Number of channels with physically addressed
@@ -910,6 +893,7 @@ struct efx_nic_type {
910 unsigned int evq_ptr_tbl_base; 893 unsigned int evq_ptr_tbl_base;
911 unsigned int evq_rptr_tbl_base; 894 unsigned int evq_rptr_tbl_base;
912 u64 max_dma_mask; 895 u64 max_dma_mask;
896 unsigned int rx_buffer_hash_size;
913 unsigned int rx_buffer_padding; 897 unsigned int rx_buffer_padding;
914 unsigned int max_interrupt_mode; 898 unsigned int max_interrupt_mode;
915 unsigned int phys_addr_channels; 899 unsigned int phys_addr_channels;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 5d3aaec58556..f595d920c7c4 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
79/* Depth of RX flush request fifo */ 79/* Depth of RX flush request fifo */
80#define EFX_RX_FLUSH_COUNT 4 80#define EFX_RX_FLUSH_COUNT 4
81 81
82/* Generated event code for efx_generate_test_event() */
83#define EFX_CHANNEL_MAGIC_TEST(_channel) \
84 (0x00010100 + (_channel)->channel)
85
86/* Generated event code for efx_generate_fill_event() */
87#define EFX_CHANNEL_MAGIC_FILL(_channel) \
88 (0x00010200 + (_channel)->channel)
89
82/************************************************************************** 90/**************************************************************************
83 * 91 *
84 * Solarstorm hardware access 92 * Solarstorm hardware access
@@ -171,9 +179,10 @@ int efx_nic_test_registers(struct efx_nic *efx,
171 return 0; 179 return 0;
172 180
173fail: 181fail:
174 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT 182 netif_err(efx, hw, efx->net_dev,
175 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), 183 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
176 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); 184 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
185 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
177 return -EIO; 186 return -EIO;
178} 187}
179 188
@@ -206,8 +215,9 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
206 for (i = 0; i < buffer->entries; i++) { 215 for (i = 0; i < buffer->entries; i++) {
207 index = buffer->index + i; 216 index = buffer->index + i;
208 dma_addr = buffer->dma_addr + (i * 4096); 217 dma_addr = buffer->dma_addr + (i * 4096);
209 EFX_LOG(efx, "mapping special buffer %d at %llx\n", 218 netif_dbg(efx, probe, efx->net_dev,
210 index, (unsigned long long)dma_addr); 219 "mapping special buffer %d at %llx\n",
220 index, (unsigned long long)dma_addr);
211 EFX_POPULATE_QWORD_3(buf_desc, 221 EFX_POPULATE_QWORD_3(buf_desc,
212 FRF_AZ_BUF_ADR_REGION, 0, 222 FRF_AZ_BUF_ADR_REGION, 0,
213 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, 223 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
@@ -227,8 +237,8 @@ efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
227 if (!buffer->entries) 237 if (!buffer->entries)
228 return; 238 return;
229 239
230 EFX_LOG(efx, "unmapping special buffers %d-%d\n", 240 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
231 buffer->index, buffer->index + buffer->entries - 1); 241 buffer->index, buffer->index + buffer->entries - 1);
232 242
233 EFX_POPULATE_OWORD_4(buf_tbl_upd, 243 EFX_POPULATE_OWORD_4(buf_tbl_upd,
234 FRF_AZ_BUF_UPD_CMD, 0, 244 FRF_AZ_BUF_UPD_CMD, 0,
@@ -268,11 +278,12 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
268 buffer->index = efx->next_buffer_table; 278 buffer->index = efx->next_buffer_table;
269 efx->next_buffer_table += buffer->entries; 279 efx->next_buffer_table += buffer->entries;
270 280
271 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " 281 netif_dbg(efx, probe, efx->net_dev,
272 "(virt %p phys %llx)\n", buffer->index, 282 "allocating special buffers %d-%d at %llx+%x "
273 buffer->index + buffer->entries - 1, 283 "(virt %p phys %llx)\n", buffer->index,
274 (u64)buffer->dma_addr, len, 284 buffer->index + buffer->entries - 1,
275 buffer->addr, (u64)virt_to_phys(buffer->addr)); 285 (u64)buffer->dma_addr, len,
286 buffer->addr, (u64)virt_to_phys(buffer->addr));
276 287
277 return 0; 288 return 0;
278} 289}
@@ -283,11 +294,12 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
283 if (!buffer->addr) 294 if (!buffer->addr)
284 return; 295 return;
285 296
286 EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " 297 netif_dbg(efx, hw, efx->net_dev,
287 "(virt %p phys %llx)\n", buffer->index, 298 "deallocating special buffers %d-%d at %llx+%x "
288 buffer->index + buffer->entries - 1, 299 "(virt %p phys %llx)\n", buffer->index,
289 (u64)buffer->dma_addr, buffer->len, 300 buffer->index + buffer->entries - 1,
290 buffer->addr, (u64)virt_to_phys(buffer->addr)); 301 (u64)buffer->dma_addr, buffer->len,
302 buffer->addr, (u64)virt_to_phys(buffer->addr));
291 303
292 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, 304 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
293 buffer->dma_addr); 305 buffer->dma_addr);
@@ -547,9 +559,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
547 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; 559 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
548 bool iscsi_digest_en = is_b0; 560 bool iscsi_digest_en = is_b0;
549 561
550 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", 562 netif_dbg(efx, hw, efx->net_dev,
551 rx_queue->queue, rx_queue->rxd.index, 563 "RX queue %d ring in special buffers %d-%d\n",
552 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 564 rx_queue->queue, rx_queue->rxd.index,
565 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
553 566
554 rx_queue->flushed = FLUSH_NONE; 567 rx_queue->flushed = FLUSH_NONE;
555 568
@@ -686,9 +699,10 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
686 EFX_WORKAROUND_10727(efx)) { 699 EFX_WORKAROUND_10727(efx)) {
687 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 700 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
688 } else { 701 } else {
689 EFX_ERR(efx, "channel %d unexpected TX event " 702 netif_err(efx, tx_err, efx->net_dev,
690 EFX_QWORD_FMT"\n", channel->channel, 703 "channel %d unexpected TX event "
691 EFX_QWORD_VAL(*event)); 704 EFX_QWORD_FMT"\n", channel->channel,
705 EFX_QWORD_VAL(*event));
692 } 706 }
693 707
694 return tx_packets; 708 return tx_packets;
@@ -751,20 +765,21 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
751 * to a FIFO overflow. 765 * to a FIFO overflow.
752 */ 766 */
753#ifdef EFX_ENABLE_DEBUG 767#ifdef EFX_ENABLE_DEBUG
754 if (rx_ev_other_err) { 768 if (rx_ev_other_err && net_ratelimit()) {
755 EFX_INFO_RL(efx, " RX queue %d unexpected RX event " 769 netif_dbg(efx, rx_err, efx->net_dev,
756 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 770 " RX queue %d unexpected RX event "
757 rx_queue->queue, EFX_QWORD_VAL(*event), 771 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
758 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 772 rx_queue->queue, EFX_QWORD_VAL(*event),
759 rx_ev_ip_hdr_chksum_err ? 773 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
760 " [IP_HDR_CHKSUM_ERR]" : "", 774 rx_ev_ip_hdr_chksum_err ?
761 rx_ev_tcp_udp_chksum_err ? 775 " [IP_HDR_CHKSUM_ERR]" : "",
762 " [TCP_UDP_CHKSUM_ERR]" : "", 776 rx_ev_tcp_udp_chksum_err ?
763 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 777 " [TCP_UDP_CHKSUM_ERR]" : "",
764 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 778 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
765 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 779 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
766 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 780 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
767 rx_ev_pause_frm ? " [PAUSE]" : ""); 781 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
782 rx_ev_pause_frm ? " [PAUSE]" : "");
768 } 783 }
769#endif 784#endif
770} 785}
@@ -778,8 +793,9 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
778 793
779 expected = rx_queue->removed_count & EFX_RXQ_MASK; 794 expected = rx_queue->removed_count & EFX_RXQ_MASK;
780 dropped = (index - expected) & EFX_RXQ_MASK; 795 dropped = (index - expected) & EFX_RXQ_MASK;
781 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", 796 netif_info(efx, rx_err, efx->net_dev,
782 dropped, index, expected); 797 "dropped %d events (index=%d expected=%d)\n",
798 dropped, index, expected);
783 799
784 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 800 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
785 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 801 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
@@ -850,6 +866,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
850 checksummed, discard); 866 checksummed, discard);
851} 867}
852 868
869static void
870efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
871{
872 struct efx_nic *efx = channel->efx;
873 unsigned code;
874
875 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
876 if (code == EFX_CHANNEL_MAGIC_TEST(channel))
877 ++channel->magic_count;
878 else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
879 /* The queue must be empty, so we won't receive any rx
880 * events, so efx_process_channel() won't refill the
881 * queue. Refill it here */
882 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
883 else
884 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
885 "generated event "EFX_QWORD_FMT"\n",
886 channel->channel, EFX_QWORD_VAL(*event));
887}
888
853/* Global events are basically PHY events */ 889/* Global events are basically PHY events */
854static void 890static void
855efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) 891efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
@@ -873,8 +909,9 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
873 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 909 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
874 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : 910 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
875 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { 911 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
876 EFX_ERR(efx, "channel %d seen global RX_RESET " 912 netif_err(efx, rx_err, efx->net_dev,
877 "event. Resetting.\n", channel->channel); 913 "channel %d seen global RX_RESET event. Resetting.\n",
914 channel->channel);
878 915
879 atomic_inc(&efx->rx_reset); 916 atomic_inc(&efx->rx_reset);
880 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? 917 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
@@ -883,9 +920,10 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
883 } 920 }
884 921
885 if (!handled) 922 if (!handled)
886 EFX_ERR(efx, "channel %d unknown global event " 923 netif_err(efx, hw, efx->net_dev,
887 EFX_QWORD_FMT "\n", channel->channel, 924 "channel %d unknown global event "
888 EFX_QWORD_VAL(*event)); 925 EFX_QWORD_FMT "\n", channel->channel,
926 EFX_QWORD_VAL(*event));
889} 927}
890 928
891static void 929static void
@@ -900,31 +938,35 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
900 938
901 switch (ev_sub_code) { 939 switch (ev_sub_code) {
902 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 940 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
903 EFX_TRACE(efx, "channel %d TXQ %d flushed\n", 941 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
904 channel->channel, ev_sub_data); 942 channel->channel, ev_sub_data);
905 break; 943 break;
906 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 944 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
907 EFX_TRACE(efx, "channel %d RXQ %d flushed\n", 945 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
908 channel->channel, ev_sub_data); 946 channel->channel, ev_sub_data);
909 break; 947 break;
910 case FSE_AZ_EVQ_INIT_DONE_EV: 948 case FSE_AZ_EVQ_INIT_DONE_EV:
911 EFX_LOG(efx, "channel %d EVQ %d initialised\n", 949 netif_dbg(efx, hw, efx->net_dev,
912 channel->channel, ev_sub_data); 950 "channel %d EVQ %d initialised\n",
951 channel->channel, ev_sub_data);
913 break; 952 break;
914 case FSE_AZ_SRM_UPD_DONE_EV: 953 case FSE_AZ_SRM_UPD_DONE_EV:
915 EFX_TRACE(efx, "channel %d SRAM update done\n", 954 netif_vdbg(efx, hw, efx->net_dev,
916 channel->channel); 955 "channel %d SRAM update done\n", channel->channel);
917 break; 956 break;
918 case FSE_AZ_WAKE_UP_EV: 957 case FSE_AZ_WAKE_UP_EV:
919 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", 958 netif_vdbg(efx, hw, efx->net_dev,
920 channel->channel, ev_sub_data); 959 "channel %d RXQ %d wakeup event\n",
960 channel->channel, ev_sub_data);
921 break; 961 break;
922 case FSE_AZ_TIMER_EV: 962 case FSE_AZ_TIMER_EV:
923 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", 963 netif_vdbg(efx, hw, efx->net_dev,
924 channel->channel, ev_sub_data); 964 "channel %d RX queue %d timer expired\n",
965 channel->channel, ev_sub_data);
925 break; 966 break;
926 case FSE_AA_RX_RECOVER_EV: 967 case FSE_AA_RX_RECOVER_EV:
927 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 968 netif_err(efx, rx_err, efx->net_dev,
969 "channel %d seen DRIVER RX_RESET event. "
928 "Resetting.\n", channel->channel); 970 "Resetting.\n", channel->channel);
929 atomic_inc(&efx->rx_reset); 971 atomic_inc(&efx->rx_reset);
930 efx_schedule_reset(efx, 972 efx_schedule_reset(efx,
@@ -933,19 +975,22 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
933 RESET_TYPE_DISABLE); 975 RESET_TYPE_DISABLE);
934 break; 976 break;
935 case FSE_BZ_RX_DSC_ERROR_EV: 977 case FSE_BZ_RX_DSC_ERROR_EV:
936 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." 978 netif_err(efx, rx_err, efx->net_dev,
937 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 979 "RX DMA Q %d reports descriptor fetch error."
980 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
938 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 981 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
939 break; 982 break;
940 case FSE_BZ_TX_DSC_ERROR_EV: 983 case FSE_BZ_TX_DSC_ERROR_EV:
941 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." 984 netif_err(efx, tx_err, efx->net_dev,
942 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 985 "TX DMA Q %d reports descriptor fetch error."
986 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
943 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 987 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
944 break; 988 break;
945 default: 989 default:
946 EFX_TRACE(efx, "channel %d unknown driver event code %d " 990 netif_vdbg(efx, hw, efx->net_dev,
947 "data %04x\n", channel->channel, ev_sub_code, 991 "channel %d unknown driver event code %d "
948 ev_sub_data); 992 "data %04x\n", channel->channel, ev_sub_code,
993 ev_sub_data);
949 break; 994 break;
950 } 995 }
951} 996}
@@ -968,8 +1013,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
968 /* End of events */ 1013 /* End of events */
969 break; 1014 break;
970 1015
971 EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n", 1016 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
972 channel->channel, EFX_QWORD_VAL(event)); 1017 "channel %d event is "EFX_QWORD_FMT"\n",
1018 channel->channel, EFX_QWORD_VAL(event));
973 1019
974 /* Clear this event by marking it all ones */ 1020 /* Clear this event by marking it all ones */
975 EFX_SET_QWORD(*p_event); 1021 EFX_SET_QWORD(*p_event);
@@ -993,11 +1039,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
993 } 1039 }
994 break; 1040 break;
995 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1041 case FSE_AZ_EV_CODE_DRV_GEN_EV:
996 channel->eventq_magic = EFX_QWORD_FIELD( 1042 efx_handle_generated_event(channel, &event);
997 event, FSF_AZ_DRV_GEN_EV_MAGIC);
998 EFX_LOG(channel->efx, "channel %d received generated "
999 "event "EFX_QWORD_FMT"\n", channel->channel,
1000 EFX_QWORD_VAL(event));
1001 break; 1043 break;
1002 case FSE_AZ_EV_CODE_GLOBAL_EV: 1044 case FSE_AZ_EV_CODE_GLOBAL_EV:
1003 efx_handle_global_event(channel, &event); 1045 efx_handle_global_event(channel, &event);
@@ -1009,9 +1051,10 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1009 efx_mcdi_process_event(channel, &event); 1051 efx_mcdi_process_event(channel, &event);
1010 break; 1052 break;
1011 default: 1053 default:
1012 EFX_ERR(channel->efx, "channel %d unknown event type %d" 1054 netif_err(channel->efx, hw, channel->efx->net_dev,
1013 " (data " EFX_QWORD_FMT ")\n", channel->channel, 1055 "channel %d unknown event type %d (data "
1014 ev_code, EFX_QWORD_VAL(event)); 1056 EFX_QWORD_FMT ")\n", channel->channel,
1057 ev_code, EFX_QWORD_VAL(event));
1015 } 1058 }
1016 } 1059 }
1017 1060
@@ -1036,9 +1079,10 @@ void efx_nic_init_eventq(struct efx_channel *channel)
1036 efx_oword_t reg; 1079 efx_oword_t reg;
1037 struct efx_nic *efx = channel->efx; 1080 struct efx_nic *efx = channel->efx;
1038 1081
1039 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", 1082 netif_dbg(efx, hw, efx->net_dev,
1040 channel->channel, channel->eventq.index, 1083 "channel %d event queue in special buffers %d-%d\n",
1041 channel->eventq.index + channel->eventq.entries - 1); 1084 channel->channel, channel->eventq.index,
1085 channel->eventq.index + channel->eventq.entries - 1);
1042 1086
1043 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1087 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1044 EFX_POPULATE_OWORD_3(reg, 1088 EFX_POPULATE_OWORD_3(reg,
@@ -1088,12 +1132,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
1088} 1132}
1089 1133
1090 1134
1091/* Generates a test event on the event queue. A subsequent call to 1135void efx_nic_generate_test_event(struct efx_channel *channel)
1092 * process_eventq() should pick up the event and place the value of
1093 * "magic" into channel->eventq_magic;
1094 */
1095void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
1096{ 1136{
1137 unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
1138 efx_qword_t test_event;
1139
1140 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1141 FSE_AZ_EV_CODE_DRV_GEN_EV,
1142 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1143 efx_generate_event(channel, &test_event);
1144}
1145
1146void efx_nic_generate_fill_event(struct efx_channel *channel)
1147{
1148 unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
1097 efx_qword_t test_event; 1149 efx_qword_t test_event;
1098 1150
1099 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, 1151 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
@@ -1208,20 +1260,19 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1208 * leading to a reset, or fake up success anyway */ 1260 * leading to a reset, or fake up success anyway */
1209 efx_for_each_tx_queue(tx_queue, efx) { 1261 efx_for_each_tx_queue(tx_queue, efx) {
1210 if (tx_queue->flushed != FLUSH_DONE) 1262 if (tx_queue->flushed != FLUSH_DONE)
1211 EFX_ERR(efx, "tx queue %d flush command timed out\n", 1263 netif_err(efx, hw, efx->net_dev,
1212 tx_queue->queue); 1264 "tx queue %d flush command timed out\n",
1265 tx_queue->queue);
1213 tx_queue->flushed = FLUSH_DONE; 1266 tx_queue->flushed = FLUSH_DONE;
1214 } 1267 }
1215 efx_for_each_rx_queue(rx_queue, efx) { 1268 efx_for_each_rx_queue(rx_queue, efx) {
1216 if (rx_queue->flushed != FLUSH_DONE) 1269 if (rx_queue->flushed != FLUSH_DONE)
1217 EFX_ERR(efx, "rx queue %d flush command timed out\n", 1270 netif_err(efx, hw, efx->net_dev,
1218 rx_queue->queue); 1271 "rx queue %d flush command timed out\n",
1272 rx_queue->queue);
1219 rx_queue->flushed = FLUSH_DONE; 1273 rx_queue->flushed = FLUSH_DONE;
1220 } 1274 }
1221 1275
1222 if (EFX_WORKAROUND_7803(efx))
1223 return 0;
1224
1225 return -ETIMEDOUT; 1276 return -ETIMEDOUT;
1226} 1277}
1227 1278
@@ -1290,10 +1341,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1290 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1341 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1291 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1342 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1292 1343
1293 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " 1344 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1294 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1345 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1295 EFX_OWORD_VAL(fatal_intr), 1346 EFX_OWORD_VAL(fatal_intr),
1296 error ? "disabling bus mastering" : "no recognised error"); 1347 error ? "disabling bus mastering" : "no recognised error");
1297 1348
1298 /* If this is a memory parity error dump which blocks are offending */ 1349 /* If this is a memory parity error dump which blocks are offending */
1299 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1350 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
@@ -1301,8 +1352,9 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1301 if (mem_perr) { 1352 if (mem_perr) {
1302 efx_oword_t reg; 1353 efx_oword_t reg;
1303 efx_reado(efx, &reg, FR_AZ_MEM_STAT); 1354 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1304 EFX_ERR(efx, "SYSTEM ERROR: memory parity error " 1355 netif_err(efx, hw, efx->net_dev,
1305 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1356 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1357 EFX_OWORD_VAL(reg));
1306 } 1358 }
1307 1359
1308 /* Disable both devices */ 1360 /* Disable both devices */
@@ -1319,11 +1371,13 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1319 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1371 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1320 } 1372 }
1321 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1373 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1322 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); 1374 netif_err(efx, hw, efx->net_dev,
1375 "SYSTEM ERROR - reset scheduled\n");
1323 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1376 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1324 } else { 1377 } else {
1325 EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen." 1378 netif_err(efx, hw, efx->net_dev,
1326 "NIC will be disabled\n"); 1379 "SYSTEM ERROR - max number of errors seen."
1380 "NIC will be disabled\n");
1327 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1381 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1328 } 1382 }
1329 1383
@@ -1386,8 +1440,9 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1386 1440
1387 if (result == IRQ_HANDLED) { 1441 if (result == IRQ_HANDLED) {
1388 efx->last_irq_cpu = raw_smp_processor_id(); 1442 efx->last_irq_cpu = raw_smp_processor_id();
1389 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1443 netif_vdbg(efx, intr, efx->net_dev,
1390 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1444 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1445 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1391 } 1446 }
1392 1447
1393 return result; 1448 return result;
@@ -1408,8 +1463,9 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1408 int syserr; 1463 int syserr;
1409 1464
1410 efx->last_irq_cpu = raw_smp_processor_id(); 1465 efx->last_irq_cpu = raw_smp_processor_id();
1411 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1466 netif_vdbg(efx, intr, efx->net_dev,
1412 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1467 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1468 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1413 1469
1414 /* Check to see if we have a serious error condition */ 1470 /* Check to see if we have a serious error condition */
1415 if (channel->channel == efx->fatal_irq_level) { 1471 if (channel->channel == efx->fatal_irq_level) {
@@ -1428,22 +1484,21 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1428/* Setup RSS indirection table. 1484/* Setup RSS indirection table.
1429 * This maps from the hash value of the packet to RXQ 1485 * This maps from the hash value of the packet to RXQ
1430 */ 1486 */
1431static void efx_setup_rss_indir_table(struct efx_nic *efx) 1487void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1432{ 1488{
1433 int i = 0; 1489 size_t i = 0;
1434 unsigned long offset;
1435 efx_dword_t dword; 1490 efx_dword_t dword;
1436 1491
1437 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) 1492 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1438 return; 1493 return;
1439 1494
1440 for (offset = FR_BZ_RX_INDIRECTION_TBL; 1495 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1441 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; 1496 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1442 offset += 0x10) { 1497
1498 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1443 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1499 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1444 i % efx->n_rx_channels); 1500 efx->rx_indir_table[i]);
1445 efx_writed(efx, &dword, offset); 1501 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
1446 i++;
1447 } 1502 }
1448} 1503}
1449 1504
@@ -1465,8 +1520,9 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1465 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, 1520 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1466 efx->name, efx); 1521 efx->name, efx);
1467 if (rc) { 1522 if (rc) {
1468 EFX_ERR(efx, "failed to hook legacy IRQ %d\n", 1523 netif_err(efx, drv, efx->net_dev,
1469 efx->pci_dev->irq); 1524 "failed to hook legacy IRQ %d\n",
1525 efx->pci_dev->irq);
1470 goto fail1; 1526 goto fail1;
1471 } 1527 }
1472 return 0; 1528 return 0;
@@ -1478,7 +1534,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1478 IRQF_PROBE_SHARED, /* Not shared */ 1534 IRQF_PROBE_SHARED, /* Not shared */
1479 channel->name, channel); 1535 channel->name, channel);
1480 if (rc) { 1536 if (rc) {
1481 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq); 1537 netif_err(efx, drv, efx->net_dev,
1538 "failed to hook IRQ %d\n", channel->irq);
1482 goto fail2; 1539 goto fail2;
1483 } 1540 }
1484 } 1541 }
@@ -1576,7 +1633,7 @@ void efx_nic_init_common(struct efx_nic *efx)
1576 EFX_INVERT_OWORD(temp); 1633 EFX_INVERT_OWORD(temp);
1577 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1634 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1578 1635
1579 efx_setup_rss_indir_table(efx); 1636 efx_nic_push_rx_indir_table(efx);
1580 1637
1581 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1638 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1582 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 1639 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
@@ -1598,3 +1655,269 @@ void efx_nic_init_common(struct efx_nic *efx)
1598 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1655 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1599 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1656 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1600} 1657}
1658
1659/* Register dump */
1660
1661#define REGISTER_REVISION_A 1
1662#define REGISTER_REVISION_B 2
1663#define REGISTER_REVISION_C 3
1664#define REGISTER_REVISION_Z 3 /* latest revision */
1665
1666struct efx_nic_reg {
1667 u32 offset:24;
1668 u32 min_revision:2, max_revision:2;
1669};
1670
1671#define REGISTER(name, min_rev, max_rev) { \
1672 FR_ ## min_rev ## max_rev ## _ ## name, \
1673 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
1674}
1675#define REGISTER_AA(name) REGISTER(name, A, A)
1676#define REGISTER_AB(name) REGISTER(name, A, B)
1677#define REGISTER_AZ(name) REGISTER(name, A, Z)
1678#define REGISTER_BB(name) REGISTER(name, B, B)
1679#define REGISTER_BZ(name) REGISTER(name, B, Z)
1680#define REGISTER_CZ(name) REGISTER(name, C, Z)
1681
1682static const struct efx_nic_reg efx_nic_regs[] = {
1683 REGISTER_AZ(ADR_REGION),
1684 REGISTER_AZ(INT_EN_KER),
1685 REGISTER_BZ(INT_EN_CHAR),
1686 REGISTER_AZ(INT_ADR_KER),
1687 REGISTER_BZ(INT_ADR_CHAR),
1688 /* INT_ACK_KER is WO */
1689 /* INT_ISR0 is RC */
1690 REGISTER_AZ(HW_INIT),
1691 REGISTER_CZ(USR_EV_CFG),
1692 REGISTER_AB(EE_SPI_HCMD),
1693 REGISTER_AB(EE_SPI_HADR),
1694 REGISTER_AB(EE_SPI_HDATA),
1695 REGISTER_AB(EE_BASE_PAGE),
1696 REGISTER_AB(EE_VPD_CFG0),
1697 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1698 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1699 /* PCIE_CORE_INDIRECT is indirect */
1700 REGISTER_AB(NIC_STAT),
1701 REGISTER_AB(GPIO_CTL),
1702 REGISTER_AB(GLB_CTL),
1703 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1704 REGISTER_BZ(DP_CTRL),
1705 REGISTER_AZ(MEM_STAT),
1706 REGISTER_AZ(CS_DEBUG),
1707 REGISTER_AZ(ALTERA_BUILD),
1708 REGISTER_AZ(CSR_SPARE),
1709 REGISTER_AB(PCIE_SD_CTL0123),
1710 REGISTER_AB(PCIE_SD_CTL45),
1711 REGISTER_AB(PCIE_PCS_CTL_STAT),
1712 /* DEBUG_DATA_OUT is not used */
1713 /* DRV_EV is WO */
1714 REGISTER_AZ(EVQ_CTL),
1715 REGISTER_AZ(EVQ_CNT1),
1716 REGISTER_AZ(EVQ_CNT2),
1717 REGISTER_AZ(BUF_TBL_CFG),
1718 REGISTER_AZ(SRM_RX_DC_CFG),
1719 REGISTER_AZ(SRM_TX_DC_CFG),
1720 REGISTER_AZ(SRM_CFG),
1721 /* BUF_TBL_UPD is WO */
1722 REGISTER_AZ(SRM_UPD_EVQ),
1723 REGISTER_AZ(SRAM_PARITY),
1724 REGISTER_AZ(RX_CFG),
1725 REGISTER_BZ(RX_FILTER_CTL),
1726 /* RX_FLUSH_DESCQ is WO */
1727 REGISTER_AZ(RX_DC_CFG),
1728 REGISTER_AZ(RX_DC_PF_WM),
1729 REGISTER_BZ(RX_RSS_TKEY),
1730 /* RX_NODESC_DROP is RC */
1731 REGISTER_AA(RX_SELF_RST),
1732 /* RX_DEBUG, RX_PUSH_DROP are not used */
1733 REGISTER_CZ(RX_RSS_IPV6_REG1),
1734 REGISTER_CZ(RX_RSS_IPV6_REG2),
1735 REGISTER_CZ(RX_RSS_IPV6_REG3),
1736 /* TX_FLUSH_DESCQ is WO */
1737 REGISTER_AZ(TX_DC_CFG),
1738 REGISTER_AA(TX_CHKSM_CFG),
1739 REGISTER_AZ(TX_CFG),
1740 /* TX_PUSH_DROP is not used */
1741 REGISTER_AZ(TX_RESERVED),
1742 REGISTER_BZ(TX_PACE),
1743 /* TX_PACE_DROP_QID is RC */
1744 REGISTER_BB(TX_VLAN),
1745 REGISTER_BZ(TX_IPFIL_PORTEN),
1746 REGISTER_AB(MD_TXD),
1747 REGISTER_AB(MD_RXD),
1748 REGISTER_AB(MD_CS),
1749 REGISTER_AB(MD_PHY_ADR),
1750 REGISTER_AB(MD_ID),
1751 /* MD_STAT is RC */
1752 REGISTER_AB(MAC_STAT_DMA),
1753 REGISTER_AB(MAC_CTRL),
1754 REGISTER_BB(GEN_MODE),
1755 REGISTER_AB(MAC_MC_HASH_REG0),
1756 REGISTER_AB(MAC_MC_HASH_REG1),
1757 REGISTER_AB(GM_CFG1),
1758 REGISTER_AB(GM_CFG2),
1759 /* GM_IPG and GM_HD are not used */
1760 REGISTER_AB(GM_MAX_FLEN),
1761 /* GM_TEST is not used */
1762 REGISTER_AB(GM_ADR1),
1763 REGISTER_AB(GM_ADR2),
1764 REGISTER_AB(GMF_CFG0),
1765 REGISTER_AB(GMF_CFG1),
1766 REGISTER_AB(GMF_CFG2),
1767 REGISTER_AB(GMF_CFG3),
1768 REGISTER_AB(GMF_CFG4),
1769 REGISTER_AB(GMF_CFG5),
1770 REGISTER_BB(TX_SRC_MAC_CTL),
1771 REGISTER_AB(XM_ADR_LO),
1772 REGISTER_AB(XM_ADR_HI),
1773 REGISTER_AB(XM_GLB_CFG),
1774 REGISTER_AB(XM_TX_CFG),
1775 REGISTER_AB(XM_RX_CFG),
1776 REGISTER_AB(XM_MGT_INT_MASK),
1777 REGISTER_AB(XM_FC),
1778 REGISTER_AB(XM_PAUSE_TIME),
1779 REGISTER_AB(XM_TX_PARAM),
1780 REGISTER_AB(XM_RX_PARAM),
1781 /* XM_MGT_INT_MSK (note no 'A') is RC */
1782 REGISTER_AB(XX_PWR_RST),
1783 REGISTER_AB(XX_SD_CTL),
1784 REGISTER_AB(XX_TXDRV_CTL),
1785 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1786 /* XX_CORE_STAT is partly RC */
1787};
1788
1789struct efx_nic_reg_table {
1790 u32 offset:24;
1791 u32 min_revision:2, max_revision:2;
1792 u32 step:6, rows:21;
1793};
1794
1795#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1796 offset, \
1797 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1798 step, rows \
1799}
1800#define REGISTER_TABLE(name, min_rev, max_rev) \
1801 REGISTER_TABLE_DIMENSIONS( \
1802 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1803 min_rev, max_rev, \
1804 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
1805 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1806#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1807#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1808#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1809#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1810#define REGISTER_TABLE_BB_CZ(name) \
1811 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
1812 FR_BZ_ ## name ## _STEP, \
1813 FR_BB_ ## name ## _ROWS), \
1814 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
1815 FR_BZ_ ## name ## _STEP, \
1816 FR_CZ_ ## name ## _ROWS)
1817#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
1818
1819static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1820 /* DRIVER is not used */
1821 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1822 REGISTER_TABLE_BB(TX_IPFIL_TBL),
1823 REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1824 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1825 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1826 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1827 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1828 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1829 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1830 /* The register buffer is allocated with slab, so we can't
1831 * reasonably read all of the buffer table (up to 8MB!).
1832 * However this driver will only use a few entries. Reading
1833 * 1K entries allows for some expansion of queue count and
1834 * size before we need to change the version. */
1835 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1836 A, A, 8, 1024),
1837 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1838 B, Z, 8, 1024),
1839 /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
1840 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1841 REGISTER_TABLE_BB_CZ(TIMER_TBL),
1842 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1843 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1844 /* TX_FILTER_TBL0 is huge and not used by this driver */
1845 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1846 REGISTER_TABLE_CZ(MC_TREG_SMEM),
1847 /* MSIX_PBA_TABLE is not mapped */
1848 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
1849};
1850
1851size_t efx_nic_get_regs_len(struct efx_nic *efx)
1852{
1853 const struct efx_nic_reg *reg;
1854 const struct efx_nic_reg_table *table;
1855 size_t len = 0;
1856
1857 for (reg = efx_nic_regs;
1858 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1859 reg++)
1860 if (efx->type->revision >= reg->min_revision &&
1861 efx->type->revision <= reg->max_revision)
1862 len += sizeof(efx_oword_t);
1863
1864 for (table = efx_nic_reg_tables;
1865 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1866 table++)
1867 if (efx->type->revision >= table->min_revision &&
1868 efx->type->revision <= table->max_revision)
1869 len += table->rows * min_t(size_t, table->step, 16);
1870
1871 return len;
1872}
1873
1874void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1875{
1876 const struct efx_nic_reg *reg;
1877 const struct efx_nic_reg_table *table;
1878
1879 for (reg = efx_nic_regs;
1880 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1881 reg++) {
1882 if (efx->type->revision >= reg->min_revision &&
1883 efx->type->revision <= reg->max_revision) {
1884 efx_reado(efx, (efx_oword_t *)buf, reg->offset);
1885 buf += sizeof(efx_oword_t);
1886 }
1887 }
1888
1889 for (table = efx_nic_reg_tables;
1890 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1891 table++) {
1892 size_t size, i;
1893
1894 if (!(efx->type->revision >= table->min_revision &&
1895 efx->type->revision <= table->max_revision))
1896 continue;
1897
1898 size = min_t(size_t, table->step, 16);
1899
1900 for (i = 0; i < table->rows; i++) {
1901 switch (table->step) {
1902 case 4: /* 32-bit register or SRAM */
1903 efx_readd_table(efx, buf, table->offset, i);
1904 break;
1905 case 8: /* 64-bit SRAM */
1906 efx_sram_readq(efx,
1907 efx->membase + table->offset,
1908 buf, i);
1909 break;
1910 case 16: /* 128-bit register */
1911 efx_reado_table(efx, buf, table->offset, i);
1912 break;
1913 case 32: /* 128-bit register, interleaved */
1914 efx_reado_table(efx, buf, table->offset, 2 * i);
1915 break;
1916 default:
1917 WARN_ON(1);
1918 return;
1919 }
1920 buf += size;
1921 }
1922 }
1923}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index bbc2c0c2f843..0438dc98722d 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -142,7 +142,6 @@ struct siena_nic_data {
142 u32 fw_build; 142 u32 fw_build;
143 struct efx_mcdi_iface mcdi; 143 struct efx_mcdi_iface mcdi;
144 int wol_filter_id; 144 int wol_filter_id;
145 u8 ipv6_rss_key[40];
146}; 145};
147 146
148extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); 147extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -190,8 +189,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
190/* Interrupts and test events */ 189/* Interrupts and test events */
191extern int efx_nic_init_interrupt(struct efx_nic *efx); 190extern int efx_nic_init_interrupt(struct efx_nic *efx);
192extern void efx_nic_enable_interrupts(struct efx_nic *efx); 191extern void efx_nic_enable_interrupts(struct efx_nic *efx);
193extern void efx_nic_generate_test_event(struct efx_channel *channel, 192extern void efx_nic_generate_test_event(struct efx_channel *channel);
194 unsigned int magic); 193extern void efx_nic_generate_fill_event(struct efx_channel *channel);
195extern void efx_nic_generate_interrupt(struct efx_nic *efx); 194extern void efx_nic_generate_interrupt(struct efx_nic *efx);
196extern void efx_nic_disable_interrupts(struct efx_nic *efx); 195extern void efx_nic_disable_interrupts(struct efx_nic *efx);
197extern void efx_nic_fini_interrupt(struct efx_nic *efx); 196extern void efx_nic_fini_interrupt(struct efx_nic *efx);
@@ -208,6 +207,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx);
208extern void falcon_setup_xaui(struct efx_nic *efx); 207extern void falcon_setup_xaui(struct efx_nic *efx);
209extern int falcon_reset_xaui(struct efx_nic *efx); 208extern int falcon_reset_xaui(struct efx_nic *efx);
210extern void efx_nic_init_common(struct efx_nic *efx); 209extern void efx_nic_init_common(struct efx_nic *efx);
210extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
211 211
212int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 212int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
213 unsigned int len); 213 unsigned int len);
@@ -222,6 +222,9 @@ extern int efx_nic_test_registers(struct efx_nic *efx,
222 const struct efx_nic_register_test *regs, 222 const struct efx_nic_register_test *regs,
223 size_t n_regs); 223 size_t n_regs);
224 224
225extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
226extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
227
225/************************************************************************** 228/**************************************************************************
226 * 229 *
227 * Falcon MAC stats 230 * Falcon MAC stats
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e077bef08a50..68813d1d85f3 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -91,9 +91,10 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx)
91 if (time_after(jiffies, timeout)) { 91 if (time_after(jiffies, timeout)) {
92 /* Some cables have EEPROMs that conflict with the 92 /* Some cables have EEPROMs that conflict with the
93 * PHY's on-board EEPROM so it cannot load firmware */ 93 * PHY's on-board EEPROM so it cannot load firmware */
94 EFX_ERR(efx, "If an SFP+ direct attach cable is" 94 netif_err(efx, hw, efx->net_dev,
95 " connected, please check that it complies" 95 "If an SFP+ direct attach cable is"
96 " with the SFP+ specification\n"); 96 " connected, please check that it complies"
97 " with the SFP+ specification\n");
97 return -ETIMEDOUT; 98 return -ETIMEDOUT;
98 } 99 }
99 msleep(QT2025C_HEARTB_WAIT); 100 msleep(QT2025C_HEARTB_WAIT);
@@ -145,7 +146,8 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
145 /* Bug 17689: occasionally heartbeat starts but firmware status 146 /* Bug 17689: occasionally heartbeat starts but firmware status
146 * code never progresses beyond 0x00. Try again, once, after 147 * code never progresses beyond 0x00. Try again, once, after
147 * restarting execution of the firmware image. */ 148 * restarting execution of the firmware image. */
148 EFX_LOG(efx, "bashing QT2025C microcontroller\n"); 149 netif_dbg(efx, hw, efx->net_dev,
150 "bashing QT2025C microcontroller\n");
149 qt2025c_restart_firmware(efx); 151 qt2025c_restart_firmware(efx);
150 rc = qt2025c_wait_heartbeat(efx); 152 rc = qt2025c_wait_heartbeat(efx);
151 if (rc != 0) 153 if (rc != 0)
@@ -165,11 +167,12 @@ static void qt2025c_firmware_id(struct efx_nic *efx)
165 for (i = 0; i < sizeof(firmware_id); i++) 167 for (i = 0; i < sizeof(firmware_id); i++)
166 firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, 168 firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
167 PCS_FW_PRODUCT_CODE_1 + i); 169 PCS_FW_PRODUCT_CODE_1 + i);
168 EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", 170 netif_info(efx, probe, efx->net_dev,
169 (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], 171 "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
170 firmware_id[3] >> 4, firmware_id[3] & 0xf, 172 (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
171 firmware_id[4], firmware_id[5], 173 firmware_id[3] >> 4, firmware_id[3] & 0xf,
172 firmware_id[6], firmware_id[7], firmware_id[8]); 174 firmware_id[4], firmware_id[5],
175 firmware_id[6], firmware_id[7], firmware_id[8]);
173 phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | 176 phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
174 ((firmware_id[3] & 0x0f) << 16) | 177 ((firmware_id[3] & 0x0f) << 16) |
175 (firmware_id[4] << 8) | firmware_id[5]; 178 (firmware_id[4] << 8) | firmware_id[5];
@@ -198,7 +201,7 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx)
198 } 201 }
199 202
200 if (time_after_eq(jiffies, phy_data->bug17190_timer)) { 203 if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
201 EFX_LOG(efx, "bashing QT2025C PMA/PMD\n"); 204 netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
202 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, 205 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
203 MDIO_PMA_CTRL1_LOOPBACK, true); 206 MDIO_PMA_CTRL1_LOOPBACK, true);
204 msleep(100); 207 msleep(100);
@@ -231,7 +234,8 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
231 reg = efx_mdio_read(efx, 1, 0xc319); 234 reg = efx_mdio_read(efx, 1, 0xc319);
232 if ((reg & 0x0038) == phy_op_mode) 235 if ((reg & 0x0038) == phy_op_mode)
233 return 0; 236 return 0;
234 EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode); 237 netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
238 phy_op_mode);
235 239
236 /* This sequence replicates the register writes configured in the boot 240 /* This sequence replicates the register writes configured in the boot
237 * EEPROM (including the differences between board revisions), except 241 * EEPROM (including the differences between board revisions), except
@@ -287,8 +291,9 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
287 /* Wait for the microcontroller to be ready again */ 291 /* Wait for the microcontroller to be ready again */
288 rc = qt2025c_wait_reset(efx); 292 rc = qt2025c_wait_reset(efx);
289 if (rc < 0) { 293 if (rc < 0) {
290 EFX_ERR(efx, "PHY microcontroller reset during mode switch " 294 netif_err(efx, hw, efx->net_dev,
291 "timed out\n"); 295 "PHY microcontroller reset during mode switch "
296 "timed out\n");
292 return rc; 297 return rc;
293 } 298 }
294 299
@@ -324,7 +329,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
324 return 0; 329 return 0;
325 330
326 fail: 331 fail:
327 EFX_ERR(efx, "PHY reset timed out\n"); 332 netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
328 return rc; 333 return rc;
329} 334}
330 335
@@ -353,14 +358,15 @@ static int qt202x_phy_init(struct efx_nic *efx)
353 358
354 rc = qt202x_reset_phy(efx); 359 rc = qt202x_reset_phy(efx);
355 if (rc) { 360 if (rc) {
356 EFX_ERR(efx, "PHY init failed\n"); 361 netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
357 return rc; 362 return rc;
358 } 363 }
359 364
360 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 365 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
361 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", 366 netif_info(efx, probe, efx->net_dev,
362 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), 367 "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
363 efx_mdio_id_rev(devid)); 368 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
369 efx_mdio_id_rev(devid));
364 370
365 if (efx->phy_type == PHY_TYPE_QT2025C) 371 if (efx->phy_type == PHY_TYPE_QT2025C)
366 qt2025c_firmware_id(efx); 372 qt2025c_firmware_id(efx);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818b9f55..799c461ce7b8 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -25,6 +25,9 @@
25/* Number of RX descriptors pushed at once. */ 25/* Number of RX descriptors pushed at once. */
26#define EFX_RX_BATCH 8 26#define EFX_RX_BATCH 8
27 27
28/* Maximum size of a buffer sharing a page */
29#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
30
28/* Size of buffer allocated for skb header area. */ 31/* Size of buffer allocated for skb header area. */
29#define EFX_SKB_HEADERS 64u 32#define EFX_SKB_HEADERS 64u
30 33
@@ -98,155 +101,151 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
98 return PAGE_SIZE << efx->rx_buffer_order; 101 return PAGE_SIZE << efx->rx_buffer_order;
99} 102}
100 103
104static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
105{
106#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
107 return __le32_to_cpup((const __le32 *)(buf->data - 4));
108#else
109 const u8 *data = (const u8 *)(buf->data - 4);
110 return ((u32)data[0] |
111 (u32)data[1] << 8 |
112 (u32)data[2] << 16 |
113 (u32)data[3] << 24);
114#endif
115}
101 116
102/** 117/**
103 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation 118 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
104 * 119 *
105 * @rx_queue: Efx RX queue 120 * @rx_queue: Efx RX queue
106 * @rx_buf: RX buffer structure to populate
107 * 121 *
108 * This allocates memory for a new receive buffer, maps it for DMA, 122 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
109 * and populates a struct efx_rx_buffer with the relevant 123 * struct efx_rx_buffer for each one. Return a negative error code or 0
110 * information. Return a negative error code or 0 on success. 124 * on success. May fail having only inserted fewer than EFX_RX_BATCH
125 * buffers.
111 */ 126 */
112static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, 127static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
113 struct efx_rx_buffer *rx_buf)
114{ 128{
115 struct efx_nic *efx = rx_queue->efx; 129 struct efx_nic *efx = rx_queue->efx;
116 struct net_device *net_dev = efx->net_dev; 130 struct net_device *net_dev = efx->net_dev;
131 struct efx_rx_buffer *rx_buf;
117 int skb_len = efx->rx_buffer_len; 132 int skb_len = efx->rx_buffer_len;
133 unsigned index, count;
118 134
119 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); 135 for (count = 0; count < EFX_RX_BATCH; ++count) {
120 if (unlikely(!rx_buf->skb)) 136 index = rx_queue->added_count & EFX_RXQ_MASK;
121 return -ENOMEM; 137 rx_buf = efx_rx_buffer(rx_queue, index);
122 138
123 /* Adjust the SKB for padding and checksum */ 139 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
124 skb_reserve(rx_buf->skb, NET_IP_ALIGN); 140 if (unlikely(!rx_buf->skb))
125 rx_buf->len = skb_len - NET_IP_ALIGN; 141 return -ENOMEM;
126 rx_buf->data = (char *)rx_buf->skb->data; 142 rx_buf->page = NULL;
127 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
128 143
129 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 144 /* Adjust the SKB for padding and checksum */
130 rx_buf->data, rx_buf->len, 145 skb_reserve(rx_buf->skb, NET_IP_ALIGN);
131 PCI_DMA_FROMDEVICE); 146 rx_buf->len = skb_len - NET_IP_ALIGN;
147 rx_buf->data = (char *)rx_buf->skb->data;
148 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
149
150 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
151 rx_buf->data, rx_buf->len,
152 PCI_DMA_FROMDEVICE);
153 if (unlikely(pci_dma_mapping_error(efx->pci_dev,
154 rx_buf->dma_addr))) {
155 dev_kfree_skb_any(rx_buf->skb);
156 rx_buf->skb = NULL;
157 return -EIO;
158 }
132 159
133 if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { 160 ++rx_queue->added_count;
134 dev_kfree_skb_any(rx_buf->skb); 161 ++rx_queue->alloc_skb_count;
135 rx_buf->skb = NULL;
136 return -EIO;
137 } 162 }
138 163
139 return 0; 164 return 0;
140} 165}
141 166
142/** 167/**
143 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation 168 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
144 * 169 *
145 * @rx_queue: Efx RX queue 170 * @rx_queue: Efx RX queue
146 * @rx_buf: RX buffer structure to populate
147 * 171 *
148 * This allocates memory for a new receive buffer, maps it for DMA, 172 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
149 * and populates a struct efx_rx_buffer with the relevant 173 * and populates struct efx_rx_buffers for each one. Return a negative error
150 * information. Return a negative error code or 0 on success. 174 * code or 0 on success. If a single page can be split between two buffers,
175 * then the page will either be inserted fully, or not at at all.
151 */ 176 */
152static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, 177static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
153 struct efx_rx_buffer *rx_buf)
154{ 178{
155 struct efx_nic *efx = rx_queue->efx; 179 struct efx_nic *efx = rx_queue->efx;
156 int bytes, space, offset; 180 struct efx_rx_buffer *rx_buf;
157 181 struct page *page;
158 bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 182 void *page_addr;
159 183 struct efx_rx_page_state *state;
160 /* If there is space left in the previously allocated page, 184 dma_addr_t dma_addr;
161 * then use it. Otherwise allocate a new one */ 185 unsigned index, count;
162 rx_buf->page = rx_queue->buf_page; 186
163 if (rx_buf->page == NULL) { 187 /* We can split a page between two buffers */
164 dma_addr_t dma_addr; 188 BUILD_BUG_ON(EFX_RX_BATCH & 1);
165 189
166 rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, 190 for (count = 0; count < EFX_RX_BATCH; ++count) {
167 efx->rx_buffer_order); 191 page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
168 if (unlikely(rx_buf->page == NULL)) 192 efx->rx_buffer_order);
193 if (unlikely(page == NULL))
169 return -ENOMEM; 194 return -ENOMEM;
170 195 dma_addr = pci_map_page(efx->pci_dev, page, 0,
171 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 196 efx_rx_buf_size(efx),
172 0, efx_rx_buf_size(efx),
173 PCI_DMA_FROMDEVICE); 197 PCI_DMA_FROMDEVICE);
174
175 if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { 198 if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
176 __free_pages(rx_buf->page, efx->rx_buffer_order); 199 __free_pages(page, efx->rx_buffer_order);
177 rx_buf->page = NULL;
178 return -EIO; 200 return -EIO;
179 } 201 }
180 202 page_addr = page_address(page);
181 rx_queue->buf_page = rx_buf->page; 203 state = page_addr;
182 rx_queue->buf_dma_addr = dma_addr; 204 state->refcnt = 0;
183 rx_queue->buf_data = (page_address(rx_buf->page) + 205 state->dma_addr = dma_addr;
184 EFX_PAGE_IP_ALIGN); 206
185 } 207 page_addr += sizeof(struct efx_rx_page_state);
186 208 dma_addr += sizeof(struct efx_rx_page_state);
187 rx_buf->len = bytes; 209
188 rx_buf->data = rx_queue->buf_data; 210 split:
189 offset = efx_rx_buf_offset(rx_buf); 211 index = rx_queue->added_count & EFX_RXQ_MASK;
190 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; 212 rx_buf = efx_rx_buffer(rx_queue, index);
191 213 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
192 /* Try to pack multiple buffers per page */ 214 rx_buf->skb = NULL;
193 if (efx->rx_buffer_order == 0) { 215 rx_buf->page = page;
194 /* The next buffer starts on the next 512 byte boundary */ 216 rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
195 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); 217 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
196 offset += ((bytes + 0x1ff) & ~0x1ff); 218 ++rx_queue->added_count;
197 219 ++rx_queue->alloc_page_count;
198 space = efx_rx_buf_size(efx) - offset; 220 ++state->refcnt;
199 if (space >= bytes) { 221
200 /* Refs dropped on kernel releasing each skb */ 222 if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
201 get_page(rx_queue->buf_page); 223 /* Use the second half of the page */
202 goto out; 224 get_page(page);
225 dma_addr += (PAGE_SIZE >> 1);
226 page_addr += (PAGE_SIZE >> 1);
227 ++count;
228 goto split;
203 } 229 }
204 } 230 }
205 231
206 /* This is the final RX buffer for this page, so mark it for
207 * unmapping */
208 rx_queue->buf_page = NULL;
209 rx_buf->unmap_addr = rx_queue->buf_dma_addr;
210
211 out:
212 return 0; 232 return 0;
213} 233}
214 234
215/* This allocates memory for a new receive buffer, maps it for DMA,
216 * and populates a struct efx_rx_buffer with the relevant
217 * information.
218 */
219static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
220 struct efx_rx_buffer *new_rx_buf)
221{
222 int rc = 0;
223
224 if (rx_queue->channel->rx_alloc_push_pages) {
225 new_rx_buf->skb = NULL;
226 rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
227 rx_queue->alloc_page_count++;
228 } else {
229 new_rx_buf->page = NULL;
230 rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
231 rx_queue->alloc_skb_count++;
232 }
233
234 if (unlikely(rc < 0))
235 EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
236 rx_queue->queue, rc);
237 return rc;
238}
239
240static void efx_unmap_rx_buffer(struct efx_nic *efx, 235static void efx_unmap_rx_buffer(struct efx_nic *efx,
241 struct efx_rx_buffer *rx_buf) 236 struct efx_rx_buffer *rx_buf)
242{ 237{
243 if (rx_buf->page) { 238 if (rx_buf->page) {
239 struct efx_rx_page_state *state;
240
244 EFX_BUG_ON_PARANOID(rx_buf->skb); 241 EFX_BUG_ON_PARANOID(rx_buf->skb);
245 if (rx_buf->unmap_addr) { 242
246 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, 243 state = page_address(rx_buf->page);
244 if (--state->refcnt == 0) {
245 pci_unmap_page(efx->pci_dev,
246 state->dma_addr,
247 efx_rx_buf_size(efx), 247 efx_rx_buf_size(efx),
248 PCI_DMA_FROMDEVICE); 248 PCI_DMA_FROMDEVICE);
249 rx_buf->unmap_addr = 0;
250 } 249 }
251 } else if (likely(rx_buf->skb)) { 250 } else if (likely(rx_buf->skb)) {
252 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 251 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -273,31 +272,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
273 efx_free_rx_buffer(rx_queue->efx, rx_buf); 272 efx_free_rx_buffer(rx_queue->efx, rx_buf);
274} 273}
275 274
275/* Attempt to resurrect the other receive buffer that used to share this page,
276 * which had previously been passed up to the kernel and freed. */
277static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
278 struct efx_rx_buffer *rx_buf)
279{
280 struct efx_rx_page_state *state = page_address(rx_buf->page);
281 struct efx_rx_buffer *new_buf;
282 unsigned fill_level, index;
283
284 /* +1 because efx_rx_packet() incremented removed_count. +1 because
285 * we'd like to insert an additional descriptor whilst leaving
286 * EFX_RXD_HEAD_ROOM for the non-recycle path */
287 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
288 if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
289 /* We could place "state" on a list, and drain the list in
290 * efx_fast_push_rx_descriptors(). For now, this will do. */
291 return;
292 }
293
294 ++state->refcnt;
295 get_page(rx_buf->page);
296
297 index = rx_queue->added_count & EFX_RXQ_MASK;
298 new_buf = efx_rx_buffer(rx_queue, index);
299 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
300 new_buf->skb = NULL;
301 new_buf->page = rx_buf->page;
302 new_buf->data = (void *)
303 ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
304 new_buf->len = rx_buf->len;
305 ++rx_queue->added_count;
306}
307
308/* Recycle the given rx buffer directly back into the rx_queue. There is
309 * always room to add this buffer, because we've just popped a buffer. */
310static void efx_recycle_rx_buffer(struct efx_channel *channel,
311 struct efx_rx_buffer *rx_buf)
312{
313 struct efx_nic *efx = channel->efx;
314 struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
315 struct efx_rx_buffer *new_buf;
316 unsigned index;
317
318 if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
319 page_count(rx_buf->page) == 1)
320 efx_resurrect_rx_buffer(rx_queue, rx_buf);
321
322 index = rx_queue->added_count & EFX_RXQ_MASK;
323 new_buf = efx_rx_buffer(rx_queue, index);
324
325 memcpy(new_buf, rx_buf, sizeof(*new_buf));
326 rx_buf->page = NULL;
327 rx_buf->skb = NULL;
328 ++rx_queue->added_count;
329}
330
276/** 331/**
277 * efx_fast_push_rx_descriptors - push new RX descriptors quickly 332 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
278 * @rx_queue: RX descriptor queue 333 * @rx_queue: RX descriptor queue
279 * @retry: Recheck the fill level
280 * This will aim to fill the RX descriptor queue up to 334 * This will aim to fill the RX descriptor queue up to
281 * @rx_queue->@fast_fill_limit. If there is insufficient atomic 335 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
282 * memory to do so, the caller should retry. 336 * memory to do so, a slow fill will be scheduled.
337 *
338 * The caller must provide serialisation (none is used here). In practise,
339 * this means this function must run from the NAPI handler, or be called
340 * when NAPI is disabled.
283 */ 341 */
284static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, 342void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
285 int retry)
286{ 343{
287 struct efx_rx_buffer *rx_buf; 344 struct efx_channel *channel = rx_queue->channel;
288 unsigned fill_level, index; 345 unsigned fill_level;
289 int i, space, rc = 0; 346 int space, rc = 0;
290 347
291 /* Calculate current fill level. Do this outside the lock, 348 /* Calculate current fill level, and exit if we don't need to fill */
292 * because most of the time we'll end up not wanting to do the
293 * fill anyway.
294 */
295 fill_level = (rx_queue->added_count - rx_queue->removed_count); 349 fill_level = (rx_queue->added_count - rx_queue->removed_count);
296 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); 350 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
297
298 /* Don't fill if we don't need to */
299 if (fill_level >= rx_queue->fast_fill_trigger) 351 if (fill_level >= rx_queue->fast_fill_trigger)
300 return 0; 352 goto out;
301 353
302 /* Record minimum fill level */ 354 /* Record minimum fill level */
303 if (unlikely(fill_level < rx_queue->min_fill)) { 355 if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -305,99 +357,47 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
305 rx_queue->min_fill = fill_level; 357 rx_queue->min_fill = fill_level;
306 } 358 }
307 359
308 /* Acquire RX add lock. If this lock is contended, then a fast
309 * fill must already be in progress (e.g. in the refill
310 * tasklet), so we don't need to do anything
311 */
312 if (!spin_trylock_bh(&rx_queue->add_lock))
313 return -1;
314
315 retry:
316 /* Recalculate current fill level now that we have the lock */
317 fill_level = (rx_queue->added_count - rx_queue->removed_count);
318 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
319 space = rx_queue->fast_fill_limit - fill_level; 360 space = rx_queue->fast_fill_limit - fill_level;
320 if (space < EFX_RX_BATCH) 361 if (space < EFX_RX_BATCH)
321 goto out_unlock; 362 goto out;
322 363
323 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from" 364 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
324 " level %d to level %d using %s allocation\n", 365 "RX queue %d fast-filling descriptor ring from"
325 rx_queue->queue, fill_level, rx_queue->fast_fill_limit, 366 " level %d to level %d using %s allocation\n",
326 rx_queue->channel->rx_alloc_push_pages ? "page" : "skb"); 367 rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
368 channel->rx_alloc_push_pages ? "page" : "skb");
327 369
328 do { 370 do {
329 for (i = 0; i < EFX_RX_BATCH; ++i) { 371 if (channel->rx_alloc_push_pages)
330 index = rx_queue->added_count & EFX_RXQ_MASK; 372 rc = efx_init_rx_buffers_page(rx_queue);
331 rx_buf = efx_rx_buffer(rx_queue, index); 373 else
332 rc = efx_init_rx_buffer(rx_queue, rx_buf); 374 rc = efx_init_rx_buffers_skb(rx_queue);
333 if (unlikely(rc)) 375 if (unlikely(rc)) {
334 goto out; 376 /* Ensure that we don't leave the rx queue empty */
335 ++rx_queue->added_count; 377 if (rx_queue->added_count == rx_queue->removed_count)
378 efx_schedule_slow_fill(rx_queue);
379 goto out;
336 } 380 }
337 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); 381 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
338 382
339 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring " 383 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
340 "to level %d\n", rx_queue->queue, 384 "RX queue %d fast-filled descriptor ring "
341 rx_queue->added_count - rx_queue->removed_count); 385 "to level %d\n", rx_queue->queue,
386 rx_queue->added_count - rx_queue->removed_count);
342 387
343 out: 388 out:
344 /* Send write pointer to card. */ 389 if (rx_queue->notified_count != rx_queue->added_count)
345 efx_nic_notify_rx_desc(rx_queue); 390 efx_nic_notify_rx_desc(rx_queue);
346
347 /* If the fast fill is running inside from the refill tasklet, then
348 * for SMP systems it may be running on a different CPU to
349 * RX event processing, which means that the fill level may now be
350 * out of date. */
351 if (unlikely(retry && (rc == 0)))
352 goto retry;
353
354 out_unlock:
355 spin_unlock_bh(&rx_queue->add_lock);
356
357 return rc;
358}
359
360/**
361 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
362 * @rx_queue: RX descriptor queue
363 *
364 * This will aim to fill the RX descriptor queue up to
365 * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
366 * it will schedule a work item to immediately continue the fast fill
367 */
368void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
369{
370 int rc;
371
372 rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
373 if (unlikely(rc)) {
374 /* Schedule the work item to run immediately. The hope is
375 * that work is immediately pending to free some memory
376 * (e.g. an RX event or TX completion)
377 */
378 efx_schedule_slow_fill(rx_queue, 0);
379 }
380} 391}
381 392
382void efx_rx_work(struct work_struct *data) 393void efx_rx_slow_fill(unsigned long context)
383{ 394{
384 struct efx_rx_queue *rx_queue; 395 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
385 int rc; 396 struct efx_channel *channel = rx_queue->channel;
386
387 rx_queue = container_of(data, struct efx_rx_queue, work.work);
388
389 if (unlikely(!rx_queue->channel->enabled))
390 return;
391
392 EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
393 "%d\n", rx_queue->queue, raw_smp_processor_id());
394 397
398 /* Post an event to cause NAPI to run and refill the queue */
399 efx_nic_generate_fill_event(channel);
395 ++rx_queue->slow_fill_count; 400 ++rx_queue->slow_fill_count;
396 /* Push new RX descriptors, allowing at least 1 jiffy for
397 * the kernel to free some more memory. */
398 rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
399 if (rc)
400 efx_schedule_slow_fill(rx_queue, 1);
401} 401}
402 402
403static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 403static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
@@ -417,10 +417,12 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
417 *discard = true; 417 *discard = true;
418 418
419 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { 419 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
420 EFX_ERR_RL(efx, " RX queue %d seriously overlength " 420 if (net_ratelimit())
421 "RX event (0x%x > 0x%x+0x%x). Leaking\n", 421 netif_err(efx, rx_err, efx->net_dev,
422 rx_queue->queue, len, max_len, 422 " RX queue %d seriously overlength "
423 efx->type->rx_buffer_padding); 423 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
424 rx_queue->queue, len, max_len,
425 efx->type->rx_buffer_padding);
424 /* If this buffer was skb-allocated, then the meta 426 /* If this buffer was skb-allocated, then the meta
425 * data at the end of the skb will be trashed. So 427 * data at the end of the skb will be trashed. So
426 * we have no choice but to leak the fragment. 428 * we have no choice but to leak the fragment.
@@ -428,8 +430,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
428 *leak_packet = (rx_buf->skb != NULL); 430 *leak_packet = (rx_buf->skb != NULL);
429 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 431 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
430 } else { 432 } else {
431 EFX_ERR_RL(efx, " RX queue %d overlength RX event " 433 if (net_ratelimit())
432 "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len); 434 netif_err(efx, rx_err, efx->net_dev,
435 " RX queue %d overlength RX event "
436 "(0x%x > 0x%x)\n",
437 rx_queue->queue, len, max_len);
433 } 438 }
434 439
435 rx_queue->channel->n_rx_overlength++; 440 rx_queue->channel->n_rx_overlength++;
@@ -449,6 +454,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
449 454
450 /* Pass the skb/page into the LRO engine */ 455 /* Pass the skb/page into the LRO engine */
451 if (rx_buf->page) { 456 if (rx_buf->page) {
457 struct efx_nic *efx = channel->efx;
452 struct page *page = rx_buf->page; 458 struct page *page = rx_buf->page;
453 struct sk_buff *skb; 459 struct sk_buff *skb;
454 460
@@ -461,6 +467,9 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
461 return; 467 return;
462 } 468 }
463 469
470 if (efx->net_dev->features & NETIF_F_RXHASH)
471 skb->rxhash = efx_rx_buf_hash(rx_buf);
472
464 skb_shinfo(skb)->frags[0].page = page; 473 skb_shinfo(skb)->frags[0].page = page;
465 skb_shinfo(skb)->frags[0].page_offset = 474 skb_shinfo(skb)->frags[0].page_offset =
466 efx_rx_buf_offset(rx_buf); 475 efx_rx_buf_offset(rx_buf);
@@ -498,6 +507,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
498 unsigned int len, bool checksummed, bool discard) 507 unsigned int len, bool checksummed, bool discard)
499{ 508{
500 struct efx_nic *efx = rx_queue->efx; 509 struct efx_nic *efx = rx_queue->efx;
510 struct efx_channel *channel = rx_queue->channel;
501 struct efx_rx_buffer *rx_buf; 511 struct efx_rx_buffer *rx_buf;
502 bool leak_packet = false; 512 bool leak_packet = false;
503 513
@@ -516,21 +526,23 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
516 efx_rx_packet__check_len(rx_queue, rx_buf, len, 526 efx_rx_packet__check_len(rx_queue, rx_buf, len,
517 &discard, &leak_packet); 527 &discard, &leak_packet);
518 528
519 EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n", 529 netif_vdbg(efx, rx_status, efx->net_dev,
520 rx_queue->queue, index, 530 "RX queue %d received id %x at %llx+%x %s%s\n",
521 (unsigned long long)rx_buf->dma_addr, len, 531 rx_queue->queue, index,
522 (checksummed ? " [SUMMED]" : ""), 532 (unsigned long long)rx_buf->dma_addr, len,
523 (discard ? " [DISCARD]" : "")); 533 (checksummed ? " [SUMMED]" : ""),
534 (discard ? " [DISCARD]" : ""));
524 535
525 /* Discard packet, if instructed to do so */ 536 /* Discard packet, if instructed to do so */
526 if (unlikely(discard)) { 537 if (unlikely(discard)) {
527 if (unlikely(leak_packet)) 538 if (unlikely(leak_packet))
528 rx_queue->channel->n_skbuff_leaks++; 539 channel->n_skbuff_leaks++;
529 else 540 else
530 /* We haven't called efx_unmap_rx_buffer yet, 541 efx_recycle_rx_buffer(channel, rx_buf);
531 * so fini the entire rx_buffer here */ 542
532 efx_fini_rx_buffer(rx_queue, rx_buf); 543 /* Don't hold off the previous receive */
533 return; 544 rx_buf = NULL;
545 goto out;
534 } 546 }
535 547
536 /* Release card resources - assumes all RX buffers consumed in-order 548 /* Release card resources - assumes all RX buffers consumed in-order
@@ -547,6 +559,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
547 * prefetched into cache. 559 * prefetched into cache.
548 */ 560 */
549 rx_buf->len = len; 561 rx_buf->len = len;
562out:
550 if (rx_queue->channel->rx_pkt) 563 if (rx_queue->channel->rx_pkt)
551 __efx_rx_packet(rx_queue->channel, 564 __efx_rx_packet(rx_queue->channel,
552 rx_queue->channel->rx_pkt, 565 rx_queue->channel->rx_pkt,
@@ -562,6 +575,9 @@ void __efx_rx_packet(struct efx_channel *channel,
562 struct efx_nic *efx = channel->efx; 575 struct efx_nic *efx = channel->efx;
563 struct sk_buff *skb; 576 struct sk_buff *skb;
564 577
578 rx_buf->data += efx->type->rx_buffer_hash_size;
579 rx_buf->len -= efx->type->rx_buffer_hash_size;
580
565 /* If we're in loopback test, then pass the packet directly to the 581 /* If we're in loopback test, then pass the packet directly to the
566 * loopback layer, and free the rx_buf here 582 * loopback layer, and free the rx_buf here
567 */ 583 */
@@ -574,8 +590,12 @@ void __efx_rx_packet(struct efx_channel *channel,
574 if (rx_buf->skb) { 590 if (rx_buf->skb) {
575 prefetch(skb_shinfo(rx_buf->skb)); 591 prefetch(skb_shinfo(rx_buf->skb));
576 592
593 skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
577 skb_put(rx_buf->skb, rx_buf->len); 594 skb_put(rx_buf->skb, rx_buf->len);
578 595
596 if (efx->net_dev->features & NETIF_F_RXHASH)
597 rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
598
579 /* Move past the ethernet header. rx_buf->data still points 599 /* Move past the ethernet header. rx_buf->data still points
580 * at the ethernet header */ 600 * at the ethernet header */
581 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, 601 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
@@ -633,7 +653,8 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
633 unsigned int rxq_size; 653 unsigned int rxq_size;
634 int rc; 654 int rc;
635 655
636 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); 656 netif_dbg(efx, probe, efx->net_dev,
657 "creating RX queue %d\n", rx_queue->queue);
637 658
638 /* Allocate RX buffers */ 659 /* Allocate RX buffers */
639 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); 660 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
@@ -653,7 +674,8 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
653{ 674{
654 unsigned int max_fill, trigger, limit; 675 unsigned int max_fill, trigger, limit;
655 676
656 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); 677 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
678 "initialising RX queue %d\n", rx_queue->queue);
657 679
658 /* Initialise ptr fields */ 680 /* Initialise ptr fields */
659 rx_queue->added_count = 0; 681 rx_queue->added_count = 0;
@@ -680,8 +702,10 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
680 int i; 702 int i;
681 struct efx_rx_buffer *rx_buf; 703 struct efx_rx_buffer *rx_buf;
682 704
683 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); 705 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
706 "shutting down RX queue %d\n", rx_queue->queue);
684 707
708 del_timer_sync(&rx_queue->slow_fill);
685 efx_nic_fini_rx(rx_queue); 709 efx_nic_fini_rx(rx_queue);
686 710
687 /* Release RX buffers NB start at index 0 not current HW ptr */ 711 /* Release RX buffers NB start at index 0 not current HW ptr */
@@ -691,21 +715,12 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
691 efx_fini_rx_buffer(rx_queue, rx_buf); 715 efx_fini_rx_buffer(rx_queue, rx_buf);
692 } 716 }
693 } 717 }
694
695 /* For a page that is part-way through splitting into RX buffers */
696 if (rx_queue->buf_page != NULL) {
697 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
698 efx_rx_buf_size(rx_queue->efx),
699 PCI_DMA_FROMDEVICE);
700 __free_pages(rx_queue->buf_page,
701 rx_queue->efx->rx_buffer_order);
702 rx_queue->buf_page = NULL;
703 }
704} 718}
705 719
706void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 720void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
707{ 721{
708 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); 722 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
723 "destroying RX queue %d\n", rx_queue->queue);
709 724
710 efx_nic_remove_rx(rx_queue); 725 efx_nic_remove_rx(rx_queue);
711 726
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 371e86cc090f..85f015f005d5 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -38,7 +38,7 @@ struct efx_loopback_payload {
38 struct udphdr udp; 38 struct udphdr udp;
39 __be16 iteration; 39 __be16 iteration;
40 const char msg[64]; 40 const char msg[64];
41} __attribute__ ((packed)); 41} __packed;
42 42
43/* Loopback test source MAC address */ 43/* Loopback test source MAC address */
44static const unsigned char payload_source[ETH_ALEN] = { 44static const unsigned char payload_source[ETH_ALEN] = {
@@ -123,7 +123,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
123{ 123{
124 struct efx_channel *channel; 124 struct efx_channel *channel;
125 125
126 EFX_LOG(efx, "testing interrupts\n"); 126 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
127 tests->interrupt = -1; 127 tests->interrupt = -1;
128 128
129 /* Reset interrupt flag */ 129 /* Reset interrupt flag */
@@ -142,16 +142,17 @@ static int efx_test_interrupts(struct efx_nic *efx,
142 efx_nic_generate_interrupt(efx); 142 efx_nic_generate_interrupt(efx);
143 143
144 /* Wait for arrival of test interrupt. */ 144 /* Wait for arrival of test interrupt. */
145 EFX_LOG(efx, "waiting for test interrupt\n"); 145 netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
146 schedule_timeout_uninterruptible(HZ / 10); 146 schedule_timeout_uninterruptible(HZ / 10);
147 if (efx->last_irq_cpu >= 0) 147 if (efx->last_irq_cpu >= 0)
148 goto success; 148 goto success;
149 149
150 EFX_ERR(efx, "timed out waiting for interrupt\n"); 150 netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
151 return -ETIMEDOUT; 151 return -ETIMEDOUT;
152 152
153 success: 153 success:
154 EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx), 154 netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
155 INT_MODE(efx),
155 efx->last_irq_cpu); 156 efx->last_irq_cpu);
156 tests->interrupt = 1; 157 tests->interrupt = 1;
157 return 0; 158 return 0;
@@ -161,23 +162,18 @@ static int efx_test_interrupts(struct efx_nic *efx,
161static int efx_test_eventq_irq(struct efx_channel *channel, 162static int efx_test_eventq_irq(struct efx_channel *channel,
162 struct efx_self_tests *tests) 163 struct efx_self_tests *tests)
163{ 164{
164 unsigned int magic, count; 165 struct efx_nic *efx = channel->efx;
165 166 unsigned int magic_count, count;
166 /* Channel specific code, limited to 20 bits */
167 magic = (0x00010150 + channel->channel);
168 EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
169 channel->channel, magic);
170 167
171 tests->eventq_dma[channel->channel] = -1; 168 tests->eventq_dma[channel->channel] = -1;
172 tests->eventq_int[channel->channel] = -1; 169 tests->eventq_int[channel->channel] = -1;
173 tests->eventq_poll[channel->channel] = -1; 170 tests->eventq_poll[channel->channel] = -1;
174 171
175 /* Reset flag and zero magic word */ 172 magic_count = channel->magic_count;
176 channel->efx->last_irq_cpu = -1; 173 channel->efx->last_irq_cpu = -1;
177 channel->eventq_magic = 0;
178 smp_wmb(); 174 smp_wmb();
179 175
180 efx_nic_generate_test_event(channel, magic); 176 efx_nic_generate_test_event(channel);
181 177
182 /* Wait for arrival of interrupt */ 178 /* Wait for arrival of interrupt */
183 count = 0; 179 count = 0;
@@ -187,33 +183,36 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
187 if (channel->work_pending) 183 if (channel->work_pending)
188 efx_process_channel_now(channel); 184 efx_process_channel_now(channel);
189 185
190 if (channel->eventq_magic == magic) 186 if (channel->magic_count != magic_count)
191 goto eventq_ok; 187 goto eventq_ok;
192 } while (++count < 2); 188 } while (++count < 2);
193 189
194 EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", 190 netif_err(efx, drv, efx->net_dev,
195 channel->channel); 191 "channel %d timed out waiting for event queue\n",
192 channel->channel);
196 193
197 /* See if interrupt arrived */ 194 /* See if interrupt arrived */
198 if (channel->efx->last_irq_cpu >= 0) { 195 if (channel->efx->last_irq_cpu >= 0) {
199 EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " 196 netif_err(efx, drv, efx->net_dev,
200 "during event queue test\n", channel->channel, 197 "channel %d saw interrupt on CPU%d "
201 raw_smp_processor_id()); 198 "during event queue test\n", channel->channel,
199 raw_smp_processor_id());
202 tests->eventq_int[channel->channel] = 1; 200 tests->eventq_int[channel->channel] = 1;
203 } 201 }
204 202
205 /* Check to see if event was received even if interrupt wasn't */ 203 /* Check to see if event was received even if interrupt wasn't */
206 efx_process_channel_now(channel); 204 efx_process_channel_now(channel);
207 if (channel->eventq_magic == magic) { 205 if (channel->magic_count != magic_count) {
208 EFX_ERR(channel->efx, "channel %d event was generated, but " 206 netif_err(efx, drv, efx->net_dev,
209 "failed to trigger an interrupt\n", channel->channel); 207 "channel %d event was generated, but "
208 "failed to trigger an interrupt\n", channel->channel);
210 tests->eventq_dma[channel->channel] = 1; 209 tests->eventq_dma[channel->channel] = 1;
211 } 210 }
212 211
213 return -ETIMEDOUT; 212 return -ETIMEDOUT;
214 eventq_ok: 213 eventq_ok:
215 EFX_LOG(channel->efx, "channel %d event queue passed\n", 214 netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
216 channel->channel); 215 channel->channel);
217 tests->eventq_dma[channel->channel] = 1; 216 tests->eventq_dma[channel->channel] = 1;
218 tests->eventq_int[channel->channel] = 1; 217 tests->eventq_int[channel->channel] = 1;
219 tests->eventq_poll[channel->channel] = 1; 218 tests->eventq_poll[channel->channel] = 1;
@@ -266,51 +265,57 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
266 265
267 /* Check that header exists */ 266 /* Check that header exists */
268 if (pkt_len < sizeof(received->header)) { 267 if (pkt_len < sizeof(received->header)) {
269 EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " 268 netif_err(efx, drv, efx->net_dev,
270 "test\n", pkt_len, LOOPBACK_MODE(efx)); 269 "saw runt RX packet (length %d) in %s loopback "
270 "test\n", pkt_len, LOOPBACK_MODE(efx));
271 goto err; 271 goto err;
272 } 272 }
273 273
274 /* Check that the ethernet header exists */ 274 /* Check that the ethernet header exists */
275 if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { 275 if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
276 EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", 276 netif_err(efx, drv, efx->net_dev,
277 LOOPBACK_MODE(efx)); 277 "saw non-loopback RX packet in %s loopback test\n",
278 LOOPBACK_MODE(efx));
278 goto err; 279 goto err;
279 } 280 }
280 281
281 /* Check packet length */ 282 /* Check packet length */
282 if (pkt_len != sizeof(*payload)) { 283 if (pkt_len != sizeof(*payload)) {
283 EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " 284 netif_err(efx, drv, efx->net_dev,
284 "%s loopback test\n", pkt_len, (int)sizeof(*payload), 285 "saw incorrect RX packet length %d (wanted %d) in "
285 LOOPBACK_MODE(efx)); 286 "%s loopback test\n", pkt_len, (int)sizeof(*payload),
287 LOOPBACK_MODE(efx));
286 goto err; 288 goto err;
287 } 289 }
288 290
289 /* Check that IP header matches */ 291 /* Check that IP header matches */
290 if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { 292 if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
291 EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", 293 netif_err(efx, drv, efx->net_dev,
292 LOOPBACK_MODE(efx)); 294 "saw corrupted IP header in %s loopback test\n",
295 LOOPBACK_MODE(efx));
293 goto err; 296 goto err;
294 } 297 }
295 298
296 /* Check that msg and padding matches */ 299 /* Check that msg and padding matches */
297 if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { 300 if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
298 EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", 301 netif_err(efx, drv, efx->net_dev,
299 LOOPBACK_MODE(efx)); 302 "saw corrupted RX packet in %s loopback test\n",
303 LOOPBACK_MODE(efx));
300 goto err; 304 goto err;
301 } 305 }
302 306
303 /* Check that iteration matches */ 307 /* Check that iteration matches */
304 if (received->iteration != payload->iteration) { 308 if (received->iteration != payload->iteration) {
305 EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " 309 netif_err(efx, drv, efx->net_dev,
306 "%s loopback test\n", ntohs(received->iteration), 310 "saw RX packet from iteration %d (wanted %d) in "
307 ntohs(payload->iteration), LOOPBACK_MODE(efx)); 311 "%s loopback test\n", ntohs(received->iteration),
312 ntohs(payload->iteration), LOOPBACK_MODE(efx));
308 goto err; 313 goto err;
309 } 314 }
310 315
311 /* Increase correct RX count */ 316 /* Increase correct RX count */
312 EFX_TRACE(efx, "got loopback RX in %s loopback test\n", 317 netif_vdbg(efx, drv, efx->net_dev,
313 LOOPBACK_MODE(efx)); 318 "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
314 319
315 atomic_inc(&state->rx_good); 320 atomic_inc(&state->rx_good);
316 return; 321 return;
@@ -318,10 +323,10 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
318 err: 323 err:
319#ifdef EFX_ENABLE_DEBUG 324#ifdef EFX_ENABLE_DEBUG
320 if (atomic_read(&state->rx_bad) == 0) { 325 if (atomic_read(&state->rx_bad) == 0) {
321 EFX_ERR(efx, "received packet:\n"); 326 netif_err(efx, drv, efx->net_dev, "received packet:\n");
322 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 327 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
323 buf_ptr, pkt_len, 0); 328 buf_ptr, pkt_len, 0);
324 EFX_ERR(efx, "expected packet:\n"); 329 netif_err(efx, drv, efx->net_dev, "expected packet:\n");
325 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 330 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
326 &state->payload, sizeof(state->payload), 0); 331 &state->payload, sizeof(state->payload), 0);
327 } 332 }
@@ -402,9 +407,11 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
402 netif_tx_unlock_bh(efx->net_dev); 407 netif_tx_unlock_bh(efx->net_dev);
403 408
404 if (rc != NETDEV_TX_OK) { 409 if (rc != NETDEV_TX_OK) {
405 EFX_ERR(efx, "TX queue %d could not transmit packet %d " 410 netif_err(efx, drv, efx->net_dev,
406 "of %d in %s loopback test\n", tx_queue->queue, 411 "TX queue %d could not transmit packet %d of "
407 i + 1, state->packet_count, LOOPBACK_MODE(efx)); 412 "%d in %s loopback test\n", tx_queue->queue,
413 i + 1, state->packet_count,
414 LOOPBACK_MODE(efx));
408 415
409 /* Defer cleaning up the other skbs for the caller */ 416 /* Defer cleaning up the other skbs for the caller */
410 kfree_skb(skb); 417 kfree_skb(skb);
@@ -460,20 +467,22 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
460 /* Don't free the skbs; they will be picked up on TX 467 /* Don't free the skbs; they will be picked up on TX
461 * overflow or channel teardown. 468 * overflow or channel teardown.
462 */ 469 */
463 EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " 470 netif_err(efx, drv, efx->net_dev,
464 "TX completion events in %s loopback test\n", 471 "TX queue %d saw only %d out of an expected %d "
465 tx_queue->queue, tx_done, state->packet_count, 472 "TX completion events in %s loopback test\n",
466 LOOPBACK_MODE(efx)); 473 tx_queue->queue, tx_done, state->packet_count,
474 LOOPBACK_MODE(efx));
467 rc = -ETIMEDOUT; 475 rc = -ETIMEDOUT;
468 /* Allow to fall through so we see the RX errors as well */ 476 /* Allow to fall through so we see the RX errors as well */
469 } 477 }
470 478
471 /* We may always be up to a flush away from our desired packet total */ 479 /* We may always be up to a flush away from our desired packet total */
472 if (rx_good != state->packet_count) { 480 if (rx_good != state->packet_count) {
473 EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " 481 netif_dbg(efx, drv, efx->net_dev,
474 "received packets in %s loopback test\n", 482 "TX queue %d saw only %d out of an expected %d "
475 tx_queue->queue, rx_good, state->packet_count, 483 "received packets in %s loopback test\n",
476 LOOPBACK_MODE(efx)); 484 tx_queue->queue, rx_good, state->packet_count,
485 LOOPBACK_MODE(efx));
477 rc = -ETIMEDOUT; 486 rc = -ETIMEDOUT;
478 /* Fall through */ 487 /* Fall through */
479 } 488 }
@@ -505,9 +514,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
505 return -ENOMEM; 514 return -ENOMEM;
506 state->flush = false; 515 state->flush = false;
507 516
508 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 517 netif_dbg(efx, drv, efx->net_dev,
509 "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 518 "TX queue %d testing %s loopback with %d packets\n",
510 state->packet_count); 519 tx_queue->queue, LOOPBACK_MODE(efx),
520 state->packet_count);
511 521
512 efx_iterate_state(efx); 522 efx_iterate_state(efx);
513 begin_rc = efx_begin_loopback(tx_queue); 523 begin_rc = efx_begin_loopback(tx_queue);
@@ -531,9 +541,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
531 } 541 }
532 } 542 }
533 543
534 EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " 544 netif_dbg(efx, drv, efx->net_dev,
535 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 545 "TX queue %d passed %s loopback test with a burst length "
536 state->packet_count); 546 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
547 state->packet_count);
537 548
538 return 0; 549 return 0;
539} 550}
@@ -545,7 +556,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
545static int efx_wait_for_link(struct efx_nic *efx) 556static int efx_wait_for_link(struct efx_nic *efx)
546{ 557{
547 struct efx_link_state *link_state = &efx->link_state; 558 struct efx_link_state *link_state = &efx->link_state;
548 int count; 559 int count, link_up_count = 0;
549 bool link_up; 560 bool link_up;
550 561
551 for (count = 0; count < 40; count++) { 562 for (count = 0; count < 40; count++) {
@@ -567,8 +578,12 @@ static int efx_wait_for_link(struct efx_nic *efx)
567 link_up = !efx->mac_op->check_fault(efx); 578 link_up = !efx->mac_op->check_fault(efx);
568 mutex_unlock(&efx->mac_lock); 579 mutex_unlock(&efx->mac_lock);
569 580
570 if (link_up) 581 if (link_up) {
571 return 0; 582 if (++link_up_count == 2)
583 return 0;
584 } else {
585 link_up_count = 0;
586 }
572 } 587 }
573 588
574 return -ETIMEDOUT; 589 return -ETIMEDOUT;
@@ -604,15 +619,17 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
604 rc = __efx_reconfigure_port(efx); 619 rc = __efx_reconfigure_port(efx);
605 mutex_unlock(&efx->mac_lock); 620 mutex_unlock(&efx->mac_lock);
606 if (rc) { 621 if (rc) {
607 EFX_ERR(efx, "unable to move into %s loopback\n", 622 netif_err(efx, drv, efx->net_dev,
608 LOOPBACK_MODE(efx)); 623 "unable to move into %s loopback\n",
624 LOOPBACK_MODE(efx));
609 goto out; 625 goto out;
610 } 626 }
611 627
612 rc = efx_wait_for_link(efx); 628 rc = efx_wait_for_link(efx);
613 if (rc) { 629 if (rc) {
614 EFX_ERR(efx, "loopback %s never came up\n", 630 netif_err(efx, drv, efx->net_dev,
615 LOOPBACK_MODE(efx)); 631 "loopback %s never came up\n",
632 LOOPBACK_MODE(efx));
616 goto out; 633 goto out;
617 } 634 }
618 635
@@ -720,7 +737,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
720 rc_reset = rc; 737 rc_reset = rc;
721 738
722 if (rc_reset) { 739 if (rc_reset) {
723 EFX_ERR(efx, "Unable to recover from chip test\n"); 740 netif_err(efx, drv, efx->net_dev,
741 "Unable to recover from chip test\n");
724 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 742 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
725 return rc_reset; 743 return rc_reset;
726 } 744 }
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f2b1e6180753..3fab030f8ab5 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -118,10 +118,11 @@ static int siena_probe_port(struct efx_nic *efx)
118 MC_CMD_MAC_NSTATS * sizeof(u64)); 118 MC_CMD_MAC_NSTATS * sizeof(u64));
119 if (rc) 119 if (rc)
120 return rc; 120 return rc;
121 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", 121 netif_dbg(efx, probe, efx->net_dev,
122 (u64)efx->stats_buffer.dma_addr, 122 "stats buffer at %llx (virt %p phys %llx)\n",
123 efx->stats_buffer.addr, 123 (u64)efx->stats_buffer.dma_addr,
124 (u64)virt_to_phys(efx->stats_buffer.addr)); 124 efx->stats_buffer.addr,
125 (u64)virt_to_phys(efx->stats_buffer.addr));
125 126
126 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); 127 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
127 128
@@ -216,7 +217,8 @@ static int siena_probe_nic(struct efx_nic *efx)
216 efx->nic_data = nic_data; 217 efx->nic_data = nic_data;
217 218
218 if (efx_nic_fpga_ver(efx) != 0) { 219 if (efx_nic_fpga_ver(efx) != 0) {
219 EFX_ERR(efx, "Siena FPGA not supported\n"); 220 netif_err(efx, probe, efx->net_dev,
221 "Siena FPGA not supported\n");
220 rc = -ENODEV; 222 rc = -ENODEV;
221 goto fail1; 223 goto fail1;
222 } 224 }
@@ -233,8 +235,8 @@ static int siena_probe_nic(struct efx_nic *efx)
233 235
234 rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); 236 rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
235 if (rc) { 237 if (rc) {
236 EFX_ERR(efx, "Failed to read MCPU firmware version - " 238 netif_err(efx, probe, efx->net_dev,
237 "rc %d\n", rc); 239 "Failed to read MCPU firmware version - rc %d\n", rc);
238 goto fail1; /* MCPU absent? */ 240 goto fail1; /* MCPU absent? */
239 } 241 }
240 242
@@ -242,17 +244,19 @@ static int siena_probe_nic(struct efx_nic *efx)
242 * filter settings. We must do this before we reset the NIC */ 244 * filter settings. We must do this before we reset the NIC */
243 rc = efx_mcdi_drv_attach(efx, true, &already_attached); 245 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
244 if (rc) { 246 if (rc) {
245 EFX_ERR(efx, "Unable to register driver with MCPU\n"); 247 netif_err(efx, probe, efx->net_dev,
248 "Unable to register driver with MCPU\n");
246 goto fail2; 249 goto fail2;
247 } 250 }
248 if (already_attached) 251 if (already_attached)
249 /* Not a fatal error */ 252 /* Not a fatal error */
250 EFX_ERR(efx, "Host already registered with MCPU\n"); 253 netif_err(efx, probe, efx->net_dev,
254 "Host already registered with MCPU\n");
251 255
252 /* Now we can reset the NIC */ 256 /* Now we can reset the NIC */
253 rc = siena_reset_hw(efx, RESET_TYPE_ALL); 257 rc = siena_reset_hw(efx, RESET_TYPE_ALL);
254 if (rc) { 258 if (rc) {
255 EFX_ERR(efx, "failed to reset NIC\n"); 259 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
256 goto fail3; 260 goto fail3;
257 } 261 }
258 262
@@ -264,24 +268,23 @@ static int siena_probe_nic(struct efx_nic *efx)
264 goto fail4; 268 goto fail4;
265 BUG_ON(efx->irq_status.dma_addr & 0x0f); 269 BUG_ON(efx->irq_status.dma_addr & 0x0f);
266 270
267 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", 271 netif_dbg(efx, probe, efx->net_dev,
268 (unsigned long long)efx->irq_status.dma_addr, 272 "INT_KER at %llx (virt %p phys %llx)\n",
269 efx->irq_status.addr, 273 (unsigned long long)efx->irq_status.dma_addr,
270 (unsigned long long)virt_to_phys(efx->irq_status.addr)); 274 efx->irq_status.addr,
275 (unsigned long long)virt_to_phys(efx->irq_status.addr));
271 276
272 /* Read in the non-volatile configuration */ 277 /* Read in the non-volatile configuration */
273 rc = siena_probe_nvconfig(efx); 278 rc = siena_probe_nvconfig(efx);
274 if (rc == -EINVAL) { 279 if (rc == -EINVAL) {
275 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); 280 netif_err(efx, probe, efx->net_dev,
281 "NVRAM is invalid therefore using defaults\n");
276 efx->phy_type = PHY_TYPE_NONE; 282 efx->phy_type = PHY_TYPE_NONE;
277 efx->mdio.prtad = MDIO_PRTAD_NONE; 283 efx->mdio.prtad = MDIO_PRTAD_NONE;
278 } else if (rc) { 284 } else if (rc) {
279 goto fail5; 285 goto fail5;
280 } 286 }
281 287
282 get_random_bytes(&nic_data->ipv6_rss_key,
283 sizeof(nic_data->ipv6_rss_key));
284
285 return 0; 288 return 0;
286 289
287fail5: 290fail5:
@@ -301,7 +304,6 @@ fail1:
301 */ 304 */
302static int siena_init_nic(struct efx_nic *efx) 305static int siena_init_nic(struct efx_nic *efx)
303{ 306{
304 struct siena_nic_data *nic_data = efx->nic_data;
305 efx_oword_t temp; 307 efx_oword_t temp;
306 int rc; 308 int rc;
307 309
@@ -326,25 +328,36 @@ static int siena_init_nic(struct efx_nic *efx)
326 efx_reado(efx, &temp, FR_AZ_RX_CFG); 328 efx_reado(efx, &temp, FR_AZ_RX_CFG);
327 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); 329 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
328 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); 330 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
331 /* Enable hash insertion. This is broken for the 'Falcon' hash
332 * if IPv6 hashing is also enabled, so also select Toeplitz
333 * TCP/IPv4 and IPv4 hashes. */
334 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
335 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
336 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
329 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 337 efx_writeo(efx, &temp, FR_AZ_RX_CFG);
330 338
339 /* Set hash key for IPv4 */
340 memcpy(&temp, efx->rx_hash_key, sizeof(temp));
341 efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
342
331 /* Enable IPv6 RSS */ 343 /* Enable IPv6 RSS */
332 BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) != 344 BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
333 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || 345 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
334 FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); 346 FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
335 memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp)); 347 memcpy(&temp, efx->rx_hash_key, sizeof(temp));
336 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); 348 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
337 memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp)); 349 memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
338 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); 350 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
339 EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, 351 EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
340 FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); 352 FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
341 memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp), 353 memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
342 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); 354 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
343 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); 355 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
344 356
345 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) 357 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
346 /* No MCDI operation has been defined to set thresholds */ 358 /* No MCDI operation has been defined to set thresholds */
347 EFX_ERR(efx, "ignoring RX flow control thresholds\n"); 359 netif_err(efx, hw, efx->net_dev,
360 "ignoring RX flow control thresholds\n");
348 361
349 /* Enable event logging */ 362 /* Enable event logging */
350 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 363 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -565,7 +578,8 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
565 578
566 return 0; 579 return 0;
567 fail: 580 fail:
568 EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc); 581 netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
582 __func__, type, rc);
569 return rc; 583 return rc;
570} 584}
571 585
@@ -628,6 +642,7 @@ struct efx_nic_type siena_a0_nic_type = {
628 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, 642 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
629 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, 643 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
630 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 644 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
645 .rx_buffer_hash_size = 0x10,
631 .rx_buffer_padding = 0, 646 .rx_buffer_padding = 0,
632 .max_interrupt_mode = EFX_INT_MODE_MSIX, 647 .max_interrupt_mode = EFX_INT_MODE_MSIX,
633 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 648 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -635,6 +650,7 @@ struct efx_nic_type siena_a0_nic_type = {
635 * channels */ 650 * channels */
636 .tx_dc_base = 0x88000, 651 .tx_dc_base = 0x88000,
637 .rx_dc_base = 0x68000, 652 .rx_dc_base = 0x68000,
638 .offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM, 653 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
654 NETIF_F_RXHASH),
639 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, 655 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
640}; 656};
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f21efe7bd316..6791be90c2fe 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -228,7 +228,8 @@ int sft9001_wait_boot(struct efx_nic *efx)
228 boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS, 228 boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
229 PCS_BOOT_STATUS_REG); 229 PCS_BOOT_STATUS_REG);
230 if (boot_stat >= 0) { 230 if (boot_stat >= 0) {
231 EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat); 231 netif_dbg(efx, hw, efx->net_dev,
232 "PHY boot status = %#x\n", boot_stat);
232 switch (boot_stat & 233 switch (boot_stat &
233 ((1 << PCS_BOOT_FATAL_ERROR_LBN) | 234 ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
234 (3 << PCS_BOOT_PROGRESS_LBN) | 235 (3 << PCS_BOOT_PROGRESS_LBN) |
@@ -463,10 +464,11 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
463 reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN; 464 reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
464 } else { 465 } else {
465 reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN; 466 reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
466 EFX_ERR(efx, "appears to be plugged into a port" 467 netif_err(efx, link, efx->net_dev,
467 " that is not 10GBASE-T capable. The PHY" 468 "appears to be plugged into a port"
468 " supports 10GBASE-T ONLY, so no link can" 469 " that is not 10GBASE-T capable. The PHY"
469 " be established\n"); 470 " supports 10GBASE-T ONLY, so no link can"
471 " be established\n");
470 } 472 }
471 efx_mdio_write(efx, MDIO_MMD_PMAPMD, 473 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
472 PMA_PMD_LED_OVERR_REG, reg); 474 PMA_PMD_LED_OVERR_REG, reg);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 6bb12a87ef2d..c6942da2c99a 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -42,7 +42,7 @@ void efx_stop_queue(struct efx_channel *channel)
42 return; 42 return;
43 43
44 spin_lock_bh(&channel->tx_stop_lock); 44 spin_lock_bh(&channel->tx_stop_lock);
45 EFX_TRACE(efx, "stop TX queue\n"); 45 netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
46 46
47 atomic_inc(&channel->tx_stop_count); 47 atomic_inc(&channel->tx_stop_count);
48 netif_tx_stop_queue( 48 netif_tx_stop_queue(
@@ -64,7 +64,7 @@ void efx_wake_queue(struct efx_channel *channel)
64 local_bh_disable(); 64 local_bh_disable();
65 if (atomic_dec_and_lock(&channel->tx_stop_count, 65 if (atomic_dec_and_lock(&channel->tx_stop_count,
66 &channel->tx_stop_lock)) { 66 &channel->tx_stop_lock)) {
67 EFX_TRACE(efx, "waking TX queue\n"); 67 netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
68 netif_tx_wake_queue( 68 netif_tx_wake_queue(
69 netdev_get_tx_queue( 69 netdev_get_tx_queue(
70 efx->net_dev, 70 efx->net_dev,
@@ -94,8 +94,9 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
94 if (buffer->skb) { 94 if (buffer->skb) {
95 dev_kfree_skb_any((struct sk_buff *) buffer->skb); 95 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
96 buffer->skb = NULL; 96 buffer->skb = NULL;
97 EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x " 97 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
98 "complete\n", tx_queue->queue, read_ptr); 98 "TX queue %d transmission id %x complete\n",
99 tx_queue->queue, tx_queue->read_count);
99 } 100 }
100} 101}
101 102
@@ -300,9 +301,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
300 return NETDEV_TX_OK; 301 return NETDEV_TX_OK;
301 302
302 pci_err: 303 pci_err:
303 EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d " 304 netif_err(efx, tx_err, efx->net_dev,
304 "fragments for DMA\n", tx_queue->queue, skb->len, 305 " TX queue %d could not map skb with %d bytes %d "
305 skb_shinfo(skb)->nr_frags + 1); 306 "fragments for DMA\n", tx_queue->queue, skb->len,
307 skb_shinfo(skb)->nr_frags + 1);
306 308
307 /* Mark the packet as transmitted, and free the SKB ourselves */ 309 /* Mark the packet as transmitted, and free the SKB ourselves */
308 dev_kfree_skb_any(skb); 310 dev_kfree_skb_any(skb);
@@ -354,9 +356,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
354 while (read_ptr != stop_index) { 356 while (read_ptr != stop_index) {
355 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 357 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
356 if (unlikely(buffer->len == 0)) { 358 if (unlikely(buffer->len == 0)) {
357 EFX_ERR(tx_queue->efx, "TX queue %d spurious TX " 359 netif_err(efx, tx_err, efx->net_dev,
358 "completion id %x\n", tx_queue->queue, 360 "TX queue %d spurious TX completion id %x\n",
359 read_ptr); 361 tx_queue->queue, read_ptr);
360 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); 362 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
361 return; 363 return;
362 } 364 }
@@ -431,7 +433,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
431 unsigned int txq_size; 433 unsigned int txq_size;
432 int i, rc; 434 int i, rc;
433 435
434 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); 436 netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
437 tx_queue->queue);
435 438
436 /* Allocate software ring */ 439 /* Allocate software ring */
437 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer); 440 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
@@ -456,7 +459,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
456 459
457void efx_init_tx_queue(struct efx_tx_queue *tx_queue) 460void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
458{ 461{
459 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue); 462 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
463 "initialising TX queue %d\n", tx_queue->queue);
460 464
461 tx_queue->insert_count = 0; 465 tx_queue->insert_count = 0;
462 tx_queue->write_count = 0; 466 tx_queue->write_count = 0;
@@ -488,7 +492,8 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
488 492
489void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 493void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
490{ 494{
491 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); 495 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
496 "shutting down TX queue %d\n", tx_queue->queue);
492 497
493 /* Flush TX queue, remove descriptor ring */ 498 /* Flush TX queue, remove descriptor ring */
494 efx_nic_fini_tx(tx_queue); 499 efx_nic_fini_tx(tx_queue);
@@ -507,7 +512,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
507 512
508void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 513void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
509{ 514{
510 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); 515 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
516 "destroying TX queue %d\n", tx_queue->queue);
511 efx_nic_remove_tx(tx_queue); 517 efx_nic_remove_tx(tx_queue);
512 518
513 kfree(tx_queue->buffer); 519 kfree(tx_queue->buffer);
@@ -639,8 +645,8 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
639 645
640 base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); 646 base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
641 if (base_kva == NULL) { 647 if (base_kva == NULL) {
642 EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" 648 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
643 " headers\n"); 649 "Unable to allocate page for TSO headers\n");
644 return -ENOMEM; 650 return -ENOMEM;
645 } 651 }
646 652
@@ -1124,7 +1130,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1124 return NETDEV_TX_OK; 1130 return NETDEV_TX_OK;
1125 1131
1126 mem_err: 1132 mem_err:
1127 EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); 1133 netif_err(efx, tx_err, efx->net_dev,
1134 "Out of memory for TSO headers, or PCI mapping error\n");
1128 dev_kfree_skb_any(skb); 1135 dev_kfree_skb_any(skb);
1129 goto unwind; 1136 goto unwind;
1130 1137
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 518f7fc91473..782e45a613d6 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -54,7 +54,7 @@
54/* Increase filter depth to avoid RX_RESET */ 54/* Increase filter depth to avoid RX_RESET */
55#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A 55#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
56/* Flushes may never complete */ 56/* Flushes may never complete */
57#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A 57#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
58/* Leak overlength packets rather than free */ 58/* Leak overlength packets rather than free */
59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
60 60
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 501a55ffce57..f5a9eb1df593 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -88,6 +88,55 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
88 .rpadir = 1, 88 .rpadir = 1,
89 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 89 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
90}; 90};
91#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
92#define SH_ETH_RESET_DEFAULT 1
93static void sh_eth_set_duplex(struct net_device *ndev)
94{
95 struct sh_eth_private *mdp = netdev_priv(ndev);
96 u32 ioaddr = ndev->base_addr;
97
98 if (mdp->duplex) /* Full */
99 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
100 else /* Half */
101 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
102}
103
104static void sh_eth_set_rate(struct net_device *ndev)
105{
106 struct sh_eth_private *mdp = netdev_priv(ndev);
107 u32 ioaddr = ndev->base_addr;
108
109 switch (mdp->speed) {
110 case 10: /* 10BASE */
111 ctrl_outl(0, ioaddr + RTRATE);
112 break;
113 case 100:/* 100BASE */
114 ctrl_outl(1, ioaddr + RTRATE);
115 break;
116 default:
117 break;
118 }
119}
120
121/* SH7757 */
122static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
123 .set_duplex = sh_eth_set_duplex,
124 .set_rate = sh_eth_set_rate,
125
126 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
127 .rmcr_value = 0x00000001,
128
129 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
130 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
131 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
132 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
133
134 .apr = 1,
135 .mpr = 1,
136 .tpauser = 1,
137 .hw_swap = 1,
138 .no_ade = 1,
139};
91 140
92#elif defined(CONFIG_CPU_SUBTYPE_SH7763) 141#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
93#define SH_ETH_HAS_TSU 1 142#define SH_ETH_HAS_TSU 1
@@ -1023,7 +1072,9 @@ static int sh_eth_open(struct net_device *ndev)
1023 pm_runtime_get_sync(&mdp->pdev->dev); 1072 pm_runtime_get_sync(&mdp->pdev->dev);
1024 1073
1025 ret = request_irq(ndev->irq, sh_eth_interrupt, 1074 ret = request_irq(ndev->irq, sh_eth_interrupt,
1026#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764) 1075#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1076 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1077 defined(CONFIG_CPU_SUBTYPE_SH7757)
1027 IRQF_SHARED, 1078 IRQF_SHARED,
1028#else 1079#else
1029 0, 1080 0,
@@ -1233,7 +1284,7 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1233 if (!phydev) 1284 if (!phydev)
1234 return -ENODEV; 1285 return -ENODEV;
1235 1286
1236 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 1287 return phy_mii_ioctl(phydev, rq, cmd);
1237} 1288}
1238 1289
1239#if defined(SH_ETH_HAS_TSU) 1290#if defined(SH_ETH_HAS_TSU)
@@ -1325,7 +1376,7 @@ static int sh_mdio_init(struct net_device *ndev, int id)
1325 bitbang->mdc_msk = 0x01; 1376 bitbang->mdc_msk = 0x01;
1326 bitbang->ctrl.ops = &bb_ops; 1377 bitbang->ctrl.ops = &bb_ops;
1327 1378
1328 /* MII contorller setting */ 1379 /* MII controller setting */
1329 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 1380 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
1330 if (!mdp->mii_bus) { 1381 if (!mdp->mii_bus) {
1331 ret = -ENOMEM; 1382 ret = -ENOMEM;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7985165e84fc..194e5cf8c763 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -79,7 +79,7 @@
79 79
80#define SKY2_EEPROM_MAGIC 0x9955aabb 80#define SKY2_EEPROM_MAGIC 0x9955aabb
81 81
82#define RING_NEXT(x,s) (((x)+1) & ((s)-1)) 82#define RING_NEXT(x, s) (((x)+1) & ((s)-1))
83 83
84static const u32 default_msg = 84static const u32 default_msg =
85 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 85 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
@@ -172,7 +172,7 @@ static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
172 udelay(10); 172 udelay(10);
173 } 173 }
174 174
175 dev_warn(&hw->pdev->dev,"%s: phy write timeout\n", hw->dev[port]->name); 175 dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
176 return -ETIMEDOUT; 176 return -ETIMEDOUT;
177 177
178io_error: 178io_error:
@@ -1067,7 +1067,7 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
1067 return le; 1067 return le;
1068} 1068}
1069 1069
1070static unsigned sky2_get_rx_threshold(struct sky2_port* sky2) 1070static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
1071{ 1071{
1072 unsigned size; 1072 unsigned size;
1073 1073
@@ -1078,7 +1078,7 @@ static unsigned sky2_get_rx_threshold(struct sky2_port* sky2)
1078 return (size - 8) / sizeof(u32); 1078 return (size - 8) / sizeof(u32);
1079} 1079}
1080 1080
1081static unsigned sky2_get_rx_data_size(struct sky2_port* sky2) 1081static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
1082{ 1082{
1083 struct rx_ring_info *re; 1083 struct rx_ring_info *re;
1084 unsigned size; 1084 unsigned size;
@@ -1102,7 +1102,7 @@ static unsigned sky2_get_rx_data_size(struct sky2_port* sky2)
1102} 1102}
1103 1103
1104/* Build description to hardware for one receive segment */ 1104/* Build description to hardware for one receive segment */
1105static void sky2_rx_add(struct sky2_port *sky2, u8 op, 1105static void sky2_rx_add(struct sky2_port *sky2, u8 op,
1106 dma_addr_t map, unsigned len) 1106 dma_addr_t map, unsigned len)
1107{ 1107{
1108 struct sky2_rx_le *le; 1108 struct sky2_rx_le *le;
@@ -3014,7 +3014,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
3014 hw->chip_id = sky2_read8(hw, B2_CHIP_ID); 3014 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
3015 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; 3015 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
3016 3016
3017 switch(hw->chip_id) { 3017 switch (hw->chip_id) {
3018 case CHIP_ID_YUKON_XL: 3018 case CHIP_ID_YUKON_XL:
3019 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; 3019 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
3020 if (hw->chip_rev < CHIP_REV_YU_XL_A2) 3020 if (hw->chip_rev < CHIP_REV_YU_XL_A2)
@@ -3685,7 +3685,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
3685 return 0; 3685 return 0;
3686} 3686}
3687 3687
3688static void inline sky2_add_filter(u8 filter[8], const u8 *addr) 3688static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3689{ 3689{
3690 u32 bit; 3690 u32 bit;
3691 3691
@@ -3911,7 +3911,7 @@ static int sky2_set_coalesce(struct net_device *dev,
3911 return -EINVAL; 3911 return -EINVAL;
3912 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) 3912 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
3913 return -EINVAL; 3913 return -EINVAL;
3914 if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING) 3914 if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
3915 return -EINVAL; 3915 return -EINVAL;
3916 3916
3917 if (ecmd->tx_coalesce_usecs == 0) 3917 if (ecmd->tx_coalesce_usecs == 0)
@@ -4188,17 +4188,13 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
4188static int sky2_set_flags(struct net_device *dev, u32 data) 4188static int sky2_set_flags(struct net_device *dev, u32 data)
4189{ 4189{
4190 struct sky2_port *sky2 = netdev_priv(dev); 4190 struct sky2_port *sky2 = netdev_priv(dev);
4191 u32 supported =
4192 (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
4193 int rc;
4191 4194
4192 if (data & ~ETH_FLAG_RXHASH) 4195 rc = ethtool_op_set_flags(dev, data, supported);
4193 return -EOPNOTSUPP; 4196 if (rc)
4194 4197 return rc;
4195 if (data & ETH_FLAG_RXHASH) {
4196 if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
4197 return -EINVAL;
4198
4199 dev->features |= NETIF_F_RXHASH;
4200 } else
4201 dev->features &= ~NETIF_F_RXHASH;
4202 4198
4203 rx_set_rss(dev); 4199 rx_set_rss(dev);
4204 4200
@@ -4376,7 +4372,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
4376 seq_printf(seq, "%u:", idx); 4372 seq_printf(seq, "%u:", idx);
4377 sop = 0; 4373 sop = 0;
4378 4374
4379 switch(le->opcode & ~HW_OWNER) { 4375 switch (le->opcode & ~HW_OWNER) {
4380 case OP_ADDR64: 4376 case OP_ADDR64:
4381 seq_printf(seq, " %#x:", a); 4377 seq_printf(seq, " %#x:", a);
4382 break; 4378 break;
@@ -4445,7 +4441,7 @@ static int sky2_device_event(struct notifier_block *unused,
4445 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) 4441 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
4446 return NOTIFY_DONE; 4442 return NOTIFY_DONE;
4447 4443
4448 switch(event) { 4444 switch (event) {
4449 case NETDEV_CHANGENAME: 4445 case NETDEV_CHANGENAME:
4450 if (sky2->debugfs) { 4446 if (sky2->debugfs) {
4451 sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, 4447 sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
@@ -4640,7 +4636,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
4640 struct pci_dev *pdev = hw->pdev; 4636 struct pci_dev *pdev = hw->pdev;
4641 int err; 4637 int err;
4642 4638
4643 init_waitqueue_head (&hw->msi_wait); 4639 init_waitqueue_head(&hw->msi_wait);
4644 4640
4645 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); 4641 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4646 4642
@@ -4757,7 +4753,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4757 * this driver uses software swapping. 4753 * this driver uses software swapping.
4758 */ 4754 */
4759 reg &= ~PCI_REV_DESC; 4755 reg &= ~PCI_REV_DESC;
4760 err = pci_write_config_dword(pdev,PCI_DEV_REG2, reg); 4756 err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
4761 if (err) { 4757 if (err) {
4762 dev_err(&pdev->dev, "PCI write config failed\n"); 4758 dev_err(&pdev->dev, "PCI write config failed\n");
4763 goto err_out_free_regions; 4759 goto err_out_free_regions;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 084eff21b67a..61891a6cacc2 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2161,21 +2161,21 @@ struct sky2_tx_le {
2161 __le16 length; /* also vlan tag or checksum start */ 2161 __le16 length; /* also vlan tag or checksum start */
2162 u8 ctrl; 2162 u8 ctrl;
2163 u8 opcode; 2163 u8 opcode;
2164} __attribute((packed)); 2164} __packed;
2165 2165
2166struct sky2_rx_le { 2166struct sky2_rx_le {
2167 __le32 addr; 2167 __le32 addr;
2168 __le16 length; 2168 __le16 length;
2169 u8 ctrl; 2169 u8 ctrl;
2170 u8 opcode; 2170 u8 opcode;
2171} __attribute((packed)); 2171} __packed;
2172 2172
2173struct sky2_status_le { 2173struct sky2_status_le {
2174 __le32 status; /* also checksum */ 2174 __le32 status; /* also checksum */
2175 __le16 length; /* also vlan tag */ 2175 __le16 length; /* also vlan tag */
2176 u8 css; 2176 u8 css;
2177 u8 opcode; 2177 u8 opcode;
2178} __attribute((packed)); 2178} __packed;
2179 2179
2180struct tx_ring_info { 2180struct tx_ring_info {
2181 struct sk_buff *skb; 2181 struct sk_buff *skb;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8d2772cc42f2..ee747919a766 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -83,43 +83,6 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
83 } 83 }
84} 84}
85 85
86#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6)
87
88/* We can only do 16-bit reads and writes in the static memory space. */
89#define SMC_CAN_USE_8BIT 0
90#define SMC_CAN_USE_16BIT 1
91#define SMC_CAN_USE_32BIT 0
92#define SMC_NOWAIT 1
93
94#define SMC_IO_SHIFT 0
95
96#define SMC_inw(a, r) in_be16((volatile u16 *)((a) + (r)))
97#define SMC_outw(v, a, r) out_be16((volatile u16 *)((a) + (r)), v)
98#define SMC_insw(a, r, p, l) \
99 do { \
100 unsigned long __port = (a) + (r); \
101 u16 *__p = (u16 *)(p); \
102 int __l = (l); \
103 insw(__port, __p, __l); \
104 while (__l > 0) { \
105 *__p = swab16(*__p); \
106 __p++; \
107 __l--; \
108 } \
109 } while (0)
110#define SMC_outsw(a, r, p, l) \
111 do { \
112 unsigned long __port = (a) + (r); \
113 u16 *__p = (u16 *)(p); \
114 int __l = (l); \
115 while (__l > 0) { \
116 /* Believe it or not, the swab isn't needed. */ \
117 outw( /* swab16 */ (*__p++), __port); \
118 __l--; \
119 } \
120 } while (0)
121#define SMC_IRQ_FLAGS (0)
122
123#elif defined(CONFIG_SA1100_PLEB) 86#elif defined(CONFIG_SA1100_PLEB)
124/* We can only do 16-bit reads and writes in the static memory space. */ 87/* We can only do 16-bit reads and writes in the static memory space. */
125#define SMC_CAN_USE_8BIT 1 88#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index cc559741b0fa..0909ae934ad0 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -84,8 +84,7 @@ struct smsc911x_data {
84 */ 84 */
85 spinlock_t mac_lock; 85 spinlock_t mac_lock;
86 86
87 /* spinlock to ensure 16-bit accesses are serialised. 87 /* spinlock to ensure register accesses are serialised */
88 * unused with a 32-bit bus */
89 spinlock_t dev_lock; 88 spinlock_t dev_lock;
90 89
91 struct phy_device *phy_dev; 90 struct phy_device *phy_dev;
@@ -118,37 +117,33 @@ struct smsc911x_data {
118 unsigned int hashlo; 117 unsigned int hashlo;
119}; 118};
120 119
121/* The 16-bit access functions are significantly slower, due to the locking 120static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
122 * necessary. If your bus hardware can be configured to do this for you
123 * (in response to a single 32-bit operation from software), you should use
124 * the 32-bit access functions instead. */
125
126static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
127{ 121{
128 if (pdata->config.flags & SMSC911X_USE_32BIT) 122 if (pdata->config.flags & SMSC911X_USE_32BIT)
129 return readl(pdata->ioaddr + reg); 123 return readl(pdata->ioaddr + reg);
130 124
131 if (pdata->config.flags & SMSC911X_USE_16BIT) { 125 if (pdata->config.flags & SMSC911X_USE_16BIT)
132 u32 data; 126 return ((readw(pdata->ioaddr + reg) & 0xFFFF) |
133 unsigned long flags;
134
135 /* these two 16-bit reads must be performed consecutively, so
136 * must not be interrupted by our own ISR (which would start
137 * another read operation) */
138 spin_lock_irqsave(&pdata->dev_lock, flags);
139 data = ((readw(pdata->ioaddr + reg) & 0xFFFF) |
140 ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); 127 ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16));
141 spin_unlock_irqrestore(&pdata->dev_lock, flags);
142
143 return data;
144 }
145 128
146 BUG(); 129 BUG();
147 return 0; 130 return 0;
148} 131}
149 132
150static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, 133static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
151 u32 val) 134{
135 u32 data;
136 unsigned long flags;
137
138 spin_lock_irqsave(&pdata->dev_lock, flags);
139 data = __smsc911x_reg_read(pdata, reg);
140 spin_unlock_irqrestore(&pdata->dev_lock, flags);
141
142 return data;
143}
144
145static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
146 u32 val)
152{ 147{
153 if (pdata->config.flags & SMSC911X_USE_32BIT) { 148 if (pdata->config.flags & SMSC911X_USE_32BIT) {
154 writel(val, pdata->ioaddr + reg); 149 writel(val, pdata->ioaddr + reg);
@@ -156,44 +151,54 @@ static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
156 } 151 }
157 152
158 if (pdata->config.flags & SMSC911X_USE_16BIT) { 153 if (pdata->config.flags & SMSC911X_USE_16BIT) {
159 unsigned long flags;
160
161 /* these two 16-bit writes must be performed consecutively, so
162 * must not be interrupted by our own ISR (which would start
163 * another read operation) */
164 spin_lock_irqsave(&pdata->dev_lock, flags);
165 writew(val & 0xFFFF, pdata->ioaddr + reg); 154 writew(val & 0xFFFF, pdata->ioaddr + reg);
166 writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); 155 writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2);
167 spin_unlock_irqrestore(&pdata->dev_lock, flags);
168 return; 156 return;
169 } 157 }
170 158
171 BUG(); 159 BUG();
172} 160}
173 161
162static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
163 u32 val)
164{
165 unsigned long flags;
166
167 spin_lock_irqsave(&pdata->dev_lock, flags);
168 __smsc911x_reg_write(pdata, reg, val);
169 spin_unlock_irqrestore(&pdata->dev_lock, flags);
170}
171
174/* Writes a packet to the TX_DATA_FIFO */ 172/* Writes a packet to the TX_DATA_FIFO */
175static inline void 173static inline void
176smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, 174smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
177 unsigned int wordcount) 175 unsigned int wordcount)
178{ 176{
177 unsigned long flags;
178
179 spin_lock_irqsave(&pdata->dev_lock, flags);
180
179 if (pdata->config.flags & SMSC911X_SWAP_FIFO) { 181 if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
180 while (wordcount--) 182 while (wordcount--)
181 smsc911x_reg_write(pdata, TX_DATA_FIFO, swab32(*buf++)); 183 __smsc911x_reg_write(pdata, TX_DATA_FIFO,
182 return; 184 swab32(*buf++));
185 goto out;
183 } 186 }
184 187
185 if (pdata->config.flags & SMSC911X_USE_32BIT) { 188 if (pdata->config.flags & SMSC911X_USE_32BIT) {
186 writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); 189 writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
187 return; 190 goto out;
188 } 191 }
189 192
190 if (pdata->config.flags & SMSC911X_USE_16BIT) { 193 if (pdata->config.flags & SMSC911X_USE_16BIT) {
191 while (wordcount--) 194 while (wordcount--)
192 smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); 195 __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
193 return; 196 goto out;
194 } 197 }
195 198
196 BUG(); 199 BUG();
200out:
201 spin_unlock_irqrestore(&pdata->dev_lock, flags);
197} 202}
198 203
199/* Reads a packet out of the RX_DATA_FIFO */ 204/* Reads a packet out of the RX_DATA_FIFO */
@@ -201,24 +206,31 @@ static inline void
201smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, 206smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
202 unsigned int wordcount) 207 unsigned int wordcount)
203{ 208{
209 unsigned long flags;
210
211 spin_lock_irqsave(&pdata->dev_lock, flags);
212
204 if (pdata->config.flags & SMSC911X_SWAP_FIFO) { 213 if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
205 while (wordcount--) 214 while (wordcount--)
206 *buf++ = swab32(smsc911x_reg_read(pdata, RX_DATA_FIFO)); 215 *buf++ = swab32(__smsc911x_reg_read(pdata,
207 return; 216 RX_DATA_FIFO));
217 goto out;
208 } 218 }
209 219
210 if (pdata->config.flags & SMSC911X_USE_32BIT) { 220 if (pdata->config.flags & SMSC911X_USE_32BIT) {
211 readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount); 221 readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
212 return; 222 goto out;
213 } 223 }
214 224
215 if (pdata->config.flags & SMSC911X_USE_16BIT) { 225 if (pdata->config.flags & SMSC911X_USE_16BIT) {
216 while (wordcount--) 226 while (wordcount--)
217 *buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO); 227 *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO);
218 return; 228 goto out;
219 } 229 }
220 230
221 BUG(); 231 BUG();
232out:
233 spin_unlock_irqrestore(&pdata->dev_lock, flags);
222} 234}
223 235
224/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read 236/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
@@ -1538,7 +1550,7 @@ static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1538 if (!netif_running(dev) || !pdata->phy_dev) 1550 if (!netif_running(dev) || !pdata->phy_dev)
1539 return -EINVAL; 1551 return -EINVAL;
1540 1552
1541 return phy_mii_ioctl(pdata->phy_dev, if_mii(ifr), cmd); 1553 return phy_mii_ioctl(pdata->phy_dev, ifr, cmd);
1542} 1554}
1543 1555
1544static int 1556static int
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 6cdee6a15f9f..b09ee1c319e8 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -245,7 +245,7 @@ static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
245 if (!netif_running(dev) || !pd->phy_dev) 245 if (!netif_running(dev) || !pd->phy_dev)
246 return -EINVAL; 246 return -EINVAL;
247 247
248 return phy_mii_ioctl(pd->phy_dev, if_mii(ifr), cmd); 248 return phy_mii_ioctl(pd->phy_dev, ifr, cmd);
249} 249}
250 250
251static int smsc9420_ethtool_get_settings(struct net_device *dev, 251static int smsc9420_ethtool_get_settings(struct net_device *dev,
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 74b7ae76906e..a42b6873370b 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -562,7 +562,6 @@ struct netdev_private {
562 unsigned int tx_done; 562 unsigned int tx_done;
563 struct napi_struct napi; 563 struct napi_struct napi;
564 struct net_device *dev; 564 struct net_device *dev;
565 struct net_device_stats stats;
566 struct pci_dev *pci_dev; 565 struct pci_dev *pci_dev;
567#ifdef VLAN_SUPPORT 566#ifdef VLAN_SUPPORT
568 struct vlan_group *vlgrp; 567 struct vlan_group *vlgrp;
@@ -1174,7 +1173,7 @@ static void tx_timeout(struct net_device *dev)
1174 /* Trigger an immediate transmit demand. */ 1173 /* Trigger an immediate transmit demand. */
1175 1174
1176 dev->trans_start = jiffies; /* prevent tx timeout */ 1175 dev->trans_start = jiffies; /* prevent tx timeout */
1177 np->stats.tx_errors++; 1176 dev->stats.tx_errors++;
1178 netif_wake_queue(dev); 1177 netif_wake_queue(dev);
1179} 1178}
1180 1179
@@ -1265,7 +1264,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1265 } 1264 }
1266 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1265 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1267 status |= TxCalTCP; 1266 status |= TxCalTCP;
1268 np->stats.tx_compressed++; 1267 dev->stats.tx_compressed++;
1269 } 1268 }
1270 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16); 1269 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1271 1270
@@ -1374,7 +1373,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1374 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n", 1373 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1375 dev->name, np->dirty_tx, np->tx_done, tx_status); 1374 dev->name, np->dirty_tx, np->tx_done, tx_status);
1376 if ((tx_status & 0xe0000000) == 0xa0000000) { 1375 if ((tx_status & 0xe0000000) == 0xa0000000) {
1377 np->stats.tx_packets++; 1376 dev->stats.tx_packets++;
1378 } else if ((tx_status & 0xe0000000) == 0x80000000) { 1377 } else if ((tx_status & 0xe0000000) == 0x80000000) {
1379 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc); 1378 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1380 struct sk_buff *skb = np->tx_info[entry].skb; 1379 struct sk_buff *skb = np->tx_info[entry].skb;
@@ -1462,9 +1461,9 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1462 /* There was an error. */ 1461 /* There was an error. */
1463 if (debug > 2) 1462 if (debug > 2)
1464 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status); 1463 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1465 np->stats.rx_errors++; 1464 dev->stats.rx_errors++;
1466 if (desc_status & RxFIFOErr) 1465 if (desc_status & RxFIFOErr)
1467 np->stats.rx_fifo_errors++; 1466 dev->stats.rx_fifo_errors++;
1468 goto next_rx; 1467 goto next_rx;
1469 } 1468 }
1470 1469
@@ -1515,7 +1514,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1515#endif 1514#endif
1516 if (le16_to_cpu(desc->status2) & 0x0100) { 1515 if (le16_to_cpu(desc->status2) & 0x0100) {
1517 skb->ip_summed = CHECKSUM_UNNECESSARY; 1516 skb->ip_summed = CHECKSUM_UNNECESSARY;
1518 np->stats.rx_compressed++; 1517 dev->stats.rx_compressed++;
1519 } 1518 }
1520 /* 1519 /*
1521 * This feature doesn't seem to be working, at least 1520 * This feature doesn't seem to be working, at least
@@ -1547,7 +1546,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1547 } else 1546 } else
1548#endif /* VLAN_SUPPORT */ 1547#endif /* VLAN_SUPPORT */
1549 netif_receive_skb(skb); 1548 netif_receive_skb(skb);
1550 np->stats.rx_packets++; 1549 dev->stats.rx_packets++;
1551 1550
1552 next_rx: 1551 next_rx:
1553 np->cur_rx++; 1552 np->cur_rx++;
@@ -1717,12 +1716,12 @@ static void netdev_error(struct net_device *dev, int intr_status)
1717 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name); 1716 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1718 } 1717 }
1719 if (intr_status & IntrRxGFPDead) { 1718 if (intr_status & IntrRxGFPDead) {
1720 np->stats.rx_fifo_errors++; 1719 dev->stats.rx_fifo_errors++;
1721 np->stats.rx_errors++; 1720 dev->stats.rx_errors++;
1722 } 1721 }
1723 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) { 1722 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1724 np->stats.tx_fifo_errors++; 1723 dev->stats.tx_fifo_errors++;
1725 np->stats.tx_errors++; 1724 dev->stats.tx_errors++;
1726 } 1725 }
1727 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug) 1726 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1728 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n", 1727 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
@@ -1736,24 +1735,24 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1736 void __iomem *ioaddr = np->base; 1735 void __iomem *ioaddr = np->base;
1737 1736
1738 /* This adapter architecture needs no SMP locks. */ 1737 /* This adapter architecture needs no SMP locks. */
1739 np->stats.tx_bytes = readl(ioaddr + 0x57010); 1738 dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1740 np->stats.rx_bytes = readl(ioaddr + 0x57044); 1739 dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1741 np->stats.tx_packets = readl(ioaddr + 0x57000); 1740 dev->stats.tx_packets = readl(ioaddr + 0x57000);
1742 np->stats.tx_aborted_errors = 1741 dev->stats.tx_aborted_errors =
1743 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028); 1742 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1744 np->stats.tx_window_errors = readl(ioaddr + 0x57018); 1743 dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1745 np->stats.collisions = 1744 dev->stats.collisions =
1746 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008); 1745 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1747 1746
1748 /* The chip only need report frame silently dropped. */ 1747 /* The chip only need report frame silently dropped. */
1749 np->stats.rx_dropped += readw(ioaddr + RxDMAStatus); 1748 dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1750 writew(0, ioaddr + RxDMAStatus); 1749 writew(0, ioaddr + RxDMAStatus);
1751 np->stats.rx_crc_errors = readl(ioaddr + 0x5703C); 1750 dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1752 np->stats.rx_frame_errors = readl(ioaddr + 0x57040); 1751 dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1753 np->stats.rx_length_errors = readl(ioaddr + 0x57058); 1752 dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1754 np->stats.rx_missed_errors = readl(ioaddr + 0x5707C); 1753 dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1755 1754
1756 return &np->stats; 1755 return &dev->stats;
1757} 1756}
1758 1757
1759 1758
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 144f76fd3e39..66b9da0260fe 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -108,6 +108,7 @@ enum rx_frame_status { /* IPC status */
108 good_frame = 0, 108 good_frame = 0,
109 discard_frame = 1, 109 discard_frame = 1,
110 csum_none = 2, 110 csum_none = 2,
111 llc_snap = 4,
111}; 112};
112 113
113enum tx_dma_irq_status { 114enum tx_dma_irq_status {
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index d8d0f3553770..8b20b19971cb 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -93,7 +93,7 @@ enum inter_frame_gap {
93#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ 93#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
94#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ 94#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
95#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ 95#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
96#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */ 96#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Stripping */
97#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ 97#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
98#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ 98#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
99#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ 99#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 917b4e16923b..2b2f5c8caf1c 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -220,6 +220,8 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
220 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); 220 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
221 221
222 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 222 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
223 if (!mac)
224 return NULL;
223 225
224 mac->mac = &dwmac1000_ops; 226 mac->mac = &dwmac1000_ops;
225 mac->dma = &dwmac1000_dma_ops; 227 mac->dma = &dwmac1000_dma_ops;
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index 6f270a0e151a..2fb165fa2ba0 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -179,6 +179,8 @@ struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
179 struct mac_device_info *mac; 179 struct mac_device_info *mac;
180 180
181 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 181 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
182 if (!mac)
183 return NULL;
182 184
183 pr_info("\tDWMAC100\n"); 185 pr_info("\tDWMAC100\n");
184 186
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
index 3c18ebece043..f612f986a7e1 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/stmmac/enh_desc.c
@@ -123,7 +123,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
123 */ 123 */
124 if (status == 0x0) { 124 if (status == 0x0) {
125 CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); 125 CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
126 ret = good_frame; 126 ret = llc_snap;
127 } else if (status == 0x4) { 127 } else if (status == 0x4) {
128 CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); 128 CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
129 ret = good_frame; 129 ret = good_frame;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index a31d580f306d..bbb7951b9c4c 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -829,7 +829,6 @@ static int stmmac_open(struct net_device *dev)
829 * In case of failure continue without timer. */ 829 * In case of failure continue without timer. */
830 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { 830 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
831 pr_warning("stmmaceth: cannot attach the external timer.\n"); 831 pr_warning("stmmaceth: cannot attach the external timer.\n");
832 tmrate = 0;
833 priv->tm->freq = 0; 832 priv->tm->freq = 0;
834 priv->tm->timer_start = stmmac_no_timer_started; 833 priv->tm->timer_start = stmmac_no_timer_started;
835 priv->tm->timer_stop = stmmac_no_timer_stopped; 834 priv->tm->timer_stop = stmmac_no_timer_stopped;
@@ -1217,9 +1216,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1217 priv->dev->stats.rx_errors++; 1216 priv->dev->stats.rx_errors++;
1218 else { 1217 else {
1219 struct sk_buff *skb; 1218 struct sk_buff *skb;
1220 /* Length should omit the CRC */ 1219 int frame_len;
1221 int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
1222 1220
1221 frame_len = priv->hw->desc->get_rx_frame_len(p);
1222 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
1223 * Type frames (LLC/LLC-SNAP) */
1224 if (unlikely(status != llc_snap))
1225 frame_len -= ETH_FCS_LEN;
1223#ifdef STMMAC_RX_DEBUG 1226#ifdef STMMAC_RX_DEBUG
1224 if (frame_len > ETH_FRAME_LEN) 1227 if (frame_len > ETH_FRAME_LEN)
1225 pr_debug("\tRX frame size %d, COE status: %d\n", 1228 pr_debug("\tRX frame size %d, COE status: %d\n",
@@ -1437,24 +1440,18 @@ static void stmmac_poll_controller(struct net_device *dev)
1437static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1440static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1438{ 1441{
1439 struct stmmac_priv *priv = netdev_priv(dev); 1442 struct stmmac_priv *priv = netdev_priv(dev);
1440 int ret = -EOPNOTSUPP; 1443 int ret;
1441 1444
1442 if (!netif_running(dev)) 1445 if (!netif_running(dev))
1443 return -EINVAL; 1446 return -EINVAL;
1444 1447
1445 switch (cmd) { 1448 if (!priv->phydev)
1446 case SIOCGMIIPHY: 1449 return -EINVAL;
1447 case SIOCGMIIREG: 1450
1448 case SIOCSMIIREG: 1451 spin_lock(&priv->lock);
1449 if (!priv->phydev) 1452 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1450 return -EINVAL; 1453 spin_unlock(&priv->lock);
1451 1454
1452 spin_lock(&priv->lock);
1453 ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1454 spin_unlock(&priv->lock);
1455 default:
1456 break;
1457 }
1458 return ret; 1455 return ret;
1459} 1456}
1460 1457
@@ -1564,15 +1561,15 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1564 else 1561 else
1565 device = dwmac100_setup(ioaddr); 1562 device = dwmac100_setup(ioaddr);
1566 1563
1564 if (!device)
1565 return -ENOMEM;
1566
1567 if (priv->enh_desc) { 1567 if (priv->enh_desc) {
1568 device->desc = &enh_desc_ops; 1568 device->desc = &enh_desc_ops;
1569 pr_info("\tEnhanced descriptor structure\n"); 1569 pr_info("\tEnhanced descriptor structure\n");
1570 } else 1570 } else
1571 device->desc = &ndesc_ops; 1571 device->desc = &ndesc_ops;
1572 1572
1573 if (!device)
1574 return -ENOMEM;
1575
1576 priv->hw = device; 1573 priv->hw = device;
1577 1574
1578 priv->wolenabled = priv->hw->pmt; /* PMT supported */ 1575 priv->wolenabled = priv->hw->pmt; /* PMT supported */
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 151312342243..b6ae53bada75 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -142,7 +142,6 @@ static void sun3_82586_rnr_int(struct net_device *dev);
142 142
143struct priv 143struct priv
144{ 144{
145 struct net_device_stats stats;
146 unsigned long base; 145 unsigned long base;
147 char *memtop; 146 char *memtop;
148 long int lock; 147 long int lock;
@@ -788,10 +787,10 @@ static void sun3_82586_rcv_int(struct net_device *dev)
788 skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); 787 skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
789 skb->protocol=eth_type_trans(skb,dev); 788 skb->protocol=eth_type_trans(skb,dev);
790 netif_rx(skb); 789 netif_rx(skb);
791 p->stats.rx_packets++; 790 dev->stats.rx_packets++;
792 } 791 }
793 else 792 else
794 p->stats.rx_dropped++; 793 dev->stats.rx_dropped++;
795 } 794 }
796 else 795 else
797 { 796 {
@@ -812,13 +811,13 @@ static void sun3_82586_rcv_int(struct net_device *dev)
812 totlen += rstat & RBD_MASK; 811 totlen += rstat & RBD_MASK;
813 rbd->status = 0; 812 rbd->status = 0;
814 printk("%s: received oversized frame! length: %d\n",dev->name,totlen); 813 printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
815 p->stats.rx_dropped++; 814 dev->stats.rx_dropped++;
816 } 815 }
817 } 816 }
818 else /* frame !(ok), only with 'save-bad-frames' */ 817 else /* frame !(ok), only with 'save-bad-frames' */
819 { 818 {
820 printk("%s: oops! rfd-error-status: %04x\n",dev->name,status); 819 printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
821 p->stats.rx_errors++; 820 dev->stats.rx_errors++;
822 } 821 }
823 p->rfd_top->stat_high = 0; 822 p->rfd_top->stat_high = 0;
824 p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ 823 p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
@@ -885,7 +884,7 @@ static void sun3_82586_rnr_int(struct net_device *dev)
885{ 884{
886 struct priv *p = netdev_priv(dev); 885 struct priv *p = netdev_priv(dev);
887 886
888 p->stats.rx_errors++; 887 dev->stats.rx_errors++;
889 888
890 WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ 889 WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
891 p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ 890 p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
@@ -918,29 +917,29 @@ static void sun3_82586_xmt_int(struct net_device *dev)
918 917
919 if(status & STAT_OK) 918 if(status & STAT_OK)
920 { 919 {
921 p->stats.tx_packets++; 920 dev->stats.tx_packets++;
922 p->stats.collisions += (status & TCMD_MAXCOLLMASK); 921 dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
923 } 922 }
924 else 923 else
925 { 924 {
926 p->stats.tx_errors++; 925 dev->stats.tx_errors++;
927 if(status & TCMD_LATECOLL) { 926 if(status & TCMD_LATECOLL) {
928 printk("%s: late collision detected.\n",dev->name); 927 printk("%s: late collision detected.\n",dev->name);
929 p->stats.collisions++; 928 dev->stats.collisions++;
930 } 929 }
931 else if(status & TCMD_NOCARRIER) { 930 else if(status & TCMD_NOCARRIER) {
932 p->stats.tx_carrier_errors++; 931 dev->stats.tx_carrier_errors++;
933 printk("%s: no carrier detected.\n",dev->name); 932 printk("%s: no carrier detected.\n",dev->name);
934 } 933 }
935 else if(status & TCMD_LOSTCTS) 934 else if(status & TCMD_LOSTCTS)
936 printk("%s: loss of CTS detected.\n",dev->name); 935 printk("%s: loss of CTS detected.\n",dev->name);
937 else if(status & TCMD_UNDERRUN) { 936 else if(status & TCMD_UNDERRUN) {
938 p->stats.tx_fifo_errors++; 937 dev->stats.tx_fifo_errors++;
939 printk("%s: DMA underrun detected.\n",dev->name); 938 printk("%s: DMA underrun detected.\n",dev->name);
940 } 939 }
941 else if(status & TCMD_MAXCOLL) { 940 else if(status & TCMD_MAXCOLL) {
942 printk("%s: Max. collisions exceeded.\n",dev->name); 941 printk("%s: Max. collisions exceeded.\n",dev->name);
943 p->stats.collisions += 16; 942 dev->stats.collisions += 16;
944 } 943 }
945 } 944 }
946 945
@@ -1129,12 +1128,12 @@ static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev)
1129 ovrn = swab16(p->scb->ovrn_errs); 1128 ovrn = swab16(p->scb->ovrn_errs);
1130 p->scb->ovrn_errs = 0; 1129 p->scb->ovrn_errs = 0;
1131 1130
1132 p->stats.rx_crc_errors += crc; 1131 dev->stats.rx_crc_errors += crc;
1133 p->stats.rx_fifo_errors += ovrn; 1132 dev->stats.rx_fifo_errors += ovrn;
1134 p->stats.rx_frame_errors += aln; 1133 dev->stats.rx_frame_errors += aln;
1135 p->stats.rx_dropped += rsc; 1134 dev->stats.rx_dropped += rsc;
1136 1135
1137 return &p->stats; 1136 return &dev->stats;
1138} 1137}
1139 1138
1140/******************************************************** 1139/********************************************************
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 367e96f317d4..09c071bd6ad4 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1201,7 +1201,7 @@ static int __devinit bigmac_ether_init(struct of_device *op,
1201 dev->watchdog_timeo = 5*HZ; 1201 dev->watchdog_timeo = 5*HZ;
1202 1202
1203 /* Finish net device registration. */ 1203 /* Finish net device registration. */
1204 dev->irq = bp->bigmac_op->irqs[0]; 1204 dev->irq = bp->bigmac_op->archdata.irqs[0];
1205 dev->dma = 0; 1205 dev->dma = 0;
1206 1206
1207 if (register_netdev(dev)) { 1207 if (register_netdev(dev)) {
@@ -1301,12 +1301,12 @@ static struct of_platform_driver bigmac_sbus_driver = {
1301 1301
1302static int __init bigmac_init(void) 1302static int __init bigmac_init(void)
1303{ 1303{
1304 return of_register_driver(&bigmac_sbus_driver, &of_bus_type); 1304 return of_register_platform_driver(&bigmac_sbus_driver);
1305} 1305}
1306 1306
1307static void __exit bigmac_exit(void) 1307static void __exit bigmac_exit(void)
1308{ 1308{
1309 of_unregister_driver(&bigmac_sbus_driver); 1309 of_unregister_platform_driver(&bigmac_sbus_driver);
1310} 1310}
1311 1311
1312module_init(bigmac_init); 1312module_init(bigmac_init);
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 3d9650b8d38f..eec443f64079 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2561,7 +2561,7 @@ static int __init quattro_sbus_register_irqs(void)
2561 if (skip) 2561 if (skip)
2562 continue; 2562 continue;
2563 2563
2564 err = request_irq(op->irqs[0], 2564 err = request_irq(op->archdata.irqs[0],
2565 quattro_sbus_interrupt, 2565 quattro_sbus_interrupt,
2566 IRQF_SHARED, "Quattro", 2566 IRQF_SHARED, "Quattro",
2567 qp); 2567 qp);
@@ -2590,7 +2590,7 @@ static void quattro_sbus_free_irqs(void)
2590 if (skip) 2590 if (skip)
2591 continue; 2591 continue;
2592 2592
2593 free_irq(op->irqs[0], qp); 2593 free_irq(op->archdata.irqs[0], qp);
2594 } 2594 }
2595} 2595}
2596#endif /* CONFIG_SBUS */ 2596#endif /* CONFIG_SBUS */
@@ -2790,7 +2790,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2790 /* Happy Meal can do it all... */ 2790 /* Happy Meal can do it all... */
2791 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2791 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2792 2792
2793 dev->irq = op->irqs[0]; 2793 dev->irq = op->archdata.irqs[0];
2794 2794
2795#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 2795#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2796 /* Hook up SBUS register/descriptor accessors. */ 2796 /* Hook up SBUS register/descriptor accessors. */
@@ -3304,7 +3304,7 @@ static int __init happy_meal_sbus_init(void)
3304{ 3304{
3305 int err; 3305 int err;
3306 3306
3307 err = of_register_driver(&hme_sbus_driver, &of_bus_type); 3307 err = of_register_platform_driver(&hme_sbus_driver);
3308 if (!err) 3308 if (!err)
3309 err = quattro_sbus_register_irqs(); 3309 err = quattro_sbus_register_irqs();
3310 3310
@@ -3313,7 +3313,7 @@ static int __init happy_meal_sbus_init(void)
3313 3313
3314static void happy_meal_sbus_exit(void) 3314static void happy_meal_sbus_exit(void)
3315{ 3315{
3316 of_unregister_driver(&hme_sbus_driver); 3316 of_unregister_platform_driver(&hme_sbus_driver);
3317 quattro_sbus_free_irqs(); 3317 quattro_sbus_free_irqs();
3318 3318
3319 while (qfe_sbus_list) { 3319 while (qfe_sbus_list) {
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 7d9c33dd9d1a..ee364fa75634 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1474,7 +1474,7 @@ no_link_test:
1474 dev->ethtool_ops = &sparc_lance_ethtool_ops; 1474 dev->ethtool_ops = &sparc_lance_ethtool_ops;
1475 dev->netdev_ops = &sparc_lance_ops; 1475 dev->netdev_ops = &sparc_lance_ops;
1476 1476
1477 dev->irq = op->irqs[0]; 1477 dev->irq = op->archdata.irqs[0];
1478 1478
1479 /* We cannot sleep if the chip is busy during a 1479 /* We cannot sleep if the chip is busy during a
1480 * multicast list update event, because such events 1480 * multicast list update event, because such events
@@ -1558,12 +1558,12 @@ static struct of_platform_driver sunlance_sbus_driver = {
1558/* Find all the lance cards on the system and initialize them */ 1558/* Find all the lance cards on the system and initialize them */
1559static int __init sparc_lance_init(void) 1559static int __init sparc_lance_init(void)
1560{ 1560{
1561 return of_register_driver(&sunlance_sbus_driver, &of_bus_type); 1561 return of_register_platform_driver(&sunlance_sbus_driver);
1562} 1562}
1563 1563
1564static void __exit sparc_lance_exit(void) 1564static void __exit sparc_lance_exit(void)
1565{ 1565{
1566 of_unregister_driver(&sunlance_sbus_driver); 1566 of_unregister_platform_driver(&sunlance_sbus_driver);
1567} 1567}
1568 1568
1569module_init(sparc_lance_init); 1569module_init(sparc_lance_init);
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 72b579c8d812..5f84a5dadedd 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -803,7 +803,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
803 803
804 qec_init_once(qecp, op); 804 qec_init_once(qecp, op);
805 805
806 if (request_irq(op->irqs[0], qec_interrupt, 806 if (request_irq(op->archdata.irqs[0], qec_interrupt,
807 IRQF_SHARED, "qec", (void *) qecp)) { 807 IRQF_SHARED, "qec", (void *) qecp)) {
808 printk(KERN_ERR "qec: Can't register irq.\n"); 808 printk(KERN_ERR "qec: Can't register irq.\n");
809 goto fail; 809 goto fail;
@@ -901,7 +901,7 @@ static int __devinit qec_ether_init(struct of_device *op)
901 SET_NETDEV_DEV(dev, &op->dev); 901 SET_NETDEV_DEV(dev, &op->dev);
902 902
903 dev->watchdog_timeo = 5*HZ; 903 dev->watchdog_timeo = 5*HZ;
904 dev->irq = op->irqs[0]; 904 dev->irq = op->archdata.irqs[0];
905 dev->dma = 0; 905 dev->dma = 0;
906 dev->ethtool_ops = &qe_ethtool_ops; 906 dev->ethtool_ops = &qe_ethtool_ops;
907 dev->netdev_ops = &qec_ops; 907 dev->netdev_ops = &qec_ops;
@@ -988,18 +988,18 @@ static struct of_platform_driver qec_sbus_driver = {
988 988
989static int __init qec_init(void) 989static int __init qec_init(void)
990{ 990{
991 return of_register_driver(&qec_sbus_driver, &of_bus_type); 991 return of_register_platform_driver(&qec_sbus_driver);
992} 992}
993 993
994static void __exit qec_exit(void) 994static void __exit qec_exit(void)
995{ 995{
996 of_unregister_driver(&qec_sbus_driver); 996 of_unregister_platform_driver(&qec_sbus_driver);
997 997
998 while (root_qec_dev) { 998 while (root_qec_dev) {
999 struct sunqec *next = root_qec_dev->next_module; 999 struct sunqec *next = root_qec_dev->next_module;
1000 struct of_device *op = root_qec_dev->op; 1000 struct of_device *op = root_qec_dev->op;
1001 1001
1002 free_irq(op->irqs[0], (void *) root_qec_dev); 1002 free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
1003 of_iounmap(&op->resource[0], root_qec_dev->gregs, 1003 of_iounmap(&op->resource[0], root_qec_dev->gregs,
1004 GLOB_REG_SIZE); 1004 GLOB_REG_SIZE);
1005 kfree(root_qec_dev); 1005 kfree(root_qec_dev);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index be08b75dbc15..99e423a5b9f1 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -2066,7 +2066,7 @@ static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2066 return -EINVAL; 2066 return -EINVAL;
2067 if (!lp->phy_dev) 2067 if (!lp->phy_dev)
2068 return -ENODEV; 2068 return -ENODEV;
2069 return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd); 2069 return phy_mii_ioctl(lp->phy_dev, rq, cmd);
2070} 2070}
2071 2071
2072static void tc35815_chip_reset(struct net_device *dev) 2072static void tc35815_chip_reset(struct net_device *dev)
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index cff98d07cba8..67e3b71bf705 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -334,7 +334,7 @@ struct txd_desc {
334 u32 va_lo; 334 u32 va_lo;
335 u32 va_hi; 335 u32 va_hi;
336 struct pbl pbl[0]; /* Fragments */ 336 struct pbl pbl[0]; /* Fragments */
337} __attribute__ ((packed)); 337} __packed;
338 338
339/* Register region size */ 339/* Register region size */
340#define BDX_REGS_SIZE 0x1000 340#define BDX_REGS_SIZE 0x1000
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 573054ae7b58..bc3af78a869f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/stringify.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/compiler.h> 24#include <linux/compiler.h>
@@ -67,8 +68,11 @@
67#include "tg3.h" 68#include "tg3.h"
68 69
69#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
70#define DRV_MODULE_VERSION "3.110" 71#define TG3_MAJ_NUM 3
71#define DRV_MODULE_RELDATE "April 9, 2010" 72#define TG3_MIN_NUM 113
73#define DRV_MODULE_VERSION \
74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
75#define DRV_MODULE_RELDATE "August 2, 2010"
72 76
73#define TG3_DEF_MAC_MODE 0 77#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 78#define TG3_DEF_RX_MODE 0
@@ -145,8 +149,6 @@
145#define TG3_RX_JMB_BUFF_RING_SIZE \ 149#define TG3_RX_JMB_BUFF_RING_SIZE \
146 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) 150 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
147 151
148#define TG3_RSS_MIN_NUM_MSIX_VECS 2
149
150/* Due to a hardware bug, the 5701 can only DMA to memory addresses 152/* Due to a hardware bug, the 5701 can only DMA to memory addresses
151 * that are at least dword aligned when used in PCIX mode. The driver 153 * that are at least dword aligned when used in PCIX mode. The driver
152 * works around this bug by double copying the packet. This workaround 154 * works around this bug by double copying the packet. This workaround
@@ -219,12 +221,9 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, 221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, 222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, 223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, 224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, 225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, 226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, 227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, 228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, 229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
@@ -272,6 +271,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, 271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, 272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, 273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
275 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 275 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
276 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 276 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
277 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 277 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -585,18 +585,23 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
585static void tg3_ape_lock_init(struct tg3 *tp) 585static void tg3_ape_lock_init(struct tg3 *tp)
586{ 586{
587 int i; 587 int i;
588 u32 regbase;
589
590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
591 regbase = TG3_APE_LOCK_GRANT;
592 else
593 regbase = TG3_APE_PER_LOCK_GRANT;
588 594
589 /* Make sure the driver hasn't any stale locks. */ 595 /* Make sure the driver hasn't any stale locks. */
590 for (i = 0; i < 8; i++) 596 for (i = 0; i < 8; i++)
591 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i, 597 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
592 APE_LOCK_GRANT_DRIVER);
593} 598}
594 599
595static int tg3_ape_lock(struct tg3 *tp, int locknum) 600static int tg3_ape_lock(struct tg3 *tp, int locknum)
596{ 601{
597 int i, off; 602 int i, off;
598 int ret = 0; 603 int ret = 0;
599 u32 status; 604 u32 status, req, gnt;
600 605
601 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 606 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
602 return 0; 607 return 0;
@@ -609,13 +614,21 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
609 return -EINVAL; 614 return -EINVAL;
610 } 615 }
611 616
617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
618 req = TG3_APE_LOCK_REQ;
619 gnt = TG3_APE_LOCK_GRANT;
620 } else {
621 req = TG3_APE_PER_LOCK_REQ;
622 gnt = TG3_APE_PER_LOCK_GRANT;
623 }
624
612 off = 4 * locknum; 625 off = 4 * locknum;
613 626
614 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER); 627 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
615 628
616 /* Wait for up to 1 millisecond to acquire lock. */ 629 /* Wait for up to 1 millisecond to acquire lock. */
617 for (i = 0; i < 100; i++) { 630 for (i = 0; i < 100; i++) {
618 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off); 631 status = tg3_ape_read32(tp, gnt + off);
619 if (status == APE_LOCK_GRANT_DRIVER) 632 if (status == APE_LOCK_GRANT_DRIVER)
620 break; 633 break;
621 udelay(10); 634 udelay(10);
@@ -623,7 +636,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
623 636
624 if (status != APE_LOCK_GRANT_DRIVER) { 637 if (status != APE_LOCK_GRANT_DRIVER) {
625 /* Revoke the lock request. */ 638 /* Revoke the lock request. */
626 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, 639 tg3_ape_write32(tp, gnt + off,
627 APE_LOCK_GRANT_DRIVER); 640 APE_LOCK_GRANT_DRIVER);
628 641
629 ret = -EBUSY; 642 ret = -EBUSY;
@@ -634,7 +647,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
634 647
635static void tg3_ape_unlock(struct tg3 *tp, int locknum) 648static void tg3_ape_unlock(struct tg3 *tp, int locknum)
636{ 649{
637 int off; 650 u32 gnt;
638 651
639 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 652 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
640 return; 653 return;
@@ -647,8 +660,12 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
647 return; 660 return;
648 } 661 }
649 662
650 off = 4 * locknum; 663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
651 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER); 664 gnt = TG3_APE_LOCK_GRANT;
665 else
666 gnt = TG3_APE_PER_LOCK_GRANT;
667
668 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
652} 669}
653 670
654static void tg3_disable_ints(struct tg3 *tp) 671static void tg3_disable_ints(struct tg3 *tp)
@@ -862,7 +879,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
862 unsigned int loops; 879 unsigned int loops;
863 int ret; 880 int ret;
864 881
865 if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && 882 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
866 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) 883 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
867 return 0; 884 return 0;
868 885
@@ -1069,14 +1086,11 @@ static int tg3_mdio_init(struct tg3 *tp)
1069 u32 reg; 1086 u32 reg;
1070 struct phy_device *phydev; 1087 struct phy_device *phydev;
1071 1088
1072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 1089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1073 u32 funcnum, is_serdes; 1090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
1091 u32 is_serdes;
1074 1092
1075 funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC; 1093 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1076 if (funcnum)
1077 tp->phy_addr = 2;
1078 else
1079 tp->phy_addr = 1;
1080 1094
1081 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) 1095 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1082 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; 1096 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
@@ -1161,7 +1175,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1161 case PHY_ID_BCMAC131: 1175 case PHY_ID_BCMAC131:
1162 phydev->interface = PHY_INTERFACE_MODE_MII; 1176 phydev->interface = PHY_INTERFACE_MODE_MII;
1163 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; 1177 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1164 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 1178 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1165 break; 1179 break;
1166 } 1180 }
1167 1181
@@ -1254,7 +1268,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
1254 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); 1268 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1255 1269
1256 val = 0; 1270 val = 0;
1257 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { 1271 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1258 if (!tg3_readphy(tp, MII_CTRL1000, &reg)) 1272 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1259 val = reg << 16; 1273 val = reg << 16;
1260 if (!tg3_readphy(tp, MII_STAT1000, &reg)) 1274 if (!tg3_readphy(tp, MII_STAT1000, &reg))
@@ -1362,7 +1376,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1362 1376
1363 if (autoneg == AUTONEG_ENABLE && 1377 if (autoneg == AUTONEG_ENABLE &&
1364 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { 1378 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1365 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 1379 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1366 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1380 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1367 else 1381 else
1368 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1382 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -1476,7 +1490,7 @@ static int tg3_phy_init(struct tg3 *tp)
1476{ 1490{
1477 struct phy_device *phydev; 1491 struct phy_device *phydev;
1478 1492
1479 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) 1493 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1480 return 0; 1494 return 0;
1481 1495
1482 /* Bring the PHY back to a known state. */ 1496 /* Bring the PHY back to a known state. */
@@ -1496,7 +1510,7 @@ static int tg3_phy_init(struct tg3 *tp)
1496 switch (phydev->interface) { 1510 switch (phydev->interface) {
1497 case PHY_INTERFACE_MODE_GMII: 1511 case PHY_INTERFACE_MODE_GMII:
1498 case PHY_INTERFACE_MODE_RGMII: 1512 case PHY_INTERFACE_MODE_RGMII:
1499 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 1513 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1500 phydev->supported &= (PHY_GBIT_FEATURES | 1514 phydev->supported &= (PHY_GBIT_FEATURES |
1501 SUPPORTED_Pause | 1515 SUPPORTED_Pause |
1502 SUPPORTED_Asym_Pause); 1516 SUPPORTED_Asym_Pause);
@@ -1513,7 +1527,7 @@ static int tg3_phy_init(struct tg3 *tp)
1513 return -EINVAL; 1527 return -EINVAL;
1514 } 1528 }
1515 1529
1516 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED; 1530 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1517 1531
1518 phydev->advertising = phydev->supported; 1532 phydev->advertising = phydev->supported;
1519 1533
@@ -1524,13 +1538,13 @@ static void tg3_phy_start(struct tg3 *tp)
1524{ 1538{
1525 struct phy_device *phydev; 1539 struct phy_device *phydev;
1526 1540
1527 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1541 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1528 return; 1542 return;
1529 1543
1530 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1544 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1531 1545
1532 if (tp->link_config.phy_is_low_power) { 1546 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1533 tp->link_config.phy_is_low_power = 0; 1547 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1534 phydev->speed = tp->link_config.orig_speed; 1548 phydev->speed = tp->link_config.orig_speed;
1535 phydev->duplex = tp->link_config.orig_duplex; 1549 phydev->duplex = tp->link_config.orig_duplex;
1536 phydev->autoneg = tp->link_config.orig_autoneg; 1550 phydev->autoneg = tp->link_config.orig_autoneg;
@@ -1544,7 +1558,7 @@ static void tg3_phy_start(struct tg3 *tp)
1544 1558
1545static void tg3_phy_stop(struct tg3 *tp) 1559static void tg3_phy_stop(struct tg3 *tp)
1546{ 1560{
1547 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1561 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1548 return; 1562 return;
1549 1563
1550 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 1564 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
@@ -1552,16 +1566,21 @@ static void tg3_phy_stop(struct tg3 *tp)
1552 1566
1553static void tg3_phy_fini(struct tg3 *tp) 1567static void tg3_phy_fini(struct tg3 *tp)
1554{ 1568{
1555 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 1569 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1556 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 1570 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1557 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; 1571 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1558 } 1572 }
1559} 1573}
1560 1574
1561static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) 1575static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1562{ 1576{
1563 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); 1577 int err;
1564 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); 1578
1579 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1580 if (!err)
1581 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1582
1583 return err;
1565} 1584}
1566 1585
1567static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 1586static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
@@ -1589,11 +1608,12 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1589 u32 reg; 1608 u32 reg;
1590 1609
1591 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 1610 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1592 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 1611 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1593 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 1612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1613 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1594 return; 1614 return;
1595 1615
1596 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 1616 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1597 tg3_phy_fet_toggle_apd(tp, enable); 1617 tg3_phy_fet_toggle_apd(tp, enable);
1598 return; 1618 return;
1599 } 1619 }
@@ -1624,10 +1644,10 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1624 u32 phy; 1644 u32 phy;
1625 1645
1626 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 1646 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1627 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 1647 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1628 return; 1648 return;
1629 1649
1630 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 1650 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1631 u32 ephy; 1651 u32 ephy;
1632 1652
1633 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 1653 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
@@ -1663,7 +1683,7 @@ static void tg3_phy_set_wirespeed(struct tg3 *tp)
1663{ 1683{
1664 u32 val; 1684 u32 val;
1665 1685
1666 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) 1686 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1667 return; 1687 return;
1668 1688
1669 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && 1689 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
@@ -1722,7 +1742,7 @@ static int tg3_wait_macro_done(struct tg3 *tp)
1722 while (limit--) { 1742 while (limit--) {
1723 u32 tmp32; 1743 u32 tmp32;
1724 1744
1725 if (!tg3_readphy(tp, 0x16, &tmp32)) { 1745 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1726 if ((tmp32 & 0x1000) == 0) 1746 if ((tmp32 & 0x1000) == 0)
1727 break; 1747 break;
1728 } 1748 }
@@ -1748,13 +1768,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1748 1768
1749 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 1769 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1750 (chan * 0x2000) | 0x0200); 1770 (chan * 0x2000) | 0x0200);
1751 tg3_writephy(tp, 0x16, 0x0002); 1771 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1752 1772
1753 for (i = 0; i < 6; i++) 1773 for (i = 0; i < 6; i++)
1754 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 1774 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1755 test_pat[chan][i]); 1775 test_pat[chan][i]);
1756 1776
1757 tg3_writephy(tp, 0x16, 0x0202); 1777 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1758 if (tg3_wait_macro_done(tp)) { 1778 if (tg3_wait_macro_done(tp)) {
1759 *resetp = 1; 1779 *resetp = 1;
1760 return -EBUSY; 1780 return -EBUSY;
@@ -1762,13 +1782,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1762 1782
1763 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 1783 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1764 (chan * 0x2000) | 0x0200); 1784 (chan * 0x2000) | 0x0200);
1765 tg3_writephy(tp, 0x16, 0x0082); 1785 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1766 if (tg3_wait_macro_done(tp)) { 1786 if (tg3_wait_macro_done(tp)) {
1767 *resetp = 1; 1787 *resetp = 1;
1768 return -EBUSY; 1788 return -EBUSY;
1769 } 1789 }
1770 1790
1771 tg3_writephy(tp, 0x16, 0x0802); 1791 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1772 if (tg3_wait_macro_done(tp)) { 1792 if (tg3_wait_macro_done(tp)) {
1773 *resetp = 1; 1793 *resetp = 1;
1774 return -EBUSY; 1794 return -EBUSY;
@@ -1808,10 +1828,10 @@ static int tg3_phy_reset_chanpat(struct tg3 *tp)
1808 1828
1809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 1829 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1810 (chan * 0x2000) | 0x0200); 1830 (chan * 0x2000) | 0x0200);
1811 tg3_writephy(tp, 0x16, 0x0002); 1831 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1812 for (i = 0; i < 6; i++) 1832 for (i = 0; i < 6; i++)
1813 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1814 tg3_writephy(tp, 0x16, 0x0202); 1834 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1815 if (tg3_wait_macro_done(tp)) 1835 if (tg3_wait_macro_done(tp))
1816 return -EBUSY; 1836 return -EBUSY;
1817 } 1837 }
@@ -1857,8 +1877,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1857 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 1877 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1858 1878
1859 /* Block the PHY control access. */ 1879 /* Block the PHY control access. */
1860 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); 1880 tg3_phydsp_write(tp, 0x8005, 0x0800);
1861 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1862 1881
1863 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 1882 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1864 if (!err) 1883 if (!err)
@@ -1869,11 +1888,10 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1869 if (err) 1888 if (err)
1870 return err; 1889 return err;
1871 1890
1872 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); 1891 tg3_phydsp_write(tp, 0x8005, 0x0000);
1873 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1874 1892
1875 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 1893 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1876 tg3_writephy(tp, 0x16, 0x0000); 1894 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1877 1895
1878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 1896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 1897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
@@ -1964,43 +1982,39 @@ static int tg3_phy_reset(struct tg3 *tp)
1964 } 1982 }
1965 } 1983 }
1966 1984
1967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 1985 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1968 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) 1986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1987 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
1969 return 0; 1988 return 0;
1970 1989
1971 tg3_phy_apply_otp(tp); 1990 tg3_phy_apply_otp(tp);
1972 1991
1973 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 1992 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
1974 tg3_phy_toggle_apd(tp, true); 1993 tg3_phy_toggle_apd(tp, true);
1975 else 1994 else
1976 tg3_phy_toggle_apd(tp, false); 1995 tg3_phy_toggle_apd(tp, false);
1977 1996
1978out: 1997out:
1979 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { 1998 if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
1980 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 1999 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1981 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); 2000 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
1982 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa); 2001 tg3_phydsp_write(tp, 0x000a, 0x0323);
1983 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1984 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1985 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 2002 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1986 } 2003 }
1987 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) { 2004 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
1988 tg3_writephy(tp, 0x1c, 0x8d68); 2005 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1989 tg3_writephy(tp, 0x1c, 0x8d68); 2006 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1990 } 2007 }
1991 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) { 2008 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1992 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 2009 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1993 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2010 tg3_phydsp_write(tp, 0x000a, 0x310b);
1994 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b); 2011 tg3_phydsp_write(tp, 0x201f, 0x9506);
1995 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); 2012 tg3_phydsp_write(tp, 0x401f, 0x14e2);
1996 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1997 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1998 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1999 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 2013 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2000 } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { 2014 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2001 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 2015 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2002 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2016 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2003 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { 2017 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2004 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2018 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2005 tg3_writephy(tp, MII_TG3_TEST1, 2019 tg3_writephy(tp, MII_TG3_TEST1,
2006 MII_TG3_TEST1_TRIM_EN | 0x4); 2020 MII_TG3_TEST1_TRIM_EN | 0x4);
@@ -2049,6 +2063,7 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2049 2063
2050 /* The GPIOs do something completely different on 57765. */ 2064 /* The GPIOs do something completely different on 57765. */
2051 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || 2065 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2066 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 2067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2053 return; 2068 return;
2054 2069
@@ -2184,7 +2199,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2184{ 2199{
2185 u32 val; 2200 u32 val;
2186 2201
2187 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 2202 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 2203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2189 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 2204 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2190 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 2205 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
@@ -2203,7 +2218,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2203 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 2218 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2204 udelay(40); 2219 udelay(40);
2205 return; 2220 return;
2206 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 2221 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2207 u32 phytest; 2222 u32 phytest;
2208 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2223 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2209 u32 phy; 2224 u32 phy;
@@ -2240,7 +2255,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2242 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && 2257 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2243 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 2258 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2244 return; 2259 return;
2245 2260
2246 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || 2261 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
@@ -2543,14 +2558,14 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2543 2558
2544 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 2559 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2545 do_low_power = false; 2560 do_low_power = false;
2546 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) && 2561 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2547 !tp->link_config.phy_is_low_power) { 2562 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2548 struct phy_device *phydev; 2563 struct phy_device *phydev;
2549 u32 phyid, advertising; 2564 u32 phyid, advertising;
2550 2565
2551 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 2566 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2552 2567
2553 tp->link_config.phy_is_low_power = 1; 2568 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2554 2569
2555 tp->link_config.orig_speed = phydev->speed; 2570 tp->link_config.orig_speed = phydev->speed;
2556 tp->link_config.orig_duplex = phydev->duplex; 2571 tp->link_config.orig_duplex = phydev->duplex;
@@ -2589,14 +2604,14 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2589 } else { 2604 } else {
2590 do_low_power = true; 2605 do_low_power = true;
2591 2606
2592 if (tp->link_config.phy_is_low_power == 0) { 2607 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2593 tp->link_config.phy_is_low_power = 1; 2608 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2594 tp->link_config.orig_speed = tp->link_config.speed; 2609 tp->link_config.orig_speed = tp->link_config.speed;
2595 tp->link_config.orig_duplex = tp->link_config.duplex; 2610 tp->link_config.orig_duplex = tp->link_config.duplex;
2596 tp->link_config.orig_autoneg = tp->link_config.autoneg; 2611 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2597 } 2612 }
2598 2613
2599 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 2614 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2600 tp->link_config.speed = SPEED_10; 2615 tp->link_config.speed = SPEED_10;
2601 tp->link_config.duplex = DUPLEX_HALF; 2616 tp->link_config.duplex = DUPLEX_HALF;
2602 tp->link_config.autoneg = AUTONEG_ENABLE; 2617 tp->link_config.autoneg = AUTONEG_ENABLE;
@@ -2629,13 +2644,13 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2629 if (device_should_wake) { 2644 if (device_should_wake) {
2630 u32 mac_mode; 2645 u32 mac_mode;
2631 2646
2632 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 2647 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2633 if (do_low_power) { 2648 if (do_low_power) {
2634 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); 2649 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2635 udelay(40); 2650 udelay(40);
2636 } 2651 }
2637 2652
2638 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 2653 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2639 mac_mode = MAC_MODE_PORT_MODE_GMII; 2654 mac_mode = MAC_MODE_PORT_MODE_GMII;
2640 else 2655 else
2641 mac_mode = MAC_MODE_PORT_MODE_MII; 2656 mac_mode = MAC_MODE_PORT_MODE_MII;
@@ -2803,7 +2818,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
2803 break; 2818 break;
2804 2819
2805 default: 2820 default:
2806 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 2821 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2807 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 2822 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2808 SPEED_10; 2823 SPEED_10;
2809 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : 2824 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
@@ -2821,7 +2836,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2821 u32 new_adv; 2836 u32 new_adv;
2822 int i; 2837 int i;
2823 2838
2824 if (tp->link_config.phy_is_low_power) { 2839 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2825 /* Entering low power mode. Disable gigabit and 2840 /* Entering low power mode. Disable gigabit and
2826 * 100baseT advertisements. 2841 * 100baseT advertisements.
2827 */ 2842 */
@@ -2834,7 +2849,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2834 2849
2835 tg3_writephy(tp, MII_ADVERTISE, new_adv); 2850 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2836 } else if (tp->link_config.speed == SPEED_INVALID) { 2851 } else if (tp->link_config.speed == SPEED_INVALID) {
2837 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 2852 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2838 tp->link_config.advertising &= 2853 tp->link_config.advertising &=
2839 ~(ADVERTISED_1000baseT_Half | 2854 ~(ADVERTISED_1000baseT_Half |
2840 ADVERTISED_1000baseT_Full); 2855 ADVERTISED_1000baseT_Full);
@@ -2860,7 +2875,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2860 new_adv |= MII_TG3_CTRL_ADV_1000_HALF; 2875 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2861 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) 2876 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2862 new_adv |= MII_TG3_CTRL_ADV_1000_FULL; 2877 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2863 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) && 2878 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2864 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 2879 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2865 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) 2880 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2866 new_adv |= (MII_TG3_CTRL_AS_MASTER | 2881 new_adv |= (MII_TG3_CTRL_AS_MASTER |
@@ -2962,20 +2977,11 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
2962 /* Set Extended packet length bit */ 2977 /* Set Extended packet length bit */
2963 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); 2978 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2964 2979
2965 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012); 2980 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
2966 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804); 2981 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
2967 2982 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
2968 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013); 2983 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
2969 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204); 2984 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
2970
2971 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2972 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2973
2974 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2975 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2976
2977 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2978 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2979 2985
2980 udelay(40); 2986 udelay(40);
2981 2987
@@ -3000,7 +3006,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3000 3006
3001 if ((adv_reg & all_mask) != all_mask) 3007 if ((adv_reg & all_mask) != all_mask)
3002 return 0; 3008 return 0;
3003 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 3009 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3004 u32 tg3_ctrl; 3010 u32 tg3_ctrl;
3005 3011
3006 all_mask = 0; 3012 all_mask = 0;
@@ -3128,18 +3134,18 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3128 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { 3134 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3129 /* 5701 {A0,B0} CRC bug workaround */ 3135 /* 5701 {A0,B0} CRC bug workaround */
3130 tg3_writephy(tp, 0x15, 0x0a75); 3136 tg3_writephy(tp, 0x15, 0x0a75);
3131 tg3_writephy(tp, 0x1c, 0x8c68); 3137 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3132 tg3_writephy(tp, 0x1c, 0x8d68); 3138 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3133 tg3_writephy(tp, 0x1c, 0x8c68); 3139 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3134 } 3140 }
3135 3141
3136 /* Clear pending interrupts... */ 3142 /* Clear pending interrupts... */
3137 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3143 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3138 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3144 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3139 3145
3140 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) 3146 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3141 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 3147 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3142 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) 3148 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3143 tg3_writephy(tp, MII_TG3_IMASK, ~0); 3149 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3144 3150
3145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 3151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -3155,7 +3161,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3155 current_speed = SPEED_INVALID; 3161 current_speed = SPEED_INVALID;
3156 current_duplex = DUPLEX_INVALID; 3162 current_duplex = DUPLEX_INVALID;
3157 3163
3158 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { 3164 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3159 u32 val; 3165 u32 val;
3160 3166
3161 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); 3167 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
@@ -3231,7 +3237,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3231 } 3237 }
3232 3238
3233relink: 3239relink:
3234 if (current_link_up == 0 || tp->link_config.phy_is_low_power) { 3240 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3235 u32 tmp; 3241 u32 tmp;
3236 3242
3237 tg3_phy_copper_begin(tp); 3243 tg3_phy_copper_begin(tp);
@@ -3249,7 +3255,7 @@ relink:
3249 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 3255 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3250 else 3256 else
3251 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 3257 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3252 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) 3258 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3253 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 3259 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3254 else 3260 else
3255 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 3261 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -3800,7 +3806,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3800 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 3806 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3801 3807
3802 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 3808 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3803 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && 3809 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3804 tp->serdes_counter && 3810 tp->serdes_counter &&
3805 ((mac_status & (MAC_STATUS_PCS_SYNCED | 3811 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3806 MAC_STATUS_RCVD_CFG)) == 3812 MAC_STATUS_RCVD_CFG)) ==
@@ -3817,7 +3823,7 @@ restart_autoneg:
3817 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 3823 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3818 3824
3819 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 3825 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3820 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 3826 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3821 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 3827 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3822 MAC_STATUS_SIGNAL_DET)) { 3828 MAC_STATUS_SIGNAL_DET)) {
3823 sg_dig_status = tr32(SG_DIG_STATUS); 3829 sg_dig_status = tr32(SG_DIG_STATUS);
@@ -3840,7 +3846,7 @@ restart_autoneg:
3840 tg3_setup_flow_control(tp, local_adv, remote_adv); 3846 tg3_setup_flow_control(tp, local_adv, remote_adv);
3841 current_link_up = 1; 3847 current_link_up = 1;
3842 tp->serdes_counter = 0; 3848 tp->serdes_counter = 0;
3843 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 3849 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3844 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 3850 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3845 if (tp->serdes_counter) 3851 if (tp->serdes_counter)
3846 tp->serdes_counter--; 3852 tp->serdes_counter--;
@@ -3867,8 +3873,8 @@ restart_autoneg:
3867 !(mac_status & MAC_STATUS_RCVD_CFG)) { 3873 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3868 tg3_setup_flow_control(tp, 0, 0); 3874 tg3_setup_flow_control(tp, 0, 0);
3869 current_link_up = 1; 3875 current_link_up = 1;
3870 tp->tg3_flags2 |= 3876 tp->phy_flags |=
3871 TG3_FLG2_PARALLEL_DETECT; 3877 TG3_PHYFLG_PARALLEL_DETECT;
3872 tp->serdes_counter = 3878 tp->serdes_counter =
3873 SERDES_PARALLEL_DET_TIMEOUT; 3879 SERDES_PARALLEL_DET_TIMEOUT;
3874 } else 3880 } else
@@ -3877,7 +3883,7 @@ restart_autoneg:
3877 } 3883 }
3878 } else { 3884 } else {
3879 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 3885 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3880 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 3886 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3881 } 3887 }
3882 3888
3883out: 3889out:
@@ -4094,7 +4100,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4094 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 4100 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4095 4101
4096 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 4102 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4097 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { 4103 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4098 /* do nothing, just check for link up at the end */ 4104 /* do nothing, just check for link up at the end */
4099 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4105 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4100 u32 adv, new_adv; 4106 u32 adv, new_adv;
@@ -4119,7 +4125,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4119 4125
4120 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 4126 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4121 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 4127 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4122 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4128 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4123 4129
4124 return err; 4130 return err;
4125 } 4131 }
@@ -4164,7 +4170,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4164 else 4170 else
4165 bmsr &= ~BMSR_LSTATUS; 4171 bmsr &= ~BMSR_LSTATUS;
4166 } 4172 }
4167 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4173 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4168 } 4174 }
4169 } 4175 }
4170 4176
@@ -4191,6 +4197,8 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4191 current_duplex = DUPLEX_FULL; 4197 current_duplex = DUPLEX_FULL;
4192 else 4198 else
4193 current_duplex = DUPLEX_HALF; 4199 current_duplex = DUPLEX_HALF;
4200 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4201 /* Link is up via parallel detect */
4194 } else { 4202 } else {
4195 current_link_up = 0; 4203 current_link_up = 0;
4196 } 4204 }
@@ -4217,7 +4225,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4217 netif_carrier_on(tp->dev); 4225 netif_carrier_on(tp->dev);
4218 else { 4226 else {
4219 netif_carrier_off(tp->dev); 4227 netif_carrier_off(tp->dev);
4220 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4228 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4221 } 4229 }
4222 tg3_link_report(tp); 4230 tg3_link_report(tp);
4223 } 4231 }
@@ -4241,13 +4249,14 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4241 u32 phy1, phy2; 4249 u32 phy1, phy2;
4242 4250
4243 /* Select shadow register 0x1f */ 4251 /* Select shadow register 0x1f */
4244 tg3_writephy(tp, 0x1c, 0x7c00); 4252 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4245 tg3_readphy(tp, 0x1c, &phy1); 4253 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4246 4254
4247 /* Select expansion interrupt status register */ 4255 /* Select expansion interrupt status register */
4248 tg3_writephy(tp, 0x17, 0x0f01); 4256 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4249 tg3_readphy(tp, 0x15, &phy2); 4257 MII_TG3_DSP_EXP1_INT_STAT);
4250 tg3_readphy(tp, 0x15, &phy2); 4258 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4259 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4251 4260
4252 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 4261 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4253 /* We have signal detect and not receiving 4262 /* We have signal detect and not receiving
@@ -4258,17 +4267,18 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4258 bmcr &= ~BMCR_ANENABLE; 4267 bmcr &= ~BMCR_ANENABLE;
4259 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 4268 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4260 tg3_writephy(tp, MII_BMCR, bmcr); 4269 tg3_writephy(tp, MII_BMCR, bmcr);
4261 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; 4270 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4262 } 4271 }
4263 } 4272 }
4264 } else if (netif_carrier_ok(tp->dev) && 4273 } else if (netif_carrier_ok(tp->dev) &&
4265 (tp->link_config.autoneg == AUTONEG_ENABLE) && 4274 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4266 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { 4275 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4267 u32 phy2; 4276 u32 phy2;
4268 4277
4269 /* Select expansion interrupt status register */ 4278 /* Select expansion interrupt status register */
4270 tg3_writephy(tp, 0x17, 0x0f01); 4279 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4271 tg3_readphy(tp, 0x15, &phy2); 4280 MII_TG3_DSP_EXP1_INT_STAT);
4281 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4272 if (phy2 & 0x20) { 4282 if (phy2 & 0x20) {
4273 u32 bmcr; 4283 u32 bmcr;
4274 4284
@@ -4276,7 +4286,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4276 tg3_readphy(tp, MII_BMCR, &bmcr); 4286 tg3_readphy(tp, MII_BMCR, &bmcr);
4277 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 4287 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4278 4288
4279 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4289 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4280 4290
4281 } 4291 }
4282 } 4292 }
@@ -4286,9 +4296,9 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4286{ 4296{
4287 int err; 4297 int err;
4288 4298
4289 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 4299 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4290 err = tg3_setup_fiber_phy(tp, force_reset); 4300 err = tg3_setup_fiber_phy(tp, force_reset);
4291 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 4301 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4292 err = tg3_setup_fiber_mii_phy(tp, force_reset); 4302 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4293 else 4303 else
4294 err = tg3_setup_copper_phy(tp, force_reset); 4304 err = tg3_setup_copper_phy(tp, force_reset);
@@ -4367,7 +4377,8 @@ static void tg3_tx_recover(struct tg3 *tp)
4367 4377
4368static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 4378static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4369{ 4379{
4370 smp_mb(); 4380 /* Tell compiler to fetch tx indices from memory. */
4381 barrier();
4371 return tnapi->tx_pending - 4382 return tnapi->tx_pending -
4372 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 4383 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4373} 4384}
@@ -5552,8 +5563,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5552 5563
5553 entry = tnapi->tx_prod; 5564 entry = tnapi->tx_prod;
5554 base_flags = 0; 5565 base_flags = 0;
5555 mss = 0; 5566 mss = skb_shinfo(skb)->gso_size;
5556 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5567 if (mss) {
5557 int tcp_opt_len, ip_tcp_len; 5568 int tcp_opt_len, ip_tcp_len;
5558 u32 hdrlen; 5569 u32 hdrlen;
5559 5570
@@ -5651,6 +5662,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5651 tnapi->tx_prod = entry; 5662 tnapi->tx_prod = entry;
5652 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 5663 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5653 netif_tx_stop_queue(txq); 5664 netif_tx_stop_queue(txq);
5665
5666 /* netif_tx_stop_queue() must be done before checking
5667 * checking tx index in tg3_tx_avail() below, because in
5668 * tg3_tx(), we update tx index before checking for
5669 * netif_tx_queue_stopped().
5670 */
5671 smp_mb();
5654 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 5672 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5655 netif_tx_wake_queue(txq); 5673 netif_tx_wake_queue(txq);
5656 } 5674 }
@@ -5696,6 +5714,13 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5696 /* Estimate the number of fragments in the worst case */ 5714 /* Estimate the number of fragments in the worst case */
5697 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { 5715 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5698 netif_stop_queue(tp->dev); 5716 netif_stop_queue(tp->dev);
5717
5718 /* netif_tx_stop_queue() must be done before checking
5719 * checking tx index in tg3_tx_avail() below, because in
5720 * tg3_tx(), we update tx index before checking for
5721 * netif_tx_queue_stopped().
5722 */
5723 smp_mb();
5699 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) 5724 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5700 return NETDEV_TX_BUSY; 5725 return NETDEV_TX_BUSY;
5701 5726
@@ -5759,9 +5784,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5759 if (skb->ip_summed == CHECKSUM_PARTIAL) 5784 if (skb->ip_summed == CHECKSUM_PARTIAL)
5760 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5785 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5761 5786
5762 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5787 mss = skb_shinfo(skb)->gso_size;
5788 if (mss) {
5763 struct iphdr *iph; 5789 struct iphdr *iph;
5764 u32 tcp_opt_len, ip_tcp_len, hdr_len; 5790 u32 tcp_opt_len, hdr_len;
5765 5791
5766 if (skb_header_cloned(skb) && 5792 if (skb_header_cloned(skb) &&
5767 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 5793 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5769,10 +5795,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5769 goto out_unlock; 5795 goto out_unlock;
5770 } 5796 }
5771 5797
5798 iph = ip_hdr(skb);
5772 tcp_opt_len = tcp_optlen(skb); 5799 tcp_opt_len = tcp_optlen(skb);
5773 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5774 5800
5775 hdr_len = ip_tcp_len + tcp_opt_len; 5801 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5802 hdr_len = skb_headlen(skb) - ETH_HLEN;
5803 } else {
5804 u32 ip_tcp_len;
5805
5806 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5807 hdr_len = ip_tcp_len + tcp_opt_len;
5808
5809 iph->check = 0;
5810 iph->tot_len = htons(mss + hdr_len);
5811 }
5812
5776 if (unlikely((ETH_HLEN + hdr_len) > 80) && 5813 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5777 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) 5814 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5778 return tg3_tso_bug(tp, skb); 5815 return tg3_tso_bug(tp, skb);
@@ -5780,9 +5817,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5780 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 5817 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5781 TXD_FLAG_CPU_POST_DMA); 5818 TXD_FLAG_CPU_POST_DMA);
5782 5819
5783 iph = ip_hdr(skb);
5784 iph->check = 0;
5785 iph->tot_len = htons(mss + hdr_len);
5786 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 5820 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5787 tcp_hdr(skb)->check = 0; 5821 tcp_hdr(skb)->check = 0;
5788 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 5822 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
@@ -5922,6 +5956,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5922 tnapi->tx_prod = entry; 5956 tnapi->tx_prod = entry;
5923 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 5957 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5924 netif_tx_stop_queue(txq); 5958 netif_tx_stop_queue(txq);
5959
5960 /* netif_tx_stop_queue() must be done before checking
5961 * checking tx index in tg3_tx_avail() below, because in
5962 * tg3_tx(), we update tx index before checking for
5963 * netif_tx_queue_stopped().
5964 */
5965 smp_mb();
5925 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 5966 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5926 netif_tx_wake_queue(txq); 5967 netif_tx_wake_queue(txq);
5927 } 5968 }
@@ -6212,6 +6253,8 @@ static void tg3_free_rings(struct tg3 *tp)
6212 for (j = 0; j < tp->irq_cnt; j++) { 6253 for (j = 0; j < tp->irq_cnt; j++) {
6213 struct tg3_napi *tnapi = &tp->napi[j]; 6254 struct tg3_napi *tnapi = &tp->napi[j];
6214 6255
6256 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6257
6215 if (!tnapi->tx_buffers) 6258 if (!tnapi->tx_buffers)
6216 continue; 6259 continue;
6217 6260
@@ -6247,8 +6290,6 @@ static void tg3_free_rings(struct tg3 *tp)
6247 6290
6248 dev_kfree_skb_any(skb); 6291 dev_kfree_skb_any(skb);
6249 } 6292 }
6250
6251 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6252 } 6293 }
6253} 6294}
6254 6295
@@ -6603,7 +6644,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6603 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); 6644 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6604 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); 6645 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6605 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, 6646 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6606 APE_HOST_DRIVER_ID_MAGIC); 6647 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6607 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, 6648 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6608 APE_HOST_BEHAV_NO_PHYLOCK); 6649 APE_HOST_BEHAV_NO_PHYLOCK);
6609 6650
@@ -6782,7 +6823,8 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6782 /* Allow reads and writes to the APE register and memory space. */ 6823 /* Allow reads and writes to the APE register and memory space. */
6783 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 6824 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6784 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 6825 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6785 PCISTATE_ALLOW_APE_SHMEM_WR; 6826 PCISTATE_ALLOW_APE_SHMEM_WR |
6827 PCISTATE_ALLOW_APE_PSPACE_WR;
6786 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 6828 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6787 6829
6788 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 6830 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
@@ -6897,9 +6939,13 @@ static int tg3_chip_reset(struct tg3 *tp)
6897 val = GRC_MISC_CFG_CORECLK_RESET; 6939 val = GRC_MISC_CFG_CORECLK_RESET;
6898 6940
6899 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 6941 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6900 if (tr32(0x7e2c) == 0x60) { 6942 /* Force PCIe 1.0a mode */
6901 tw32(0x7e2c, 0x20); 6943 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6902 } 6944 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
6945 tr32(TG3_PCIE_PHY_TSTCTL) ==
6946 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
6947 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
6948
6903 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { 6949 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6904 tw32(GRC_MISC_CFG, (1 << 29)); 6950 tw32(GRC_MISC_CFG, (1 << 29));
6905 val |= (1 << 29); 6951 val |= (1 << 29);
@@ -6912,8 +6958,11 @@ static int tg3_chip_reset(struct tg3 *tp)
6912 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 6958 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6913 } 6959 }
6914 6960
6915 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 6961 /* Manage gphy power for all CPMU absent PCIe devices. */
6962 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6963 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
6916 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 6964 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6965
6917 tw32(GRC_MISC_CFG, val); 6966 tw32(GRC_MISC_CFG, val);
6918 6967
6919 /* restore 5701 hardware bug workaround write method */ 6968 /* restore 5701 hardware bug workaround write method */
@@ -6970,8 +7019,7 @@ static int tg3_chip_reset(struct tg3 *tp)
6970 * Older PCIe devices only support the 128 byte 7019 * Older PCIe devices only support the 128 byte
6971 * MPS setting. Enforce the restriction. 7020 * MPS setting. Enforce the restriction.
6972 */ 7021 */
6973 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || 7022 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
6974 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6975 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 7023 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6976 pci_write_config_word(tp->pdev, 7024 pci_write_config_word(tp->pdev,
6977 tp->pcie_cap + PCI_EXP_DEVCTL, 7025 tp->pcie_cap + PCI_EXP_DEVCTL,
@@ -7018,10 +7066,10 @@ static int tg3_chip_reset(struct tg3 *tp)
7018 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7066 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7019 } 7067 }
7020 7068
7021 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 7069 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7022 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 7070 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7023 tw32_f(MAC_MODE, tp->mac_mode); 7071 tw32_f(MAC_MODE, tp->mac_mode);
7024 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 7072 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7025 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 7073 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7026 tw32_f(MAC_MODE, tp->mac_mode); 7074 tw32_f(MAC_MODE, tp->mac_mode);
7027 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 7075 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
@@ -7041,35 +7089,10 @@ static int tg3_chip_reset(struct tg3 *tp)
7041 7089
7042 tg3_mdio_start(tp); 7090 tg3_mdio_start(tp);
7043 7091
7044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7045 u8 phy_addr;
7046
7047 phy_addr = tp->phy_addr;
7048 tp->phy_addr = TG3_PHY_PCIE_ADDR;
7049
7050 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7051 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
7052 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
7053 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
7054 TG3_PCIEPHY_TX0CTRL1_NB_EN;
7055 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
7056 udelay(10);
7057
7058 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7059 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
7060 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
7061 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
7062 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
7063 udelay(10);
7064
7065 tp->phy_addr = phy_addr;
7066 }
7067
7068 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 7092 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7069 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 7093 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 7094 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7071 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 7095 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7072 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
7073 val = tr32(0x7c00); 7096 val = tr32(0x7c00);
7074 7097
7075 tw32(0x7c00, val | (1 << 25)); 7098 tw32(0x7c00, val | (1 << 25));
@@ -7427,7 +7450,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7427 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 7450 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7428 } 7451 }
7429 7452
7430 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { 7453 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7431 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 7454 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7432 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 7455 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7433 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 7456 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
@@ -7504,7 +7527,8 @@ static void tg3_rings_reset(struct tg3 *tp)
7504 7527
7505 7528
7506 /* Disable all receive return rings but the first. */ 7529 /* Disable all receive return rings but the first. */
7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 7530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7508 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 7532 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7509 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7533 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7510 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 7534 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
@@ -7720,7 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7720 */ 7744 */
7721 val = tr32(TG3PCI_PCISTATE); 7745 val = tr32(TG3PCI_PCISTATE);
7722 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 7746 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7723 PCISTATE_ALLOW_APE_SHMEM_WR; 7747 PCISTATE_ALLOW_APE_SHMEM_WR |
7748 PCISTATE_ALLOW_APE_PSPACE_WR;
7724 tw32(TG3PCI_PCISTATE, val); 7749 tw32(TG3PCI_PCISTATE, val);
7725 } 7750 }
7726 7751
@@ -7740,8 +7765,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7740 if (err) 7765 if (err)
7741 return err; 7766 return err;
7742 7767
7743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7768 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7745 val = tr32(TG3PCI_DMA_RW_CTRL) & 7769 val = tr32(TG3PCI_DMA_RW_CTRL) &
7746 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 7770 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7747 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) 7771 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
@@ -7869,7 +7893,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7869 ((u64) tpr->rx_std_mapping >> 32)); 7893 ((u64) tpr->rx_std_mapping >> 32));
7870 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7894 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7871 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7895 ((u64) tpr->rx_std_mapping & 0xffffffff));
7872 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) 7896 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7897 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7873 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7898 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7874 NIC_SRAM_RX_BUFFER_DESC); 7899 NIC_SRAM_RX_BUFFER_DESC);
7875 7900
@@ -7894,7 +7919,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7894 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7919 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7895 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7920 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7896 BDINFO_FLAGS_USE_EXT_RECV); 7921 BDINFO_FLAGS_USE_EXT_RECV);
7897 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) 7922 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7898 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7924 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7899 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7925 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7900 } else { 7926 } else {
@@ -7902,8 +7928,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7902 BDINFO_FLAGS_DISABLED); 7928 BDINFO_FLAGS_DISABLED);
7903 } 7929 }
7904 7930
7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7931 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7907 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | 7932 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7908 (TG3_RX_STD_DMA_SZ << 2); 7933 (TG3_RX_STD_DMA_SZ << 2);
7909 else 7934 else
@@ -7920,8 +7945,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7920 tp->rx_jumbo_pending : 0; 7945 tp->rx_jumbo_pending : 0;
7921 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 7946 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7922 7947
7923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7948 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7925 tw32(STD_REPLENISH_LWM, 32); 7949 tw32(STD_REPLENISH_LWM, 32);
7926 tw32(JMB_REPLENISH_LWM, 16); 7950 tw32(JMB_REPLENISH_LWM, 16);
7927 } 7951 }
@@ -7956,7 +7980,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7956 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 7980 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7957 RDMAC_MODE_LNGREAD_ENAB); 7981 RDMAC_MODE_LNGREAD_ENAB);
7958 7982
7959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 7983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7960 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 7985 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7961 7986
7962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 7987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8048,8 +8073,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8048 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8073 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8049 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 8074 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8050 8075
8051 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 8076 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8052 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 8077 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8053 /* reset to prevent losing 1st rx packet intermittently */ 8078 /* reset to prevent losing 1st rx packet intermittently */
8054 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8079 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8055 udelay(10); 8080 udelay(10);
@@ -8062,7 +8087,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8062 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 8087 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8063 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 8088 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8064 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 8089 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8065 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 8090 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8066 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) 8091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8067 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8092 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8068 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 8093 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
@@ -8195,6 +8220,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8195 } 8220 }
8196 8221
8197 tp->tx_mode = TX_MODE_ENABLE; 8222 tp->tx_mode = TX_MODE_ENABLE;
8223 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8225 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8198 tw32_f(MAC_TX_MODE, tp->tx_mode); 8226 tw32_f(MAC_TX_MODE, tp->tx_mode);
8199 udelay(100); 8227 udelay(100);
8200 8228
@@ -8244,16 +8272,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8244 tw32(MAC_LED_CTRL, tp->led_ctrl); 8272 tw32(MAC_LED_CTRL, tp->led_ctrl);
8245 8273
8246 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 8274 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8247 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 8275 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8248 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8276 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8249 udelay(10); 8277 udelay(10);
8250 } 8278 }
8251 tw32_f(MAC_RX_MODE, tp->rx_mode); 8279 tw32_f(MAC_RX_MODE, tp->rx_mode);
8252 udelay(10); 8280 udelay(10);
8253 8281
8254 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 8282 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8255 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && 8283 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8256 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { 8284 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8257 /* Set drive transmission level to 1.2V */ 8285 /* Set drive transmission level to 1.2V */
8258 /* only if the signal pre-emphasis bit is not set */ 8286 /* only if the signal pre-emphasis bit is not set */
8259 val = tr32(MAC_SERDES_CFG); 8287 val = tr32(MAC_SERDES_CFG);
@@ -8275,12 +8303,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8275 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 8303 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8276 8304
8277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 8305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8278 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 8306 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8279 /* Use hardware link auto-negotiation */ 8307 /* Use hardware link auto-negotiation */
8280 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; 8308 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8281 } 8309 }
8282 8310
8283 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && 8311 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8284 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { 8312 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8285 u32 tmp; 8313 u32 tmp;
8286 8314
@@ -8292,8 +8320,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8292 } 8320 }
8293 8321
8294 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 8322 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8295 if (tp->link_config.phy_is_low_power) { 8323 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8296 tp->link_config.phy_is_low_power = 0; 8324 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8297 tp->link_config.speed = tp->link_config.orig_speed; 8325 tp->link_config.speed = tp->link_config.orig_speed;
8298 tp->link_config.duplex = tp->link_config.orig_duplex; 8326 tp->link_config.duplex = tp->link_config.orig_duplex;
8299 tp->link_config.autoneg = tp->link_config.orig_autoneg; 8327 tp->link_config.autoneg = tp->link_config.orig_autoneg;
@@ -8303,15 +8331,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8303 if (err) 8331 if (err)
8304 return err; 8332 return err;
8305 8333
8306 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 8334 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8307 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { 8335 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8308 u32 tmp; 8336 u32 tmp;
8309 8337
8310 /* Clear CRC stats. */ 8338 /* Clear CRC stats. */
8311 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 8339 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8312 tg3_writephy(tp, MII_TG3_TEST1, 8340 tg3_writephy(tp, MII_TG3_TEST1,
8313 tmp | MII_TG3_TEST1_CRC_EN); 8341 tmp | MII_TG3_TEST1_CRC_EN);
8314 tg3_readphy(tp, 0x14, &tmp); 8342 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8315 } 8343 }
8316 } 8344 }
8317 } 8345 }
@@ -8479,7 +8507,7 @@ static void tg3_timer(unsigned long __opaque)
8479 mac_stat = tr32(MAC_STATUS); 8507 mac_stat = tr32(MAC_STATUS);
8480 8508
8481 phy_event = 0; 8509 phy_event = 0;
8482 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) { 8510 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8483 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 8511 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8484 phy_event = 1; 8512 phy_event = 1;
8485 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 8513 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
@@ -8495,7 +8523,7 @@ static void tg3_timer(unsigned long __opaque)
8495 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 8523 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8496 need_setup = 1; 8524 need_setup = 1;
8497 } 8525 }
8498 if (! netif_carrier_ok(tp->dev) && 8526 if (!netif_carrier_ok(tp->dev) &&
8499 (mac_stat & (MAC_STATUS_PCS_SYNCED | 8527 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8500 MAC_STATUS_SIGNAL_DET))) { 8528 MAC_STATUS_SIGNAL_DET))) {
8501 need_setup = 1; 8529 need_setup = 1;
@@ -8511,8 +8539,10 @@ static void tg3_timer(unsigned long __opaque)
8511 } 8539 }
8512 tg3_setup_phy(tp, 0); 8540 tg3_setup_phy(tp, 0);
8513 } 8541 }
8514 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 8542 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8543 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8515 tg3_serdes_parallel_detect(tp); 8544 tg3_serdes_parallel_detect(tp);
8545 }
8516 8546
8517 tp->timer_counter = tp->timer_multiplier; 8547 tp->timer_counter = tp->timer_multiplier;
8518 } 8548 }
@@ -8605,8 +8635,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8605 * Turn off MSI one shot mode. Otherwise this test has no 8635 * Turn off MSI one shot mode. Otherwise this test has no
8606 * observable way to know whether the interrupt was delivered. 8636 * observable way to know whether the interrupt was delivered.
8607 */ 8637 */
8608 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8638 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8610 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8639 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8611 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 8640 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8612 tw32(MSGINT_MODE, val); 8641 tw32(MSGINT_MODE, val);
@@ -8649,8 +8678,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8649 8678
8650 if (intr_ok) { 8679 if (intr_ok) {
8651 /* Reenable MSI one shot mode. */ 8680 /* Reenable MSI one shot mode. */
8652 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8681 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8654 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8682 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8655 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 8683 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8656 tw32(MSGINT_MODE, val); 8684 tw32(MSGINT_MODE, val);
@@ -8775,9 +8803,9 @@ static bool tg3_enable_msix(struct tg3 *tp)
8775 } 8803 }
8776 8804
8777 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); 8805 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8778 if (rc != 0) { 8806 if (rc < 0) {
8779 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS) 8807 return false;
8780 return false; 8808 } else if (rc != 0) {
8781 if (pci_enable_msix(tp->pdev, msix_ent, rc)) 8809 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8782 return false; 8810 return false;
8783 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 8811 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
@@ -8785,16 +8813,19 @@ static bool tg3_enable_msix(struct tg3 *tp)
8785 tp->irq_cnt = rc; 8813 tp->irq_cnt = rc;
8786 } 8814 }
8787 8815
8788 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8789
8790 for (i = 0; i < tp->irq_max; i++) 8816 for (i = 0; i < tp->irq_max; i++)
8791 tp->napi[i].irq_vec = msix_ent[i].vector; 8817 tp->napi[i].irq_vec = msix_ent[i].vector;
8792 8818
8793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 8819 tp->dev->real_num_tx_queues = 1;
8794 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; 8820 if (tp->irq_cnt > 1) {
8795 tp->dev->real_num_tx_queues = tp->irq_cnt - 1; 8821 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8796 } else 8822
8797 tp->dev->real_num_tx_queues = 1; 8823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8824 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8825 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8826 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8827 }
8828 }
8798 8829
8799 return true; 8830 return true;
8800} 8831}
@@ -8838,7 +8869,7 @@ static void tg3_ints_fini(struct tg3 *tp)
8838 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 8869 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8839 pci_disable_msi(tp->pdev); 8870 pci_disable_msi(tp->pdev);
8840 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; 8871 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8841 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS; 8872 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
8842} 8873}
8843 8874
8844static int tg3_open(struct net_device *dev) 8875static int tg3_open(struct net_device *dev)
@@ -8942,10 +8973,8 @@ static int tg3_open(struct net_device *dev)
8942 goto err_out2; 8973 goto err_out2;
8943 } 8974 }
8944 8975
8945 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 8976 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8946 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && 8977 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8947 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8948 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8949 u32 val = tr32(PCIE_TRANSACTION_CFG); 8978 u32 val = tr32(PCIE_TRANSACTION_CFG);
8950 8979
8951 tw32(PCIE_TRANSACTION_CFG, 8980 tw32(PCIE_TRANSACTION_CFG,
@@ -8982,7 +9011,8 @@ err_out1:
8982 return err; 9011 return err;
8983} 9012}
8984 9013
8985static struct net_device_stats *tg3_get_stats(struct net_device *); 9014static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9015 struct rtnl_link_stats64 *);
8986static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); 9016static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8987 9017
8988static int tg3_close(struct net_device *dev) 9018static int tg3_close(struct net_device *dev)
@@ -9016,8 +9046,8 @@ static int tg3_close(struct net_device *dev)
9016 9046
9017 tg3_ints_fini(tp); 9047 tg3_ints_fini(tp);
9018 9048
9019 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), 9049 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9020 sizeof(tp->net_stats_prev)); 9050
9021 memcpy(&tp->estats_prev, tg3_get_estats(tp), 9051 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9022 sizeof(tp->estats_prev)); 9052 sizeof(tp->estats_prev));
9023 9053
@@ -9030,28 +9060,16 @@ static int tg3_close(struct net_device *dev)
9030 return 0; 9060 return 0;
9031} 9061}
9032 9062
9033static inline unsigned long get_stat64(tg3_stat64_t *val) 9063static inline u64 get_stat64(tg3_stat64_t *val)
9034{
9035 unsigned long ret;
9036
9037#if (BITS_PER_LONG == 32)
9038 ret = val->low;
9039#else
9040 ret = ((u64)val->high << 32) | ((u64)val->low);
9041#endif
9042 return ret;
9043}
9044
9045static inline u64 get_estat64(tg3_stat64_t *val)
9046{ 9064{
9047 return ((u64)val->high << 32) | ((u64)val->low); 9065 return ((u64)val->high << 32) | ((u64)val->low);
9048} 9066}
9049 9067
9050static unsigned long calc_crc_errors(struct tg3 *tp) 9068static u64 calc_crc_errors(struct tg3 *tp)
9051{ 9069{
9052 struct tg3_hw_stats *hw_stats = tp->hw_stats; 9070 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9053 9071
9054 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 9072 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9055 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 9073 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { 9074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9057 u32 val; 9075 u32 val;
@@ -9060,7 +9078,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
9060 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 9078 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9061 tg3_writephy(tp, MII_TG3_TEST1, 9079 tg3_writephy(tp, MII_TG3_TEST1,
9062 val | MII_TG3_TEST1_CRC_EN); 9080 val | MII_TG3_TEST1_CRC_EN);
9063 tg3_readphy(tp, 0x14, &val); 9081 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9064 } else 9082 } else
9065 val = 0; 9083 val = 0;
9066 spin_unlock_bh(&tp->lock); 9084 spin_unlock_bh(&tp->lock);
@@ -9075,7 +9093,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
9075 9093
9076#define ESTAT_ADD(member) \ 9094#define ESTAT_ADD(member) \
9077 estats->member = old_estats->member + \ 9095 estats->member = old_estats->member + \
9078 get_estat64(&hw_stats->member) 9096 get_stat64(&hw_stats->member)
9079 9097
9080static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) 9098static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9081{ 9099{
@@ -9165,11 +9183,11 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9165 return estats; 9183 return estats;
9166} 9184}
9167 9185
9168static struct net_device_stats *tg3_get_stats(struct net_device *dev) 9186static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9187 struct rtnl_link_stats64 *stats)
9169{ 9188{
9170 struct tg3 *tp = netdev_priv(dev); 9189 struct tg3 *tp = netdev_priv(dev);
9171 struct net_device_stats *stats = &tp->net_stats; 9190 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9172 struct net_device_stats *old_stats = &tp->net_stats_prev;
9173 struct tg3_hw_stats *hw_stats = tp->hw_stats; 9191 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9174 9192
9175 if (!hw_stats) 9193 if (!hw_stats)
@@ -9350,13 +9368,13 @@ static void tg3_get_regs(struct net_device *dev,
9350 9368
9351 memset(p, 0, TG3_REGDUMP_LEN); 9369 memset(p, 0, TG3_REGDUMP_LEN);
9352 9370
9353 if (tp->link_config.phy_is_low_power) 9371 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9354 return; 9372 return;
9355 9373
9356 tg3_full_lock(tp, 0); 9374 tg3_full_lock(tp, 0);
9357 9375
9358#define __GET_REG32(reg) (*(p)++ = tr32(reg)) 9376#define __GET_REG32(reg) (*(p)++ = tr32(reg))
9359#define GET_REG32_LOOP(base,len) \ 9377#define GET_REG32_LOOP(base, len) \
9360do { p = (u32 *)(orig_p + (base)); \ 9378do { p = (u32 *)(orig_p + (base)); \
9361 for (i = 0; i < len; i += 4) \ 9379 for (i = 0; i < len; i += 4) \
9362 __GET_REG32((base) + i); \ 9380 __GET_REG32((base) + i); \
@@ -9429,7 +9447,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
9429 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) 9447 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9430 return -EINVAL; 9448 return -EINVAL;
9431 9449
9432 if (tp->link_config.phy_is_low_power) 9450 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9433 return -EAGAIN; 9451 return -EAGAIN;
9434 9452
9435 offset = eeprom->offset; 9453 offset = eeprom->offset;
@@ -9449,7 +9467,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
9449 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 9467 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9450 if (ret) 9468 if (ret)
9451 return ret; 9469 return ret;
9452 memcpy(data, ((char*)&val) + b_offset, b_count); 9470 memcpy(data, ((char *)&val) + b_offset, b_count);
9453 len -= b_count; 9471 len -= b_count;
9454 offset += b_count; 9472 offset += b_count;
9455 eeprom->len += b_count; 9473 eeprom->len += b_count;
@@ -9491,7 +9509,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
9491 u8 *buf; 9509 u8 *buf;
9492 __be32 start, end; 9510 __be32 start, end;
9493 9511
9494 if (tp->link_config.phy_is_low_power) 9512 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9495 return -EAGAIN; 9513 return -EAGAIN;
9496 9514
9497 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 9515 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -9548,7 +9566,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9548 9566
9549 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9567 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9550 struct phy_device *phydev; 9568 struct phy_device *phydev;
9551 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9569 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9552 return -EAGAIN; 9570 return -EAGAIN;
9553 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 9571 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9554 return phy_ethtool_gset(phydev, cmd); 9572 return phy_ethtool_gset(phydev, cmd);
@@ -9556,11 +9574,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9556 9574
9557 cmd->supported = (SUPPORTED_Autoneg); 9575 cmd->supported = (SUPPORTED_Autoneg);
9558 9576
9559 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 9577 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9560 cmd->supported |= (SUPPORTED_1000baseT_Half | 9578 cmd->supported |= (SUPPORTED_1000baseT_Half |
9561 SUPPORTED_1000baseT_Full); 9579 SUPPORTED_1000baseT_Full);
9562 9580
9563 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 9581 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9564 cmd->supported |= (SUPPORTED_100baseT_Half | 9582 cmd->supported |= (SUPPORTED_100baseT_Half |
9565 SUPPORTED_100baseT_Full | 9583 SUPPORTED_100baseT_Full |
9566 SUPPORTED_10baseT_Half | 9584 SUPPORTED_10baseT_Half |
@@ -9591,7 +9609,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9591 9609
9592 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9610 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9593 struct phy_device *phydev; 9611 struct phy_device *phydev;
9594 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9612 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9595 return -EAGAIN; 9613 return -EAGAIN;
9596 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 9614 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9597 return phy_ethtool_sset(phydev, cmd); 9615 return phy_ethtool_sset(phydev, cmd);
@@ -9611,11 +9629,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9611 ADVERTISED_Pause | 9629 ADVERTISED_Pause |
9612 ADVERTISED_Asym_Pause; 9630 ADVERTISED_Asym_Pause;
9613 9631
9614 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 9632 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9615 mask |= ADVERTISED_1000baseT_Half | 9633 mask |= ADVERTISED_1000baseT_Half |
9616 ADVERTISED_1000baseT_Full; 9634 ADVERTISED_1000baseT_Full;
9617 9635
9618 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 9636 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9619 mask |= ADVERTISED_100baseT_Half | 9637 mask |= ADVERTISED_100baseT_Half |
9620 ADVERTISED_100baseT_Full | 9638 ADVERTISED_100baseT_Full |
9621 ADVERTISED_10baseT_Half | 9639 ADVERTISED_10baseT_Half |
@@ -9636,7 +9654,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9636 9654
9637 cmd->advertising &= mask; 9655 cmd->advertising &= mask;
9638 } else { 9656 } else {
9639 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 9657 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9640 if (cmd->speed != SPEED_1000) 9658 if (cmd->speed != SPEED_1000)
9641 return -EINVAL; 9659 return -EINVAL;
9642 9660
@@ -9772,11 +9790,11 @@ static int tg3_nway_reset(struct net_device *dev)
9772 if (!netif_running(dev)) 9790 if (!netif_running(dev))
9773 return -EAGAIN; 9791 return -EAGAIN;
9774 9792
9775 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 9793 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
9776 return -EINVAL; 9794 return -EINVAL;
9777 9795
9778 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9796 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9779 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9797 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9780 return -EAGAIN; 9798 return -EAGAIN;
9781 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 9799 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9782 } else { 9800 } else {
@@ -9787,7 +9805,7 @@ static int tg3_nway_reset(struct net_device *dev)
9787 tg3_readphy(tp, MII_BMCR, &bmcr); 9805 tg3_readphy(tp, MII_BMCR, &bmcr);
9788 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 9806 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9789 ((bmcr & BMCR_ANENABLE) || 9807 ((bmcr & BMCR_ANENABLE) ||
9790 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { 9808 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
9791 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 9809 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9792 BMCR_ANENABLE); 9810 BMCR_ANENABLE);
9793 r = 0; 9811 r = 0;
@@ -9922,7 +9940,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9922 else 9940 else
9923 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; 9941 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9924 9942
9925 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 9943 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
9926 u32 oldadv = phydev->advertising & 9944 u32 oldadv = phydev->advertising &
9927 (ADVERTISED_Pause | ADVERTISED_Asym_Pause); 9945 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9928 if (oldadv != newadv) { 9946 if (oldadv != newadv) {
@@ -10251,7 +10269,7 @@ static int tg3_test_link(struct tg3 *tp)
10251 if (!netif_running(tp->dev)) 10269 if (!netif_running(tp->dev))
10252 return -ENODEV; 10270 return -ENODEV;
10253 10271
10254 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 10272 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10255 max = TG3_SERDES_TIMEOUT_SEC; 10273 max = TG3_SERDES_TIMEOUT_SEC;
10256 else 10274 else
10257 max = TG3_COPPER_TIMEOUT_SEC; 10275 max = TG3_COPPER_TIMEOUT_SEC;
@@ -10554,7 +10572,8 @@ static int tg3_test_memory(struct tg3 *tp)
10554 int err = 0; 10572 int err = 0;
10555 int i; 10573 int i;
10556 10574
10557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 10575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10558 mem_tbl = mem_tbl_5717; 10577 mem_tbl = mem_tbl_5717;
10559 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 10578 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10560 mem_tbl = mem_tbl_57765; 10579 mem_tbl = mem_tbl_57765;
@@ -10568,8 +10587,8 @@ static int tg3_test_memory(struct tg3 *tp)
10568 mem_tbl = mem_tbl_570x; 10587 mem_tbl = mem_tbl_570x;
10569 10588
10570 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 10589 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10571 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, 10590 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10572 mem_tbl[i].len)) != 0) 10591 if (err)
10573 break; 10592 break;
10574 } 10593 }
10575 10594
@@ -10612,7 +10631,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10612 MAC_MODE_PORT_INT_LPBACK; 10631 MAC_MODE_PORT_INT_LPBACK;
10613 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 10632 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10614 mac_mode |= MAC_MODE_LINK_POLARITY; 10633 mac_mode |= MAC_MODE_LINK_POLARITY;
10615 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 10634 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10616 mac_mode |= MAC_MODE_PORT_MODE_MII; 10635 mac_mode |= MAC_MODE_PORT_MODE_MII;
10617 else 10636 else
10618 mac_mode |= MAC_MODE_PORT_MODE_GMII; 10637 mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -10620,7 +10639,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10620 } else if (loopback_mode == TG3_PHY_LOOPBACK) { 10639 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10621 u32 val; 10640 u32 val;
10622 10641
10623 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 10642 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10624 tg3_phy_fet_toggle_apd(tp, false); 10643 tg3_phy_fet_toggle_apd(tp, false);
10625 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; 10644 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10626 } else 10645 } else
@@ -10632,7 +10651,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10632 udelay(40); 10651 udelay(40);
10633 10652
10634 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 10653 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10635 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 10654 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10636 tg3_writephy(tp, MII_TG3_FET_PTEST, 10655 tg3_writephy(tp, MII_TG3_FET_PTEST,
10637 MII_TG3_FET_PTEST_FRC_TX_LINK | 10656 MII_TG3_FET_PTEST_FRC_TX_LINK |
10638 MII_TG3_FET_PTEST_FRC_TX_LOCK); 10657 MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -10644,7 +10663,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10644 mac_mode |= MAC_MODE_PORT_MODE_GMII; 10663 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10645 10664
10646 /* reset to prevent losing 1st rx packet intermittently */ 10665 /* reset to prevent losing 1st rx packet intermittently */
10647 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 10666 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10648 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10667 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10649 udelay(10); 10668 udelay(10);
10650 tw32_f(MAC_RX_MODE, tp->rx_mode); 10669 tw32_f(MAC_RX_MODE, tp->rx_mode);
@@ -10775,7 +10794,7 @@ static int tg3_test_loopback(struct tg3 *tp)
10775 return TG3_LOOPBACK_FAILED; 10794 return TG3_LOOPBACK_FAILED;
10776 10795
10777 /* Turn off gphy autopowerdown. */ 10796 /* Turn off gphy autopowerdown. */
10778 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 10797 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10779 tg3_phy_toggle_apd(tp, false); 10798 tg3_phy_toggle_apd(tp, false);
10780 10799
10781 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { 10800 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
@@ -10812,14 +10831,14 @@ static int tg3_test_loopback(struct tg3 *tp)
10812 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); 10831 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10813 } 10832 }
10814 10833
10815 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 10834 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10816 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 10835 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10817 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) 10836 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10818 err |= TG3_PHY_LOOPBACK_FAILED; 10837 err |= TG3_PHY_LOOPBACK_FAILED;
10819 } 10838 }
10820 10839
10821 /* Re-enable gphy autopowerdown. */ 10840 /* Re-enable gphy autopowerdown. */
10822 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 10841 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10823 tg3_phy_toggle_apd(tp, true); 10842 tg3_phy_toggle_apd(tp, true);
10824 10843
10825 return err; 10844 return err;
@@ -10830,7 +10849,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10830{ 10849{
10831 struct tg3 *tp = netdev_priv(dev); 10850 struct tg3 *tp = netdev_priv(dev);
10832 10851
10833 if (tp->link_config.phy_is_low_power) 10852 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10834 tg3_set_power_state(tp, PCI_D0); 10853 tg3_set_power_state(tp, PCI_D0);
10835 10854
10836 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 10855 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -10862,7 +10881,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10862 if (!err) 10881 if (!err)
10863 tg3_nvram_unlock(tp); 10882 tg3_nvram_unlock(tp);
10864 10883
10865 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 10884 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
10866 tg3_phy_reset(tp); 10885 tg3_phy_reset(tp);
10867 10886
10868 if (tg3_test_registers(tp) != 0) { 10887 if (tg3_test_registers(tp) != 0) {
@@ -10898,7 +10917,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10898 if (irq_sync && !err2) 10917 if (irq_sync && !err2)
10899 tg3_phy_start(tp); 10918 tg3_phy_start(tp);
10900 } 10919 }
10901 if (tp->link_config.phy_is_low_power) 10920 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10902 tg3_set_power_state(tp, PCI_D3hot); 10921 tg3_set_power_state(tp, PCI_D3hot);
10903 10922
10904} 10923}
@@ -10911,10 +10930,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10911 10930
10912 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10931 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10913 struct phy_device *phydev; 10932 struct phy_device *phydev;
10914 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 10933 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10915 return -EAGAIN; 10934 return -EAGAIN;
10916 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 10935 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10917 return phy_mii_ioctl(phydev, data, cmd); 10936 return phy_mii_ioctl(phydev, ifr, cmd);
10918 } 10937 }
10919 10938
10920 switch (cmd) { 10939 switch (cmd) {
@@ -10925,10 +10944,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10925 case SIOCGMIIREG: { 10944 case SIOCGMIIREG: {
10926 u32 mii_regval; 10945 u32 mii_regval;
10927 10946
10928 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 10947 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10929 break; /* We have no PHY */ 10948 break; /* We have no PHY */
10930 10949
10931 if (tp->link_config.phy_is_low_power) 10950 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10932 return -EAGAIN; 10951 return -EAGAIN;
10933 10952
10934 spin_lock_bh(&tp->lock); 10953 spin_lock_bh(&tp->lock);
@@ -10941,10 +10960,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10941 } 10960 }
10942 10961
10943 case SIOCSMIIREG: 10962 case SIOCSMIIREG:
10944 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 10963 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10945 break; /* We have no PHY */ 10964 break; /* We have no PHY */
10946 10965
10947 if (tp->link_config.phy_is_low_power) 10966 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10948 return -EAGAIN; 10967 return -EAGAIN;
10949 10968
10950 spin_lock_bh(&tp->lock); 10969 spin_lock_bh(&tp->lock);
@@ -11634,7 +11653,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11634 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 11653 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 11654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11636 tg3_get_57780_nvram_info(tp); 11655 tg3_get_57780_nvram_info(tp);
11637 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 11656 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11657 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11638 tg3_get_5717_nvram_info(tp); 11658 tg3_get_5717_nvram_info(tp);
11639 else 11659 else
11640 tg3_get_nvram_info(tp); 11660 tg3_get_nvram_info(tp);
@@ -12070,11 +12090,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12070 12090
12071 tp->phy_id = eeprom_phy_id; 12091 tp->phy_id = eeprom_phy_id;
12072 if (eeprom_phy_serdes) { 12092 if (eeprom_phy_serdes) {
12073 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 12093 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 12094 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12075 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12076 else 12095 else
12077 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12096 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12078 } 12097 }
12079 12098
12080 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 12099 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
@@ -12158,7 +12177,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12158 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 12177 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12159 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; 12178 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12160 12179
12161 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && 12180 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12162 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 12181 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12163 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; 12182 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12164 12183
@@ -12167,19 +12186,21 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12167 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 12186 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12168 12187
12169 if (cfg2 & (1 << 17)) 12188 if (cfg2 & (1 << 17))
12170 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; 12189 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12171 12190
12172 /* serdes signal pre-emphasis in register 0x590 set by */ 12191 /* serdes signal pre-emphasis in register 0x590 set by */
12173 /* bootcode if bit 18 is set */ 12192 /* bootcode if bit 18 is set */
12174 if (cfg2 & (1 << 18)) 12193 if (cfg2 & (1 << 18))
12175 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; 12194 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12176 12195
12177 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12196 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12178 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && 12197 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12179 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12198 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12180 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD; 12199 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12181 12200
12182 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 12201 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12202 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12203 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
12183 u32 cfg3; 12204 u32 cfg3;
12184 12205
12185 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 12206 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
@@ -12284,9 +12305,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12284 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { 12305 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12285 tp->phy_id = hw_phy_id; 12306 tp->phy_id = hw_phy_id;
12286 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) 12307 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12287 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12308 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12288 else 12309 else
12289 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; 12310 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12290 } else { 12311 } else {
12291 if (tp->phy_id != TG3_PHY_ID_INVALID) { 12312 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12292 /* Do nothing, phy ID already set up in 12313 /* Do nothing, phy ID already set up in
@@ -12305,11 +12326,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12305 tp->phy_id = p->phy_id; 12326 tp->phy_id = p->phy_id;
12306 if (!tp->phy_id || 12327 if (!tp->phy_id ||
12307 tp->phy_id == TG3_PHY_ID_BCM8002) 12328 tp->phy_id == TG3_PHY_ID_BCM8002)
12308 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12329 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12309 } 12330 }
12310 } 12331 }
12311 12332
12312 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && 12333 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12313 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && 12334 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12314 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 12335 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12315 u32 bmsr, adv_reg, tg3_ctrl, mask; 12336 u32 bmsr, adv_reg, tg3_ctrl, mask;
@@ -12327,7 +12348,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12327 ADVERTISE_100HALF | ADVERTISE_100FULL | 12348 ADVERTISE_100HALF | ADVERTISE_100FULL |
12328 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); 12349 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12329 tg3_ctrl = 0; 12350 tg3_ctrl = 0;
12330 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 12351 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12331 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | 12352 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12332 MII_TG3_CTRL_ADV_1000_FULL); 12353 MII_TG3_CTRL_ADV_1000_FULL);
12333 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 12354 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
@@ -12342,7 +12363,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12342 if (!tg3_copper_is_advertising_all(tp, mask)) { 12363 if (!tg3_copper_is_advertising_all(tp, mask)) {
12343 tg3_writephy(tp, MII_ADVERTISE, adv_reg); 12364 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12344 12365
12345 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 12366 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12346 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); 12367 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12347 12368
12348 tg3_writephy(tp, MII_BMCR, 12369 tg3_writephy(tp, MII_BMCR,
@@ -12351,7 +12372,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12351 tg3_phy_set_wirespeed(tp); 12372 tg3_phy_set_wirespeed(tp);
12352 12373
12353 tg3_writephy(tp, MII_ADVERTISE, adv_reg); 12374 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12354 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 12375 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12355 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); 12376 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12356 } 12377 }
12357 12378
@@ -12364,13 +12385,13 @@ skip_phy_reset:
12364 err = tg3_init_5401phy_dsp(tp); 12385 err = tg3_init_5401phy_dsp(tp);
12365 } 12386 }
12366 12387
12367 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 12388 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12368 tp->link_config.advertising = 12389 tp->link_config.advertising =
12369 (ADVERTISED_1000baseT_Half | 12390 (ADVERTISED_1000baseT_Half |
12370 ADVERTISED_1000baseT_Full | 12391 ADVERTISED_1000baseT_Full |
12371 ADVERTISED_Autoneg | 12392 ADVERTISED_Autoneg |
12372 ADVERTISED_FIBRE); 12393 ADVERTISED_FIBRE);
12373 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 12394 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12374 tp->link_config.advertising &= 12395 tp->link_config.advertising &=
12375 ~(ADVERTISED_1000baseT_Half | 12396 ~(ADVERTISED_1000baseT_Half |
12376 ADVERTISED_1000baseT_Full); 12397 ADVERTISED_1000baseT_Full);
@@ -12699,6 +12720,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12699{ 12720{
12700 int vlen; 12721 int vlen;
12701 u32 apedata; 12722 u32 apedata;
12723 char *fwtype;
12702 12724
12703 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || 12725 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12704 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 12726 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
@@ -12714,9 +12736,15 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12714 12736
12715 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 12737 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12716 12738
12739 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
12740 fwtype = "NCSI";
12741 else
12742 fwtype = "DASH";
12743
12717 vlen = strlen(tp->fw_ver); 12744 vlen = strlen(tp->fw_ver);
12718 12745
12719 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d", 12746 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
12747 fwtype,
12720 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 12748 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12721 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 12749 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12722 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 12750 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
@@ -12760,6 +12788,13 @@ done:
12760 12788
12761static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 12789static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12762 12790
12791static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
12792{
12793#if TG3_VLAN_TAG_USED
12794 dev->vlan_features |= flags;
12795#endif
12796}
12797
12763static int __devinit tg3_get_invariants(struct tg3 *tp) 12798static int __devinit tg3_get_invariants(struct tg3 *tp)
12764{ 12799{
12765 static struct pci_device_id write_reorder_chipsets[] = { 12800 static struct pci_device_id write_reorder_chipsets[] = {
@@ -12804,7 +12839,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12804 12839
12805 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 12840 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12806 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 12841 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12807 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) 12842 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12843 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12808 pci_read_config_dword(tp->pdev, 12844 pci_read_config_dword(tp->pdev,
12809 TG3PCI_GEN2_PRODID_ASICREV, 12845 TG3PCI_GEN2_PRODID_ASICREV,
12810 &prod_id_asic_rev); 12846 &prod_id_asic_rev);
@@ -12962,6 +12998,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 12998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12963 tp->pdev_peer = tg3_find_peer(tp); 12999 tp->pdev_peer = tg3_find_peer(tp);
12964 13000
13001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13003 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13004 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13005
12965 /* Intentionally exclude ASIC_REV_5906 */ 13006 /* Intentionally exclude ASIC_REV_5906 */
12966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
@@ -12969,8 +13010,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13013 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
12973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12974 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; 13014 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12975 13015
12976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 13016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
@@ -12990,16 +13030,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12990 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) 13030 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12991 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; 13031 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12992 else { 13032 else {
13033 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13034
12993 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 13035 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12994 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12995 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 13036 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12996 tp->dev->features |= NETIF_F_IPV6_CSUM; 13037 features |= NETIF_F_IPV6_CSUM;
12997 tp->dev->features |= NETIF_F_GRO; 13038 tp->dev->features |= features;
13039 vlan_features_add(tp->dev, features);
12998 } 13040 }
12999 13041
13000 /* Determine TSO capabilities */ 13042 /* Determine TSO capabilities */
13001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13043 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13003 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13044 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13004 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13045 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13005 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13035,14 +13076,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13035 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 13076 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13036 } 13077 }
13037 13078
13038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13079 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13040 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 13080 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13041 tp->irq_max = TG3_IRQ_MAX_VECS; 13081 tp->irq_max = TG3_IRQ_MAX_VECS;
13042 } 13082 }
13043 } 13083 }
13044 13084
13045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13047 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; 13088 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13048 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { 13089 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
@@ -13050,8 +13091,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13050 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13091 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13051 } 13092 }
13052 13093
13053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13094 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13055 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13095 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13056 13096
13057 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13097 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13242,7 +13282,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13242 * APE register and memory space. 13282 * APE register and memory space.
13243 */ 13283 */
13244 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 13284 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13245 PCISTATE_ALLOW_APE_SHMEM_WR; 13285 PCISTATE_ALLOW_APE_SHMEM_WR |
13286 PCISTATE_ALLOW_APE_PSPACE_WR;
13246 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 13287 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13247 pci_state_reg); 13288 pci_state_reg);
13248 } 13289 }
@@ -13251,8 +13292,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13254 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13295 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13255 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13256 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 13296 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13257 13297
13258 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). 13298 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
@@ -13310,40 +13350,39 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13310 } 13350 }
13311 13351
13312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13313 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 13353 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13314 13354
13315 /* A few boards don't want Ethernet@WireSpeed phy feature */ 13355 /* A few boards don't want Ethernet@WireSpeed phy feature */
13316 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || 13356 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13317 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && 13357 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13318 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && 13358 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13319 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || 13359 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13320 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) || 13360 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13321 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 13361 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13322 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; 13362 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13323 13363
13324 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || 13364 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13325 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) 13365 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13326 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; 13366 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13327 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 13367 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13328 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 13368 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13329 13369
13330 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 13370 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13331 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && 13371 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13332 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 13372 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13333 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && 13373 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13334 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 13374 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
13335 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
13336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13375 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13377 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 13378 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13340 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 13379 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13341 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 13380 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13342 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 13381 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13343 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 13382 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13344 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; 13383 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13345 } else 13384 } else
13346 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 13385 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13347 } 13386 }
13348 13387
13349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 13388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
@@ -13372,8 +13411,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13372 return err; 13411 return err;
13373 13412
13374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 13413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13375 (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 || 13414 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13376 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
13377 return -ENOTSUPP; 13415 return -ENOTSUPP;
13378 13416
13379 /* Initialize data/descriptor byte/word swapping. */ 13417 /* Initialize data/descriptor byte/word swapping. */
@@ -13457,8 +13495,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13457 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || 13495 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13458 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 13496 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13459 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 13497 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13460 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) 13498 (tp->phy_flags & TG3_PHYFLG_IS_FET))
13461 tp->tg3_flags |= TG3_FLAG_10_100_ONLY; 13499 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
13462 13500
13463 err = tg3_phy_probe(tp); 13501 err = tg3_phy_probe(tp);
13464 if (err) { 13502 if (err) {
@@ -13470,13 +13508,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13470 tg3_read_vpd(tp); 13508 tg3_read_vpd(tp);
13471 tg3_read_fw_ver(tp); 13509 tg3_read_fw_ver(tp);
13472 13510
13473 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 13511 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
13474 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; 13512 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13475 } else { 13513 } else {
13476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) 13514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13477 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; 13515 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13478 else 13516 else
13479 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; 13517 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13480 } 13518 }
13481 13519
13482 /* 5700 {AX,BX} chips have a broken status block link 13520 /* 5700 {AX,BX} chips have a broken status block link
@@ -13494,13 +13532,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13494 */ 13532 */
13495 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 13533 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13497 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 13535 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
13498 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | 13536 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13499 TG3_FLAG_USE_LINKCHG_REG); 13537 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13500 } 13538 }
13501 13539
13502 /* For all SERDES we poll the MAC status register. */ 13540 /* For all SERDES we poll the MAC status register. */
13503 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 13541 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13504 tp->tg3_flags |= TG3_FLAG_POLL_SERDES; 13542 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13505 else 13543 else
13506 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13544 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
@@ -13580,9 +13618,12 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
13580 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 13618 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13581 else 13619 else
13582 tg3_nvram_unlock(tp); 13620 tg3_nvram_unlock(tp);
13583 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 13621 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13584 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC) 13622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13623 if (PCI_FUNC(tp->pdev->devfn) & 1)
13585 mac_offset = 0xcc; 13624 mac_offset = 0xcc;
13625 if (PCI_FUNC(tp->pdev->devfn) > 1)
13626 mac_offset += 0x18c;
13586 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13627 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13587 mac_offset = 0x10; 13628 mac_offset = 0x10;
13588 13629
@@ -13667,8 +13708,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13667#endif 13708#endif
13668#endif 13709#endif
13669 13710
13670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13711 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13672 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 13712 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13673 goto out; 13713 goto out;
13674 } 13714 }
@@ -13879,8 +13919,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13879 13919
13880 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13920 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13881 13921
13882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13922 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13884 goto out; 13923 goto out;
13885 13924
13886 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13925 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
@@ -14070,7 +14109,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
14070 tp->link_config.autoneg = AUTONEG_ENABLE; 14109 tp->link_config.autoneg = AUTONEG_ENABLE;
14071 tp->link_config.active_speed = SPEED_INVALID; 14110 tp->link_config.active_speed = SPEED_INVALID;
14072 tp->link_config.active_duplex = DUPLEX_INVALID; 14111 tp->link_config.active_duplex = DUPLEX_INVALID;
14073 tp->link_config.phy_is_low_power = 0;
14074 tp->link_config.orig_speed = SPEED_INVALID; 14112 tp->link_config.orig_speed = SPEED_INVALID;
14075 tp->link_config.orig_duplex = DUPLEX_INVALID; 14113 tp->link_config.orig_duplex = DUPLEX_INVALID;
14076 tp->link_config.orig_autoneg = AUTONEG_INVALID; 14114 tp->link_config.orig_autoneg = AUTONEG_INVALID;
@@ -14078,8 +14116,7 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
14078 14116
14079static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14117static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14080{ 14118{
14081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 14119 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14083 tp->bufmgr_config.mbuf_read_dma_low_water = 14120 tp->bufmgr_config.mbuf_read_dma_low_water =
14084 DEFAULT_MB_RDMA_LOW_WATER_5705; 14121 DEFAULT_MB_RDMA_LOW_WATER_5705;
14085 tp->bufmgr_config.mbuf_mac_rx_low_water = 14122 tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14156,6 +14193,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
14156 case TG3_PHY_ID_BCM5718C: return "5718C"; 14193 case TG3_PHY_ID_BCM5718C: return "5718C";
14157 case TG3_PHY_ID_BCM5718S: return "5718S"; 14194 case TG3_PHY_ID_BCM5718S: return "5718S";
14158 case TG3_PHY_ID_BCM57765: return "57765"; 14195 case TG3_PHY_ID_BCM57765: return "57765";
14196 case TG3_PHY_ID_BCM5719C: return "5719C";
14159 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 14197 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14160 case 0: return "serdes"; 14198 case 0: return "serdes";
14161 default: return "unknown"; 14199 default: return "unknown";
@@ -14261,7 +14299,7 @@ static const struct net_device_ops tg3_netdev_ops = {
14261 .ndo_open = tg3_open, 14299 .ndo_open = tg3_open,
14262 .ndo_stop = tg3_close, 14300 .ndo_stop = tg3_close,
14263 .ndo_start_xmit = tg3_start_xmit, 14301 .ndo_start_xmit = tg3_start_xmit,
14264 .ndo_get_stats = tg3_get_stats, 14302 .ndo_get_stats64 = tg3_get_stats64,
14265 .ndo_validate_addr = eth_validate_addr, 14303 .ndo_validate_addr = eth_validate_addr,
14266 .ndo_set_multicast_list = tg3_set_rx_mode, 14304 .ndo_set_multicast_list = tg3_set_rx_mode,
14267 .ndo_set_mac_address = tg3_set_mac_addr, 14305 .ndo_set_mac_address = tg3_set_mac_addr,
@@ -14280,7 +14318,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14280 .ndo_open = tg3_open, 14318 .ndo_open = tg3_open,
14281 .ndo_stop = tg3_close, 14319 .ndo_stop = tg3_close,
14282 .ndo_start_xmit = tg3_start_xmit_dma_bug, 14320 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14283 .ndo_get_stats = tg3_get_stats, 14321 .ndo_get_stats64 = tg3_get_stats64,
14284 .ndo_validate_addr = eth_validate_addr, 14322 .ndo_validate_addr = eth_validate_addr,
14285 .ndo_set_multicast_list = tg3_set_rx_mode, 14323 .ndo_set_multicast_list = tg3_set_rx_mode,
14286 .ndo_set_mac_address = tg3_set_mac_addr, 14324 .ndo_set_mac_address = tg3_set_mac_addr,
@@ -14404,7 +14442,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14404 } 14442 }
14405 14443
14406 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 14444 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14407 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) 14445 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
14446 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14408 dev->netdev_ops = &tg3_netdev_ops; 14447 dev->netdev_ops = &tg3_netdev_ops;
14409 else 14448 else
14410 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14449 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14468,20 +14507,25 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14468 * is off by default, but can be enabled using ethtool. 14507 * is off by default, but can be enabled using ethtool.
14469 */ 14508 */
14470 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && 14509 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14471 (dev->features & NETIF_F_IP_CSUM)) 14510 (dev->features & NETIF_F_IP_CSUM)) {
14472 dev->features |= NETIF_F_TSO; 14511 dev->features |= NETIF_F_TSO;
14473 14512 vlan_features_add(dev, NETIF_F_TSO);
14513 }
14474 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || 14514 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14475 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { 14515 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14476 if (dev->features & NETIF_F_IPV6_CSUM) 14516 if (dev->features & NETIF_F_IPV6_CSUM) {
14477 dev->features |= NETIF_F_TSO6; 14517 dev->features |= NETIF_F_TSO6;
14518 vlan_features_add(dev, NETIF_F_TSO6);
14519 }
14478 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || 14520 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14480 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14522 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14481 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14523 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 14525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14484 dev->features |= NETIF_F_TSO_ECN; 14526 dev->features |= NETIF_F_TSO_ECN;
14527 vlan_features_add(dev, NETIF_F_TSO_ECN);
14528 }
14485 } 14529 }
14486 14530
14487 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14531 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
@@ -14597,24 +14641,31 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14597 tg3_bus_string(tp, str), 14641 tg3_bus_string(tp, str),
14598 dev->dev_addr); 14642 dev->dev_addr);
14599 14643
14600 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 14644 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
14601 struct phy_device *phydev; 14645 struct phy_device *phydev;
14602 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 14646 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14603 netdev_info(dev, 14647 netdev_info(dev,
14604 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14648 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14605 phydev->drv->name, dev_name(&phydev->dev)); 14649 phydev->drv->name, dev_name(&phydev->dev));
14606 } else 14650 } else {
14651 char *ethtype;
14652
14653 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
14654 ethtype = "10/100Base-TX";
14655 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
14656 ethtype = "1000Base-SX";
14657 else
14658 ethtype = "10/100/1000Base-T";
14659
14607 netdev_info(dev, "attached PHY is %s (%s Ethernet) " 14660 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14608 "(WireSpeed[%d])\n", tg3_phy_string(tp), 14661 "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
14609 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : 14662 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
14610 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : 14663 }
14611 "10/100/1000Base-T")),
14612 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14613 14664
14614 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", 14665 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14615 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, 14666 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14616 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, 14667 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14617 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, 14668 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
14618 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, 14669 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14619 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 14670 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14620 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", 14671 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ce9c4918c318..4937bd190964 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -53,6 +53,7 @@
53#define TG3PCI_DEVICE_TIGON3_57765 0x16b4 53#define TG3PCI_DEVICE_TIGON3_57765 0x16b4
54#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 54#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
55#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 55#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
56#define TG3PCI_DEVICE_TIGON3_5719 0x1657
56/* 0x04 --> 0x2c unused */ 57/* 0x04 --> 0x2c unused */
57#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM 58#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
58#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 59#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -160,6 +161,7 @@
160#define ASIC_REV_57780 0x57780 161#define ASIC_REV_57780 0x57780
161#define ASIC_REV_5717 0x5717 162#define ASIC_REV_5717 0x5717
162#define ASIC_REV_57765 0x57785 163#define ASIC_REV_57765 0x57785
164#define ASIC_REV_5719 0x5719
163#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 165#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
164#define CHIPREV_5700_AX 0x70 166#define CHIPREV_5700_AX 0x70
165#define CHIPREV_5700_BX 0x71 167#define CHIPREV_5700_BX 0x71
@@ -231,6 +233,7 @@
231#define PCISTATE_RETRY_SAME_DMA 0x00002000 233#define PCISTATE_RETRY_SAME_DMA 0x00002000
232#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 234#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000
233#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 235#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000
236#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000
234#define TG3PCI_CLOCK_CTRL 0x00000074 237#define TG3PCI_CLOCK_CTRL 0x00000074
235#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 238#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200
236#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 239#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400
@@ -468,6 +471,7 @@
468#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010 471#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010
469#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 472#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
470#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 473#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
474#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100
471#define MAC_TX_STATUS 0x00000460 475#define MAC_TX_STATUS 0x00000460
472#define TX_STATUS_XOFFED 0x00000001 476#define TX_STATUS_XOFFED 0x00000001
473#define TX_STATUS_SENT_XOFF 0x00000002 477#define TX_STATUS_SENT_XOFF 0x00000002
@@ -1071,10 +1075,8 @@
1071#define TG3_CPMU_HST_ACC 0x0000361c 1075#define TG3_CPMU_HST_ACC 0x0000361c
1072#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000 1076#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000
1073#define CPMU_HST_ACC_MACCLK_6_25 0x00130000 1077#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
1074/* 0x3620 --> 0x362c unused */ 1078/* 0x3620 --> 0x3630 unused */
1075 1079
1076#define TG3_CPMU_STATUS 0x0000362c
1077#define TG3_CPMU_STATUS_PCIE_FUNC 0x20000000
1078#define TG3_CPMU_CLCK_STAT 0x00003630 1080#define TG3_CPMU_CLCK_STAT 0x00003630
1079#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 1081#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
1080#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 1082#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -1842,6 +1844,10 @@
1842#define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080 1844#define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080
1843/* 0x7d58 --> 0x7e70 unused */ 1845/* 0x7d58 --> 0x7e70 unused */
1844 1846
1847#define TG3_PCIE_PHY_TSTCTL 0x00007e2c
1848#define TG3_PCIE_PHY_TSTCTL_PCIE10 0x00000040
1849#define TG3_PCIE_PHY_TSTCTL_PSCRAM 0x00000020
1850
1845#define TG3_PCIE_EIDLE_DELAY 0x00007e70 1851#define TG3_PCIE_EIDLE_DELAY 0x00007e70
1846#define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f 1852#define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f
1847#define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c 1853#define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c
@@ -2030,31 +2036,9 @@
2030 2036
2031 2037
2032/* Currently this is fixed. */ 2038/* Currently this is fixed. */
2033#define TG3_PHY_PCIE_ADDR 0x00
2034#define TG3_PHY_MII_ADDR 0x01 2039#define TG3_PHY_MII_ADDR 0x01
2035 2040
2036 2041
2037/*** Tigon3 specific PHY PCIE registers. ***/
2038
2039#define TG3_PCIEPHY_BLOCK_ADDR 0x1f
2040#define TG3_PCIEPHY_XGXS_BLK1 0x0801
2041#define TG3_PCIEPHY_TXB_BLK 0x0861
2042#define TG3_PCIEPHY_BLOCK_SHIFT 4
2043
2044/* TG3_PCIEPHY_TXB_BLK */
2045#define TG3_PCIEPHY_TX0CTRL1 0x15
2046#define TG3_PCIEPHY_TX0CTRL1_TXOCM 0x0003
2047#define TG3_PCIEPHY_TX0CTRL1_RDCTL 0x0008
2048#define TG3_PCIEPHY_TX0CTRL1_TXCMV 0x0030
2049#define TG3_PCIEPHY_TX0CTRL1_TKSEL 0x0040
2050#define TG3_PCIEPHY_TX0CTRL1_NB_EN 0x0400
2051
2052/* TG3_PCIEPHY_XGXS_BLK1 */
2053#define TG3_PCIEPHY_PWRMGMT4 0x1a
2054#define TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN 0x0038
2055#define TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN 0x4000
2056
2057
2058/*** Tigon3 specific PHY MII registers. ***/ 2042/*** Tigon3 specific PHY MII registers. ***/
2059#define TG3_BMCR_SPEED1000 0x0040 2043#define TG3_BMCR_SPEED1000 0x0040
2060 2044
@@ -2073,8 +2057,9 @@
2073#define MII_TG3_EXT_STAT 0x11 /* Extended status register */ 2057#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
2074#define MII_TG3_EXT_STAT_LPASS 0x0100 2058#define MII_TG3_EXT_STAT_LPASS 0x0100
2075 2059
2060#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */
2076#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */ 2061#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */
2077 2062#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */
2078#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */ 2063#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */
2079 2064
2080#define MII_TG3_DSP_TAP1 0x0001 2065#define MII_TG3_DSP_TAP1 0x0001
@@ -2082,6 +2067,7 @@
2082#define MII_TG3_DSP_AADJ1CH0 0x001f 2067#define MII_TG3_DSP_AADJ1CH0 0x001f
2083#define MII_TG3_DSP_AADJ1CH3 0x601f 2068#define MII_TG3_DSP_AADJ1CH3 0x601f
2084#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 2069#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
2070#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01
2085#define MII_TG3_DSP_EXP8 0x0f08 2071#define MII_TG3_DSP_EXP8 0x0f08
2086#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 2072#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
2087#define MII_TG3_DSP_EXP8_AEDW 0x0200 2073#define MII_TG3_DSP_EXP8_AEDW 0x0200
@@ -2177,6 +2163,8 @@
2177/* APE shared memory. Accessible through BAR1 */ 2163/* APE shared memory. Accessible through BAR1 */
2178#define TG3_APE_FW_STATUS 0x400c 2164#define TG3_APE_FW_STATUS 0x400c
2179#define APE_FW_STATUS_READY 0x00000100 2165#define APE_FW_STATUS_READY 0x00000100
2166#define TG3_APE_FW_FEATURES 0x4010
2167#define TG3_APE_FW_FEATURE_NCSI 0x00000002
2180#define TG3_APE_FW_VERSION 0x4018 2168#define TG3_APE_FW_VERSION 0x4018
2181#define APE_FW_VERSION_MAJMSK 0xff000000 2169#define APE_FW_VERSION_MAJMSK 0xff000000
2182#define APE_FW_VERSION_MAJSFT 24 2170#define APE_FW_VERSION_MAJSFT 24
@@ -2191,7 +2179,9 @@
2191#define APE_HOST_SEG_LEN_MAGIC 0x0000001c 2179#define APE_HOST_SEG_LEN_MAGIC 0x0000001c
2192#define TG3_APE_HOST_INIT_COUNT 0x4208 2180#define TG3_APE_HOST_INIT_COUNT 0x4208
2193#define TG3_APE_HOST_DRIVER_ID 0x420c 2181#define TG3_APE_HOST_DRIVER_ID 0x420c
2194#define APE_HOST_DRIVER_ID_MAGIC 0xf0035100 2182#define APE_HOST_DRIVER_ID_LINUX 0xf0000000
2183#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \
2184 (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8)
2195#define TG3_APE_HOST_BEHAVIOR 0x4210 2185#define TG3_APE_HOST_BEHAVIOR 0x4210
2196#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 2186#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001
2197#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214 2187#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214
@@ -2209,6 +2199,11 @@
2209#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 2199#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000
2210#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 2200#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
2211 2201
2202#define TG3_APE_PER_LOCK_REQ 0x8400
2203#define APE_LOCK_PER_REQ_DRIVER 0x00001000
2204#define TG3_APE_PER_LOCK_GRANT 0x8420
2205#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
2206
2212/* APE convenience enumerations. */ 2207/* APE convenience enumerations. */
2213#define TG3_APE_LOCK_GRC 1 2208#define TG3_APE_LOCK_GRC 1
2214#define TG3_APE_LOCK_MEM 4 2209#define TG3_APE_LOCK_MEM 4
@@ -2539,7 +2534,6 @@ struct tg3_link_config {
2539 /* When we go in and out of low power mode we need 2534 /* When we go in and out of low power mode we need
2540 * to swap with this state. 2535 * to swap with this state.
2541 */ 2536 */
2542 int phy_is_low_power;
2543 u16 orig_speed; 2537 u16 orig_speed;
2544 u8 orig_duplex; 2538 u8 orig_duplex;
2545 u8 orig_autoneg; 2539 u8 orig_autoneg;
@@ -2765,8 +2759,8 @@ struct tg3 {
2765 2759
2766 2760
2767 /* begin "everything else" cacheline(s) section */ 2761 /* begin "everything else" cacheline(s) section */
2768 struct net_device_stats net_stats; 2762 struct rtnl_link_stats64 net_stats;
2769 struct net_device_stats net_stats_prev; 2763 struct rtnl_link_stats64 net_stats_prev;
2770 struct tg3_ethtool_stats estats; 2764 struct tg3_ethtool_stats estats;
2771 struct tg3_ethtool_stats estats_prev; 2765 struct tg3_ethtool_stats estats_prev;
2772 2766
@@ -2780,7 +2774,6 @@ struct tg3 {
2780#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 2774#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
2781#define TG3_FLAG_RX_CHECKSUMS 0x00000004 2775#define TG3_FLAG_RX_CHECKSUMS 0x00000004
2782#define TG3_FLAG_USE_LINKCHG_REG 0x00000008 2776#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
2783#define TG3_FLAG_USE_MI_INTERRUPT 0x00000010
2784#define TG3_FLAG_ENABLE_ASF 0x00000020 2777#define TG3_FLAG_ENABLE_ASF 0x00000020
2785#define TG3_FLAG_ASPM_WORKAROUND 0x00000040 2778#define TG3_FLAG_ASPM_WORKAROUND 0x00000040
2786#define TG3_FLAG_POLL_SERDES 0x00000080 2779#define TG3_FLAG_POLL_SERDES 0x00000080
@@ -2802,7 +2795,6 @@ struct tg3 {
2802#define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000 2795#define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000
2803#define TG3_FLAG_WOL_CAP 0x00400000 2796#define TG3_FLAG_WOL_CAP 0x00400000
2804#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 2797#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2805#define TG3_FLAG_10_100_ONLY 0x01000000
2806#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2798#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2807#define TG3_FLAG_CPMU_PRESENT 0x04000000 2799#define TG3_FLAG_CPMU_PRESENT 0x04000000
2808#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 2800#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
@@ -2813,22 +2805,15 @@ struct tg3 {
2813 u32 tg3_flags2; 2805 u32 tg3_flags2;
2814#define TG3_FLG2_RESTART_TIMER 0x00000001 2806#define TG3_FLG2_RESTART_TIMER 0x00000001
2815#define TG3_FLG2_TSO_BUG 0x00000002 2807#define TG3_FLG2_TSO_BUG 0x00000002
2816#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004
2817#define TG3_FLG2_IS_5788 0x00000008 2808#define TG3_FLG2_IS_5788 0x00000008
2818#define TG3_FLG2_MAX_RXPEND_64 0x00000010 2809#define TG3_FLG2_MAX_RXPEND_64 0x00000010
2819#define TG3_FLG2_TSO_CAPABLE 0x00000020 2810#define TG3_FLG2_TSO_CAPABLE 0x00000020
2820#define TG3_FLG2_PHY_ADC_BUG 0x00000040
2821#define TG3_FLG2_PHY_5704_A0_BUG 0x00000080
2822#define TG3_FLG2_PHY_BER_BUG 0x00000100
2823#define TG3_FLG2_PCI_EXPRESS 0x00000200 2811#define TG3_FLG2_PCI_EXPRESS 0x00000200
2824#define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400 2812#define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400
2825#define TG3_FLG2_HW_AUTONEG 0x00000800 2813#define TG3_FLG2_HW_AUTONEG 0x00000800
2826#define TG3_FLG2_IS_NIC 0x00001000 2814#define TG3_FLG2_IS_NIC 0x00001000
2827#define TG3_FLG2_PHY_SERDES 0x00002000
2828#define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000
2829#define TG3_FLG2_FLASH 0x00008000 2815#define TG3_FLG2_FLASH 0x00008000
2830#define TG3_FLG2_HW_TSO_1 0x00010000 2816#define TG3_FLG2_HW_TSO_1 0x00010000
2831#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
2832#define TG3_FLG2_5705_PLUS 0x00040000 2817#define TG3_FLG2_5705_PLUS 0x00040000
2833#define TG3_FLG2_5750_PLUS 0x00080000 2818#define TG3_FLG2_5750_PLUS 0x00080000
2834#define TG3_FLG2_HW_TSO_3 0x00100000 2819#define TG3_FLG2_HW_TSO_3 0x00100000
@@ -2836,10 +2821,6 @@ struct tg3 {
2836#define TG3_FLG2_USING_MSIX 0x00400000 2821#define TG3_FLG2_USING_MSIX 0x00400000
2837#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ 2822#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
2838 TG3_FLG2_USING_MSIX) 2823 TG3_FLG2_USING_MSIX)
2839#define TG3_FLG2_MII_SERDES 0x00800000
2840#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \
2841 TG3_FLG2_MII_SERDES)
2842#define TG3_FLG2_PARALLEL_DETECT 0x01000000
2843#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2824#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2844#define TG3_FLG2_5780_CLASS 0x04000000 2825#define TG3_FLG2_5780_CLASS 0x04000000
2845#define TG3_FLG2_HW_TSO_2 0x08000000 2826#define TG3_FLG2_HW_TSO_2 0x08000000
@@ -2847,9 +2828,7 @@ struct tg3 {
2847 TG3_FLG2_HW_TSO_2 | \ 2828 TG3_FLG2_HW_TSO_2 | \
2848 TG3_FLG2_HW_TSO_3) 2829 TG3_FLG2_HW_TSO_3)
2849#define TG3_FLG2_1SHOT_MSI 0x10000000 2830#define TG3_FLG2_1SHOT_MSI 0x10000000
2850#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2851#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 2831#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
2852#define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000
2853 u32 tg3_flags3; 2832 u32 tg3_flags3;
2854#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 2833#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
2855#define TG3_FLG3_ENABLE_APE 0x00000002 2834#define TG3_FLG3_ENABLE_APE 0x00000002
@@ -2857,15 +2836,12 @@ struct tg3 {
2857#define TG3_FLG3_5701_DMA_BUG 0x00000008 2836#define TG3_FLG3_5701_DMA_BUG 0x00000008
2858#define TG3_FLG3_USE_PHYLIB 0x00000010 2837#define TG3_FLG3_USE_PHYLIB 0x00000010
2859#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2838#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2860#define TG3_FLG3_PHY_CONNECTED 0x00000080
2861#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100 2839#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
2862#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2840#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2863#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400 2841#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
2864#define TG3_FLG3_CLKREQ_BUG 0x00000800 2842#define TG3_FLG3_CLKREQ_BUG 0x00000800
2865#define TG3_FLG3_PHY_ENABLE_APD 0x00001000
2866#define TG3_FLG3_5755_PLUS 0x00002000 2843#define TG3_FLG3_5755_PLUS 0x00002000
2867#define TG3_FLG3_NO_NVRAM 0x00004000 2844#define TG3_FLG3_NO_NVRAM 0x00004000
2868#define TG3_FLG3_PHY_IS_FET 0x00010000
2869#define TG3_FLG3_ENABLE_RSS 0x00020000 2845#define TG3_FLG3_ENABLE_RSS 0x00020000
2870#define TG3_FLG3_ENABLE_TSS 0x00040000 2846#define TG3_FLG3_ENABLE_TSS 0x00040000
2871#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000 2847#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
@@ -2873,6 +2849,7 @@ struct tg3 {
2873#define TG3_FLG3_SHORT_DMA_BUG 0x00200000 2849#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2874#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 2850#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2875#define TG3_FLG3_L1PLLPD_EN 0x00800000 2851#define TG3_FLG3_L1PLLPD_EN 0x00800000
2852#define TG3_FLG3_5717_PLUS 0x01000000
2876 2853
2877 struct timer_list timer; 2854 struct timer_list timer;
2878 u16 timer_counter; 2855 u16 timer_counter;
@@ -2942,6 +2919,7 @@ struct tg3 {
2942#define TG3_PHY_ID_BCM5718C 0x5c0d8a00 2919#define TG3_PHY_ID_BCM5718C 0x5c0d8a00
2943#define TG3_PHY_ID_BCM5718S 0xbc050ff0 2920#define TG3_PHY_ID_BCM5718S 0xbc050ff0
2944#define TG3_PHY_ID_BCM57765 0x5c0d8a40 2921#define TG3_PHY_ID_BCM57765 0x5c0d8a40
2922#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
2945#define TG3_PHY_ID_BCM5906 0xdc00ac40 2923#define TG3_PHY_ID_BCM5906 0xdc00ac40
2946#define TG3_PHY_ID_BCM8002 0x60010140 2924#define TG3_PHY_ID_BCM8002 0x60010140
2947#define TG3_PHY_ID_INVALID 0xffffffff 2925#define TG3_PHY_ID_INVALID 0xffffffff
@@ -2965,7 +2943,29 @@ struct tg3 {
2965 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ 2943 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
2966 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ 2944 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
2967 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ 2945 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
2968 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002) 2946 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
2947 (X) == TG3_PHY_ID_BCM8002)
2948
2949 u32 phy_flags;
2950#define TG3_PHYFLG_IS_LOW_POWER 0x00000001
2951#define TG3_PHYFLG_IS_CONNECTED 0x00000002
2952#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004
2953#define TG3_PHYFLG_PHY_SERDES 0x00000010
2954#define TG3_PHYFLG_MII_SERDES 0x00000020
2955#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \
2956 TG3_PHYFLG_MII_SERDES)
2957#define TG3_PHYFLG_IS_FET 0x00000040
2958#define TG3_PHYFLG_10_100_ONLY 0x00000080
2959#define TG3_PHYFLG_ENABLE_APD 0x00000100
2960#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200
2961#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400
2962#define TG3_PHYFLG_JITTER_BUG 0x00000800
2963#define TG3_PHYFLG_ADJUST_TRIM 0x00001000
2964#define TG3_PHYFLG_ADC_BUG 0x00002000
2965#define TG3_PHYFLG_5704_A0_BUG 0x00004000
2966#define TG3_PHYFLG_BER_BUG 0x00008000
2967#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
2968#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
2969 2969
2970 u32 led_ctrl; 2970 u32 led_ctrl;
2971 u32 phy_otp; 2971 u32 phy_otp;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 06b552fca63d..5efa57757a2c 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -262,13 +262,13 @@ struct de_srom_media_block {
262 u16 csr13; 262 u16 csr13;
263 u16 csr14; 263 u16 csr14;
264 u16 csr15; 264 u16 csr15;
265} __attribute__((packed)); 265} __packed;
266 266
267struct de_srom_info_leaf { 267struct de_srom_info_leaf {
268 u16 default_media; 268 u16 default_media;
269 u8 n_blocks; 269 u8 n_blocks;
270 u8 unused; 270 u8 unused;
271} __attribute__((packed)); 271} __packed;
272 272
273struct de_desc { 273struct de_desc {
274 __le32 opts1; 274 __le32 opts1;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 29e6c63d39fd..0bc4f3030a80 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -589,7 +589,7 @@ static int dmfe_open(struct DEVICE *dev)
589 db->dm910x_chk_mode = 1; /* Enter the check mode */ 589 db->dm910x_chk_mode = 1; /* Enter the check mode */
590 } 590 }
591 591
592 /* Initilize DM910X board */ 592 /* Initialize DM910X board */
593 dmfe_init_dm910x(dev); 593 dmfe_init_dm910x(dev);
594 594
595 /* Active System Interface */ 595 /* Active System Interface */
@@ -606,9 +606,9 @@ static int dmfe_open(struct DEVICE *dev)
606} 606}
607 607
608 608
609/* Initilize DM910X board 609/* Initialize DM910X board
610 * Reset DM910X board 610 * Reset DM910X board
611 * Initilize TX/Rx descriptor chain structure 611 * Initialize TX/Rx descriptor chain structure
612 * Send the set-up frame 612 * Send the set-up frame
613 * Enable Tx/Rx machine 613 * Enable Tx/Rx machine
614 */ 614 */
@@ -649,7 +649,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
649 if ( !(db->media_mode & DMFE_AUTO) ) 649 if ( !(db->media_mode & DMFE_AUTO) )
650 db->op_mode = db->media_mode; /* Force Mode */ 650 db->op_mode = db->media_mode; /* Force Mode */
651 651
652 /* Initiliaze Transmit/Receive decriptor and CR3/4 */ 652 /* Initialize Transmit/Receive decriptor and CR3/4 */
653 dmfe_descriptor_init(db, ioaddr); 653 dmfe_descriptor_init(db, ioaddr);
654 654
655 /* Init CR6 to program DM910x operation */ 655 /* Init CR6 to program DM910x operation */
@@ -1288,7 +1288,7 @@ static void dmfe_timer(unsigned long data)
1288 * Stop DM910X board 1288 * Stop DM910X board
1289 * Free Tx/Rx allocated memory 1289 * Free Tx/Rx allocated memory
1290 * Reset DM910X board 1290 * Reset DM910X board
1291 * Re-initilize DM910X board 1291 * Re-initialize DM910X board
1292 */ 1292 */
1293 1293
1294static void dmfe_dynamic_reset(struct DEVICE *dev) 1294static void dmfe_dynamic_reset(struct DEVICE *dev)
@@ -1316,7 +1316,7 @@ static void dmfe_dynamic_reset(struct DEVICE *dev)
1316 netif_carrier_off(dev); 1316 netif_carrier_off(dev);
1317 db->wait_reset = 0; 1317 db->wait_reset = 0;
1318 1318
1319 /* Re-initilize DM910X board */ 1319 /* Re-initialize DM910X board */
1320 dmfe_init_dm910x(dev); 1320 dmfe_init_dm910x(dev);
1321 1321
1322 /* Restart upper layer interface */ 1322 /* Restart upper layer interface */
@@ -1447,7 +1447,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1447 1447
1448/* 1448/*
1449 * Send a setup frame for DM9132 1449 * Send a setup frame for DM9132
1450 * This setup frame initilize DM910X address filter mode 1450 * This setup frame initialize DM910X address filter mode
1451*/ 1451*/
1452 1452
1453static void dm9132_id_table(struct DEVICE *dev) 1453static void dm9132_id_table(struct DEVICE *dev)
@@ -1489,7 +1489,7 @@ static void dm9132_id_table(struct DEVICE *dev)
1489 1489
1490/* 1490/*
1491 * Send a setup frame for DM9102/DM9102A 1491 * Send a setup frame for DM9102/DM9102A
1492 * This setup frame initilize DM910X address filter mode 1492 * This setup frame initialize DM910X address filter mode
1493 */ 1493 */
1494 1494
1495static void send_filter_frame(struct DEVICE *dev) 1495static void send_filter_frame(struct DEVICE *dev)
@@ -2142,7 +2142,7 @@ static int dmfe_resume(struct pci_dev *pci_dev)
2142 pci_set_power_state(pci_dev, PCI_D0); 2142 pci_set_power_state(pci_dev, PCI_D0);
2143 pci_restore_state(pci_dev); 2143 pci_restore_state(pci_dev);
2144 2144
2145 /* Re-initilize DM910X board */ 2145 /* Re-initialize DM910X board */
2146 dmfe_init_dm910x(dev); 2146 dmfe_init_dm910x(dev);
2147 2147
2148 /* Disable WOL */ 2148 /* Disable WOL */
@@ -2196,7 +2196,7 @@ MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2196 2196
2197/* Description: 2197/* Description:
2198 * when user used insmod to add module, system invoked init_module() 2198 * when user used insmod to add module, system invoked init_module()
2199 * to initilize and register. 2199 * to initialize and register.
2200 */ 2200 */
2201 2201
2202static int __init dmfe_init_module(void) 2202static int __init dmfe_init_module(void)
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 6002e651b9ea..3031ed9c4a1a 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -120,8 +120,8 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
120 0x00, 0x06 /* ttm bit map */ 120 0x00, 0x06 /* ttm bit map */
121 }; 121 };
122 122
123 tp->mtable = (struct mediatable *) 123 tp->mtable = kmalloc(sizeof(struct mediatable) +
124 kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL); 124 sizeof(struct medialeaf), GFP_KERNEL);
125 125
126 if (tp->mtable == NULL) 126 if (tp->mtable == NULL)
127 return; /* Horrible, impossible failure. */ 127 return; /* Horrible, impossible failure. */
@@ -227,9 +227,9 @@ subsequent_board:
227 return; 227 return;
228 } 228 }
229 229
230 mtable = (struct mediatable *) 230 mtable = kmalloc(sizeof(struct mediatable) +
231 kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf), 231 count * sizeof(struct medialeaf),
232 GFP_KERNEL); 232 GFP_KERNEL);
233 if (mtable == NULL) 233 if (mtable == NULL)
234 return; /* Horrible, impossible failure. */ 234 return; /* Horrible, impossible failure. */
235 last_mediatable = tp->mtable = mtable; 235 last_mediatable = tp->mtable = mtable;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 0afa2d4f9472..e525875ed67d 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -20,6 +20,7 @@
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/ethtool.h>
23#include <linux/timer.h> 24#include <linux/timer.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
25#include <linux/pci.h> 26#include <linux/pci.h>
@@ -51,22 +52,23 @@ struct tulip_chip_table {
51 52
52 53
53enum tbl_flag { 54enum tbl_flag {
54 HAS_MII = 0x0001, 55 HAS_MII = 0x00001,
55 HAS_MEDIA_TABLE = 0x0002, 56 HAS_MEDIA_TABLE = 0x00002,
56 CSR12_IN_SROM = 0x0004, 57 CSR12_IN_SROM = 0x00004,
57 ALWAYS_CHECK_MII = 0x0008, 58 ALWAYS_CHECK_MII = 0x00008,
58 HAS_ACPI = 0x0010, 59 HAS_ACPI = 0x00010,
59 MC_HASH_ONLY = 0x0020, /* Hash-only multicast filter. */ 60 MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */
60 HAS_PNICNWAY = 0x0080, 61 HAS_PNICNWAY = 0x00080,
61 HAS_NWAY = 0x0040, /* Uses internal NWay xcvr. */ 62 HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */
62 HAS_INTR_MITIGATION = 0x0100, 63 HAS_INTR_MITIGATION = 0x00100,
63 IS_ASIX = 0x0200, 64 IS_ASIX = 0x00200,
64 HAS_8023X = 0x0400, 65 HAS_8023X = 0x00400,
65 COMET_MAC_ADDR = 0x0800, 66 COMET_MAC_ADDR = 0x00800,
66 HAS_PCI_MWI = 0x1000, 67 HAS_PCI_MWI = 0x01000,
67 HAS_PHY_IRQ = 0x2000, 68 HAS_PHY_IRQ = 0x02000,
68 HAS_SWAPPED_SEEPROM = 0x4000, 69 HAS_SWAPPED_SEEPROM = 0x04000,
69 NEEDS_FAKE_MEDIA_TABLE = 0x8000, 70 NEEDS_FAKE_MEDIA_TABLE = 0x08000,
71 COMET_PM = 0x10000,
70}; 72};
71 73
72 74
@@ -120,6 +122,11 @@ enum tulip_offsets {
120 CSR13 = 0x68, 122 CSR13 = 0x68,
121 CSR14 = 0x70, 123 CSR14 = 0x70,
122 CSR15 = 0x78, 124 CSR15 = 0x78,
125 CSR18 = 0x88,
126 CSR19 = 0x8c,
127 CSR20 = 0x90,
128 CSR27 = 0xAC,
129 CSR28 = 0xB0,
123}; 130};
124 131
125/* register offset and bits for CFDD PCI config reg */ 132/* register offset and bits for CFDD PCI config reg */
@@ -289,6 +296,30 @@ enum t21143_csr6_bits {
289 csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd), 296 csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
290}; 297};
291 298
299enum tulip_comet_csr13_bits {
300/* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they
301 * determine which link status transition wakes up if LSCE is
302 * enabled */
303 comet_csr13_linkoffe = (1 << 17),
304 comet_csr13_linkone = (1 << 16),
305 comet_csr13_wfre = (1 << 10),
306 comet_csr13_mpre = (1 << 9),
307 comet_csr13_lsce = (1 << 8),
308 comet_csr13_wfr = (1 << 2),
309 comet_csr13_mpr = (1 << 1),
310 comet_csr13_lsc = (1 << 0),
311};
312
313enum tulip_comet_csr18_bits {
314 comet_csr18_pmes_sticky = (1 << 24),
315 comet_csr18_pm_mode = (1 << 19),
316 comet_csr18_apm_mode = (1 << 18),
317 comet_csr18_d3a = (1 << 7)
318};
319
320enum tulip_comet_csr20_bits {
321 comet_csr20_pmes = (1 << 15),
322};
292 323
293/* Keep the ring sizes a power of two for efficiency. 324/* Keep the ring sizes a power of two for efficiency.
294 Making the Tx ring too large decreases the effectiveness of channel 325 Making the Tx ring too large decreases the effectiveness of channel
@@ -411,6 +442,7 @@ struct tulip_private {
411 unsigned int csr6; /* Current CSR6 control settings. */ 442 unsigned int csr6; /* Current CSR6 control settings. */
412 unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ 443 unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
413 void (*link_change) (struct net_device * dev, int csr5); 444 void (*link_change) (struct net_device * dev, int csr5);
445 struct ethtool_wolinfo wolinfo; /* WOL settings */
414 u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */ 446 u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */
415 u16 lpar; /* 21143 Link partner ability. */ 447 u16 lpar; /* 21143 Link partner ability. */
416 u16 advertising[4]; 448 u16 advertising[4];
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 254643ed945e..3a8d7efa2acf 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -30,7 +30,6 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/mii.h> 32#include <linux/mii.h>
33#include <linux/ethtool.h>
34#include <linux/crc32.h> 33#include <linux/crc32.h>
35#include <asm/unaligned.h> 34#include <asm/unaligned.h>
36#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -272,6 +271,7 @@ static void tulip_down(struct net_device *dev);
272static struct net_device_stats *tulip_get_stats(struct net_device *dev); 271static struct net_device_stats *tulip_get_stats(struct net_device *dev);
273static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 272static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
274static void set_rx_mode(struct net_device *dev); 273static void set_rx_mode(struct net_device *dev);
274static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
275#ifdef CONFIG_NET_POLL_CONTROLLER 275#ifdef CONFIG_NET_POLL_CONTROLLER
276static void poll_tulip(struct net_device *dev); 276static void poll_tulip(struct net_device *dev);
277#endif 277#endif
@@ -309,6 +309,11 @@ static void tulip_up(struct net_device *dev)
309 /* Wake the chip from sleep/snooze mode. */ 309 /* Wake the chip from sleep/snooze mode. */
310 tulip_set_power_state (tp, 0, 0); 310 tulip_set_power_state (tp, 0, 0);
311 311
312 /* Disable all WOL events */
313 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
314 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
315 tulip_set_wolopts(tp->pdev, 0);
316
312 /* On some chip revs we must set the MII/SYM port before the reset!? */ 317 /* On some chip revs we must set the MII/SYM port before the reset!? */
313 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) 318 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
314 iowrite32(0x00040000, ioaddr + CSR6); 319 iowrite32(0x00040000, ioaddr + CSR6);
@@ -345,8 +350,8 @@ static void tulip_up(struct net_device *dev)
345 } else if (tp->flags & COMET_MAC_ADDR) { 350 } else if (tp->flags & COMET_MAC_ADDR) {
346 iowrite32(addr_low, ioaddr + 0xA4); 351 iowrite32(addr_low, ioaddr + 0xA4);
347 iowrite32(addr_high, ioaddr + 0xA8); 352 iowrite32(addr_high, ioaddr + 0xA8);
348 iowrite32(0, ioaddr + 0xAC); 353 iowrite32(0, ioaddr + CSR27);
349 iowrite32(0, ioaddr + 0xB0); 354 iowrite32(0, ioaddr + CSR28);
350 } 355 }
351 } else { 356 } else {
352 /* This is set_rx_mode(), but without starting the transmitter. */ 357 /* This is set_rx_mode(), but without starting the transmitter. */
@@ -591,10 +596,10 @@ static void tulip_tx_timeout(struct net_device *dev)
591 pr_cont(" %02x", buf[j]); 596 pr_cont(" %02x", buf[j]);
592 pr_cont(" j=%d\n", j); 597 pr_cont(" j=%d\n", j);
593 } 598 }
594 printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring); 599 printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
595 for (i = 0; i < RX_RING_SIZE; i++) 600 for (i = 0; i < RX_RING_SIZE; i++)
596 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status); 601 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
597 printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring); 602 printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
598 for (i = 0; i < TX_RING_SIZE; i++) 603 for (i = 0; i < TX_RING_SIZE; i++)
599 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status); 604 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
600 pr_cont("\n"); 605 pr_cont("\n");
@@ -876,8 +881,35 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
876 strcpy(info->bus_info, pci_name(np->pdev)); 881 strcpy(info->bus_info, pci_name(np->pdev));
877} 882}
878 883
884
885static int tulip_ethtool_set_wol(struct net_device *dev,
886 struct ethtool_wolinfo *wolinfo)
887{
888 struct tulip_private *tp = netdev_priv(dev);
889
890 if (wolinfo->wolopts & (~tp->wolinfo.supported))
891 return -EOPNOTSUPP;
892
893 tp->wolinfo.wolopts = wolinfo->wolopts;
894 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
895 return 0;
896}
897
898static void tulip_ethtool_get_wol(struct net_device *dev,
899 struct ethtool_wolinfo *wolinfo)
900{
901 struct tulip_private *tp = netdev_priv(dev);
902
903 wolinfo->supported = tp->wolinfo.supported;
904 wolinfo->wolopts = tp->wolinfo.wolopts;
905 return;
906}
907
908
879static const struct ethtool_ops ops = { 909static const struct ethtool_ops ops = {
880 .get_drvinfo = tulip_get_drvinfo 910 .get_drvinfo = tulip_get_drvinfo,
911 .set_wol = tulip_ethtool_set_wol,
912 .get_wol = tulip_ethtool_get_wol,
881}; 913};
882 914
883/* Provide ioctl() calls to examine the MII xcvr state. */ 915/* Provide ioctl() calls to examine the MII xcvr state. */
@@ -1093,8 +1125,8 @@ static void set_rx_mode(struct net_device *dev)
1093 iowrite32(3, ioaddr + CSR13); 1125 iowrite32(3, ioaddr + CSR13);
1094 iowrite32(mc_filter[1], ioaddr + CSR14); 1126 iowrite32(mc_filter[1], ioaddr + CSR14);
1095 } else if (tp->flags & COMET_MAC_ADDR) { 1127 } else if (tp->flags & COMET_MAC_ADDR) {
1096 iowrite32(mc_filter[0], ioaddr + 0xAC); 1128 iowrite32(mc_filter[0], ioaddr + CSR27);
1097 iowrite32(mc_filter[1], ioaddr + 0xB0); 1129 iowrite32(mc_filter[1], ioaddr + CSR28);
1098 } 1130 }
1099 tp->mc_filter[0] = mc_filter[0]; 1131 tp->mc_filter[0] = mc_filter[0];
1100 tp->mc_filter[1] = mc_filter[1]; 1132 tp->mc_filter[1] = mc_filter[1];
@@ -1309,6 +1341,12 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1309 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { 1341 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1310 pr_err(PFX "skipping LMC card\n"); 1342 pr_err(PFX "skipping LMC card\n");
1311 return -ENODEV; 1343 return -ENODEV;
1344 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1345 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1346 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1347 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1348 pr_err(PFX "skipping SBE T3E3 port\n");
1349 return -ENODEV;
1312 } 1350 }
1313 1351
1314 /* 1352 /*
@@ -1381,6 +1419,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1381 return i; 1419 return i;
1382 } 1420 }
1383 1421
1422 /* The chip will fail to enter a low-power state later unless
1423 * first explicitly commanded into D0 */
1424 if (pci_set_power_state(pdev, PCI_D0)) {
1425 printk (KERN_NOTICE PFX
1426 "Failed to set power state to D0\n");
1427 }
1428
1384 irq = pdev->irq; 1429 irq = pdev->irq;
1385 1430
1386 /* alloc_etherdev ensures aligned and zeroed private structures */ 1431 /* alloc_etherdev ensures aligned and zeroed private structures */
@@ -1427,6 +1472,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1427 1472
1428 tp->chip_id = chip_idx; 1473 tp->chip_id = chip_idx;
1429 tp->flags = tulip_tbl[chip_idx].flags; 1474 tp->flags = tulip_tbl[chip_idx].flags;
1475
1476 tp->wolinfo.supported = 0;
1477 tp->wolinfo.wolopts = 0;
1478 /* COMET: Enable power management only for AN983B */
1479 if (chip_idx == COMET ) {
1480 u32 sig;
1481 pci_read_config_dword (pdev, 0x80, &sig);
1482 if (sig == 0x09811317) {
1483 tp->flags |= COMET_PM;
1484 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1485 printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n");
1486 }
1487 }
1430 tp->pdev = pdev; 1488 tp->pdev = pdev;
1431 tp->base_addr = ioaddr; 1489 tp->base_addr = ioaddr;
1432 tp->revision = pdev->revision; 1490 tp->revision = pdev->revision;
@@ -1759,11 +1817,43 @@ err_out_free_netdev:
1759} 1817}
1760 1818
1761 1819
1820/* set the registers according to the given wolopts */
1821static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1822{
1823 struct net_device *dev = pci_get_drvdata(pdev);
1824 struct tulip_private *tp = netdev_priv(dev);
1825 void __iomem *ioaddr = tp->base_addr;
1826
1827 if (tp->flags & COMET_PM) {
1828
1829 unsigned int tmp;
1830
1831 tmp = ioread32(ioaddr + CSR18);
1832 tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1833 tmp |= comet_csr18_pm_mode;
1834 iowrite32(tmp, ioaddr + CSR18);
1835
1836 /* Set the Wake-up Control/Status Register to the given WOL options*/
1837 tmp = ioread32(ioaddr + CSR13);
1838 tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1839 if (wolopts & WAKE_MAGIC)
1840 tmp |= comet_csr13_mpre;
1841 if (wolopts & WAKE_PHY)
1842 tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1843 /* Clear the event flags */
1844 tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1845 iowrite32(tmp, ioaddr + CSR13);
1846 }
1847}
1848
1762#ifdef CONFIG_PM 1849#ifdef CONFIG_PM
1763 1850
1851
1764static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) 1852static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1765{ 1853{
1854 pci_power_t pstate;
1766 struct net_device *dev = pci_get_drvdata(pdev); 1855 struct net_device *dev = pci_get_drvdata(pdev);
1856 struct tulip_private *tp = netdev_priv(dev);
1767 1857
1768 if (!dev) 1858 if (!dev)
1769 return -EINVAL; 1859 return -EINVAL;
@@ -1779,7 +1869,16 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1779save_state: 1869save_state:
1780 pci_save_state(pdev); 1870 pci_save_state(pdev);
1781 pci_disable_device(pdev); 1871 pci_disable_device(pdev);
1782 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1872 pstate = pci_choose_state(pdev, state);
1873 if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1874 int rc;
1875
1876 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1877 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1878 if (rc)
1879 printk("tulip: pci_enable_wake failed (%d)\n", rc);
1880 }
1881 pci_set_power_state(pdev, pstate);
1783 1882
1784 return 0; 1883 return 0;
1785} 1884}
@@ -1788,7 +1887,10 @@ save_state:
1788static int tulip_resume(struct pci_dev *pdev) 1887static int tulip_resume(struct pci_dev *pdev)
1789{ 1888{
1790 struct net_device *dev = pci_get_drvdata(pdev); 1889 struct net_device *dev = pci_get_drvdata(pdev);
1890 struct tulip_private *tp = netdev_priv(dev);
1891 void __iomem *ioaddr = tp->base_addr;
1791 int retval; 1892 int retval;
1893 unsigned int tmp;
1792 1894
1793 if (!dev) 1895 if (!dev)
1794 return -EINVAL; 1896 return -EINVAL;
@@ -1809,6 +1911,18 @@ static int tulip_resume(struct pci_dev *pdev)
1809 return retval; 1911 return retval;
1810 } 1912 }
1811 1913
1914 if (tp->flags & COMET_PM) {
1915 pci_enable_wake(pdev, PCI_D3hot, 0);
1916 pci_enable_wake(pdev, PCI_D3cold, 0);
1917
1918 /* Clear the PMES flag */
1919 tmp = ioread32(ioaddr + CSR20);
1920 tmp |= comet_csr20_pmes;
1921 iowrite32(tmp, ioaddr + CSR20);
1922
1923 /* Disable all wake-up events */
1924 tulip_set_wolopts(pdev, 0);
1925 }
1812 netif_device_attach(dev); 1926 netif_device_attach(dev);
1813 1927
1814 if (netif_running(dev)) 1928 if (netif_running(dev))
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 608b279b921b..66d41cf8da29 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1514,12 +1514,12 @@ static int netdev_close(struct net_device *dev)
1514 if (debug > 2) { 1514 if (debug > 2) {
1515 int i; 1515 int i;
1516 1516
1517 printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring); 1517 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
1518 for (i = 0; i < TX_RING_SIZE; i++) 1518 for (i = 0; i < TX_RING_SIZE; i++)
1519 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", 1519 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1520 i, np->tx_ring[i].length, 1520 i, np->tx_ring[i].length,
1521 np->tx_ring[i].status, np->tx_ring[i].buffer1); 1521 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1522 printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring); 1522 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1523 for (i = 0; i < RX_RING_SIZE; i++) { 1523 for (i = 0; i < RX_RING_SIZE; i++) {
1524 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", 1524 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1525 i, np->rx_ring[i].length, 1525 i, np->rx_ring[i].length,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 63042596f0cf..55f3a3e667a9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -149,6 +149,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
149 tfile->tun = tun; 149 tfile->tun = tun;
150 tun->tfile = tfile; 150 tun->tfile = tfile;
151 tun->socket.file = file; 151 tun->socket.file = file;
152 netif_carrier_on(tun->dev);
152 dev_hold(tun->dev); 153 dev_hold(tun->dev);
153 sock_hold(tun->socket.sk); 154 sock_hold(tun->socket.sk);
154 atomic_inc(&tfile->count); 155 atomic_inc(&tfile->count);
@@ -162,6 +163,7 @@ static void __tun_detach(struct tun_struct *tun)
162{ 163{
163 /* Detach from net device */ 164 /* Detach from net device */
164 netif_tx_lock_bh(tun->dev); 165 netif_tx_lock_bh(tun->dev);
166 netif_carrier_off(tun->dev);
165 tun->tfile = NULL; 167 tun->tfile = NULL;
166 tun->socket.file = NULL; 168 tun->socket.file = NULL;
167 netif_tx_unlock_bh(tun->dev); 169 netif_tx_unlock_bh(tun->dev);
@@ -1574,12 +1576,6 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
1574#endif 1576#endif
1575} 1577}
1576 1578
1577static u32 tun_get_link(struct net_device *dev)
1578{
1579 struct tun_struct *tun = netdev_priv(dev);
1580 return !!tun->tfile;
1581}
1582
1583static u32 tun_get_rx_csum(struct net_device *dev) 1579static u32 tun_get_rx_csum(struct net_device *dev)
1584{ 1580{
1585 struct tun_struct *tun = netdev_priv(dev); 1581 struct tun_struct *tun = netdev_priv(dev);
@@ -1601,7 +1597,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
1601 .get_drvinfo = tun_get_drvinfo, 1597 .get_drvinfo = tun_get_drvinfo,
1602 .get_msglevel = tun_get_msglevel, 1598 .get_msglevel = tun_get_msglevel,
1603 .set_msglevel = tun_set_msglevel, 1599 .set_msglevel = tun_set_msglevel,
1604 .get_link = tun_get_link, 1600 .get_link = ethtool_op_get_link,
1605 .get_rx_csum = tun_get_rx_csum, 1601 .get_rx_csum = tun_get_rx_csum,
1606 .set_rx_csum = tun_set_rx_csum 1602 .set_rx_csum = tun_set_rx_csum
1607}; 1603};
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 22bde49262c0..2e50077ff450 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -255,7 +255,7 @@ struct typhoon_shared {
255 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; 255 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
256 u32 zeroWord; 256 u32 zeroWord;
257 struct tx_desc txHi[TXHI_ENTRIES]; 257 struct tx_desc txHi[TXHI_ENTRIES];
258} __attribute__ ((packed)); 258} __packed;
259 259
260struct rxbuff_ent { 260struct rxbuff_ent {
261 struct sk_buff *skb; 261 struct sk_buff *skb;
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
index 673fd5125914..88187fc84aa3 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/typhoon.h
@@ -77,7 +77,7 @@ struct typhoon_indexes {
77 volatile __le32 cmdCleared; 77 volatile __le32 cmdCleared;
78 volatile __le32 respReady; 78 volatile __le32 respReady;
79 volatile __le32 rxHiReady; 79 volatile __le32 rxHiReady;
80} __attribute__ ((packed)); 80} __packed;
81 81
82/* The host<->Typhoon interface 82/* The host<->Typhoon interface
83 * Our means of communicating where things are 83 * Our means of communicating where things are
@@ -125,7 +125,7 @@ struct typhoon_interface {
125 __le32 rxHiAddr; 125 __le32 rxHiAddr;
126 __le32 rxHiAddrHi; 126 __le32 rxHiAddrHi;
127 __le32 rxHiSize; 127 __le32 rxHiSize;
128} __attribute__ ((packed)); 128} __packed;
129 129
130/* The Typhoon transmit/fragment descriptor 130/* The Typhoon transmit/fragment descriptor
131 * 131 *
@@ -187,7 +187,7 @@ struct tx_desc {
187#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) 187#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
188#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) 188#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
190} __attribute__ ((packed)); 190} __packed;
191 191
192/* The TCP Segmentation offload option descriptor 192/* The TCP Segmentation offload option descriptor
193 * 193 *
@@ -208,7 +208,7 @@ struct tcpopt_desc {
208 __le32 respAddrLo; 208 __le32 respAddrLo;
209 __le32 bytesTx; 209 __le32 bytesTx;
210 __le32 status; 210 __le32 status;
211} __attribute__ ((packed)); 211} __packed;
212 212
213/* The IPSEC Offload descriptor 213/* The IPSEC Offload descriptor
214 * 214 *
@@ -227,7 +227,7 @@ struct ipsec_desc {
227 __le32 sa1; 227 __le32 sa1;
228 __le32 sa2; 228 __le32 sa2;
229 __le32 reserved; 229 __le32 reserved;
230} __attribute__ ((packed)); 230} __packed;
231 231
232/* The Typhoon receive descriptor (Updated by NIC) 232/* The Typhoon receive descriptor (Updated by NIC)
233 * 233 *
@@ -284,7 +284,7 @@ struct rx_desc {
284#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) 284#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
285#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) 285#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
286 __be32 vlanTag; 286 __be32 vlanTag;
287} __attribute__ ((packed)); 287} __packed;
288 288
289/* The Typhoon free buffer descriptor, used to give a buffer to the NIC 289/* The Typhoon free buffer descriptor, used to give a buffer to the NIC
290 * 290 *
@@ -301,7 +301,7 @@ struct rx_free {
301 __le32 physAddrHi; 301 __le32 physAddrHi;
302 u32 virtAddr; 302 u32 virtAddr;
303 u32 virtAddrHi; 303 u32 virtAddrHi;
304} __attribute__ ((packed)); 304} __packed;
305 305
306/* The Typhoon command descriptor, used for commands and responses 306/* The Typhoon command descriptor, used for commands and responses
307 * 307 *
@@ -347,7 +347,7 @@ struct cmd_desc {
347 __le16 parm1; 347 __le16 parm1;
348 __le32 parm2; 348 __le32 parm2;
349 __le32 parm3; 349 __le32 parm3;
350} __attribute__ ((packed)); 350} __packed;
351 351
352/* The Typhoon response descriptor, see command descriptor for details 352/* The Typhoon response descriptor, see command descriptor for details
353 */ 353 */
@@ -359,7 +359,7 @@ struct resp_desc {
359 __le16 parm1; 359 __le16 parm1;
360 __le32 parm2; 360 __le32 parm2;
361 __le32 parm3; 361 __le32 parm3;
362} __attribute__ ((packed)); 362} __packed;
363 363
364#define INIT_COMMAND_NO_RESPONSE(x, command) \ 364#define INIT_COMMAND_NO_RESPONSE(x, command) \
365 do { struct cmd_desc *_ptr = (x); \ 365 do { struct cmd_desc *_ptr = (x); \
@@ -427,7 +427,7 @@ struct stats_resp {
427#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) 427#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000)
428 __le32 unused2; 428 __le32 unused2;
429 __le32 unused3; 429 __le32 unused3;
430} __attribute__ ((packed)); 430} __packed;
431 431
432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) 432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
433 */ 433 */
@@ -488,7 +488,7 @@ struct sa_descriptor {
488 u32 index; 488 u32 index;
489 u32 unused; 489 u32 unused;
490 u32 unused2; 490 u32 unused2;
491} __attribute__ ((packed)); 491} __packed;
492 492
493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) 493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
494 * This is all for IPv4. 494 * This is all for IPv4.
@@ -518,14 +518,14 @@ struct typhoon_file_header {
518 __le32 numSections; 518 __le32 numSections;
519 __le32 startAddr; 519 __le32 startAddr;
520 __le32 hmacDigest[5]; 520 __le32 hmacDigest[5];
521} __attribute__ ((packed)); 521} __packed;
522 522
523struct typhoon_section_header { 523struct typhoon_section_header {
524 __le32 len; 524 __le32 len;
525 u16 checksum; 525 u16 checksum;
526 u16 reserved; 526 u16 reserved;
527 __le32 startAddr; 527 __le32 startAddr;
528} __attribute__ ((packed)); 528} __packed;
529 529
530/* The Typhoon Register offsets 530/* The Typhoon Register offsets
531 */ 531 */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 807470e156af..8d532f9b50d0 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -594,7 +594,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
594{ 594{
595 int i; 595 int i;
596 596
597 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); 597 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1);
598 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); 598 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
599 599
600 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", 600 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
@@ -3704,6 +3704,19 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type)
3704 return PHY_INTERFACE_MODE_MII; 3704 return PHY_INTERFACE_MODE_MII;
3705} 3705}
3706 3706
3707static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3708{
3709 struct ucc_geth_private *ugeth = netdev_priv(dev);
3710
3711 if (!netif_running(dev))
3712 return -EINVAL;
3713
3714 if (!ugeth->phydev)
3715 return -ENODEV;
3716
3717 return phy_mii_ioctl(ugeth->phydev, rq, cmd);
3718}
3719
3707static const struct net_device_ops ucc_geth_netdev_ops = { 3720static const struct net_device_ops ucc_geth_netdev_ops = {
3708 .ndo_open = ucc_geth_open, 3721 .ndo_open = ucc_geth_open,
3709 .ndo_stop = ucc_geth_close, 3722 .ndo_stop = ucc_geth_close,
@@ -3713,6 +3726,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
3713 .ndo_change_mtu = eth_change_mtu, 3726 .ndo_change_mtu = eth_change_mtu,
3714 .ndo_set_multicast_list = ucc_geth_set_multi, 3727 .ndo_set_multicast_list = ucc_geth_set_multi,
3715 .ndo_tx_timeout = ucc_geth_timeout, 3728 .ndo_tx_timeout = ucc_geth_timeout,
3729 .ndo_do_ioctl = ucc_geth_ioctl,
3716#ifdef CONFIG_NET_POLL_CONTROLLER 3730#ifdef CONFIG_NET_POLL_CONTROLLER
3717 .ndo_poll_controller = ucc_netpoll, 3731 .ndo_poll_controller = ucc_netpoll,
3718#endif 3732#endif
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index ef1fbeb11c6e..05a95586f3c5 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -106,7 +106,7 @@ struct ucc_geth {
106 u32 scar; /* Statistics carry register */ 106 u32 scar; /* Statistics carry register */
107 u32 scam; /* Statistics caryy mask register */ 107 u32 scam; /* Statistics caryy mask register */
108 u8 res5[0x200 - 0x1c4]; 108 u8 res5[0x200 - 0x1c4];
109} __attribute__ ((packed)); 109} __packed;
110 110
111/* UCC GETH TEMODR Register */ 111/* UCC GETH TEMODR Register */
112#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics 112#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
@@ -420,11 +420,11 @@ struct ucc_geth {
420 420
421struct ucc_geth_thread_data_tx { 421struct ucc_geth_thread_data_tx {
422 u8 res0[104]; 422 u8 res0[104];
423} __attribute__ ((packed)); 423} __packed;
424 424
425struct ucc_geth_thread_data_rx { 425struct ucc_geth_thread_data_rx {
426 u8 res0[40]; 426 u8 res0[40];
427} __attribute__ ((packed)); 427} __packed;
428 428
429/* Send Queue Queue-Descriptor */ 429/* Send Queue Queue-Descriptor */
430struct ucc_geth_send_queue_qd { 430struct ucc_geth_send_queue_qd {
@@ -432,19 +432,19 @@ struct ucc_geth_send_queue_qd {
432 u8 res0[0x8]; 432 u8 res0[0x8];
433 u32 last_bd_completed_address;/* initialize to last entry in BD ring */ 433 u32 last_bd_completed_address;/* initialize to last entry in BD ring */
434 u8 res1[0x30]; 434 u8 res1[0x30];
435} __attribute__ ((packed)); 435} __packed;
436 436
437struct ucc_geth_send_queue_mem_region { 437struct ucc_geth_send_queue_mem_region {
438 struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; 438 struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
439} __attribute__ ((packed)); 439} __packed;
440 440
441struct ucc_geth_thread_tx_pram { 441struct ucc_geth_thread_tx_pram {
442 u8 res0[64]; 442 u8 res0[64];
443} __attribute__ ((packed)); 443} __packed;
444 444
445struct ucc_geth_thread_rx_pram { 445struct ucc_geth_thread_rx_pram {
446 u8 res0[128]; 446 u8 res0[128];
447} __attribute__ ((packed)); 447} __packed;
448 448
449#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 449#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
450#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 450#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
@@ -484,7 +484,7 @@ struct ucc_geth_scheduler {
484 /**< weight factor for queues */ 484 /**< weight factor for queues */
485 u32 minw; /* temporary variable handled by QE */ 485 u32 minw; /* temporary variable handled by QE */
486 u8 res1[0x70 - 0x64]; 486 u8 res1[0x70 - 0x64];
487} __attribute__ ((packed)); 487} __packed;
488 488
489struct ucc_geth_tx_firmware_statistics_pram { 489struct ucc_geth_tx_firmware_statistics_pram {
490 u32 sicoltx; /* single collision */ 490 u32 sicoltx; /* single collision */
@@ -506,7 +506,7 @@ struct ucc_geth_tx_firmware_statistics_pram {
506 and 1518 octets */ 506 and 1518 octets */
507 u32 txpktsjumbo; /* total packets (including bad) between 1024 507 u32 txpktsjumbo; /* total packets (including bad) between 1024
508 and MAXLength octets */ 508 and MAXLength octets */
509} __attribute__ ((packed)); 509} __packed;
510 510
511struct ucc_geth_rx_firmware_statistics_pram { 511struct ucc_geth_rx_firmware_statistics_pram {
512 u32 frrxfcser; /* frames with crc error */ 512 u32 frrxfcser; /* frames with crc error */
@@ -540,7 +540,7 @@ struct ucc_geth_rx_firmware_statistics_pram {
540 replaced */ 540 replaced */
541 u32 insertvlan; /* total frames that had their VLAN tag 541 u32 insertvlan; /* total frames that had their VLAN tag
542 inserted */ 542 inserted */
543} __attribute__ ((packed)); 543} __packed;
544 544
545struct ucc_geth_rx_interrupt_coalescing_entry { 545struct ucc_geth_rx_interrupt_coalescing_entry {
546 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max 546 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
@@ -548,23 +548,23 @@ struct ucc_geth_rx_interrupt_coalescing_entry {
548 u32 interruptcoalescingcounter; /* interrupt coalescing counter, 548 u32 interruptcoalescingcounter; /* interrupt coalescing counter,
549 initialize to 549 initialize to
550 interruptcoalescingmaxvalue */ 550 interruptcoalescingmaxvalue */
551} __attribute__ ((packed)); 551} __packed;
552 552
553struct ucc_geth_rx_interrupt_coalescing_table { 553struct ucc_geth_rx_interrupt_coalescing_table {
554 struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; 554 struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
555 /**< interrupt coalescing entry */ 555 /**< interrupt coalescing entry */
556} __attribute__ ((packed)); 556} __packed;
557 557
558struct ucc_geth_rx_prefetched_bds { 558struct ucc_geth_rx_prefetched_bds {
559 struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ 559 struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
560} __attribute__ ((packed)); 560} __packed;
561 561
562struct ucc_geth_rx_bd_queues_entry { 562struct ucc_geth_rx_bd_queues_entry {
563 u32 bdbaseptr; /* BD base pointer */ 563 u32 bdbaseptr; /* BD base pointer */
564 u32 bdptr; /* BD pointer */ 564 u32 bdptr; /* BD pointer */
565 u32 externalbdbaseptr; /* external BD base pointer */ 565 u32 externalbdbaseptr; /* external BD base pointer */
566 u32 externalbdptr; /* external BD pointer */ 566 u32 externalbdptr; /* external BD pointer */
567} __attribute__ ((packed)); 567} __packed;
568 568
569struct ucc_geth_tx_global_pram { 569struct ucc_geth_tx_global_pram {
570 u16 temoder; 570 u16 temoder;
@@ -580,13 +580,13 @@ struct ucc_geth_tx_global_pram {
580 u32 tqptr; /* a base pointer to the Tx Queues Memory 580 u32 tqptr; /* a base pointer to the Tx Queues Memory
581 Region */ 581 Region */
582 u8 res2[0x80 - 0x74]; 582 u8 res2[0x80 - 0x74];
583} __attribute__ ((packed)); 583} __packed;
584 584
585/* structure representing Extended Filtering Global Parameters in PRAM */ 585/* structure representing Extended Filtering Global Parameters in PRAM */
586struct ucc_geth_exf_global_pram { 586struct ucc_geth_exf_global_pram {
587 u32 l2pcdptr; /* individual address filter, high */ 587 u32 l2pcdptr; /* individual address filter, high */
588 u8 res0[0x10 - 0x04]; 588 u8 res0[0x10 - 0x04];
589} __attribute__ ((packed)); 589} __packed;
590 590
591struct ucc_geth_rx_global_pram { 591struct ucc_geth_rx_global_pram {
592 u32 remoder; /* ethernet mode reg. */ 592 u32 remoder; /* ethernet mode reg. */
@@ -620,7 +620,7 @@ struct ucc_geth_rx_global_pram {
620 u32 exfGlobalParam; /* base address for extended filtering global 620 u32 exfGlobalParam; /* base address for extended filtering global
621 parameters */ 621 parameters */
622 u8 res6[0x100 - 0xC4]; /* Initialize to zero */ 622 u8 res6[0x100 - 0xC4]; /* Initialize to zero */
623} __attribute__ ((packed)); 623} __packed;
624 624
625#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 625#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
626 626
@@ -639,7 +639,7 @@ struct ucc_geth_init_pram {
639 u32 txglobal; /* tx global */ 639 u32 txglobal; /* tx global */
640 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ 640 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
641 u8 res3[0x1]; 641 u8 res3[0x1];
642} __attribute__ ((packed)); 642} __packed;
643 643
644#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) 644#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
645#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) 645#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
@@ -661,7 +661,7 @@ struct ucc_geth_82xx_enet_address {
661 u16 h; /* address (MSB) */ 661 u16 h; /* address (MSB) */
662 u16 m; /* address */ 662 u16 m; /* address */
663 u16 l; /* address (LSB) */ 663 u16 l; /* address (LSB) */
664} __attribute__ ((packed)); 664} __packed;
665 665
666/* structure representing 82xx Address Filtering PRAM */ 666/* structure representing 82xx Address Filtering PRAM */
667struct ucc_geth_82xx_address_filtering_pram { 667struct ucc_geth_82xx_address_filtering_pram {
@@ -672,7 +672,7 @@ struct ucc_geth_82xx_address_filtering_pram {
672 struct ucc_geth_82xx_enet_address __iomem taddr; 672 struct ucc_geth_82xx_enet_address __iomem taddr;
673 struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; 673 struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
674 u8 res0[0x40 - 0x38]; 674 u8 res0[0x40 - 0x38];
675} __attribute__ ((packed)); 675} __packed;
676 676
677/* GETH Tx firmware statistics structure, used when calling 677/* GETH Tx firmware statistics structure, used when calling
678 UCC_GETH_GetStatistics. */ 678 UCC_GETH_GetStatistics. */
@@ -696,7 +696,7 @@ struct ucc_geth_tx_firmware_statistics {
696 and 1518 octets */ 696 and 1518 octets */
697 u32 txpktsjumbo; /* total packets (including bad) between 1024 697 u32 txpktsjumbo; /* total packets (including bad) between 1024
698 and MAXLength octets */ 698 and MAXLength octets */
699} __attribute__ ((packed)); 699} __packed;
700 700
701/* GETH Rx firmware statistics structure, used when calling 701/* GETH Rx firmware statistics structure, used when calling
702 UCC_GETH_GetStatistics. */ 702 UCC_GETH_GetStatistics. */
@@ -732,7 +732,7 @@ struct ucc_geth_rx_firmware_statistics {
732 replaced */ 732 replaced */
733 u32 insertvlan; /* total frames that had their VLAN tag 733 u32 insertvlan; /* total frames that had their VLAN tag
734 inserted */ 734 inserted */
735} __attribute__ ((packed)); 735} __packed;
736 736
737/* GETH hardware statistics structure, used when calling 737/* GETH hardware statistics structure, used when calling
738 UCC_GETH_GetStatistics. */ 738 UCC_GETH_GetStatistics. */
@@ -781,7 +781,7 @@ struct ucc_geth_hardware_statistics {
781 u32 rbca; /* Total number of frames received successfully 781 u32 rbca; /* Total number of frames received successfully
782 that had destination address equal to the 782 that had destination address equal to the
783 broadcast address */ 783 broadcast address */
784} __attribute__ ((packed)); 784} __packed;
785 785
786/* UCC GETH Tx errors returned via TxConf callback */ 786/* UCC GETH Tx errors returned via TxConf callback */
787#define TX_ERRORS_DEF 0x0200 787#define TX_ERRORS_DEF 0x0200
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 9516f382a6ba..aea4645be7f6 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -179,7 +179,7 @@ struct ax88172_int_data {
179 __le16 res2; 179 __le16 res2;
180 u8 status; 180 u8 status;
181 __le16 res3; 181 __le16 res3;
182} __attribute__ ((packed)); 182} __packed;
183 183
184static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, 184static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
185 u16 size, void *data) 185 u16 size, void *data)
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index dc9444525b49..109751bad3bb 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -97,8 +97,9 @@ static void tx_complete(struct urb *req)
97 struct sk_buff *skb = req->context; 97 struct sk_buff *skb = req->context;
98 struct net_device *dev = skb->dev; 98 struct net_device *dev = skb->dev;
99 struct usbpn_dev *pnd = netdev_priv(dev); 99 struct usbpn_dev *pnd = netdev_priv(dev);
100 int status = req->status;
100 101
101 switch (req->status) { 102 switch (status) {
102 case 0: 103 case 0:
103 dev->stats.tx_bytes += skb->len; 104 dev->stats.tx_bytes += skb->len;
104 break; 105 break;
@@ -109,7 +110,7 @@ static void tx_complete(struct urb *req)
109 dev->stats.tx_aborted_errors++; 110 dev->stats.tx_aborted_errors++;
110 default: 111 default:
111 dev->stats.tx_errors++; 112 dev->stats.tx_errors++;
112 dev_dbg(&dev->dev, "TX error (%d)\n", req->status); 113 dev_dbg(&dev->dev, "TX error (%d)\n", status);
113 } 114 }
114 dev->stats.tx_packets++; 115 dev->stats.tx_packets++;
115 116
@@ -150,8 +151,9 @@ static void rx_complete(struct urb *req)
150 struct page *page = virt_to_page(req->transfer_buffer); 151 struct page *page = virt_to_page(req->transfer_buffer);
151 struct sk_buff *skb; 152 struct sk_buff *skb;
152 unsigned long flags; 153 unsigned long flags;
154 int status = req->status;
153 155
154 switch (req->status) { 156 switch (status) {
155 case 0: 157 case 0:
156 spin_lock_irqsave(&pnd->rx_lock, flags); 158 spin_lock_irqsave(&pnd->rx_lock, flags);
157 skb = pnd->rx_skb; 159 skb = pnd->rx_skb;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 4dd23513c5af..6efca66b8766 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -73,7 +73,6 @@
73#include <linux/serial.h> 73#include <linux/serial.h>
74 74
75 75
76#define DRIVER_VERSION "1.2"
77#define MOD_AUTHOR "Option Wireless" 76#define MOD_AUTHOR "Option Wireless"
78#define MOD_DESCRIPTION "USB High Speed Option driver" 77#define MOD_DESCRIPTION "USB High Speed Option driver"
79#define MOD_LICENSE "GPL" 78#define MOD_LICENSE "GPL"
@@ -211,7 +210,7 @@ struct hso_serial_state_notification {
211 u16 wIndex; 210 u16 wIndex;
212 u16 wLength; 211 u16 wLength;
213 u16 UART_state_bitmap; 212 u16 UART_state_bitmap;
214} __attribute__((packed)); 213} __packed;
215 214
216struct hso_tiocmget { 215struct hso_tiocmget {
217 struct mutex mutex; 216 struct mutex mutex;
@@ -401,7 +400,7 @@ static int disable_net;
401/* driver info */ 400/* driver info */
402static const char driver_name[] = "hso"; 401static const char driver_name[] = "hso";
403static const char tty_filename[] = "ttyHS"; 402static const char tty_filename[] = "ttyHS";
404static const char *version = __FILE__ ": " DRIVER_VERSION " " MOD_AUTHOR; 403static const char *version = __FILE__ ": " MOD_AUTHOR;
405/* the usb driver itself (registered in hso_init) */ 404/* the usb driver itself (registered in hso_init) */
406static struct usb_driver hso_driver; 405static struct usb_driver hso_driver;
407/* serial structures */ 406/* serial structures */
@@ -478,6 +477,7 @@ static const struct usb_device_id hso_ids[] = {
478 {USB_DEVICE(0x0af0, 0x8600)}, 477 {USB_DEVICE(0x0af0, 0x8600)},
479 {USB_DEVICE(0x0af0, 0x8800)}, 478 {USB_DEVICE(0x0af0, 0x8800)},
480 {USB_DEVICE(0x0af0, 0x8900)}, 479 {USB_DEVICE(0x0af0, 0x8900)},
480 {USB_DEVICE(0x0af0, 0x9000)},
481 {USB_DEVICE(0x0af0, 0xd035)}, 481 {USB_DEVICE(0x0af0, 0xd035)},
482 {USB_DEVICE(0x0af0, 0xd055)}, 482 {USB_DEVICE(0x0af0, 0xd055)},
483 {USB_DEVICE(0x0af0, 0xd155)}, 483 {USB_DEVICE(0x0af0, 0xd155)},
@@ -848,7 +848,6 @@ static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info
848 struct hso_net *odev = netdev_priv(net); 848 struct hso_net *odev = netdev_priv(net);
849 849
850 strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); 850 strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
851 strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
852 usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info); 851 usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info);
853} 852}
854 853
@@ -3388,7 +3387,6 @@ module_exit(hso_exit);
3388MODULE_AUTHOR(MOD_AUTHOR); 3387MODULE_AUTHOR(MOD_AUTHOR);
3389MODULE_DESCRIPTION(MOD_DESCRIPTION); 3388MODULE_DESCRIPTION(MOD_DESCRIPTION);
3390MODULE_LICENSE(MOD_LICENSE); 3389MODULE_LICENSE(MOD_LICENSE);
3391MODULE_INFO(Version, DRIVER_VERSION);
3392 3390
3393/* change the debug level (eg: insmod hso.ko debug=0x04) */ 3391/* change the debug level (eg: insmod hso.ko debug=0x04) */
3394MODULE_PARM_DESC(debug, "Level of debug [0x01 | 0x02 | 0x04 | 0x08 | 0x10]"); 3392MODULE_PARM_DESC(debug, "Level of debug [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 197c352c47fb..08e7b6abacdd 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -193,7 +193,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
193 case 0: 193 case 0:
194 break; 194 break;
195 default: 195 default:
196 err("%s: urb status: %d", __func__, urb->status); 196 err("%s: urb status: %d", __func__, status);
197 return; 197 return;
198 } 198 }
199 199
@@ -222,16 +222,17 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
222static void ipheth_sndbulk_callback(struct urb *urb) 222static void ipheth_sndbulk_callback(struct urb *urb)
223{ 223{
224 struct ipheth_device *dev; 224 struct ipheth_device *dev;
225 int status = urb->status;
225 226
226 dev = urb->context; 227 dev = urb->context;
227 if (dev == NULL) 228 if (dev == NULL)
228 return; 229 return;
229 230
230 if (urb->status != 0 && 231 if (status != 0 &&
231 urb->status != -ENOENT && 232 status != -ENOENT &&
232 urb->status != -ECONNRESET && 233 status != -ECONNRESET &&
233 urb->status != -ESHUTDOWN) 234 status != -ESHUTDOWN)
234 err("%s: urb status: %d", __func__, urb->status); 235 err("%s: urb status: %d", __func__, status);
235 236
236 dev_kfree_skb_irq(dev->tx_skb); 237 dev_kfree_skb_irq(dev->tx_skb);
237 netif_wake_queue(dev->net); 238 netif_wake_queue(dev->net);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index d6078b8c4273..2b7b39cad1ce 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -207,7 +207,7 @@ struct kaweth_ethernet_configuration
207 __le16 segment_size; 207 __le16 segment_size;
208 __u16 max_multicast_filters; 208 __u16 max_multicast_filters;
209 __u8 reserved3; 209 __u8 reserved3;
210} __attribute__ ((packed)); 210} __packed;
211 211
212/**************************************************************** 212/****************************************************************
213 * kaweth_device 213 * kaweth_device
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 961a8ed38d8f..ba72a7281cb0 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -64,13 +64,13 @@ struct nc_header { // packed:
64 // all else is optional, and must start with: 64 // all else is optional, and must start with:
65 // __le16 vendorId; // from usb-if 65 // __le16 vendorId; // from usb-if
66 // __le16 productId; 66 // __le16 productId;
67} __attribute__((__packed__)); 67} __packed;
68 68
69#define PAD_BYTE ((unsigned char)0xAC) 69#define PAD_BYTE ((unsigned char)0xAC)
70 70
71struct nc_trailer { 71struct nc_trailer {
72 __le16 packet_id; 72 __le16 packet_id;
73} __attribute__((__packed__)); 73} __packed;
74 74
75// packets may use FLAG_FRAMING_NC and optional pad 75// packets may use FLAG_FRAMING_NC and optional pad
76#define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \ 76#define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 974d17f0263e..6710f09346d6 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -21,11 +21,11 @@
21 * behaves. Pegasus II support added since this version. 21 * behaves. Pegasus II support added since this version.
22 * TODO: suppressing HCD warnings spewage on disconnect. 22 * TODO: suppressing HCD warnings spewage on disconnect.
23 * v0.4.13 Ethernet address is now set at probe(), not at open() 23 * v0.4.13 Ethernet address is now set at probe(), not at open()
24 * time as this seems to break dhcpd. 24 * time as this seems to break dhcpd.
25 * v0.5.0 branch to 2.5.x kernels 25 * v0.5.0 branch to 2.5.x kernels
26 * v0.5.1 ethtool support added 26 * v0.5.1 ethtool support added
27 * v0.5.5 rx socket buffers are in a pool and the their allocation 27 * v0.5.5 rx socket buffers are in a pool and the their allocation
28 * is out of the interrupt routine. 28 * is out of the interrupt routine.
29 */ 29 */
30 30
31#include <linux/sched.h> 31#include <linux/sched.h>
@@ -55,9 +55,9 @@ static const char driver_name[] = "pegasus";
55#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \ 55#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
56 BMSR_100FULL | BMSR_ANEGCAPABLE) 56 BMSR_100FULL | BMSR_ANEGCAPABLE)
57 57
58static int loopback = 0; 58static int loopback;
59static int mii_mode = 0; 59static int mii_mode;
60static char *devid=NULL; 60static char *devid;
61 61
62static struct usb_eth_dev usb_dev_id[] = { 62static struct usb_eth_dev usb_dev_id[] = {
63#define PEGASUS_DEV(pn, vid, pid, flags) \ 63#define PEGASUS_DEV(pn, vid, pid, flags) \
@@ -102,8 +102,8 @@ MODULE_PARM_DESC(devid, "The format is: 'DEV_name:VendorID:DeviceID:Flags'");
102 102
103/* use ethtool to change the level for any given device */ 103/* use ethtool to change the level for any given device */
104static int msg_level = -1; 104static int msg_level = -1;
105module_param (msg_level, int, 0); 105module_param(msg_level, int, 0);
106MODULE_PARM_DESC (msg_level, "Override default message level"); 106MODULE_PARM_DESC(msg_level, "Override default message level");
107 107
108MODULE_DEVICE_TABLE(usb, pegasus_ids); 108MODULE_DEVICE_TABLE(usb, pegasus_ids);
109static const struct net_device_ops pegasus_netdev_ops; 109static const struct net_device_ops pegasus_netdev_ops;
@@ -141,7 +141,7 @@ static void ctrl_callback(struct urb *urb)
141 wake_up(&pegasus->ctrl_wait); 141 wake_up(&pegasus->ctrl_wait);
142} 142}
143 143
144static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size, 144static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
145 void *data) 145 void *data)
146{ 146{
147 int ret; 147 int ret;
@@ -196,7 +196,7 @@ out:
196 return ret; 196 return ret;
197} 197}
198 198
199static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size, 199static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
200 void *data) 200 void *data)
201{ 201{
202 int ret; 202 int ret;
@@ -248,7 +248,7 @@ out:
248 return ret; 248 return ret;
249} 249}
250 250
251static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data) 251static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
252{ 252{
253 int ret; 253 int ret;
254 char *tmp; 254 char *tmp;
@@ -299,7 +299,7 @@ out:
299 return ret; 299 return ret;
300} 300}
301 301
302static int update_eth_regs_async(pegasus_t * pegasus) 302static int update_eth_regs_async(pegasus_t *pegasus)
303{ 303{
304 int ret; 304 int ret;
305 305
@@ -326,7 +326,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
326} 326}
327 327
328/* Returns 0 on success, error on failure */ 328/* Returns 0 on success, error on failure */
329static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd) 329static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
330{ 330{
331 int i; 331 int i;
332 __u8 data[4] = { phy, 0, 0, indx }; 332 __u8 data[4] = { phy, 0, 0, indx };
@@ -334,7 +334,7 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
334 int ret; 334 int ret;
335 335
336 set_register(pegasus, PhyCtrl, 0); 336 set_register(pegasus, PhyCtrl, 0);
337 set_registers(pegasus, PhyAddr, sizeof (data), data); 337 set_registers(pegasus, PhyAddr, sizeof(data), data);
338 set_register(pegasus, PhyCtrl, (indx | PHY_READ)); 338 set_register(pegasus, PhyCtrl, (indx | PHY_READ));
339 for (i = 0; i < REG_TIMEOUT; i++) { 339 for (i = 0; i < REG_TIMEOUT; i++) {
340 ret = get_registers(pegasus, PhyCtrl, 1, data); 340 ret = get_registers(pegasus, PhyCtrl, 1, data);
@@ -366,7 +366,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
366 return (int)res; 366 return (int)res;
367} 367}
368 368
369static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd) 369static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 regd)
370{ 370{
371 int i; 371 int i;
372 __u8 data[4] = { phy, 0, 0, indx }; 372 __u8 data[4] = { phy, 0, 0, indx };
@@ -402,7 +402,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
402 write_mii_word(pegasus, phy_id, loc, val); 402 write_mii_word(pegasus, phy_id, loc, val);
403} 403}
404 404
405static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata) 405static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
406{ 406{
407 int i; 407 int i;
408 __u8 tmp; 408 __u8 tmp;
@@ -433,7 +433,7 @@ fail:
433} 433}
434 434
435#ifdef PEGASUS_WRITE_EEPROM 435#ifdef PEGASUS_WRITE_EEPROM
436static inline void enable_eprom_write(pegasus_t * pegasus) 436static inline void enable_eprom_write(pegasus_t *pegasus)
437{ 437{
438 __u8 tmp; 438 __u8 tmp;
439 int ret; 439 int ret;
@@ -442,7 +442,7 @@ static inline void enable_eprom_write(pegasus_t * pegasus)
442 set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE); 442 set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE);
443} 443}
444 444
445static inline void disable_eprom_write(pegasus_t * pegasus) 445static inline void disable_eprom_write(pegasus_t *pegasus)
446{ 446{
447 __u8 tmp; 447 __u8 tmp;
448 int ret; 448 int ret;
@@ -452,7 +452,7 @@ static inline void disable_eprom_write(pegasus_t * pegasus)
452 set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE); 452 set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE);
453} 453}
454 454
455static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data) 455static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
456{ 456{
457 int i; 457 int i;
458 __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; 458 __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
@@ -484,7 +484,7 @@ fail:
484} 484}
485#endif /* PEGASUS_WRITE_EEPROM */ 485#endif /* PEGASUS_WRITE_EEPROM */
486 486
487static inline void get_node_id(pegasus_t * pegasus, __u8 * id) 487static inline void get_node_id(pegasus_t *pegasus, __u8 *id)
488{ 488{
489 int i; 489 int i;
490 __u16 w16; 490 __u16 w16;
@@ -495,7 +495,7 @@ static inline void get_node_id(pegasus_t * pegasus, __u8 * id)
495 } 495 }
496} 496}
497 497
498static void set_ethernet_addr(pegasus_t * pegasus) 498static void set_ethernet_addr(pegasus_t *pegasus)
499{ 499{
500 __u8 node_id[6]; 500 __u8 node_id[6];
501 501
@@ -503,12 +503,12 @@ static void set_ethernet_addr(pegasus_t * pegasus)
503 get_registers(pegasus, 0x10, sizeof(node_id), node_id); 503 get_registers(pegasus, 0x10, sizeof(node_id), node_id);
504 } else { 504 } else {
505 get_node_id(pegasus, node_id); 505 get_node_id(pegasus, node_id);
506 set_registers(pegasus, EthID, sizeof (node_id), node_id); 506 set_registers(pegasus, EthID, sizeof(node_id), node_id);
507 } 507 }
508 memcpy(pegasus->net->dev_addr, node_id, sizeof (node_id)); 508 memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
509} 509}
510 510
511static inline int reset_mac(pegasus_t * pegasus) 511static inline int reset_mac(pegasus_t *pegasus)
512{ 512{
513 __u8 data = 0x8; 513 __u8 data = 0x8;
514 int i; 514 int i;
@@ -563,7 +563,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
563 data[1] = 0; 563 data[1] = 0;
564 data[2] = (loopback & 1) ? 0x09 : 0x01; 564 data[2] = (loopback & 1) ? 0x09 : 0x01;
565 565
566 memcpy(pegasus->eth_regs, data, sizeof (data)); 566 memcpy(pegasus->eth_regs, data, sizeof(data));
567 ret = set_registers(pegasus, EthCtrl0, 3, data); 567 ret = set_registers(pegasus, EthCtrl0, 3, data);
568 568
569 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS || 569 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS ||
@@ -577,7 +577,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
577 return ret; 577 return ret;
578} 578}
579 579
580static void fill_skb_pool(pegasus_t * pegasus) 580static void fill_skb_pool(pegasus_t *pegasus)
581{ 581{
582 int i; 582 int i;
583 583
@@ -595,7 +595,7 @@ static void fill_skb_pool(pegasus_t * pegasus)
595 } 595 }
596} 596}
597 597
598static void free_skb_pool(pegasus_t * pegasus) 598static void free_skb_pool(pegasus_t *pegasus)
599{ 599{
600 int i; 600 int i;
601 601
@@ -667,11 +667,11 @@ static void read_bulk_callback(struct urb *urb)
667 netif_dbg(pegasus, rx_err, net, 667 netif_dbg(pegasus, rx_err, net,
668 "RX packet error %x\n", rx_status); 668 "RX packet error %x\n", rx_status);
669 pegasus->stats.rx_errors++; 669 pegasus->stats.rx_errors++;
670 if (rx_status & 0x06) // long or runt 670 if (rx_status & 0x06) /* long or runt */
671 pegasus->stats.rx_length_errors++; 671 pegasus->stats.rx_length_errors++;
672 if (rx_status & 0x08) 672 if (rx_status & 0x08)
673 pegasus->stats.rx_crc_errors++; 673 pegasus->stats.rx_crc_errors++;
674 if (rx_status & 0x10) // extra bits 674 if (rx_status & 0x10) /* extra bits */
675 pegasus->stats.rx_frame_errors++; 675 pegasus->stats.rx_frame_errors++;
676 goto goon; 676 goto goon;
677 } 677 }
@@ -748,9 +748,8 @@ static void rx_fixup(unsigned long data)
748 if (pegasus->flags & PEGASUS_RX_URB_FAIL) 748 if (pegasus->flags & PEGASUS_RX_URB_FAIL)
749 if (pegasus->rx_skb) 749 if (pegasus->rx_skb)
750 goto try_again; 750 goto try_again;
751 if (pegasus->rx_skb == NULL) { 751 if (pegasus->rx_skb == NULL)
752 pegasus->rx_skb = pull_skb(pegasus); 752 pegasus->rx_skb = pull_skb(pegasus);
753 }
754 if (pegasus->rx_skb == NULL) { 753 if (pegasus->rx_skb == NULL) {
755 netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n"); 754 netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
756 tasklet_schedule(&pegasus->rx_tl); 755 tasklet_schedule(&pegasus->rx_tl);
@@ -835,7 +834,7 @@ static void intr_callback(struct urb *urb)
835 } 834 }
836 835
837 if (urb->actual_length >= 6) { 836 if (urb->actual_length >= 6) {
838 u8 * d = urb->transfer_buffer; 837 u8 *d = urb->transfer_buffer;
839 838
840 /* byte 0 == tx_status1, reg 2B */ 839 /* byte 0 == tx_status1, reg 2B */
841 if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL 840 if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
@@ -918,14 +917,14 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
918 return &((pegasus_t *) netdev_priv(dev))->stats; 917 return &((pegasus_t *) netdev_priv(dev))->stats;
919} 918}
920 919
921static inline void disable_net_traffic(pegasus_t * pegasus) 920static inline void disable_net_traffic(pegasus_t *pegasus)
922{ 921{
923 __le16 tmp = cpu_to_le16(0); 922 __le16 tmp = cpu_to_le16(0);
924 923
925 set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); 924 set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
926} 925}
927 926
928static inline void get_interrupt_interval(pegasus_t * pegasus) 927static inline void get_interrupt_interval(pegasus_t *pegasus)
929{ 928{
930 u16 data; 929 u16 data;
931 u8 interval; 930 u8 interval;
@@ -961,7 +960,7 @@ static void set_carrier(struct net_device *net)
961 netif_carrier_off(net); 960 netif_carrier_off(net);
962} 961}
963 962
964static void free_all_urbs(pegasus_t * pegasus) 963static void free_all_urbs(pegasus_t *pegasus)
965{ 964{
966 usb_free_urb(pegasus->intr_urb); 965 usb_free_urb(pegasus->intr_urb);
967 usb_free_urb(pegasus->tx_urb); 966 usb_free_urb(pegasus->tx_urb);
@@ -969,7 +968,7 @@ static void free_all_urbs(pegasus_t * pegasus)
969 usb_free_urb(pegasus->ctrl_urb); 968 usb_free_urb(pegasus->ctrl_urb);
970} 969}
971 970
972static void unlink_all_urbs(pegasus_t * pegasus) 971static void unlink_all_urbs(pegasus_t *pegasus)
973{ 972{
974 usb_kill_urb(pegasus->intr_urb); 973 usb_kill_urb(pegasus->intr_urb);
975 usb_kill_urb(pegasus->tx_urb); 974 usb_kill_urb(pegasus->tx_urb);
@@ -977,12 +976,11 @@ static void unlink_all_urbs(pegasus_t * pegasus)
977 usb_kill_urb(pegasus->ctrl_urb); 976 usb_kill_urb(pegasus->ctrl_urb);
978} 977}
979 978
980static int alloc_urbs(pegasus_t * pegasus) 979static int alloc_urbs(pegasus_t *pegasus)
981{ 980{
982 pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); 981 pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
983 if (!pegasus->ctrl_urb) { 982 if (!pegasus->ctrl_urb)
984 return 0; 983 return 0;
985 }
986 pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 984 pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
987 if (!pegasus->rx_urb) { 985 if (!pegasus->rx_urb) {
988 usb_free_urb(pegasus->ctrl_urb); 986 usb_free_urb(pegasus->ctrl_urb);
@@ -1019,7 +1017,7 @@ static int pegasus_open(struct net_device *net)
1019 return -ENOMEM; 1017 return -ENOMEM;
1020 1018
1021 res = set_registers(pegasus, EthID, 6, net->dev_addr); 1019 res = set_registers(pegasus, EthID, 6, net->dev_addr);
1022 1020
1023 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 1021 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
1024 usb_rcvbulkpipe(pegasus->usb, 1), 1022 usb_rcvbulkpipe(pegasus->usb, 1),
1025 pegasus->rx_skb->data, PEGASUS_MTU + 8, 1023 pegasus->rx_skb->data, PEGASUS_MTU + 8,
@@ -1033,7 +1031,7 @@ static int pegasus_open(struct net_device *net)
1033 1031
1034 usb_fill_int_urb(pegasus->intr_urb, pegasus->usb, 1032 usb_fill_int_urb(pegasus->intr_urb, pegasus->usb,
1035 usb_rcvintpipe(pegasus->usb, 3), 1033 usb_rcvintpipe(pegasus->usb, 3),
1036 pegasus->intr_buff, sizeof (pegasus->intr_buff), 1034 pegasus->intr_buff, sizeof(pegasus->intr_buff),
1037 intr_callback, pegasus, pegasus->intr_interval); 1035 intr_callback, pegasus, pegasus->intr_interval);
1038 if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) { 1036 if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
1039 if (res == -ENODEV) 1037 if (res == -ENODEV)
@@ -1076,9 +1074,9 @@ static void pegasus_get_drvinfo(struct net_device *dev,
1076 struct ethtool_drvinfo *info) 1074 struct ethtool_drvinfo *info)
1077{ 1075{
1078 pegasus_t *pegasus = netdev_priv(dev); 1076 pegasus_t *pegasus = netdev_priv(dev);
1079 strncpy(info->driver, driver_name, sizeof (info->driver) - 1); 1077 strncpy(info->driver, driver_name, sizeof(info->driver) - 1);
1080 strncpy(info->version, DRIVER_VERSION, sizeof (info->version) - 1); 1078 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
1081 usb_make_path(pegasus->usb, info->bus_info, sizeof (info->bus_info)); 1079 usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
1082} 1080}
1083 1081
1084/* also handles three patterns of some kind in hardware */ 1082/* also handles three patterns of some kind in hardware */
@@ -1098,7 +1096,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1098{ 1096{
1099 pegasus_t *pegasus = netdev_priv(dev); 1097 pegasus_t *pegasus = netdev_priv(dev);
1100 u8 reg78 = 0x04; 1098 u8 reg78 = 0x04;
1101 1099
1102 if (wol->wolopts & ~WOL_SUPPORTED) 1100 if (wol->wolopts & ~WOL_SUPPORTED)
1103 return -EINVAL; 1101 return -EINVAL;
1104 1102
@@ -1118,7 +1116,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1118static inline void pegasus_reset_wol(struct net_device *dev) 1116static inline void pegasus_reset_wol(struct net_device *dev)
1119{ 1117{
1120 struct ethtool_wolinfo wol; 1118 struct ethtool_wolinfo wol;
1121 1119
1122 memset(&wol, 0, sizeof wol); 1120 memset(&wol, 0, sizeof wol);
1123 (void) pegasus_set_wol(dev, &wol); 1121 (void) pegasus_set_wol(dev, &wol);
1124} 1122}
@@ -1178,7 +1176,7 @@ static const struct ethtool_ops ops = {
1178 1176
1179static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) 1177static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
1180{ 1178{
1181 __u16 *data = (__u16 *) & rq->ifr_ifru; 1179 __u16 *data = (__u16 *) &rq->ifr_ifru;
1182 pegasus_t *pegasus = netdev_priv(net); 1180 pegasus_t *pegasus = netdev_priv(net);
1183 int res; 1181 int res;
1184 1182
@@ -1223,7 +1221,7 @@ static void pegasus_set_multicast(struct net_device *net)
1223 ctrl_callback(pegasus->ctrl_urb); 1221 ctrl_callback(pegasus->ctrl_urb);
1224} 1222}
1225 1223
1226static __u8 mii_phy_probe(pegasus_t * pegasus) 1224static __u8 mii_phy_probe(pegasus_t *pegasus)
1227{ 1225{
1228 int i; 1226 int i;
1229 __u16 tmp; 1227 __u16 tmp;
@@ -1239,10 +1237,10 @@ static __u8 mii_phy_probe(pegasus_t * pegasus)
1239 return 0xff; 1237 return 0xff;
1240} 1238}
1241 1239
1242static inline void setup_pegasus_II(pegasus_t * pegasus) 1240static inline void setup_pegasus_II(pegasus_t *pegasus)
1243{ 1241{
1244 __u8 data = 0xa5; 1242 __u8 data = 0xa5;
1245 1243
1246 set_register(pegasus, Reg1d, 0); 1244 set_register(pegasus, Reg1d, 0);
1247 set_register(pegasus, Reg7b, 1); 1245 set_register(pegasus, Reg7b, 1);
1248 mdelay(100); 1246 mdelay(100);
@@ -1254,16 +1252,15 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
1254 set_register(pegasus, 0x83, data); 1252 set_register(pegasus, 0x83, data);
1255 get_registers(pegasus, 0x83, 1, &data); 1253 get_registers(pegasus, 0x83, 1, &data);
1256 1254
1257 if (data == 0xa5) { 1255 if (data == 0xa5)
1258 pegasus->chip = 0x8513; 1256 pegasus->chip = 0x8513;
1259 } else { 1257 else
1260 pegasus->chip = 0; 1258 pegasus->chip = 0;
1261 }
1262 1259
1263 set_register(pegasus, 0x80, 0xc0); 1260 set_register(pegasus, 0x80, 0xc0);
1264 set_register(pegasus, 0x83, 0xff); 1261 set_register(pegasus, 0x83, 0xff);
1265 set_register(pegasus, 0x84, 0x01); 1262 set_register(pegasus, 0x84, 0x01);
1266 1263
1267 if (pegasus->features & HAS_HOME_PNA && mii_mode) 1264 if (pegasus->features & HAS_HOME_PNA && mii_mode)
1268 set_register(pegasus, Reg81, 6); 1265 set_register(pegasus, Reg81, 6);
1269 else 1266 else
@@ -1272,7 +1269,7 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
1272 1269
1273 1270
1274static int pegasus_count; 1271static int pegasus_count;
1275static struct workqueue_struct *pegasus_workqueue = NULL; 1272static struct workqueue_struct *pegasus_workqueue;
1276#define CARRIER_CHECK_DELAY (2 * HZ) 1273#define CARRIER_CHECK_DELAY (2 * HZ)
1277 1274
1278static void check_carrier(struct work_struct *work) 1275static void check_carrier(struct work_struct *work)
@@ -1367,7 +1364,7 @@ static int pegasus_probe(struct usb_interface *intf,
1367 pegasus->mii.phy_id_mask = 0x1f; 1364 pegasus->mii.phy_id_mask = 0x1f;
1368 pegasus->mii.reg_num_mask = 0x1f; 1365 pegasus->mii.reg_num_mask = 0x1f;
1369 spin_lock_init(&pegasus->rx_pool_lock); 1366 spin_lock_init(&pegasus->rx_pool_lock);
1370 pegasus->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV 1367 pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
1371 | NETIF_MSG_PROBE | NETIF_MSG_LINK); 1368 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1372 1369
1373 pegasus->features = usb_dev_id[dev_index].private; 1370 pegasus->features = usb_dev_id[dev_index].private;
@@ -1442,11 +1439,11 @@ static void pegasus_disconnect(struct usb_interface *intf)
1442 pegasus_dec_workqueue(); 1439 pegasus_dec_workqueue();
1443} 1440}
1444 1441
1445static int pegasus_suspend (struct usb_interface *intf, pm_message_t message) 1442static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
1446{ 1443{
1447 struct pegasus *pegasus = usb_get_intfdata(intf); 1444 struct pegasus *pegasus = usb_get_intfdata(intf);
1448 1445
1449 netif_device_detach (pegasus->net); 1446 netif_device_detach(pegasus->net);
1450 cancel_delayed_work(&pegasus->carrier_check); 1447 cancel_delayed_work(&pegasus->carrier_check);
1451 if (netif_running(pegasus->net)) { 1448 if (netif_running(pegasus->net)) {
1452 usb_kill_urb(pegasus->rx_urb); 1449 usb_kill_urb(pegasus->rx_urb);
@@ -1455,11 +1452,11 @@ static int pegasus_suspend (struct usb_interface *intf, pm_message_t message)
1455 return 0; 1452 return 0;
1456} 1453}
1457 1454
1458static int pegasus_resume (struct usb_interface *intf) 1455static int pegasus_resume(struct usb_interface *intf)
1459{ 1456{
1460 struct pegasus *pegasus = usb_get_intfdata(intf); 1457 struct pegasus *pegasus = usb_get_intfdata(intf);
1461 1458
1462 netif_device_attach (pegasus->net); 1459 netif_device_attach(pegasus->net);
1463 if (netif_running(pegasus->net)) { 1460 if (netif_running(pegasus->net)) {
1464 pegasus->rx_urb->status = 0; 1461 pegasus->rx_urb->status = 0;
1465 pegasus->rx_urb->actual_length = 0; 1462 pegasus->rx_urb->actual_length = 0;
@@ -1498,8 +1495,8 @@ static struct usb_driver pegasus_driver = {
1498 1495
1499static void __init parse_id(char *id) 1496static void __init parse_id(char *id)
1500{ 1497{
1501 unsigned int vendor_id=0, device_id=0, flags=0, i=0; 1498 unsigned int vendor_id = 0, device_id = 0, flags = 0, i = 0;
1502 char *token, *name=NULL; 1499 char *token, *name = NULL;
1503 1500
1504 if ((token = strsep(&id, ":")) != NULL) 1501 if ((token = strsep(&id, ":")) != NULL)
1505 name = token; 1502 name = token;
@@ -1510,14 +1507,14 @@ static void __init parse_id(char *id)
1510 device_id = simple_strtoul(token, NULL, 16); 1507 device_id = simple_strtoul(token, NULL, 16);
1511 flags = simple_strtoul(id, NULL, 16); 1508 flags = simple_strtoul(id, NULL, 16);
1512 pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n", 1509 pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n",
1513 driver_name, name, vendor_id, device_id, flags); 1510 driver_name, name, vendor_id, device_id, flags);
1514 1511
1515 if (vendor_id > 0x10000 || vendor_id == 0) 1512 if (vendor_id > 0x10000 || vendor_id == 0)
1516 return; 1513 return;
1517 if (device_id > 0x10000 || device_id == 0) 1514 if (device_id > 0x10000 || device_id == 0)
1518 return; 1515 return;
1519 1516
1520 for (i=0; usb_dev_id[i].name; i++); 1517 for (i = 0; usb_dev_id[i].name; i++);
1521 usb_dev_id[i].name = name; 1518 usb_dev_id[i].name = name;
1522 usb_dev_id[i].vendor = vendor_id; 1519 usb_dev_id[i].vendor = vendor_id;
1523 usb_dev_id[i].device = device_id; 1520 usb_dev_id[i].device = device_id;
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index 29f5211e645b..65b78b35b73c 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -68,7 +68,7 @@ enum pegasus_registers {
68 EpromData = 0x21, /* 0x21 low, 0x22 high byte */ 68 EpromData = 0x21, /* 0x21 low, 0x22 high byte */
69 EpromCtrl = 0x23, 69 EpromCtrl = 0x23,
70 PhyAddr = 0x25, 70 PhyAddr = 0x25,
71 PhyData = 0x26, /* 0x26 low, 0x27 high byte */ 71 PhyData = 0x26, /* 0x26 low, 0x27 high byte */
72 PhyCtrl = 0x28, 72 PhyCtrl = 0x28,
73 UsbStst = 0x2a, 73 UsbStst = 0x2a,
74 EthTxStat0 = 0x2b, 74 EthTxStat0 = 0x2b,
@@ -154,162 +154,162 @@ struct usb_eth_dev {
154 154
155#else /* PEGASUS_DEV */ 155#else /* PEGASUS_DEV */
156 156
157PEGASUS_DEV( "3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601, 157PEGASUS_DEV("3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601,
158 DEFAULT_GPIO_RESET | PEGASUS_II ) 158 DEFAULT_GPIO_RESET | PEGASUS_II)
159PEGASUS_DEV( "ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007, 159PEGASUS_DEV("ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007,
160 DEFAULT_GPIO_RESET | PEGASUS_II ) 160 DEFAULT_GPIO_RESET | PEGASUS_II)
161PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c, 161PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c,
162 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA ) 162 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
163PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104, 163PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104,
164 DEFAULT_GPIO_RESET | HAS_HOME_PNA ) 164 DEFAULT_GPIO_RESET | HAS_HOME_PNA)
165PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004, 165PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004,
166 DEFAULT_GPIO_RESET | HAS_HOME_PNA ) 166 DEFAULT_GPIO_RESET | HAS_HOME_PNA)
167PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007, 167PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007,
168 DEFAULT_GPIO_RESET | HAS_HOME_PNA ) 168 DEFAULT_GPIO_RESET | HAS_HOME_PNA)
169PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102, 169PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102,
170 DEFAULT_GPIO_RESET | PEGASUS_II ) 170 DEFAULT_GPIO_RESET | PEGASUS_II)
171PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002, 171PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002,
172 DEFAULT_GPIO_RESET ) 172 DEFAULT_GPIO_RESET)
173PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b, 173PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b,
174 DEFAULT_GPIO_RESET | PEGASUS_II ) 174 DEFAULT_GPIO_RESET | PEGASUS_II)
175PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c, 175PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c,
176 DEFAULT_GPIO_RESET | PEGASUS_II ) 176 DEFAULT_GPIO_RESET | PEGASUS_II)
177PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1, 177PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1,
178 DEFAULT_GPIO_RESET ) 178 DEFAULT_GPIO_RESET)
179PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c, 179PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c,
180 DEFAULT_GPIO_RESET | PEGASUS_II ) 180 DEFAULT_GPIO_RESET | PEGASUS_II)
181PEGASUS_DEV( "Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046, 181PEGASUS_DEV("Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046,
182 DEFAULT_GPIO_RESET ) 182 DEFAULT_GPIO_RESET)
183PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046, 183PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046,
184 DEFAULT_GPIO_RESET | PEGASUS_II ) 184 DEFAULT_GPIO_RESET | PEGASUS_II)
185PEGASUS_DEV( "Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004, 185PEGASUS_DEV("Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004,
186 DEFAULT_GPIO_RESET | PEGASUS_II ) 186 DEFAULT_GPIO_RESET | PEGASUS_II)
187PEGASUS_DEV( "ADMtek ADM8511 \"Pegasus II\" USB Ethernet", 187PEGASUS_DEV("ADMtek ADM8511 \"Pegasus II\" USB Ethernet",
188 VENDOR_ADMTEK, 0x8511, 188 VENDOR_ADMTEK, 0x8511,
189 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA ) 189 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
190PEGASUS_DEV( "ADMtek ADM8513 \"Pegasus II\" USB Ethernet", 190PEGASUS_DEV("ADMtek ADM8513 \"Pegasus II\" USB Ethernet",
191 VENDOR_ADMTEK, 0x8513, 191 VENDOR_ADMTEK, 0x8513,
192 DEFAULT_GPIO_RESET | PEGASUS_II ) 192 DEFAULT_GPIO_RESET | PEGASUS_II)
193PEGASUS_DEV( "ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet", 193PEGASUS_DEV("ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet",
194 VENDOR_ADMTEK, 0x8515, 194 VENDOR_ADMTEK, 0x8515,
195 DEFAULT_GPIO_RESET | PEGASUS_II ) 195 DEFAULT_GPIO_RESET | PEGASUS_II)
196PEGASUS_DEV( "ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)", 196PEGASUS_DEV("ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)",
197 VENDOR_ADMTEK, 0x0986, 197 VENDOR_ADMTEK, 0x0986,
198 DEFAULT_GPIO_RESET | HAS_HOME_PNA ) 198 DEFAULT_GPIO_RESET | HAS_HOME_PNA)
199PEGASUS_DEV( "AN986A USB MAC", VENDOR_ADMTEK, 1986, 199PEGASUS_DEV("AN986A USB MAC", VENDOR_ADMTEK, 1986,
200 DEFAULT_GPIO_RESET | PEGASUS_II ) 200 DEFAULT_GPIO_RESET | PEGASUS_II)
201PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701, 201PEGASUS_DEV("AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
202 DEFAULT_GPIO_RESET | PEGASUS_II ) 202 DEFAULT_GPIO_RESET | PEGASUS_II)
203PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, 203PEGASUS_DEV("Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
204 DEFAULT_GPIO_RESET | PEGASUS_II ) 204 DEFAULT_GPIO_RESET | PEGASUS_II)
205/* 205/*
206 * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors 206 * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors
207 * with the same product IDs by checking the device class too. 207 * with the same product IDs by checking the device class too.
208 */ 208 */
209PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00, 209PEGASUS_DEV_CLASS("Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
210 DEFAULT_GPIO_RESET | PEGASUS_II ) 210 DEFAULT_GPIO_RESET | PEGASUS_II)
211PEGASUS_DEV( "Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122, 211PEGASUS_DEV("Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122,
212 DEFAULT_GPIO_RESET | PEGASUS_II ) 212 DEFAULT_GPIO_RESET | PEGASUS_II)
213PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, 213PEGASUS_DEV("Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
214 DEFAULT_GPIO_RESET ) 214 DEFAULT_GPIO_RESET)
215PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987, 215PEGASUS_DEV("Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
216 DEFAULT_GPIO_RESET | HAS_HOME_PNA ) 216 DEFAULT_GPIO_RESET | HAS_HOME_PNA)
217PEGASUS_DEV( "iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511, 217PEGASUS_DEV("iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511,
218 DEFAULT_GPIO_RESET | PEGASUS_II ) 218 DEFAULT_GPIO_RESET | PEGASUS_II)
219PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988, 219PEGASUS_DEV("Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
220 DEFAULT_GPIO_RESET ) 220 DEFAULT_GPIO_RESET)
221PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511, 221PEGASUS_DEV("Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
222 DEFAULT_GPIO_RESET | PEGASUS_II ) 222 DEFAULT_GPIO_RESET | PEGASUS_II)
223PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004, 223PEGASUS_DEV("Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
224 DEFAULT_GPIO_RESET ) 224 DEFAULT_GPIO_RESET)
225PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d, 225PEGASUS_DEV("Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
226 DEFAULT_GPIO_RESET | PEGASUS_II ) 226 DEFAULT_GPIO_RESET | PEGASUS_II)
227PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001, 227PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
228 DEFAULT_GPIO_RESET ) 228 DEFAULT_GPIO_RESET)
229PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4002, 229PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4002,
230 DEFAULT_GPIO_RESET ) 230 DEFAULT_GPIO_RESET)
231PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4102, 231PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4102,
232 DEFAULT_GPIO_RESET | PEGASUS_II ) 232 DEFAULT_GPIO_RESET | PEGASUS_II)
233PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x400b, 233PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x400b,
234 DEFAULT_GPIO_RESET | PEGASUS_II ) 234 DEFAULT_GPIO_RESET | PEGASUS_II)
235PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x200c, 235PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x200c,
236 DEFAULT_GPIO_RESET | PEGASUS_II ) 236 DEFAULT_GPIO_RESET | PEGASUS_II)
237PEGASUS_DEV( "D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003, 237PEGASUS_DEV("D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003,
238 DEFAULT_GPIO_RESET | HAS_HOME_PNA ) 238 DEFAULT_GPIO_RESET | HAS_HOME_PNA)
239PEGASUS_DEV( "D-Link DSB-650", VENDOR_DLINK, 0xabc1, 239PEGASUS_DEV("D-Link DSB-650", VENDOR_DLINK, 0xabc1,
240 DEFAULT_GPIO_RESET ) 240 DEFAULT_GPIO_RESET)
241PEGASUS_DEV( "GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002, 241PEGASUS_DEV("GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002,
242 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA ) 242 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
243PEGASUS_DEV( "ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010, 243PEGASUS_DEV("ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010,
244 DEFAULT_GPIO_RESET | PEGASUS_II ) 244 DEFAULT_GPIO_RESET | PEGASUS_II)
245PEGASUS_DEV( "EasiDock Ethernet", VENDOR_MOBILITY, 0x0304, 245PEGASUS_DEV("EasiDock Ethernet", VENDOR_MOBILITY, 0x0304,
246 DEFAULT_GPIO_RESET ) 246 DEFAULT_GPIO_RESET)
247PEGASUS_DEV( "Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000, 247PEGASUS_DEV("Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000,
248 DEFAULT_GPIO_RESET )
249PEGASUS_DEV( "GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
250 DEFAULT_GPIO_RESET )
251PEGASUS_DEV( "Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
252 DEFAULT_GPIO_RESET | PEGASUS_II )
253PEGASUS_DEV( "HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
254 DEFAULT_GPIO_RESET | PEGASUS_II )
255PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
256 DEFAULT_GPIO_RESET )
257PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
258 DEFAULT_GPIO_RESET | PEGASUS_II )
259PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
260 DEFAULT_GPIO_RESET | PEGASUS_II )
261PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
262 DEFAULT_GPIO_RESET) 248 DEFAULT_GPIO_RESET)
263PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002, 249PEGASUS_DEV("GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
264 DEFAULT_GPIO_RESET ) 250 DEFAULT_GPIO_RESET)
265PEGASUS_DEV( "LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005, 251PEGASUS_DEV("Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
266 DEFAULT_GPIO_RESET | PEGASUS_II) 252 DEFAULT_GPIO_RESET | PEGASUS_II)
267PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b, 253PEGASUS_DEV("HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
268 DEFAULT_GPIO_RESET | PEGASUS_II ) 254 DEFAULT_GPIO_RESET | PEGASUS_II)
269PEGASUS_DEV( "LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1, 255PEGASUS_DEV("IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
270 DEFAULT_GPIO_RESET ) 256 DEFAULT_GPIO_RESET)
271PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c, 257PEGASUS_DEV("IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
272 DEFAULT_GPIO_RESET | PEGASUS_II ) 258 DEFAULT_GPIO_RESET | PEGASUS_II)
273PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x2202, 259PEGASUS_DEV("IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
274 DEFAULT_GPIO_RESET ) 260 DEFAULT_GPIO_RESET | PEGASUS_II)
275PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2203, 261PEGASUS_DEV("Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
276 DEFAULT_GPIO_RESET ) 262 DEFAULT_GPIO_RESET)
277PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2204, 263PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
278 DEFAULT_GPIO_RESET | HAS_HOME_PNA ) 264 DEFAULT_GPIO_RESET)
279PEGASUS_DEV( "Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206, 265PEGASUS_DEV("LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005,
280 DEFAULT_GPIO_RESET | PEGASUS_II) 266 DEFAULT_GPIO_RESET | PEGASUS_II)
281PEGASUS_DEV( "Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4, 267PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b,
282 DEFAULT_GPIO_RESET ) 268 DEFAULT_GPIO_RESET | PEGASUS_II)
283PEGASUS_DEV( "Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b, 269PEGASUS_DEV("LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1,
284 DEFAULT_GPIO_RESET | PEGASUS_II ) 270 DEFAULT_GPIO_RESET)
285PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x200c, 271PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c,
286 DEFAULT_GPIO_RESET | PEGASUS_II ) 272 DEFAULT_GPIO_RESET | PEGASUS_II)
287PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001, 273PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x2202,
288 DEFAULT_GPIO_RESET ) 274 DEFAULT_GPIO_RESET)
289PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005, 275PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2203,
290 DEFAULT_GPIO_RESET ) 276 DEFAULT_GPIO_RESET)
291PEGASUS_DEV( "MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009, 277PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2204,
292 DEFAULT_GPIO_RESET | PEGASUS_II ) 278 DEFAULT_GPIO_RESET | HAS_HOME_PNA)
293PEGASUS_DEV( "Microsoft MN-110", VENDOR_MICROSOFT, 0x007a, 279PEGASUS_DEV("Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206,
294 DEFAULT_GPIO_RESET | PEGASUS_II ) 280 DEFAULT_GPIO_RESET | PEGASUS_II)
295PEGASUS_DEV( "NETGEAR FA101", VENDOR_NETGEAR, 0x1020, 281PEGASUS_DEV("Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4,
296 DEFAULT_GPIO_RESET | PEGASUS_II ) 282 DEFAULT_GPIO_RESET)
297PEGASUS_DEV( "OCT Inc.", VENDOR_OCT, 0x0109, 283PEGASUS_DEV("Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b,
298 DEFAULT_GPIO_RESET | PEGASUS_II ) 284 DEFAULT_GPIO_RESET | PEGASUS_II)
299PEGASUS_DEV( "OCT USB TO Ethernet", VENDOR_OCT, 0x0901, 285PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x200c,
300 DEFAULT_GPIO_RESET | PEGASUS_II ) 286 DEFAULT_GPIO_RESET | PEGASUS_II)
301PEGASUS_DEV( "smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003, 287PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001,
302 DEFAULT_GPIO_RESET | PEGASUS_II ) 288 DEFAULT_GPIO_RESET)
303PEGASUS_DEV( "SMC 202 USB Ethernet", VENDOR_SMC, 0x0200, 289PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005,
304 DEFAULT_GPIO_RESET ) 290 DEFAULT_GPIO_RESET)
305PEGASUS_DEV( "SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201, 291PEGASUS_DEV("MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009,
306 DEFAULT_GPIO_RESET | PEGASUS_II) 292 DEFAULT_GPIO_RESET | PEGASUS_II)
307PEGASUS_DEV( "SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100, 293PEGASUS_DEV("Microsoft MN-110", VENDOR_MICROSOFT, 0x007a,
308 DEFAULT_GPIO_RESET ) 294 DEFAULT_GPIO_RESET | PEGASUS_II)
309PEGASUS_DEV( "SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110, 295PEGASUS_DEV("NETGEAR FA101", VENDOR_NETGEAR, 0x1020,
310 DEFAULT_GPIO_RESET | PEGASUS_II ) 296 DEFAULT_GPIO_RESET | PEGASUS_II)
311PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001, 297PEGASUS_DEV("OCT Inc.", VENDOR_OCT, 0x0109,
312 DEFAULT_GPIO_RESET | PEGASUS_II ) 298 DEFAULT_GPIO_RESET | PEGASUS_II)
299PEGASUS_DEV("OCT USB TO Ethernet", VENDOR_OCT, 0x0901,
300 DEFAULT_GPIO_RESET | PEGASUS_II)
301PEGASUS_DEV("smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003,
302 DEFAULT_GPIO_RESET | PEGASUS_II)
303PEGASUS_DEV("SMC 202 USB Ethernet", VENDOR_SMC, 0x0200,
304 DEFAULT_GPIO_RESET)
305PEGASUS_DEV("SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201,
306 DEFAULT_GPIO_RESET | PEGASUS_II)
307PEGASUS_DEV("SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100,
308 DEFAULT_GPIO_RESET)
309PEGASUS_DEV("SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110,
310 DEFAULT_GPIO_RESET | PEGASUS_II)
311PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001,
312 DEFAULT_GPIO_RESET | PEGASUS_II)
313 313
314 314
315#endif /* PEGASUS_DEV */ 315#endif /* PEGASUS_DEV */
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index f1942d69a0d5..ee85c8b9a858 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -165,7 +165,7 @@ struct lsi_umts {
165 u8 gw_addr_len; /* NW-supplied GW address len */ 165 u8 gw_addr_len; /* NW-supplied GW address len */
166 u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ 166 u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
167 u8 reserved[8]; 167 u8 reserved[8];
168} __attribute__ ((packed)); 168} __packed;
169 169
170#define SIERRA_NET_LSI_COMMON_LEN 4 170#define SIERRA_NET_LSI_COMMON_LEN 4
171#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) 171#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 81c76ada8e56..3b03794ac3f5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -44,6 +44,7 @@
44#include <linux/usb.h> 44#include <linux/usb.h>
45#include <linux/usb/usbnet.h> 45#include <linux/usb/usbnet.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/kernel.h>
47 48
48#define DRIVER_VERSION "22-Aug-2005" 49#define DRIVER_VERSION "22-Aug-2005"
49 50
@@ -158,16 +159,6 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
158} 159}
159EXPORT_SYMBOL_GPL(usbnet_get_endpoints); 160EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
160 161
161static u8 nibble(unsigned char c)
162{
163 if (likely(isdigit(c)))
164 return c - '0';
165 c = toupper(c);
166 if (likely(isxdigit(c)))
167 return 10 + c - 'A';
168 return 0;
169}
170
171int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) 162int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
172{ 163{
173 int tmp, i; 164 int tmp, i;
@@ -183,7 +174,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
183 } 174 }
184 for (i = tmp = 0; i < 6; i++, tmp += 2) 175 for (i = tmp = 0; i < 6; i++, tmp += 2)
185 dev->net->dev_addr [i] = 176 dev->net->dev_addr [i] =
186 (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]); 177 (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
187 return 0; 178 return 0;
188} 179}
189EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); 180EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@ -624,7 +615,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
624 while (!skb_queue_empty(&dev->rxq) 615 while (!skb_queue_empty(&dev->rxq)
625 && !skb_queue_empty(&dev->txq) 616 && !skb_queue_empty(&dev->txq)
626 && !skb_queue_empty(&dev->done)) { 617 && !skb_queue_empty(&dev->done)) {
627 schedule_timeout(UNLINK_TIMEOUT_MS); 618 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
628 set_current_state(TASK_UNINTERRUPTIBLE); 619 set_current_state(TASK_UNINTERRUPTIBLE);
629 netif_dbg(dev, ifdown, dev->net, 620 netif_dbg(dev, ifdown, dev->net,
630 "waited for %d urb completions\n", temp); 621 "waited for %d urb completions\n", temp);
@@ -643,7 +634,7 @@ int usbnet_stop (struct net_device *net)
643 netif_stop_queue (net); 634 netif_stop_queue (net);
644 635
645 netif_info(dev, ifdown, dev->net, 636 netif_info(dev, ifdown, dev->net,
646 "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", 637 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
647 net->stats.rx_packets, net->stats.tx_packets, 638 net->stats.rx_packets, net->stats.tx_packets,
648 net->stats.rx_errors, net->stats.tx_errors); 639 net->stats.rx_errors, net->stats.tx_errors);
649 640
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index c38191179fae..f7b33ae7a703 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -193,7 +193,7 @@ struct rx_desc {
193 __le32 pa_low; /* Low 32 bit PCI address */ 193 __le32 pa_low; /* Low 32 bit PCI address */
194 __le16 pa_high; /* Next 16 bit PCI address (48 total) */ 194 __le16 pa_high; /* Next 16 bit PCI address (48 total) */
195 __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */ 195 __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
196} __attribute__ ((__packed__)); 196} __packed;
197 197
198/* 198/*
199 * Transmit descriptor 199 * Transmit descriptor
@@ -208,7 +208,7 @@ struct tdesc1 {
208 __le16 vlan; 208 __le16 vlan;
209 u8 TCR; 209 u8 TCR;
210 u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ 210 u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
211} __attribute__ ((__packed__)); 211} __packed;
212 212
213enum { 213enum {
214 TD_QUEUE = cpu_to_le16(0x8000) 214 TD_QUEUE = cpu_to_le16(0x8000)
@@ -218,7 +218,7 @@ struct td_buf {
218 __le32 pa_low; 218 __le32 pa_low;
219 __le16 pa_high; 219 __le16 pa_high;
220 __le16 size; /* bits 0--13 - size, bit 15 - queue */ 220 __le16 size; /* bits 0--13 - size, bit 15 - queue */
221} __attribute__ ((__packed__)); 221} __packed;
222 222
223struct tx_desc { 223struct tx_desc {
224 struct tdesc0 tdesc0; 224 struct tdesc0 tdesc0;
@@ -1096,7 +1096,7 @@ struct mac_regs {
1096 1096
1097 volatile __le16 PatternCRC[8]; /* 0xB0 */ 1097 volatile __le16 PatternCRC[8]; /* 0xB0 */
1098 volatile __le32 ByteMask[4][4]; /* 0xC0 */ 1098 volatile __le32 ByteMask[4][4]; /* 0xC0 */
1099} __attribute__ ((__packed__)); 1099} __packed;
1100 1100
1101 1101
1102enum hw_mib { 1102enum hw_mib {
@@ -1216,7 +1216,7 @@ struct arp_packet {
1216 u8 ar_sip[4]; 1216 u8 ar_sip[4];
1217 u8 ar_tha[ETH_ALEN]; 1217 u8 ar_tha[ETH_ALEN];
1218 u8 ar_tip[4]; 1218 u8 ar_tip[4];
1219} __attribute__ ((__packed__)); 1219} __packed;
1220 1220
1221struct _magic_packet { 1221struct _magic_packet {
1222 u8 dest_mac[6]; 1222 u8 dest_mac[6];
@@ -1224,7 +1224,7 @@ struct _magic_packet {
1224 __be16 type; 1224 __be16 type;
1225 u8 MAC[16][6]; 1225 u8 MAC[16][6];
1226 u8 password[6]; 1226 u8 password[6];
1227} __attribute__ ((__packed__)); 1227} __packed;
1228 1228
1229/* 1229/*
1230 * Store for chip context when saving and restoring status. Not 1230 * Store for chip context when saving and restoring status. Not
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index b4889e6c4a57..ca7727b940ad 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -464,6 +464,9 @@ enum vmxnet3_intr_type {
464/* addition 1 for events */ 464/* addition 1 for events */
465#define VMXNET3_MAX_INTRS 25 465#define VMXNET3_MAX_INTRS 25
466 466
467/* value of intrCtrl */
468#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
469
467 470
468struct Vmxnet3_IntrConf { 471struct Vmxnet3_IntrConf {
469 bool autoMask; 472 bool autoMask;
@@ -471,7 +474,8 @@ struct Vmxnet3_IntrConf {
471 u8 eventIntrIdx; 474 u8 eventIntrIdx;
472 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for 475 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
473 * each intr */ 476 * each intr */
474 __le32 reserved[3]; 477 __le32 intrCtrl;
478 __le32 reserved[2];
475}; 479};
476 480
477/* one bit per VLAN ID, the size is in the units of u32 */ 481/* one bit per VLAN ID, the size is in the units of u32 */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 989b742551ac..abe0ff53daf3 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -72,6 +72,8 @@ vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
72 72
73 for (i = 0; i < adapter->intr.num_intrs; i++) 73 for (i = 0; i < adapter->intr.num_intrs; i++)
74 vmxnet3_enable_intr(adapter, i); 74 vmxnet3_enable_intr(adapter, i);
75 adapter->shared->devRead.intrConf.intrCtrl &=
76 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
75} 77}
76 78
77 79
@@ -80,6 +82,8 @@ vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
80{ 82{
81 int i; 83 int i;
82 84
85 adapter->shared->devRead.intrConf.intrCtrl |=
86 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
83 for (i = 0; i < adapter->intr.num_intrs; i++) 87 for (i = 0; i < adapter->intr.num_intrs; i++)
84 vmxnet3_disable_intr(adapter, i); 88 vmxnet3_disable_intr(adapter, i);
85} 89}
@@ -128,7 +132,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
128 * Check the link state. This may start or stop the tx queue. 132 * Check the link state. This may start or stop the tx queue.
129 */ 133 */
130static void 134static void
131vmxnet3_check_link(struct vmxnet3_adapter *adapter) 135vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
132{ 136{
133 u32 ret; 137 u32 ret;
134 138
@@ -141,14 +145,16 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
141 if (!netif_carrier_ok(adapter->netdev)) 145 if (!netif_carrier_ok(adapter->netdev))
142 netif_carrier_on(adapter->netdev); 146 netif_carrier_on(adapter->netdev);
143 147
144 vmxnet3_tq_start(&adapter->tx_queue, adapter); 148 if (affectTxQueue)
149 vmxnet3_tq_start(&adapter->tx_queue, adapter);
145 } else { 150 } else {
146 printk(KERN_INFO "%s: NIC Link is Down\n", 151 printk(KERN_INFO "%s: NIC Link is Down\n",
147 adapter->netdev->name); 152 adapter->netdev->name);
148 if (netif_carrier_ok(adapter->netdev)) 153 if (netif_carrier_ok(adapter->netdev))
149 netif_carrier_off(adapter->netdev); 154 netif_carrier_off(adapter->netdev);
150 155
151 vmxnet3_tq_stop(&adapter->tx_queue, adapter); 156 if (affectTxQueue)
157 vmxnet3_tq_stop(&adapter->tx_queue, adapter);
152 } 158 }
153} 159}
154 160
@@ -163,7 +169,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
163 169
164 /* Check if link state has changed */ 170 /* Check if link state has changed */
165 if (events & VMXNET3_ECR_LINK) 171 if (events & VMXNET3_ECR_LINK)
166 vmxnet3_check_link(adapter); 172 vmxnet3_check_link(adapter, true);
167 173
168 /* Check if there is an error on xmit/recv queues */ 174 /* Check if there is an error on xmit/recv queues */
169 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 175 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
@@ -658,8 +664,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
658 while (len) { 664 while (len) {
659 u32 buf_size; 665 u32 buf_size;
660 666
661 buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ? 667 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
662 VMXNET3_MAX_TX_BUF_SIZE : len; 668 buf_size = len;
669 dw2 |= len;
670 } else {
671 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
672 /* spec says that for TxDesc.len, 0 == 2^14 */
673 }
663 674
664 tbi = tq->buf_info + tq->tx_ring.next2fill; 675 tbi = tq->buf_info + tq->tx_ring.next2fill;
665 tbi->map_type = VMXNET3_MAP_SINGLE; 676 tbi->map_type = VMXNET3_MAP_SINGLE;
@@ -667,13 +678,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
667 skb->data + buf_offset, buf_size, 678 skb->data + buf_offset, buf_size,
668 PCI_DMA_TODEVICE); 679 PCI_DMA_TODEVICE);
669 680
670 tbi->len = buf_size; /* this automatically convert 2^14 to 0 */ 681 tbi->len = buf_size;
671 682
672 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 683 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
673 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 684 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
674 685
675 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 686 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
676 gdesc->dword[2] = cpu_to_le32(dw2 | buf_size); 687 gdesc->dword[2] = cpu_to_le32(dw2);
677 gdesc->dword[3] = 0; 688 gdesc->dword[3] = 0;
678 689
679 dev_dbg(&adapter->netdev->dev, 690 dev_dbg(&adapter->netdev->dev,
@@ -1825,6 +1836,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1825 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; 1836 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
1826 1837
1827 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; 1838 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
1839 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
1828 1840
1829 /* rx filter settings */ 1841 /* rx filter settings */
1830 devRead->rxFilterConf.rxMode = 0; 1842 devRead->rxFilterConf.rxMode = 0;
@@ -1889,7 +1901,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1889 * Check link state when first activating device. It will start the 1901 * Check link state when first activating device. It will start the
1890 * tx queue if the link is up. 1902 * tx queue if the link is up.
1891 */ 1903 */
1892 vmxnet3_check_link(adapter); 1904 vmxnet3_check_link(adapter, true);
1893 1905
1894 napi_enable(&adapter->napi); 1906 napi_enable(&adapter->napi);
1895 vmxnet3_enable_all_intrs(adapter); 1907 vmxnet3_enable_all_intrs(adapter);
@@ -2295,9 +2307,13 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2295 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2307 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2296 2308
2297 if (adapter->intr.type == VMXNET3_IT_AUTO) { 2309 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2298 int err; 2310 adapter->intr.type = VMXNET3_IT_MSIX;
2311 }
2299 2312
2300#ifdef CONFIG_PCI_MSI 2313#ifdef CONFIG_PCI_MSI
2314 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2315 int err;
2316
2301 adapter->intr.msix_entries[0].entry = 0; 2317 adapter->intr.msix_entries[0].entry = 0;
2302 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2318 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2303 VMXNET3_LINUX_MAX_MSIX_VECT); 2319 VMXNET3_LINUX_MAX_MSIX_VECT);
@@ -2306,15 +2322,18 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2306 adapter->intr.type = VMXNET3_IT_MSIX; 2322 adapter->intr.type = VMXNET3_IT_MSIX;
2307 return; 2323 return;
2308 } 2324 }
2309#endif 2325 adapter->intr.type = VMXNET3_IT_MSI;
2326 }
2310 2327
2328 if (adapter->intr.type == VMXNET3_IT_MSI) {
2329 int err;
2311 err = pci_enable_msi(adapter->pdev); 2330 err = pci_enable_msi(adapter->pdev);
2312 if (!err) { 2331 if (!err) {
2313 adapter->intr.num_intrs = 1; 2332 adapter->intr.num_intrs = 1;
2314 adapter->intr.type = VMXNET3_IT_MSI;
2315 return; 2333 return;
2316 } 2334 }
2317 } 2335 }
2336#endif /* CONFIG_PCI_MSI */
2318 2337
2319 adapter->intr.type = VMXNET3_IT_INTX; 2338 adapter->intr.type = VMXNET3_IT_INTX;
2320 2339
@@ -2358,6 +2377,7 @@ vmxnet3_reset_work(struct work_struct *data)
2358 return; 2377 return;
2359 2378
2360 /* if the device is closed, we must leave it alone */ 2379 /* if the device is closed, we must leave it alone */
2380 rtnl_lock();
2361 if (netif_running(adapter->netdev)) { 2381 if (netif_running(adapter->netdev)) {
2362 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); 2382 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2363 vmxnet3_quiesce_dev(adapter); 2383 vmxnet3_quiesce_dev(adapter);
@@ -2366,6 +2386,7 @@ vmxnet3_reset_work(struct work_struct *data)
2366 } else { 2386 } else {
2367 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); 2387 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2368 } 2388 }
2389 rtnl_unlock();
2369 2390
2370 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2391 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2371} 2392}
@@ -2491,6 +2512,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2491 } 2512 }
2492 2513
2493 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2514 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2515 vmxnet3_check_link(adapter, false);
2494 atomic_inc(&devices_found); 2516 atomic_inc(&devices_found);
2495 return 0; 2517 return 0;
2496 2518
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 3935c4493fb7..7e4b5a89165a 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -275,27 +275,27 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
275 } 275 }
276} 276}
277 277
278static u32
279vmxnet3_get_flags(struct net_device *netdev) {
280 return netdev->features;
281}
282
283static int 278static int
284vmxnet3_set_flags(struct net_device *netdev, u32 data) { 279vmxnet3_set_flags(struct net_device *netdev, u32 data)
280{
285 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 281 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
286 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; 282 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
287 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; 283 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
288 284
285 if (data & ~ETH_FLAG_LRO)
286 return -EOPNOTSUPP;
287
289 if (lro_requested ^ lro_present) { 288 if (lro_requested ^ lro_present) {
290 /* toggle the LRO feature*/ 289 /* toggle the LRO feature*/
291 netdev->features ^= NETIF_F_LRO; 290 netdev->features ^= NETIF_F_LRO;
292 291
293 /* update harware LRO capability accordingly */ 292 /* update harware LRO capability accordingly */
294 if (lro_requested) 293 if (lro_requested)
295 adapter->shared->devRead.misc.uptFeatures &= UPT1_F_LRO; 294 adapter->shared->devRead.misc.uptFeatures |=
295 cpu_to_le64(UPT1_F_LRO);
296 else 296 else
297 adapter->shared->devRead.misc.uptFeatures &= 297 adapter->shared->devRead.misc.uptFeatures &=
298 ~UPT1_F_LRO; 298 cpu_to_le64(~UPT1_F_LRO);
299 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 299 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
300 VMXNET3_CMD_UPDATE_FEATURE); 300 VMXNET3_CMD_UPDATE_FEATURE);
301 } 301 }
@@ -554,7 +554,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
554 .get_tso = ethtool_op_get_tso, 554 .get_tso = ethtool_op_get_tso,
555 .set_tso = ethtool_op_set_tso, 555 .set_tso = ethtool_op_set_tso,
556 .get_strings = vmxnet3_get_strings, 556 .get_strings = vmxnet3_get_strings,
557 .get_flags = vmxnet3_get_flags, 557 .get_flags = ethtool_op_get_flags,
558 .set_flags = vmxnet3_set_flags, 558 .set_flags = vmxnet3_set_flags,
559 .get_sset_count = vmxnet3_get_sset_count, 559 .get_sset_count = vmxnet3_get_sset_count,
560 .get_ethtool_stats = vmxnet3_get_ethtool_stats, 560 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 34f392f46fb1..2121c735cabd 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k"
72 72
73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01000500 74#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00
75 75
76 76
77/* 77/*
diff --git a/drivers/net/vxge/Makefile b/drivers/net/vxge/Makefile
index 8992ca26b277..b625e2c503f5 100644
--- a/drivers/net/vxge/Makefile
+++ b/drivers/net/vxge/Makefile
@@ -1,5 +1,5 @@
1# 1#
2# Makefile for Neterion Inc's X3100 Series 10 GbE PCIe # I/O 2# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
3# Virtualized Server Adapter linux driver 3# Virtualized Server Adapter linux driver
4 4
5obj-$(CONFIG_VXGE) += vxge.o 5obj-$(CONFIG_VXGE) += vxge.o
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 297f0d202073..0e6db5935609 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 4ae2625d4d8f..1a94343023cb 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-config.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#ifndef VXGE_CONFIG_H 14#ifndef VXGE_CONFIG_H
15#define VXGE_CONFIG_H 15#define VXGE_CONFIG_H
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index cadef8549c06..05679e306fdd 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-ethtool.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#include<linux/ethtool.h> 14#include<linux/ethtool.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
diff --git a/drivers/net/vxge/vxge-ethtool.h b/drivers/net/vxge/vxge-ethtool.h
index 1c3df0a34acc..6cf3044d7f43 100644
--- a/drivers/net/vxge/vxge-ethtool.h
+++ b/drivers/net/vxge/vxge-ethtool.h
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-ethtool.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#ifndef _VXGE_ETHTOOL_H 14#ifndef _VXGE_ETHTOOL_H
15#define _VXGE_ETHTOOL_H 15#define _VXGE_ETHTOOL_H
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index fc8b2d7a0919..c7c5605b3728 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -7,9 +7,9 @@
7* system is licensed under the GPL. 7* system is licensed under the GPL.
8* See the file COPYING in this distribution for more information. 8* See the file COPYING in this distribution for more information.
9* 9*
10* vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11* Virtualized Server Adapter. 11* Virtualized Server Adapter.
12* Copyright(c) 2002-2009 Neterion Inc. 12* Copyright(c) 2002-2010 Exar Corp.
13* 13*
14* The module loadable parameters that are supported by the driver and a brief 14* The module loadable parameters that are supported by the driver and a brief
15* explanation of all the variables: 15* explanation of all the variables:
@@ -41,6 +41,8 @@
41* 41*
42******************************************************************************/ 42******************************************************************************/
43 43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
44#include <linux/if_vlan.h> 46#include <linux/if_vlan.h>
45#include <linux/pci.h> 47#include <linux/pci.h>
46#include <linux/slab.h> 48#include <linux/slab.h>
@@ -87,7 +89,6 @@ static inline int is_vxge_card_up(struct vxgedev *vdev)
87 89
88static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) 90static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
89{ 91{
90 unsigned long flags = 0;
91 struct sk_buff **skb_ptr = NULL; 92 struct sk_buff **skb_ptr = NULL;
92 struct sk_buff **temp; 93 struct sk_buff **temp;
93#define NR_SKB_COMPLETED 128 94#define NR_SKB_COMPLETED 128
@@ -98,15 +99,16 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
98 more = 0; 99 more = 0;
99 skb_ptr = completed; 100 skb_ptr = completed;
100 101
101 if (spin_trylock_irqsave(&fifo->tx_lock, flags)) { 102 if (__netif_tx_trylock(fifo->txq)) {
102 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr, 103 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
103 NR_SKB_COMPLETED, &more); 104 NR_SKB_COMPLETED, &more);
104 spin_unlock_irqrestore(&fifo->tx_lock, flags); 105 __netif_tx_unlock(fifo->txq);
105 } 106 }
107
106 /* free SKBs */ 108 /* free SKBs */
107 for (temp = completed; temp != skb_ptr; temp++) 109 for (temp = completed; temp != skb_ptr; temp++)
108 dev_kfree_skb_irq(*temp); 110 dev_kfree_skb_irq(*temp);
109 } while (more) ; 111 } while (more);
110} 112}
111 113
112static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev) 114static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
@@ -131,80 +133,6 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
131} 133}
132 134
133/* 135/*
134 * MultiQ manipulation helper functions
135 */
136void vxge_stop_all_tx_queue(struct vxgedev *vdev)
137{
138 int i;
139 struct net_device *dev = vdev->ndev;
140
141 if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
142 for (i = 0; i < vdev->no_of_vpath; i++)
143 vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
144 }
145 netif_tx_stop_all_queues(dev);
146}
147
148void vxge_stop_tx_queue(struct vxge_fifo *fifo)
149{
150 struct net_device *dev = fifo->ndev;
151
152 struct netdev_queue *txq = NULL;
153 if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
154 txq = netdev_get_tx_queue(dev, fifo->driver_id);
155 else {
156 txq = netdev_get_tx_queue(dev, 0);
157 fifo->queue_state = VPATH_QUEUE_STOP;
158 }
159
160 netif_tx_stop_queue(txq);
161}
162
163void vxge_start_all_tx_queue(struct vxgedev *vdev)
164{
165 int i;
166 struct net_device *dev = vdev->ndev;
167
168 if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
169 for (i = 0; i < vdev->no_of_vpath; i++)
170 vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
171 }
172 netif_tx_start_all_queues(dev);
173}
174
175static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
176{
177 int i;
178 struct net_device *dev = vdev->ndev;
179
180 if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
181 for (i = 0; i < vdev->no_of_vpath; i++)
182 vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
183 }
184 netif_tx_wake_all_queues(dev);
185}
186
187void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
188{
189 struct net_device *dev = fifo->ndev;
190
191 int vpath_no = fifo->driver_id;
192 struct netdev_queue *txq = NULL;
193 if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
194 txq = netdev_get_tx_queue(dev, vpath_no);
195 if (netif_tx_queue_stopped(txq))
196 netif_tx_wake_queue(txq);
197 } else {
198 txq = netdev_get_tx_queue(dev, 0);
199 if (fifo->queue_state == VPATH_QUEUE_STOP)
200 if (netif_tx_queue_stopped(txq)) {
201 fifo->queue_state = VPATH_QUEUE_START;
202 netif_tx_wake_queue(txq);
203 }
204 }
205}
206
207/*
208 * vxge_callback_link_up 136 * vxge_callback_link_up
209 * 137 *
210 * This function is called during interrupt context to notify link up state 138 * This function is called during interrupt context to notify link up state
@@ -218,11 +146,11 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
218 146
219 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 147 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
220 vdev->ndev->name, __func__, __LINE__); 148 vdev->ndev->name, __func__, __LINE__);
221 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name); 149 netdev_notice(vdev->ndev, "Link Up\n");
222 vdev->stats.link_up++; 150 vdev->stats.link_up++;
223 151
224 netif_carrier_on(vdev->ndev); 152 netif_carrier_on(vdev->ndev);
225 vxge_wake_all_tx_queue(vdev); 153 netif_tx_wake_all_queues(vdev->ndev);
226 154
227 vxge_debug_entryexit(VXGE_TRACE, 155 vxge_debug_entryexit(VXGE_TRACE,
228 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); 156 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
@@ -242,11 +170,11 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
242 170
243 vxge_debug_entryexit(VXGE_TRACE, 171 vxge_debug_entryexit(VXGE_TRACE,
244 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 172 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
245 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name); 173 netdev_notice(vdev->ndev, "Link Down\n");
246 174
247 vdev->stats.link_down++; 175 vdev->stats.link_down++;
248 netif_carrier_off(vdev->ndev); 176 netif_carrier_off(vdev->ndev);
249 vxge_stop_all_tx_queue(vdev); 177 netif_tx_stop_all_queues(vdev->ndev);
250 178
251 vxge_debug_entryexit(VXGE_TRACE, 179 vxge_debug_entryexit(VXGE_TRACE,
252 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); 180 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
@@ -677,7 +605,8 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
677 &dtr, &t_code) == VXGE_HW_OK); 605 &dtr, &t_code) == VXGE_HW_OK);
678 606
679 *skb_ptr = done_skb; 607 *skb_ptr = done_skb;
680 vxge_wake_tx_queue(fifo, skb); 608 if (netif_tx_queue_stopped(fifo->txq))
609 netif_tx_wake_queue(fifo->txq);
681 610
682 vxge_debug_entryexit(VXGE_TRACE, 611 vxge_debug_entryexit(VXGE_TRACE,
683 "%s: %s:%d Exiting...", 612 "%s: %s:%d Exiting...",
@@ -686,8 +615,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
686} 615}
687 616
688/* select a vpath to transmit the packet */ 617/* select a vpath to transmit the packet */
689static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, 618static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
690 int *do_lock)
691{ 619{
692 u16 queue_len, counter = 0; 620 u16 queue_len, counter = 0;
693 if (skb->protocol == htons(ETH_P_IP)) { 621 if (skb->protocol == htons(ETH_P_IP)) {
@@ -706,12 +634,6 @@ static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
706 vdev->vpath_selector[queue_len - 1]; 634 vdev->vpath_selector[queue_len - 1];
707 if (counter >= queue_len) 635 if (counter >= queue_len)
708 counter = queue_len - 1; 636 counter = queue_len - 1;
709
710 if (ip->protocol == IPPROTO_UDP) {
711#ifdef NETIF_F_LLTX
712 *do_lock = 0;
713#endif
714 }
715 } 637 }
716 } 638 }
717 return counter; 639 return counter;
@@ -808,8 +730,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
808 * 730 *
809 * This function is the Tx entry point of the driver. Neterion NIC supports 731 * This function is the Tx entry point of the driver. Neterion NIC supports
810 * certain protocol assist features on Tx side, namely CSO, S/G, LSO. 732 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
811 * NOTE: when device cant queue the pkt, just the trans_start variable will
812 * not be upadted.
813*/ 733*/
814static netdev_tx_t 734static netdev_tx_t
815vxge_xmit(struct sk_buff *skb, struct net_device *dev) 735vxge_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -826,9 +746,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
826 struct vxge_tx_priv *txdl_priv = NULL; 746 struct vxge_tx_priv *txdl_priv = NULL;
827 struct __vxge_hw_fifo *fifo_hw; 747 struct __vxge_hw_fifo *fifo_hw;
828 int offload_type; 748 int offload_type;
829 unsigned long flags = 0;
830 int vpath_no = 0; 749 int vpath_no = 0;
831 int do_spin_tx_lock = 1;
832 750
833 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 751 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
834 dev->name, __func__, __LINE__); 752 dev->name, __func__, __LINE__);
@@ -864,7 +782,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
864 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) 782 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
865 vpath_no = skb_get_queue_mapping(skb); 783 vpath_no = skb_get_queue_mapping(skb);
866 else if (vdev->config.tx_steering_type == TX_PORT_STEERING) 784 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
867 vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock); 785 vpath_no = vxge_get_vpath_no(vdev, skb);
868 786
869 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no); 787 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
870 788
@@ -874,46 +792,29 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
874 fifo = &vdev->vpaths[vpath_no].fifo; 792 fifo = &vdev->vpaths[vpath_no].fifo;
875 fifo_hw = fifo->handle; 793 fifo_hw = fifo->handle;
876 794
877 if (do_spin_tx_lock) 795 if (netif_tx_queue_stopped(fifo->txq))
878 spin_lock_irqsave(&fifo->tx_lock, flags); 796 return NETDEV_TX_BUSY;
879 else {
880 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
881 return NETDEV_TX_LOCKED;
882 }
883 797
884 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
885 if (netif_subqueue_stopped(dev, skb)) {
886 spin_unlock_irqrestore(&fifo->tx_lock, flags);
887 return NETDEV_TX_BUSY;
888 }
889 } else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
890 if (netif_queue_stopped(dev)) {
891 spin_unlock_irqrestore(&fifo->tx_lock, flags);
892 return NETDEV_TX_BUSY;
893 }
894 }
895 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw); 798 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
896 if (avail == 0) { 799 if (avail == 0) {
897 vxge_debug_tx(VXGE_ERR, 800 vxge_debug_tx(VXGE_ERR,
898 "%s: No free TXDs available", dev->name); 801 "%s: No free TXDs available", dev->name);
899 fifo->stats.txd_not_free++; 802 fifo->stats.txd_not_free++;
900 vxge_stop_tx_queue(fifo); 803 goto _exit0;
901 goto _exit2;
902 } 804 }
903 805
904 /* Last TXD? Stop tx queue to avoid dropping packets. TX 806 /* Last TXD? Stop tx queue to avoid dropping packets. TX
905 * completion will resume the queue. 807 * completion will resume the queue.
906 */ 808 */
907 if (avail == 1) 809 if (avail == 1)
908 vxge_stop_tx_queue(fifo); 810 netif_tx_stop_queue(fifo->txq);
909 811
910 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv); 812 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
911 if (unlikely(status != VXGE_HW_OK)) { 813 if (unlikely(status != VXGE_HW_OK)) {
912 vxge_debug_tx(VXGE_ERR, 814 vxge_debug_tx(VXGE_ERR,
913 "%s: Out of descriptors .", dev->name); 815 "%s: Out of descriptors .", dev->name);
914 fifo->stats.txd_out_of_desc++; 816 fifo->stats.txd_out_of_desc++;
915 vxge_stop_tx_queue(fifo); 817 goto _exit0;
916 goto _exit2;
917 } 818 }
918 819
919 vxge_debug_tx(VXGE_TRACE, 820 vxge_debug_tx(VXGE_TRACE,
@@ -933,9 +834,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
933 834
934 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { 835 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
935 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 836 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
936 vxge_stop_tx_queue(fifo);
937 fifo->stats.pci_map_fail++; 837 fifo->stats.pci_map_fail++;
938 goto _exit2; 838 goto _exit0;
939 } 839 }
940 840
941 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr); 841 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
@@ -958,13 +858,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
958 if (!frag->size) 858 if (!frag->size)
959 continue; 859 continue;
960 860
961 dma_pointer = 861 dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
962 (u64)pci_map_page(fifo->pdev, frag->page,
963 frag->page_offset, frag->size, 862 frag->page_offset, frag->size,
964 PCI_DMA_TODEVICE); 863 PCI_DMA_TODEVICE);
965 864
966 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) 865 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
967 goto _exit0; 866 goto _exit2;
968 vxge_debug_tx(VXGE_TRACE, 867 vxge_debug_tx(VXGE_TRACE,
969 "%s: %s:%d frag = %d dma_pointer = 0x%llx", 868 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
970 dev->name, __func__, __LINE__, i, 869 dev->name, __func__, __LINE__, i,
@@ -979,11 +878,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
979 offload_type = vxge_offload_type(skb); 878 offload_type = vxge_offload_type(skb);
980 879
981 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 880 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
982
983 int mss = vxge_tcp_mss(skb); 881 int mss = vxge_tcp_mss(skb);
984 if (mss) { 882 if (mss) {
985 vxge_debug_tx(VXGE_TRACE, 883 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
986 "%s: %s:%d mss = %d",
987 dev->name, __func__, __LINE__, mss); 884 dev->name, __func__, __LINE__, mss);
988 vxge_hw_fifo_txdl_mss_set(dtr, mss); 885 vxge_hw_fifo_txdl_mss_set(dtr, mss);
989 } else { 886 } else {
@@ -1001,19 +898,13 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
1001 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); 898 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
1002 899
1003 vxge_hw_fifo_txdl_post(fifo_hw, dtr); 900 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
1004#ifdef NETIF_F_LLTX
1005 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1006#endif
1007 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1008 901
1009 VXGE_COMPLETE_VPATH_TX(fifo);
1010 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", 902 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
1011 dev->name, __func__, __LINE__); 903 dev->name, __func__, __LINE__);
1012 return NETDEV_TX_OK; 904 return NETDEV_TX_OK;
1013 905
1014_exit0: 906_exit2:
1015 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name); 907 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
1016
1017_exit1: 908_exit1:
1018 j = 0; 909 j = 0;
1019 frag = &skb_shinfo(skb)->frags[0]; 910 frag = &skb_shinfo(skb)->frags[0];
@@ -1028,10 +919,9 @@ _exit1:
1028 } 919 }
1029 920
1030 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 921 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
1031_exit2: 922_exit0:
923 netif_tx_stop_queue(fifo->txq);
1032 dev_kfree_skb(skb); 924 dev_kfree_skb(skb);
1033 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1034 VXGE_COMPLETE_VPATH_TX(fifo);
1035 925
1036 return NETDEV_TX_OK; 926 return NETDEV_TX_OK;
1037} 927}
@@ -1121,7 +1011,8 @@ static void vxge_set_multicast(struct net_device *dev)
1121 struct netdev_hw_addr *ha; 1011 struct netdev_hw_addr *ha;
1122 struct vxgedev *vdev; 1012 struct vxgedev *vdev;
1123 int i, mcast_cnt = 0; 1013 int i, mcast_cnt = 0;
1124 struct __vxge_hw_device *hldev; 1014 struct __vxge_hw_device *hldev;
1015 struct vxge_vpath *vpath;
1125 enum vxge_hw_status status = VXGE_HW_OK; 1016 enum vxge_hw_status status = VXGE_HW_OK;
1126 struct macInfo mac_info; 1017 struct macInfo mac_info;
1127 int vpath_idx = 0; 1018 int vpath_idx = 0;
@@ -1141,46 +1032,48 @@ static void vxge_set_multicast(struct net_device *dev)
1141 1032
1142 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) { 1033 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1143 for (i = 0; i < vdev->no_of_vpath; i++) { 1034 for (i = 0; i < vdev->no_of_vpath; i++) {
1144 vxge_assert(vdev->vpaths[i].is_open); 1035 vpath = &vdev->vpaths[i];
1145 status = vxge_hw_vpath_mcast_enable( 1036 vxge_assert(vpath->is_open);
1146 vdev->vpaths[i].handle); 1037 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1038 if (status != VXGE_HW_OK)
1039 vxge_debug_init(VXGE_ERR, "failed to enable "
1040 "multicast, status %d", status);
1147 vdev->all_multi_flg = 1; 1041 vdev->all_multi_flg = 1;
1148 } 1042 }
1149 } else if ((dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) { 1043 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1150 for (i = 0; i < vdev->no_of_vpath; i++) { 1044 for (i = 0; i < vdev->no_of_vpath; i++) {
1151 vxge_assert(vdev->vpaths[i].is_open); 1045 vpath = &vdev->vpaths[i];
1152 status = vxge_hw_vpath_mcast_disable( 1046 vxge_assert(vpath->is_open);
1153 vdev->vpaths[i].handle); 1047 status = vxge_hw_vpath_mcast_disable(vpath->handle);
1154 vdev->all_multi_flg = 1; 1048 if (status != VXGE_HW_OK)
1049 vxge_debug_init(VXGE_ERR, "failed to disable "
1050 "multicast, status %d", status);
1051 vdev->all_multi_flg = 0;
1155 } 1052 }
1156 } 1053 }
1157 1054
1158 if (status != VXGE_HW_OK)
1159 vxge_debug_init(VXGE_ERR,
1160 "failed to %s multicast, status %d",
1161 dev->flags & IFF_ALLMULTI ?
1162 "enable" : "disable", status);
1163 1055
1164 if (!vdev->config.addr_learn_en) { 1056 if (!vdev->config.addr_learn_en) {
1165 if (dev->flags & IFF_PROMISC) { 1057 for (i = 0; i < vdev->no_of_vpath; i++) {
1166 for (i = 0; i < vdev->no_of_vpath; i++) { 1058 vpath = &vdev->vpaths[i];
1167 vxge_assert(vdev->vpaths[i].is_open); 1059 vxge_assert(vpath->is_open);
1060
1061 if (dev->flags & IFF_PROMISC)
1168 status = vxge_hw_vpath_promisc_enable( 1062 status = vxge_hw_vpath_promisc_enable(
1169 vdev->vpaths[i].handle); 1063 vpath->handle);
1170 } 1064 else
1171 } else {
1172 for (i = 0; i < vdev->no_of_vpath; i++) {
1173 vxge_assert(vdev->vpaths[i].is_open);
1174 status = vxge_hw_vpath_promisc_disable( 1065 status = vxge_hw_vpath_promisc_disable(
1175 vdev->vpaths[i].handle); 1066 vpath->handle);
1176 } 1067 if (status != VXGE_HW_OK)
1068 vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1069 ", status %d", dev->flags&IFF_PROMISC ?
1070 "enable" : "disable", status);
1177 } 1071 }
1178 } 1072 }
1179 1073
1180 memset(&mac_info, 0, sizeof(struct macInfo)); 1074 memset(&mac_info, 0, sizeof(struct macInfo));
1181 /* Update individual M_CAST address list */ 1075 /* Update individual M_CAST address list */
1182 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) { 1076 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1183
1184 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; 1077 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1185 list_head = &vdev->vpaths[0].mac_addr_list; 1078 list_head = &vdev->vpaths[0].mac_addr_list;
1186 if ((netdev_mc_count(dev) + 1079 if ((netdev_mc_count(dev) +
@@ -1190,14 +1083,7 @@ static void vxge_set_multicast(struct net_device *dev)
1190 1083
1191 /* Delete previous MC's */ 1084 /* Delete previous MC's */
1192 for (i = 0; i < mcast_cnt; i++) { 1085 for (i = 0; i < mcast_cnt; i++) {
1193 if (!list_empty(list_head))
1194 mac_entry = (struct vxge_mac_addrs *)
1195 list_first_entry(list_head,
1196 struct vxge_mac_addrs,
1197 item);
1198
1199 list_for_each_safe(entry, next, list_head) { 1086 list_for_each_safe(entry, next, list_head) {
1200
1201 mac_entry = (struct vxge_mac_addrs *) entry; 1087 mac_entry = (struct vxge_mac_addrs *) entry;
1202 /* Copy the mac address to delete */ 1088 /* Copy the mac address to delete */
1203 mac_address = (u8 *)&mac_entry->macaddr; 1089 mac_address = (u8 *)&mac_entry->macaddr;
@@ -1240,9 +1126,7 @@ _set_all_mcast:
1240 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; 1126 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1241 /* Delete previous MC's */ 1127 /* Delete previous MC's */
1242 for (i = 0; i < mcast_cnt; i++) { 1128 for (i = 0; i < mcast_cnt; i++) {
1243
1244 list_for_each_safe(entry, next, list_head) { 1129 list_for_each_safe(entry, next, list_head) {
1245
1246 mac_entry = (struct vxge_mac_addrs *) entry; 1130 mac_entry = (struct vxge_mac_addrs *) entry;
1247 /* Copy the mac address to delete */ 1131 /* Copy the mac address to delete */
1248 mac_address = (u8 *)&mac_entry->macaddr; 1132 mac_address = (u8 *)&mac_entry->macaddr;
@@ -1262,9 +1146,10 @@ _set_all_mcast:
1262 1146
1263 /* Enable all multicast */ 1147 /* Enable all multicast */
1264 for (i = 0; i < vdev->no_of_vpath; i++) { 1148 for (i = 0; i < vdev->no_of_vpath; i++) {
1265 vxge_assert(vdev->vpaths[i].is_open); 1149 vpath = &vdev->vpaths[i];
1266 status = vxge_hw_vpath_mcast_enable( 1150 vxge_assert(vpath->is_open);
1267 vdev->vpaths[i].handle); 1151
1152 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1268 if (status != VXGE_HW_OK) { 1153 if (status != VXGE_HW_OK) {
1269 vxge_debug_init(VXGE_ERR, 1154 vxge_debug_init(VXGE_ERR,
1270 "%s:%d Enabling all multicasts failed", 1155 "%s:%d Enabling all multicasts failed",
@@ -1425,6 +1310,7 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1425static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) 1310static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1426{ 1311{
1427 enum vxge_hw_status status = VXGE_HW_OK; 1312 enum vxge_hw_status status = VXGE_HW_OK;
1313 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1428 int ret = 0; 1314 int ret = 0;
1429 1315
1430 /* check if device is down already */ 1316 /* check if device is down already */
@@ -1435,12 +1321,10 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1435 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 1321 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1436 return 0; 1322 return 0;
1437 1323
1438 if (vdev->vpaths[vp_id].handle) { 1324 if (vpath->handle) {
1439 if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle) 1325 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1440 == VXGE_HW_OK) {
1441 if (is_vxge_card_up(vdev) && 1326 if (is_vxge_card_up(vdev) &&
1442 vxge_hw_vpath_recover_from_reset( 1327 vxge_hw_vpath_recover_from_reset(vpath->handle)
1443 vdev->vpaths[vp_id].handle)
1444 != VXGE_HW_OK) { 1328 != VXGE_HW_OK) {
1445 vxge_debug_init(VXGE_ERR, 1329 vxge_debug_init(VXGE_ERR,
1446 "vxge_hw_vpath_recover_from_reset" 1330 "vxge_hw_vpath_recover_from_reset"
@@ -1456,11 +1340,20 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1456 } else 1340 } else
1457 return VXGE_HW_FAIL; 1341 return VXGE_HW_FAIL;
1458 1342
1459 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]); 1343 vxge_restore_vpath_mac_addr(vpath);
1460 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]); 1344 vxge_restore_vpath_vid_table(vpath);
1461 1345
1462 /* Enable all broadcast */ 1346 /* Enable all broadcast */
1463 vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle); 1347 vxge_hw_vpath_bcast_enable(vpath->handle);
1348
1349 /* Enable all multicast */
1350 if (vdev->all_multi_flg) {
1351 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1352 if (status != VXGE_HW_OK)
1353 vxge_debug_init(VXGE_ERR,
1354 "%s:%d Enabling multicast failed",
1355 __func__, __LINE__);
1356 }
1464 1357
1465 /* Enable the interrupts */ 1358 /* Enable the interrupts */
1466 vxge_vpath_intr_enable(vdev, vp_id); 1359 vxge_vpath_intr_enable(vdev, vp_id);
@@ -1468,17 +1361,18 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1468 smp_wmb(); 1361 smp_wmb();
1469 1362
1470 /* Enable the flow of traffic through the vpath */ 1363 /* Enable the flow of traffic through the vpath */
1471 vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle); 1364 vxge_hw_vpath_enable(vpath->handle);
1472 1365
1473 smp_wmb(); 1366 smp_wmb();
1474 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle); 1367 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1475 vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK; 1368 vpath->ring.last_status = VXGE_HW_OK;
1476 1369
1477 /* Vpath reset done */ 1370 /* Vpath reset done */
1478 clear_bit(vp_id, &vdev->vp_reset); 1371 clear_bit(vp_id, &vdev->vp_reset);
1479 1372
1480 /* Start the vpath queue */ 1373 /* Start the vpath queue */
1481 vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL); 1374 if (netif_tx_queue_stopped(vpath->fifo.txq))
1375 netif_tx_wake_queue(vpath->fifo.txq);
1482 1376
1483 return ret; 1377 return ret;
1484} 1378}
@@ -1512,9 +1406,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1512 vxge_debug_init(VXGE_ERR, 1406 vxge_debug_init(VXGE_ERR,
1513 "%s: execution mode is debug, returning..", 1407 "%s: execution mode is debug, returning..",
1514 vdev->ndev->name); 1408 vdev->ndev->name);
1515 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 1409 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1516 vxge_stop_all_tx_queue(vdev); 1410 netif_tx_stop_all_queues(vdev->ndev);
1517 return 0; 1411 return 0;
1518 } 1412 }
1519 } 1413 }
1520 1414
@@ -1523,7 +1417,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1523 1417
1524 switch (vdev->cric_err_event) { 1418 switch (vdev->cric_err_event) {
1525 case VXGE_HW_EVENT_UNKNOWN: 1419 case VXGE_HW_EVENT_UNKNOWN:
1526 vxge_stop_all_tx_queue(vdev); 1420 netif_tx_stop_all_queues(vdev->ndev);
1527 vxge_debug_init(VXGE_ERR, 1421 vxge_debug_init(VXGE_ERR,
1528 "fatal: %s: Disabling device due to" 1422 "fatal: %s: Disabling device due to"
1529 "unknown error", 1423 "unknown error",
@@ -1544,7 +1438,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1544 case VXGE_HW_EVENT_VPATH_ERR: 1438 case VXGE_HW_EVENT_VPATH_ERR:
1545 break; 1439 break;
1546 case VXGE_HW_EVENT_CRITICAL_ERR: 1440 case VXGE_HW_EVENT_CRITICAL_ERR:
1547 vxge_stop_all_tx_queue(vdev); 1441 netif_tx_stop_all_queues(vdev->ndev);
1548 vxge_debug_init(VXGE_ERR, 1442 vxge_debug_init(VXGE_ERR,
1549 "fatal: %s: Disabling device due to" 1443 "fatal: %s: Disabling device due to"
1550 "serious error", 1444 "serious error",
@@ -1554,7 +1448,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1554 ret = -EPERM; 1448 ret = -EPERM;
1555 goto out; 1449 goto out;
1556 case VXGE_HW_EVENT_SERR: 1450 case VXGE_HW_EVENT_SERR:
1557 vxge_stop_all_tx_queue(vdev); 1451 netif_tx_stop_all_queues(vdev->ndev);
1558 vxge_debug_init(VXGE_ERR, 1452 vxge_debug_init(VXGE_ERR,
1559 "fatal: %s: Disabling device due to" 1453 "fatal: %s: Disabling device due to"
1560 "serious error", 1454 "serious error",
@@ -1566,7 +1460,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1566 ret = -EPERM; 1460 ret = -EPERM;
1567 goto out; 1461 goto out;
1568 case VXGE_HW_EVENT_SLOT_FREEZE: 1462 case VXGE_HW_EVENT_SLOT_FREEZE:
1569 vxge_stop_all_tx_queue(vdev); 1463 netif_tx_stop_all_queues(vdev->ndev);
1570 vxge_debug_init(VXGE_ERR, 1464 vxge_debug_init(VXGE_ERR,
1571 "fatal: %s: Disabling device due to" 1465 "fatal: %s: Disabling device due to"
1572 "slot freeze", 1466 "slot freeze",
@@ -1580,7 +1474,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1580 } 1474 }
1581 1475
1582 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) 1476 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
1583 vxge_stop_all_tx_queue(vdev); 1477 netif_tx_stop_all_queues(vdev->ndev);
1584 1478
1585 if (event == VXGE_LL_FULL_RESET) { 1479 if (event == VXGE_LL_FULL_RESET) {
1586 status = vxge_reset_all_vpaths(vdev); 1480 status = vxge_reset_all_vpaths(vdev);
@@ -1640,7 +1534,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1640 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); 1534 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1641 } 1535 }
1642 1536
1643 vxge_wake_all_tx_queue(vdev); 1537 netif_tx_wake_all_queues(vdev->ndev);
1644 } 1538 }
1645 1539
1646out: 1540out:
@@ -1661,8 +1555,7 @@ out:
1661 */ 1555 */
1662int vxge_reset(struct vxgedev *vdev) 1556int vxge_reset(struct vxgedev *vdev)
1663{ 1557{
1664 do_vxge_reset(vdev, VXGE_LL_FULL_RESET); 1558 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1665 return 0;
1666} 1559}
1667 1560
1668/** 1561/**
@@ -2025,17 +1918,17 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
2025/* reset vpaths */ 1918/* reset vpaths */
2026enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1919enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
2027{ 1920{
2028 int i;
2029 enum vxge_hw_status status = VXGE_HW_OK; 1921 enum vxge_hw_status status = VXGE_HW_OK;
1922 struct vxge_vpath *vpath;
1923 int i;
2030 1924
2031 for (i = 0; i < vdev->no_of_vpath; i++) 1925 for (i = 0; i < vdev->no_of_vpath; i++) {
2032 if (vdev->vpaths[i].handle) { 1926 vpath = &vdev->vpaths[i];
2033 if (vxge_hw_vpath_reset(vdev->vpaths[i].handle) 1927 if (vpath->handle) {
2034 == VXGE_HW_OK) { 1928 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
2035 if (is_vxge_card_up(vdev) && 1929 if (is_vxge_card_up(vdev) &&
2036 vxge_hw_vpath_recover_from_reset( 1930 vxge_hw_vpath_recover_from_reset(
2037 vdev->vpaths[i].handle) 1931 vpath->handle) != VXGE_HW_OK) {
2038 != VXGE_HW_OK) {
2039 vxge_debug_init(VXGE_ERR, 1932 vxge_debug_init(VXGE_ERR,
2040 "vxge_hw_vpath_recover_" 1933 "vxge_hw_vpath_recover_"
2041 "from_reset failed for vpath: " 1934 "from_reset failed for vpath: "
@@ -2049,83 +1942,93 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
2049 return status; 1942 return status;
2050 } 1943 }
2051 } 1944 }
1945 }
1946
2052 return status; 1947 return status;
2053} 1948}
2054 1949
2055/* close vpaths */ 1950/* close vpaths */
2056void vxge_close_vpaths(struct vxgedev *vdev, int index) 1951void vxge_close_vpaths(struct vxgedev *vdev, int index)
2057{ 1952{
1953 struct vxge_vpath *vpath;
2058 int i; 1954 int i;
1955
2059 for (i = index; i < vdev->no_of_vpath; i++) { 1956 for (i = index; i < vdev->no_of_vpath; i++) {
2060 if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) { 1957 vpath = &vdev->vpaths[i];
2061 vxge_hw_vpath_close(vdev->vpaths[i].handle); 1958
1959 if (vpath->handle && vpath->is_open) {
1960 vxge_hw_vpath_close(vpath->handle);
2062 vdev->stats.vpaths_open--; 1961 vdev->stats.vpaths_open--;
2063 } 1962 }
2064 vdev->vpaths[i].is_open = 0; 1963 vpath->is_open = 0;
2065 vdev->vpaths[i].handle = NULL; 1964 vpath->handle = NULL;
2066 } 1965 }
2067} 1966}
2068 1967
2069/* open vpaths */ 1968/* open vpaths */
2070int vxge_open_vpaths(struct vxgedev *vdev) 1969int vxge_open_vpaths(struct vxgedev *vdev)
2071{ 1970{
1971 struct vxge_hw_vpath_attr attr;
2072 enum vxge_hw_status status; 1972 enum vxge_hw_status status;
2073 int i; 1973 struct vxge_vpath *vpath;
2074 u32 vp_id = 0; 1974 u32 vp_id = 0;
2075 struct vxge_hw_vpath_attr attr; 1975 int i;
2076 1976
2077 for (i = 0; i < vdev->no_of_vpath; i++) { 1977 for (i = 0; i < vdev->no_of_vpath; i++) {
2078 vxge_assert(vdev->vpaths[i].is_configured); 1978 vpath = &vdev->vpaths[i];
2079 attr.vp_id = vdev->vpaths[i].device_id; 1979
1980 vxge_assert(vpath->is_configured);
1981 attr.vp_id = vpath->device_id;
2080 attr.fifo_attr.callback = vxge_xmit_compl; 1982 attr.fifo_attr.callback = vxge_xmit_compl;
2081 attr.fifo_attr.txdl_term = vxge_tx_term; 1983 attr.fifo_attr.txdl_term = vxge_tx_term;
2082 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); 1984 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2083 attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo; 1985 attr.fifo_attr.userdata = &vpath->fifo;
2084 1986
2085 attr.ring_attr.callback = vxge_rx_1b_compl; 1987 attr.ring_attr.callback = vxge_rx_1b_compl;
2086 attr.ring_attr.rxd_init = vxge_rx_initial_replenish; 1988 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2087 attr.ring_attr.rxd_term = vxge_rx_term; 1989 attr.ring_attr.rxd_term = vxge_rx_term;
2088 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); 1990 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2089 attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring; 1991 attr.ring_attr.userdata = &vpath->ring;
2090 1992
2091 vdev->vpaths[i].ring.ndev = vdev->ndev; 1993 vpath->ring.ndev = vdev->ndev;
2092 vdev->vpaths[i].ring.pdev = vdev->pdev; 1994 vpath->ring.pdev = vdev->pdev;
2093 status = vxge_hw_vpath_open(vdev->devh, &attr, 1995 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2094 &(vdev->vpaths[i].handle));
2095 if (status == VXGE_HW_OK) { 1996 if (status == VXGE_HW_OK) {
2096 vdev->vpaths[i].fifo.handle = 1997 vpath->fifo.handle =
2097 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; 1998 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2098 vdev->vpaths[i].ring.handle = 1999 vpath->ring.handle =
2099 (struct __vxge_hw_ring *)attr.ring_attr.userdata; 2000 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2100 vdev->vpaths[i].fifo.tx_steering_type = 2001 vpath->fifo.tx_steering_type =
2101 vdev->config.tx_steering_type; 2002 vdev->config.tx_steering_type;
2102 vdev->vpaths[i].fifo.ndev = vdev->ndev; 2003 vpath->fifo.ndev = vdev->ndev;
2103 vdev->vpaths[i].fifo.pdev = vdev->pdev; 2004 vpath->fifo.pdev = vdev->pdev;
2104 vdev->vpaths[i].fifo.indicate_max_pkts = 2005 if (vdev->config.tx_steering_type)
2006 vpath->fifo.txq =
2007 netdev_get_tx_queue(vdev->ndev, i);
2008 else
2009 vpath->fifo.txq =
2010 netdev_get_tx_queue(vdev->ndev, 0);
2011 vpath->fifo.indicate_max_pkts =
2105 vdev->config.fifo_indicate_max_pkts; 2012 vdev->config.fifo_indicate_max_pkts;
2106 vdev->vpaths[i].ring.rx_vector_no = 0; 2013 vpath->ring.rx_vector_no = 0;
2107 vdev->vpaths[i].ring.rx_csum = vdev->rx_csum; 2014 vpath->ring.rx_csum = vdev->rx_csum;
2108 vdev->vpaths[i].is_open = 1; 2015 vpath->is_open = 1;
2109 vdev->vp_handles[i] = vdev->vpaths[i].handle; 2016 vdev->vp_handles[i] = vpath->handle;
2110 vdev->vpaths[i].ring.gro_enable = 2017 vpath->ring.gro_enable = vdev->config.gro_enable;
2111 vdev->config.gro_enable; 2018 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2112 vdev->vpaths[i].ring.vlan_tag_strip =
2113 vdev->vlan_tag_strip;
2114 vdev->stats.vpaths_open++; 2019 vdev->stats.vpaths_open++;
2115 } else { 2020 } else {
2116 vdev->stats.vpath_open_fail++; 2021 vdev->stats.vpath_open_fail++;
2117 vxge_debug_init(VXGE_ERR, 2022 vxge_debug_init(VXGE_ERR,
2118 "%s: vpath: %d failed to open " 2023 "%s: vpath: %d failed to open "
2119 "with status: %d", 2024 "with status: %d",
2120 vdev->ndev->name, vdev->vpaths[i].device_id, 2025 vdev->ndev->name, vpath->device_id,
2121 status); 2026 status);
2122 vxge_close_vpaths(vdev, 0); 2027 vxge_close_vpaths(vdev, 0);
2123 return -EPERM; 2028 return -EPERM;
2124 } 2029 }
2125 2030
2126 vp_id = 2031 vp_id = vpath->handle->vpath->vp_id;
2127 ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
2128 vpath->vp_id;
2129 vdev->vpaths_deployed |= vxge_mBIT(vp_id); 2032 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2130 } 2033 }
2131 return VXGE_HW_OK; 2034 return VXGE_HW_OK;
@@ -2299,7 +2202,6 @@ start:
2299 vdev->vxge_entries[j].in_use = 0; 2202 vdev->vxge_entries[j].in_use = 0;
2300 2203
2301 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); 2204 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
2302
2303 if (ret > 0) { 2205 if (ret > 0) {
2304 vxge_debug_init(VXGE_ERR, 2206 vxge_debug_init(VXGE_ERR,
2305 "%s: MSI-X enable failed for %d vectors, ret: %d", 2207 "%s: MSI-X enable failed for %d vectors, ret: %d",
@@ -2345,17 +2247,16 @@ static int vxge_enable_msix(struct vxgedev *vdev)
2345 ret = vxge_alloc_msix(vdev); 2247 ret = vxge_alloc_msix(vdev);
2346 if (!ret) { 2248 if (!ret) {
2347 for (i = 0; i < vdev->no_of_vpath; i++) { 2249 for (i = 0; i < vdev->no_of_vpath; i++) {
2250 struct vxge_vpath *vpath = &vdev->vpaths[i];
2348 2251
2349 /* If fifo or ring are not enabled 2252 /* If fifo or ring are not enabled, the MSIX vector for
2350 the MSIX vector for that should be set to 0 2253 * it should be set to 0.
2351 Hence initializeing this array to all 0s. 2254 */
2352 */ 2255 vpath->ring.rx_vector_no = (vpath->device_id *
2353 vdev->vpaths[i].ring.rx_vector_no = 2256 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2354 (vdev->vpaths[i].device_id *
2355 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2356 2257
2357 vxge_hw_vpath_msix_set(vdev->vpaths[i].handle, 2258 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2358 tim_msix_id, VXGE_ALARM_MSIX_ID); 2259 VXGE_ALARM_MSIX_ID);
2359 } 2260 }
2360 } 2261 }
2361 2262
@@ -2570,9 +2471,10 @@ static void vxge_poll_vp_reset(unsigned long data)
2570static void vxge_poll_vp_lockup(unsigned long data) 2471static void vxge_poll_vp_lockup(unsigned long data)
2571{ 2472{
2572 struct vxgedev *vdev = (struct vxgedev *)data; 2473 struct vxgedev *vdev = (struct vxgedev *)data;
2573 int i;
2574 struct vxge_ring *ring;
2575 enum vxge_hw_status status = VXGE_HW_OK; 2474 enum vxge_hw_status status = VXGE_HW_OK;
2475 struct vxge_vpath *vpath;
2476 struct vxge_ring *ring;
2477 int i;
2576 2478
2577 for (i = 0; i < vdev->no_of_vpath; i++) { 2479 for (i = 0; i < vdev->no_of_vpath; i++) {
2578 ring = &vdev->vpaths[i].ring; 2480 ring = &vdev->vpaths[i].ring;
@@ -2586,13 +2488,13 @@ static void vxge_poll_vp_lockup(unsigned long data)
2586 2488
2587 /* schedule vpath reset */ 2489 /* schedule vpath reset */
2588 if (!test_and_set_bit(i, &vdev->vp_reset)) { 2490 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2491 vpath = &vdev->vpaths[i];
2589 2492
2590 /* disable interrupts for this vpath */ 2493 /* disable interrupts for this vpath */
2591 vxge_vpath_intr_disable(vdev, i); 2494 vxge_vpath_intr_disable(vdev, i);
2592 2495
2593 /* stop the queue for this vpath */ 2496 /* stop the queue for this vpath */
2594 vxge_stop_tx_queue(&vdev->vpaths[i]. 2497 netif_tx_stop_queue(vpath->fifo.txq);
2595 fifo);
2596 continue; 2498 continue;
2597 } 2499 }
2598 } 2500 }
@@ -2621,6 +2523,7 @@ vxge_open(struct net_device *dev)
2621 enum vxge_hw_status status; 2523 enum vxge_hw_status status;
2622 struct vxgedev *vdev; 2524 struct vxgedev *vdev;
2623 struct __vxge_hw_device *hldev; 2525 struct __vxge_hw_device *hldev;
2526 struct vxge_vpath *vpath;
2624 int ret = 0; 2527 int ret = 0;
2625 int i; 2528 int i;
2626 u64 val64, function_mode; 2529 u64 val64, function_mode;
@@ -2654,20 +2557,21 @@ vxge_open(struct net_device *dev)
2654 goto out1; 2557 goto out1;
2655 } 2558 }
2656 2559
2657
2658 if (vdev->config.intr_type != MSI_X) { 2560 if (vdev->config.intr_type != MSI_X) {
2659 netif_napi_add(dev, &vdev->napi, vxge_poll_inta, 2561 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2660 vdev->config.napi_weight); 2562 vdev->config.napi_weight);
2661 napi_enable(&vdev->napi); 2563 napi_enable(&vdev->napi);
2662 for (i = 0; i < vdev->no_of_vpath; i++) 2564 for (i = 0; i < vdev->no_of_vpath; i++) {
2663 vdev->vpaths[i].ring.napi_p = &vdev->napi; 2565 vpath = &vdev->vpaths[i];
2566 vpath->ring.napi_p = &vdev->napi;
2567 }
2664 } else { 2568 } else {
2665 for (i = 0; i < vdev->no_of_vpath; i++) { 2569 for (i = 0; i < vdev->no_of_vpath; i++) {
2666 netif_napi_add(dev, &vdev->vpaths[i].ring.napi, 2570 vpath = &vdev->vpaths[i];
2571 netif_napi_add(dev, &vpath->ring.napi,
2667 vxge_poll_msix, vdev->config.napi_weight); 2572 vxge_poll_msix, vdev->config.napi_weight);
2668 napi_enable(&vdev->vpaths[i].ring.napi); 2573 napi_enable(&vpath->ring.napi);
2669 vdev->vpaths[i].ring.napi_p = 2574 vpath->ring.napi_p = &vpath->ring.napi;
2670 &vdev->vpaths[i].ring.napi;
2671 } 2575 }
2672 } 2576 }
2673 2577
@@ -2684,9 +2588,10 @@ vxge_open(struct net_device *dev)
2684 } 2588 }
2685 2589
2686 for (i = 0; i < vdev->no_of_vpath; i++) { 2590 for (i = 0; i < vdev->no_of_vpath; i++) {
2591 vpath = &vdev->vpaths[i];
2592
2687 /* set initial mtu before enabling the device */ 2593 /* set initial mtu before enabling the device */
2688 status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle, 2594 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2689 vdev->mtu);
2690 if (status != VXGE_HW_OK) { 2595 if (status != VXGE_HW_OK) {
2691 vxge_debug_init(VXGE_ERR, 2596 vxge_debug_init(VXGE_ERR,
2692 "%s: fatal: can not set new MTU", dev->name); 2597 "%s: fatal: can not set new MTU", dev->name);
@@ -2700,10 +2605,21 @@ vxge_open(struct net_device *dev)
2700 "%s: MTU is %d", vdev->ndev->name, vdev->mtu); 2605 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2701 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev); 2606 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2702 2607
2703 /* Reprogram the DA table with populated mac addresses */ 2608 /* Restore the DA, VID table and also multicast and promiscuous mode
2704 for (i = 0; i < vdev->no_of_vpath; i++) { 2609 * states
2705 vxge_restore_vpath_mac_addr(&vdev->vpaths[i]); 2610 */
2706 vxge_restore_vpath_vid_table(&vdev->vpaths[i]); 2611 if (vdev->all_multi_flg) {
2612 for (i = 0; i < vdev->no_of_vpath; i++) {
2613 vpath = &vdev->vpaths[i];
2614 vxge_restore_vpath_mac_addr(vpath);
2615 vxge_restore_vpath_vid_table(vpath);
2616
2617 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2618 if (status != VXGE_HW_OK)
2619 vxge_debug_init(VXGE_ERR,
2620 "%s:%d Enabling multicast failed",
2621 __func__, __LINE__);
2622 }
2707 } 2623 }
2708 2624
2709 /* Enable vpath to sniff all unicast/multicast traffic that not 2625 /* Enable vpath to sniff all unicast/multicast traffic that not
@@ -2732,14 +2648,14 @@ vxge_open(struct net_device *dev)
2732 2648
2733 /* Enabling Bcast and mcast for all vpath */ 2649 /* Enabling Bcast and mcast for all vpath */
2734 for (i = 0; i < vdev->no_of_vpath; i++) { 2650 for (i = 0; i < vdev->no_of_vpath; i++) {
2735 status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle); 2651 vpath = &vdev->vpaths[i];
2652 status = vxge_hw_vpath_bcast_enable(vpath->handle);
2736 if (status != VXGE_HW_OK) 2653 if (status != VXGE_HW_OK)
2737 vxge_debug_init(VXGE_ERR, 2654 vxge_debug_init(VXGE_ERR,
2738 "%s : Can not enable bcast for vpath " 2655 "%s : Can not enable bcast for vpath "
2739 "id %d", dev->name, i); 2656 "id %d", dev->name, i);
2740 if (vdev->config.addr_learn_en) { 2657 if (vdev->config.addr_learn_en) {
2741 status = 2658 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2742 vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
2743 if (status != VXGE_HW_OK) 2659 if (status != VXGE_HW_OK)
2744 vxge_debug_init(VXGE_ERR, 2660 vxge_debug_init(VXGE_ERR,
2745 "%s : Can not enable mcast for vpath " 2661 "%s : Can not enable mcast for vpath "
@@ -2765,7 +2681,7 @@ vxge_open(struct net_device *dev)
2765 2681
2766 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) { 2682 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2767 netif_carrier_on(vdev->ndev); 2683 netif_carrier_on(vdev->ndev);
2768 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name); 2684 netdev_notice(vdev->ndev, "Link Up\n");
2769 vdev->stats.link_up++; 2685 vdev->stats.link_up++;
2770 } 2686 }
2771 2687
@@ -2774,12 +2690,14 @@ vxge_open(struct net_device *dev)
2774 smp_wmb(); 2690 smp_wmb();
2775 2691
2776 for (i = 0; i < vdev->no_of_vpath; i++) { 2692 for (i = 0; i < vdev->no_of_vpath; i++) {
2777 vxge_hw_vpath_enable(vdev->vpaths[i].handle); 2693 vpath = &vdev->vpaths[i];
2694
2695 vxge_hw_vpath_enable(vpath->handle);
2778 smp_wmb(); 2696 smp_wmb();
2779 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); 2697 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2780 } 2698 }
2781 2699
2782 vxge_start_all_tx_queue(vdev); 2700 netif_tx_start_all_queues(vdev->ndev);
2783 goto out0; 2701 goto out0;
2784 2702
2785out2: 2703out2:
@@ -2901,8 +2819,8 @@ int do_vxge_close(struct net_device *dev, int do_io)
2901 } 2819 }
2902 2820
2903 netif_carrier_off(vdev->ndev); 2821 netif_carrier_off(vdev->ndev);
2904 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name); 2822 netdev_notice(vdev->ndev, "Link Down\n");
2905 vxge_stop_all_tx_queue(vdev); 2823 netif_tx_stop_all_queues(vdev->ndev);
2906 2824
2907 /* Note that at this point xmit() is stopped by upper layer */ 2825 /* Note that at this point xmit() is stopped by upper layer */
2908 if (do_io) 2826 if (do_io)
@@ -3211,11 +3129,11 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3211 struct net_device *ndev; 3129 struct net_device *ndev;
3212 enum vxge_hw_status status = VXGE_HW_OK; 3130 enum vxge_hw_status status = VXGE_HW_OK;
3213 struct vxgedev *vdev; 3131 struct vxgedev *vdev;
3214 int i, ret = 0, no_of_queue = 1; 3132 int ret = 0, no_of_queue = 1;
3215 u64 stat; 3133 u64 stat;
3216 3134
3217 *vdev_out = NULL; 3135 *vdev_out = NULL;
3218 if (config->tx_steering_type == TX_MULTIQ_STEERING) 3136 if (config->tx_steering_type)
3219 no_of_queue = no_of_vpath; 3137 no_of_queue = no_of_vpath;
3220 3138
3221 ndev = alloc_etherdev_mq(sizeof(struct vxgedev), 3139 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
@@ -3284,16 +3202,6 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3284 if (vdev->config.gro_enable) 3202 if (vdev->config.gro_enable)
3285 ndev->features |= NETIF_F_GRO; 3203 ndev->features |= NETIF_F_GRO;
3286 3204
3287 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
3288 ndev->real_num_tx_queues = no_of_vpath;
3289
3290#ifdef NETIF_F_LLTX
3291 ndev->features |= NETIF_F_LLTX;
3292#endif
3293
3294 for (i = 0; i < no_of_vpath; i++)
3295 spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
3296
3297 if (register_netdev(ndev)) { 3205 if (register_netdev(ndev)) {
3298 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3206 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3299 "%s: %s : device registration failed!", 3207 "%s: %s : device registration failed!",
@@ -3393,6 +3301,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3393{ 3301{
3394 struct net_device *dev = hldev->ndev; 3302 struct net_device *dev = hldev->ndev;
3395 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 3303 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
3304 struct vxge_vpath *vpath = NULL;
3396 int vpath_idx; 3305 int vpath_idx;
3397 3306
3398 vxge_debug_entryexit(vdev->level_trace, 3307 vxge_debug_entryexit(vdev->level_trace,
@@ -3403,9 +3312,11 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3403 */ 3312 */
3404 vdev->cric_err_event = type; 3313 vdev->cric_err_event = type;
3405 3314
3406 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) 3315 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3407 if (vdev->vpaths[vpath_idx].device_id == vp_id) 3316 vpath = &vdev->vpaths[vpath_idx];
3317 if (vpath->device_id == vp_id)
3408 break; 3318 break;
3319 }
3409 3320
3410 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { 3321 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3411 if (type == VXGE_HW_EVENT_SLOT_FREEZE) { 3322 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
@@ -3442,8 +3353,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3442 vxge_vpath_intr_disable(vdev, vpath_idx); 3353 vxge_vpath_intr_disable(vdev, vpath_idx);
3443 3354
3444 /* stop the queue for this vpath */ 3355 /* stop the queue for this vpath */
3445 vxge_stop_tx_queue(&vdev->vpaths[vpath_idx]. 3356 netif_tx_stop_queue(vpath->fifo.txq);
3446 fifo);
3447 } 3357 }
3448 } 3358 }
3449 } 3359 }
@@ -3936,9 +3846,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3936 struct vxgedev *vdev = netdev_priv(netdev); 3846 struct vxgedev *vdev = netdev_priv(netdev);
3937 3847
3938 if (pci_enable_device(pdev)) { 3848 if (pci_enable_device(pdev)) {
3939 printk(KERN_ERR "%s: " 3849 netdev_err(netdev, "Cannot re-enable device after reset\n");
3940 "Cannot re-enable device after reset\n",
3941 VXGE_DRIVER_NAME);
3942 return PCI_ERS_RESULT_DISCONNECT; 3850 return PCI_ERS_RESULT_DISCONNECT;
3943 } 3851 }
3944 3852
@@ -3963,9 +3871,8 @@ static void vxge_io_resume(struct pci_dev *pdev)
3963 3871
3964 if (netif_running(netdev)) { 3872 if (netif_running(netdev)) {
3965 if (vxge_open(netdev)) { 3873 if (vxge_open(netdev)) {
3966 printk(KERN_ERR "%s: " 3874 netdev_err(netdev,
3967 "Can't bring device back up after reset\n", 3875 "Can't bring device back up after reset\n");
3968 VXGE_DRIVER_NAME);
3969 return; 3876 return;
3970 } 3877 }
3971 } 3878 }
@@ -4023,7 +3930,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4023 int high_dma = 0; 3930 int high_dma = 0;
4024 u64 vpath_mask = 0; 3931 u64 vpath_mask = 0;
4025 struct vxgedev *vdev; 3932 struct vxgedev *vdev;
4026 struct vxge_config ll_config; 3933 struct vxge_config *ll_config = NULL;
4027 struct vxge_hw_device_config *device_config = NULL; 3934 struct vxge_hw_device_config *device_config = NULL;
4028 struct vxge_hw_device_attr attr; 3935 struct vxge_hw_device_attr attr;
4029 int i, j, no_of_vpath = 0, max_vpath_supported = 0; 3936 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
@@ -4082,17 +3989,24 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4082 goto _exit0; 3989 goto _exit0;
4083 } 3990 }
4084 3991
4085 memset(&ll_config, 0, sizeof(struct vxge_config)); 3992 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
4086 ll_config.tx_steering_type = TX_MULTIQ_STEERING; 3993 if (!ll_config) {
4087 ll_config.intr_type = MSI_X; 3994 ret = -ENOMEM;
4088 ll_config.napi_weight = NEW_NAPI_WEIGHT; 3995 vxge_debug_init(VXGE_ERR,
4089 ll_config.rth_steering = RTH_STEERING; 3996 "ll_config : malloc failed %s %d",
3997 __FILE__, __LINE__);
3998 goto _exit0;
3999 }
4000 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4001 ll_config->intr_type = MSI_X;
4002 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4003 ll_config->rth_steering = RTH_STEERING;
4090 4004
4091 /* get the default configuration parameters */ 4005 /* get the default configuration parameters */
4092 vxge_hw_device_config_default_get(device_config); 4006 vxge_hw_device_config_default_get(device_config);
4093 4007
4094 /* initialize configuration parameters */ 4008 /* initialize configuration parameters */
4095 vxge_device_config_init(device_config, &ll_config.intr_type); 4009 vxge_device_config_init(device_config, &ll_config->intr_type);
4096 4010
4097 ret = pci_enable_device(pdev); 4011 ret = pci_enable_device(pdev);
4098 if (ret) { 4012 if (ret) {
@@ -4145,7 +4059,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4145 (unsigned long long)pci_resource_start(pdev, 0)); 4059 (unsigned long long)pci_resource_start(pdev, 0));
4146 4060
4147 status = vxge_hw_device_hw_info_get(attr.bar0, 4061 status = vxge_hw_device_hw_info_get(attr.bar0,
4148 &ll_config.device_hw_info); 4062 &ll_config->device_hw_info);
4149 if (status != VXGE_HW_OK) { 4063 if (status != VXGE_HW_OK) {
4150 vxge_debug_init(VXGE_ERR, 4064 vxge_debug_init(VXGE_ERR,
4151 "%s: Reading of hardware info failed." 4065 "%s: Reading of hardware info failed."
@@ -4154,7 +4068,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4154 goto _exit3; 4068 goto _exit3;
4155 } 4069 }
4156 4070
4157 if (ll_config.device_hw_info.fw_version.major != 4071 if (ll_config->device_hw_info.fw_version.major !=
4158 VXGE_DRIVER_FW_VERSION_MAJOR) { 4072 VXGE_DRIVER_FW_VERSION_MAJOR) {
4159 vxge_debug_init(VXGE_ERR, 4073 vxge_debug_init(VXGE_ERR,
4160 "%s: Incorrect firmware version." 4074 "%s: Incorrect firmware version."
@@ -4164,7 +4078,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4164 goto _exit3; 4078 goto _exit3;
4165 } 4079 }
4166 4080
4167 vpath_mask = ll_config.device_hw_info.vpath_mask; 4081 vpath_mask = ll_config->device_hw_info.vpath_mask;
4168 if (vpath_mask == 0) { 4082 if (vpath_mask == 0) {
4169 vxge_debug_ll_config(VXGE_TRACE, 4083 vxge_debug_ll_config(VXGE_TRACE,
4170 "%s: No vpaths available in device", VXGE_DRIVER_NAME); 4084 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
@@ -4176,10 +4090,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4176 "%s:%d Vpath mask = %llx", __func__, __LINE__, 4090 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4177 (unsigned long long)vpath_mask); 4091 (unsigned long long)vpath_mask);
4178 4092
4179 function_mode = ll_config.device_hw_info.function_mode; 4093 function_mode = ll_config->device_hw_info.function_mode;
4180 host_type = ll_config.device_hw_info.host_type; 4094 host_type = ll_config->device_hw_info.host_type;
4181 is_privileged = __vxge_hw_device_is_privilaged(host_type, 4095 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4182 ll_config.device_hw_info.func_id); 4096 ll_config->device_hw_info.func_id);
4183 4097
4184 /* Check how many vpaths are available */ 4098 /* Check how many vpaths are available */
4185 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 4099 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -4193,7 +4107,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4193 4107
4194 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4108 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4195 if (is_sriov(function_mode) && (max_config_dev > 1) && 4109 if (is_sriov(function_mode) && (max_config_dev > 1) &&
4196 (ll_config.intr_type != INTA) && 4110 (ll_config->intr_type != INTA) &&
4197 (is_privileged == VXGE_HW_OK)) { 4111 (is_privileged == VXGE_HW_OK)) {
4198 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) 4112 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4199 ? (max_config_dev - 1) : num_vfs); 4113 ? (max_config_dev - 1) : num_vfs);
@@ -4206,7 +4120,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4206 * Configure vpaths and get driver configured number of vpaths 4120 * Configure vpaths and get driver configured number of vpaths
4207 * which is less than or equal to the maximum vpaths per function. 4121 * which is less than or equal to the maximum vpaths per function.
4208 */ 4122 */
4209 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config); 4123 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4210 if (!no_of_vpath) { 4124 if (!no_of_vpath) {
4211 vxge_debug_ll_config(VXGE_ERR, 4125 vxge_debug_ll_config(VXGE_ERR,
4212 "%s: No more vpaths to configure", VXGE_DRIVER_NAME); 4126 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
@@ -4241,21 +4155,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4241 /* set private device info */ 4155 /* set private device info */
4242 pci_set_drvdata(pdev, hldev); 4156 pci_set_drvdata(pdev, hldev);
4243 4157
4244 ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; 4158 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4245 ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4159 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4246 ll_config.addr_learn_en = addr_learn_en; 4160 ll_config->addr_learn_en = addr_learn_en;
4247 ll_config.rth_algorithm = RTH_ALG_JENKINS; 4161 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4248 ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4162 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
4249 ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4163 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
4250 ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4164 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4251 ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4165 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4252 ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4166 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4253 ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4167 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4254 ll_config.rth_bkt_sz = RTH_BUCKET_SIZE; 4168 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4255 ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4169 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4256 ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4170 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4257 4171
4258 if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath, 4172 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4259 &vdev)) { 4173 &vdev)) {
4260 ret = -EINVAL; 4174 ret = -EINVAL;
4261 goto _exit4; 4175 goto _exit4;
@@ -4281,12 +4195,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4281 4195
4282 vdev->vpaths[j].is_configured = 1; 4196 vdev->vpaths[j].is_configured = 1;
4283 vdev->vpaths[j].device_id = i; 4197 vdev->vpaths[j].device_id = i;
4284 vdev->vpaths[j].fifo.driver_id = j;
4285 vdev->vpaths[j].ring.driver_id = j; 4198 vdev->vpaths[j].ring.driver_id = j;
4286 vdev->vpaths[j].vdev = vdev; 4199 vdev->vpaths[j].vdev = vdev;
4287 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; 4200 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4288 memcpy((u8 *)vdev->vpaths[j].macaddr, 4201 memcpy((u8 *)vdev->vpaths[j].macaddr,
4289 (u8 *)ll_config.device_hw_info.mac_addrs[i], 4202 ll_config->device_hw_info.mac_addrs[i],
4290 ETH_ALEN); 4203 ETH_ALEN);
4291 4204
4292 /* Initialize the mac address list header */ 4205 /* Initialize the mac address list header */
@@ -4307,18 +4220,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4307 4220
4308 macaddr = (u8 *)vdev->vpaths[0].macaddr; 4221 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4309 4222
4310 ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; 4223 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4311 ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; 4224 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4312 ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; 4225 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4313 4226
4314 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", 4227 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4315 vdev->ndev->name, ll_config.device_hw_info.serial_number); 4228 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4316 4229
4317 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", 4230 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4318 vdev->ndev->name, ll_config.device_hw_info.part_number); 4231 vdev->ndev->name, ll_config->device_hw_info.part_number);
4319 4232
4320 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", 4233 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4321 vdev->ndev->name, ll_config.device_hw_info.product_desc); 4234 vdev->ndev->name, ll_config->device_hw_info.product_desc);
4322 4235
4323 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", 4236 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4324 vdev->ndev->name, macaddr); 4237 vdev->ndev->name, macaddr);
@@ -4328,11 +4241,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4328 4241
4329 vxge_debug_init(VXGE_TRACE, 4242 vxge_debug_init(VXGE_TRACE,
4330 "%s: Firmware version : %s Date : %s", vdev->ndev->name, 4243 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4331 ll_config.device_hw_info.fw_version.version, 4244 ll_config->device_hw_info.fw_version.version,
4332 ll_config.device_hw_info.fw_date.date); 4245 ll_config->device_hw_info.fw_date.date);
4333 4246
4334 if (new_device) { 4247 if (new_device) {
4335 switch (ll_config.device_hw_info.function_mode) { 4248 switch (ll_config->device_hw_info.function_mode) {
4336 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: 4249 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4337 vxge_debug_init(VXGE_TRACE, 4250 vxge_debug_init(VXGE_TRACE,
4338 "%s: Single Function Mode Enabled", vdev->ndev->name); 4251 "%s: Single Function Mode Enabled", vdev->ndev->name);
@@ -4355,7 +4268,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4355 vxge_print_parm(vdev, vpath_mask); 4268 vxge_print_parm(vdev, vpath_mask);
4356 4269
4357 /* Store the fw version for ethttool option */ 4270 /* Store the fw version for ethttool option */
4358 strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version); 4271 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4359 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); 4272 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4360 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); 4273 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4361 4274
@@ -4394,7 +4307,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4394 * present to prevent such a failure. 4307 * present to prevent such a failure.
4395 */ 4308 */
4396 4309
4397 if (ll_config.device_hw_info.function_mode == 4310 if (ll_config->device_hw_info.function_mode ==
4398 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) 4311 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4399 if (vdev->config.intr_type == INTA) 4312 if (vdev->config.intr_type == INTA)
4400 vxge_hw_device_unmask_all(hldev); 4313 vxge_hw_device_unmask_all(hldev);
@@ -4406,6 +4319,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4406 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4319 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4407 vxge_hw_device_trace_level_get(hldev)); 4320 vxge_hw_device_trace_level_get(hldev));
4408 4321
4322 kfree(ll_config);
4409 return 0; 4323 return 0;
4410 4324
4411_exit5: 4325_exit5:
@@ -4423,6 +4337,7 @@ _exit2:
4423_exit1: 4337_exit1:
4424 pci_disable_device(pdev); 4338 pci_disable_device(pdev);
4425_exit0: 4339_exit0:
4340 kfree(ll_config);
4426 kfree(device_config); 4341 kfree(device_config);
4427 driver_config->config_dev_cnt--; 4342 driver_config->config_dev_cnt--;
4428 pci_set_drvdata(pdev, NULL); 4343 pci_set_drvdata(pdev, NULL);
@@ -4514,13 +4429,9 @@ static int __init
4514vxge_starter(void) 4429vxge_starter(void)
4515{ 4430{
4516 int ret = 0; 4431 int ret = 0;
4517 char version[32];
4518 snprintf(version, 32, "%s", DRV_VERSION);
4519 4432
4520 printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n", 4433 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4521 VXGE_DRIVER_NAME); 4434 pr_info("Driver version: %s\n", DRV_VERSION);
4522 printk(KERN_INFO "%s: Driver version: %s\n",
4523 VXGE_DRIVER_NAME, version);
4524 4435
4525 verify_bandwidth(); 4436 verify_bandwidth();
4526 4437
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 60276b20fa5e..2e3b064b8e4b 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-main.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#ifndef VXGE_MAIN_H 14#ifndef VXGE_MAIN_H
15#define VXGE_MAIN_H 15#define VXGE_MAIN_H
@@ -217,21 +217,13 @@ struct vxge_fifo_stats {
217}; 217};
218 218
219struct vxge_fifo { 219struct vxge_fifo {
220 struct net_device *ndev; 220 struct net_device *ndev;
221 struct pci_dev *pdev; 221 struct pci_dev *pdev;
222 struct __vxge_hw_fifo *handle; 222 struct __vxge_hw_fifo *handle;
223 struct netdev_queue *txq;
223 224
224 /* The vpath id maintained in the driver -
225 * 0 to 'maximum_vpaths_in_function - 1'
226 */
227 int driver_id;
228 int tx_steering_type; 225 int tx_steering_type;
229 int indicate_max_pkts; 226 int indicate_max_pkts;
230 spinlock_t tx_lock;
231 /* flag used to maintain queue state when MULTIQ is not enabled */
232#define VPATH_QUEUE_START 0
233#define VPATH_QUEUE_STOP 1
234 int queue_state;
235 227
236 /* Tx stats */ 228 /* Tx stats */
237 struct vxge_fifo_stats stats; 229 struct vxge_fifo_stats stats;
@@ -279,7 +271,6 @@ struct vxge_ring {
279} ____cacheline_aligned; 271} ____cacheline_aligned;
280 272
281struct vxge_vpath { 273struct vxge_vpath {
282
283 struct vxge_fifo fifo; 274 struct vxge_fifo fifo;
284 struct vxge_ring ring; 275 struct vxge_ring ring;
285 276
@@ -447,14 +438,6 @@ int vxge_open_vpaths(struct vxgedev *vdev);
447 438
448enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); 439enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
449 440
450void vxge_stop_all_tx_queue(struct vxgedev *vdev);
451
452void vxge_stop_tx_queue(struct vxge_fifo *fifo);
453
454void vxge_start_all_tx_queue(struct vxgedev *vdev);
455
456void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb);
457
458enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, 441enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
459 struct macInfo *mac); 442 struct macInfo *mac);
460 443
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 9a0cf8eaa328..3dd5c9615ef9 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-reg.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O Virtualized 10 * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
11 * Server Adapter. 11 * Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#ifndef VXGE_REG_H 14#ifndef VXGE_REG_H
15#define VXGE_REG_H 15#define VXGE_REG_H
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 6cc1dd79b40b..cedf08f99cb3 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15 15
@@ -2466,14 +2466,12 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2466 * the same. 2466 * the same.
2467 * @fifo: Handle to the fifo object used for non offload send 2467 * @fifo: Handle to the fifo object used for non offload send
2468 * 2468 *
2469 * The function polls the Tx for the completed descriptors and calls 2469 * The function polls the Tx for the completed descriptors and calls
2470 * the driver via supplied completion callback. 2470 * the driver via supplied completion callback.
2471 * 2471 *
2472 * Returns: VXGE_HW_OK, if the polling is completed successful. 2472 * Returns: VXGE_HW_OK, if the polling is completed successful.
2473 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed 2473 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2474 * descriptors available which are yet to be processed. 2474 * descriptors available which are yet to be processed.
2475 *
2476 * See also: vxge_hw_vpath_poll_tx().
2477 */ 2475 */
2478enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo, 2476enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2479 struct sk_buff ***skb_ptr, int nr_skb, 2477 struct sk_buff ***skb_ptr, int nr_skb,
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index c252f3d3f650..6fa07d13798e 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -7,9 +7,9 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-traffic.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#ifndef VXGE_TRAFFIC_H 14#ifndef VXGE_TRAFFIC_H
15#define VXGE_TRAFFIC_H 15#define VXGE_TRAFFIC_H
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 5da7ab1fd307..53fefe137368 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -7,17 +7,16 @@
7 * system is licensed under the GPL. 7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information. 8 * See the file COPYING in this distribution for more information.
9 * 9 *
10 * vxge-version.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O 10 * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#ifndef VXGE_VERSION_H 14#ifndef VXGE_VERSION_H
15
16#define VXGE_VERSION_H 15#define VXGE_VERSION_H
17 16
18#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0" 18#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "8" 19#define VXGE_VERSION_FIX "9"
21#define VXGE_VERSION_BUILD "20182" 20#define VXGE_VERSION_BUILD "20840"
22#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
23#endif 22#endif
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f0bd70fb650c..04c6cd4333f1 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -89,7 +89,6 @@
89#include <linux/spinlock.h> 89#include <linux/spinlock.h>
90#include <linux/mutex.h> 90#include <linux/mutex.h>
91#include <linux/device.h> 91#include <linux/device.h>
92#include <linux/smp_lock.h>
93#include <asm/io.h> 92#include <asm/io.h>
94#include <asm/dma.h> 93#include <asm/dma.h>
95#include <asm/byteorder.h> 94#include <asm/byteorder.h>
@@ -174,6 +173,7 @@ struct cosa_data {
174 * Character device major number. 117 was allocated for us. 173 * Character device major number. 117 was allocated for us.
175 * The value of 0 means to allocate a first free one. 174 * The value of 0 means to allocate a first free one.
176 */ 175 */
176static DEFINE_MUTEX(cosa_chardev_mutex);
177static int cosa_major = 117; 177static int cosa_major = 117;
178 178
179/* 179/*
@@ -944,7 +944,7 @@ static int cosa_open(struct inode *inode, struct file *file)
944 int n; 944 int n;
945 int ret = 0; 945 int ret = 0;
946 946
947 lock_kernel(); 947 mutex_lock(&cosa_chardev_mutex);
948 if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS) 948 if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS)
949 >= nr_cards) { 949 >= nr_cards) {
950 ret = -ENODEV; 950 ret = -ENODEV;
@@ -976,7 +976,7 @@ static int cosa_open(struct inode *inode, struct file *file)
976 chan->rx_done = chrdev_rx_done; 976 chan->rx_done = chrdev_rx_done;
977 spin_unlock_irqrestore(&cosa->lock, flags); 977 spin_unlock_irqrestore(&cosa->lock, flags);
978out: 978out:
979 unlock_kernel(); 979 mutex_unlock(&cosa_chardev_mutex);
980 return ret; 980 return ret;
981} 981}
982 982
@@ -1212,10 +1212,10 @@ static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
1212 struct cosa_data *cosa; 1212 struct cosa_data *cosa;
1213 long ret; 1213 long ret;
1214 1214
1215 lock_kernel(); 1215 mutex_lock(&cosa_chardev_mutex);
1216 cosa = channel->cosa; 1216 cosa = channel->cosa;
1217 ret = cosa_ioctl_common(cosa, channel, cmd, arg); 1217 ret = cosa_ioctl_common(cosa, channel, cmd, arg);
1218 unlock_kernel(); 1218 mutex_unlock(&cosa_chardev_mutex);
1219 return ret; 1219 return ret;
1220} 1220}
1221 1221
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e087b9a6daaa..ad7719fe6d0a 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -15,6 +15,8 @@
15 * Maintainer: Kevin Curtis <kevin.curtis@farsite.co.uk> 15 * Maintainer: Kevin Curtis <kevin.curtis@farsite.co.uk>
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/kernel.h> 21#include <linux/kernel.h>
20#include <linux/version.h> 22#include <linux/version.h>
@@ -511,21 +513,19 @@ static int fst_debug_mask = { FST_DEBUG };
511 * support variable numbers of macro parameters. The inverted if prevents us 513 * support variable numbers of macro parameters. The inverted if prevents us
512 * eating someone else's else clause. 514 * eating someone else's else clause.
513 */ 515 */
514#define dbg(F,fmt,A...) if ( ! ( fst_debug_mask & (F))) \ 516#define dbg(F, fmt, args...) \
515 ; \ 517do { \
516 else \ 518 if (fst_debug_mask & (F)) \
517 printk ( KERN_DEBUG FST_NAME ": " fmt, ## A ) 519 printk(KERN_DEBUG pr_fmt(fmt), ##args); \
518 520} while (0)
519#else 521#else
520#define dbg(X...) /* NOP */ 522#define dbg(F, fmt, args...) \
523do { \
524 if (0) \
525 printk(KERN_DEBUG pr_fmt(fmt), ##args); \
526} while (0)
521#endif 527#endif
522 528
523/* Printing short cuts
524 */
525#define printk_err(fmt,A...) printk ( KERN_ERR FST_NAME ": " fmt, ## A )
526#define printk_warn(fmt,A...) printk ( KERN_WARNING FST_NAME ": " fmt, ## A )
527#define printk_info(fmt,A...) printk ( KERN_INFO FST_NAME ": " fmt, ## A )
528
529/* 529/*
530 * PCI ID lookup table 530 * PCI ID lookup table
531 */ 531 */
@@ -961,7 +961,7 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
961 spin_lock_irqsave(&card->card_lock, flags); 961 spin_lock_irqsave(&card->card_lock, flags);
962 962
963 if (++safety > 2000) { 963 if (++safety > 2000) {
964 printk_err("Mailbox safety timeout\n"); 964 pr_err("Mailbox safety timeout\n");
965 break; 965 break;
966 } 966 }
967 967
@@ -1241,8 +1241,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
1241 * This seems to happen on the TE1 interface sometimes 1241 * This seems to happen on the TE1 interface sometimes
1242 * so throw the frame away and log the event. 1242 * so throw the frame away and log the event.
1243 */ 1243 */
1244 printk_err("Frame received with 0 length. Card %d Port %d\n", 1244 pr_err("Frame received with 0 length. Card %d Port %d\n",
1245 card->card_no, port->index); 1245 card->card_no, port->index);
1246 /* Return descriptor to card */ 1246 /* Return descriptor to card */
1247 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); 1247 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
1248 1248
@@ -1486,9 +1486,8 @@ fst_intr(int dummy, void *dev_id)
1486 */ 1486 */
1487 dbg(DBG_INTR, "intr: %d %p\n", card->irq, card); 1487 dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
1488 if (card->state != FST_RUNNING) { 1488 if (card->state != FST_RUNNING) {
1489 printk_err 1489 pr_err("Interrupt received for card %d in a non running state (%d)\n",
1490 ("Interrupt received for card %d in a non running state (%d)\n", 1490 card->card_no, card->state);
1491 card->card_no, card->state);
1492 1491
1493 /* 1492 /*
1494 * It is possible to really be running, i.e. we have re-loaded 1493 * It is possible to really be running, i.e. we have re-loaded
@@ -1614,8 +1613,7 @@ fst_intr(int dummy, void *dev_id)
1614 break; 1613 break;
1615 1614
1616 default: 1615 default:
1617 printk_err("intr: unknown card event %d. ignored\n", 1616 pr_err("intr: unknown card event %d. ignored\n", event);
1618 event);
1619 break; 1617 break;
1620 } 1618 }
1621 1619
@@ -1637,13 +1635,13 @@ check_started_ok(struct fst_card_info *card)
1637 1635
1638 /* Check structure version and end marker */ 1636 /* Check structure version and end marker */
1639 if (FST_RDW(card, smcVersion) != SMC_VERSION) { 1637 if (FST_RDW(card, smcVersion) != SMC_VERSION) {
1640 printk_err("Bad shared memory version %d expected %d\n", 1638 pr_err("Bad shared memory version %d expected %d\n",
1641 FST_RDW(card, smcVersion), SMC_VERSION); 1639 FST_RDW(card, smcVersion), SMC_VERSION);
1642 card->state = FST_BADVERSION; 1640 card->state = FST_BADVERSION;
1643 return; 1641 return;
1644 } 1642 }
1645 if (FST_RDL(card, endOfSmcSignature) != END_SIG) { 1643 if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
1646 printk_err("Missing shared memory signature\n"); 1644 pr_err("Missing shared memory signature\n");
1647 card->state = FST_BADVERSION; 1645 card->state = FST_BADVERSION;
1648 return; 1646 return;
1649 } 1647 }
@@ -1651,11 +1649,11 @@ check_started_ok(struct fst_card_info *card)
1651 if ((i = FST_RDB(card, taskStatus)) == 0x01) { 1649 if ((i = FST_RDB(card, taskStatus)) == 0x01) {
1652 card->state = FST_RUNNING; 1650 card->state = FST_RUNNING;
1653 } else if (i == 0xFF) { 1651 } else if (i == 0xFF) {
1654 printk_err("Firmware initialisation failed. Card halted\n"); 1652 pr_err("Firmware initialisation failed. Card halted\n");
1655 card->state = FST_HALTED; 1653 card->state = FST_HALTED;
1656 return; 1654 return;
1657 } else if (i != 0x00) { 1655 } else if (i != 0x00) {
1658 printk_err("Unknown firmware status 0x%x\n", i); 1656 pr_err("Unknown firmware status 0x%x\n", i);
1659 card->state = FST_HALTED; 1657 card->state = FST_HALTED;
1660 return; 1658 return;
1661 } 1659 }
@@ -1665,9 +1663,10 @@ check_started_ok(struct fst_card_info *card)
1665 * existing firmware etc so we just report it for the moment. 1663 * existing firmware etc so we just report it for the moment.
1666 */ 1664 */
1667 if (FST_RDL(card, numberOfPorts) != card->nports) { 1665 if (FST_RDL(card, numberOfPorts) != card->nports) {
1668 printk_warn("Port count mismatch on card %d." 1666 pr_warning("Port count mismatch on card %d. "
1669 " Firmware thinks %d we say %d\n", card->card_no, 1667 "Firmware thinks %d we say %d\n",
1670 FST_RDL(card, numberOfPorts), card->nports); 1668 card->card_no,
1669 FST_RDL(card, numberOfPorts), card->nports);
1671 } 1670 }
1672} 1671}
1673 1672
@@ -2038,16 +2037,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2038 2037
2039 /* Now copy the data to the card. */ 2038 /* Now copy the data to the card. */
2040 2039
2041 buf = kmalloc(wrthdr.size, GFP_KERNEL); 2040 buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
2042 if (!buf) 2041 wrthdr.size);
2043 return -ENOMEM; 2042 if (IS_ERR(buf))
2044 2043 return PTR_ERR(buf);
2045 if (copy_from_user(buf,
2046 ifr->ifr_data + sizeof (struct fstioc_write),
2047 wrthdr.size)) {
2048 kfree(buf);
2049 return -EFAULT;
2050 }
2051 2044
2052 memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size); 2045 memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
2053 kfree(buf); 2046 kfree(buf);
@@ -2096,9 +2089,8 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2096 */ 2089 */
2097 2090
2098 if (card->state != FST_RUNNING) { 2091 if (card->state != FST_RUNNING) {
2099 printk_err 2092 pr_err("Attempt to configure card %d in non-running state (%d)\n",
2100 ("Attempt to configure card %d in non-running state (%d)\n", 2093 card->card_no, card->state);
2101 card->card_no, card->state);
2102 return -EIO; 2094 return -EIO;
2103 } 2095 }
2104 if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) { 2096 if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
@@ -2390,8 +2382,8 @@ fst_init_card(struct fst_card_info *card)
2390 err = register_hdlc_device(card->ports[i].dev); 2382 err = register_hdlc_device(card->ports[i].dev);
2391 if (err < 0) { 2383 if (err < 0) {
2392 int j; 2384 int j;
2393 printk_err ("Cannot register HDLC device for port %d" 2385 pr_err("Cannot register HDLC device for port %d (errno %d)\n",
2394 " (errno %d)\n", i, -err ); 2386 i, -err);
2395 for (j = i; j < card->nports; j++) { 2387 for (j = i; j < card->nports; j++) {
2396 free_netdev(card->ports[j].dev); 2388 free_netdev(card->ports[j].dev);
2397 card->ports[j].dev = NULL; 2389 card->ports[j].dev = NULL;
@@ -2401,10 +2393,10 @@ fst_init_card(struct fst_card_info *card)
2401 } 2393 }
2402 } 2394 }
2403 2395
2404 printk_info("%s-%s: %s IRQ%d, %d ports\n", 2396 pr_info("%s-%s: %s IRQ%d, %d ports\n",
2405 port_to_dev(&card->ports[0])->name, 2397 port_to_dev(&card->ports[0])->name,
2406 port_to_dev(&card->ports[card->nports - 1])->name, 2398 port_to_dev(&card->ports[card->nports - 1])->name,
2407 type_strings[card->type], card->irq, card->nports); 2399 type_strings[card->type], card->irq, card->nports);
2408} 2400}
2409 2401
2410static const struct net_device_ops fst_ops = { 2402static const struct net_device_ops fst_ops = {
@@ -2423,19 +2415,17 @@ static const struct net_device_ops fst_ops = {
2423static int __devinit 2415static int __devinit
2424fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2416fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2425{ 2417{
2426 static int firsttime_done = 0;
2427 static int no_of_cards_added = 0; 2418 static int no_of_cards_added = 0;
2428 struct fst_card_info *card; 2419 struct fst_card_info *card;
2429 int err = 0; 2420 int err = 0;
2430 int i; 2421 int i;
2431 2422
2432 if (!firsttime_done) { 2423 printk_once(KERN_INFO
2433 printk_info("FarSync WAN driver " FST_USER_VERSION 2424 pr_fmt("FarSync WAN driver " FST_USER_VERSION
2434 " (c) 2001-2004 FarSite Communications Ltd.\n"); 2425 " (c) 2001-2004 FarSite Communications Ltd.\n"));
2435 firsttime_done = 1; 2426#if FST_DEBUG
2436 dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask); 2427 dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
2437 } 2428#endif
2438
2439 /* 2429 /*
2440 * We are going to be clever and allow certain cards not to be 2430 * We are going to be clever and allow certain cards not to be
2441 * configured. An exclude list can be provided in /etc/modules.conf 2431 * configured. An exclude list can be provided in /etc/modules.conf
@@ -2447,8 +2437,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2447 */ 2437 */
2448 for (i = 0; i < fst_excluded_cards; i++) { 2438 for (i = 0; i < fst_excluded_cards; i++) {
2449 if ((pdev->devfn) >> 3 == fst_excluded_list[i]) { 2439 if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
2450 printk_info("FarSync PCI device %d not assigned\n", 2440 pr_info("FarSync PCI device %d not assigned\n",
2451 (pdev->devfn) >> 3); 2441 (pdev->devfn) >> 3);
2452 return -EBUSY; 2442 return -EBUSY;
2453 } 2443 }
2454 } 2444 }
@@ -2457,20 +2447,19 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2457 /* Allocate driver private data */ 2447 /* Allocate driver private data */
2458 card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL); 2448 card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
2459 if (card == NULL) { 2449 if (card == NULL) {
2460 printk_err("FarSync card found but insufficient memory for" 2450 pr_err("FarSync card found but insufficient memory for driver storage\n");
2461 " driver storage\n");
2462 return -ENOMEM; 2451 return -ENOMEM;
2463 } 2452 }
2464 2453
2465 /* Try to enable the device */ 2454 /* Try to enable the device */
2466 if ((err = pci_enable_device(pdev)) != 0) { 2455 if ((err = pci_enable_device(pdev)) != 0) {
2467 printk_err("Failed to enable card. Err %d\n", -err); 2456 pr_err("Failed to enable card. Err %d\n", -err);
2468 kfree(card); 2457 kfree(card);
2469 return err; 2458 return err;
2470 } 2459 }
2471 2460
2472 if ((err = pci_request_regions(pdev, "FarSync")) !=0) { 2461 if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
2473 printk_err("Failed to allocate regions. Err %d\n", -err); 2462 pr_err("Failed to allocate regions. Err %d\n", -err);
2474 pci_disable_device(pdev); 2463 pci_disable_device(pdev);
2475 kfree(card); 2464 kfree(card);
2476 return err; 2465 return err;
@@ -2481,14 +2470,14 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2481 card->phys_mem = pci_resource_start(pdev, 2); 2470 card->phys_mem = pci_resource_start(pdev, 2);
2482 card->phys_ctlmem = pci_resource_start(pdev, 3); 2471 card->phys_ctlmem = pci_resource_start(pdev, 3);
2483 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { 2472 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
2484 printk_err("Physical memory remap failed\n"); 2473 pr_err("Physical memory remap failed\n");
2485 pci_release_regions(pdev); 2474 pci_release_regions(pdev);
2486 pci_disable_device(pdev); 2475 pci_disable_device(pdev);
2487 kfree(card); 2476 kfree(card);
2488 return -ENODEV; 2477 return -ENODEV;
2489 } 2478 }
2490 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { 2479 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
2491 printk_err("Control memory remap failed\n"); 2480 pr_err("Control memory remap failed\n");
2492 pci_release_regions(pdev); 2481 pci_release_regions(pdev);
2493 pci_disable_device(pdev); 2482 pci_disable_device(pdev);
2494 kfree(card); 2483 kfree(card);
@@ -2498,7 +2487,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2498 2487
2499 /* Register the interrupt handler */ 2488 /* Register the interrupt handler */
2500 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { 2489 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
2501 printk_err("Unable to register interrupt %d\n", card->irq); 2490 pr_err("Unable to register interrupt %d\n", card->irq);
2502 pci_release_regions(pdev); 2491 pci_release_regions(pdev);
2503 pci_disable_device(pdev); 2492 pci_disable_device(pdev);
2504 iounmap(card->ctlmem); 2493 iounmap(card->ctlmem);
@@ -2529,7 +2518,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2529 if (!dev) { 2518 if (!dev) {
2530 while (i--) 2519 while (i--)
2531 free_netdev(card->ports[i].dev); 2520 free_netdev(card->ports[i].dev);
2532 printk_err ("FarSync: out of memory\n"); 2521 pr_err("FarSync: out of memory\n");
2533 free_irq(card->irq, card); 2522 free_irq(card->irq, card);
2534 pci_release_regions(pdev); 2523 pci_release_regions(pdev);
2535 pci_disable_device(pdev); 2524 pci_disable_device(pdev);
@@ -2593,7 +2582,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2593 pci_alloc_consistent(card->device, FST_MAX_MTU, 2582 pci_alloc_consistent(card->device, FST_MAX_MTU,
2594 &card->rx_dma_handle_card); 2583 &card->rx_dma_handle_card);
2595 if (card->rx_dma_handle_host == NULL) { 2584 if (card->rx_dma_handle_host == NULL) {
2596 printk_err("Could not allocate rx dma buffer\n"); 2585 pr_err("Could not allocate rx dma buffer\n");
2597 fst_disable_intr(card); 2586 fst_disable_intr(card);
2598 pci_release_regions(pdev); 2587 pci_release_regions(pdev);
2599 pci_disable_device(pdev); 2588 pci_disable_device(pdev);
@@ -2606,7 +2595,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2606 pci_alloc_consistent(card->device, FST_MAX_MTU, 2595 pci_alloc_consistent(card->device, FST_MAX_MTU,
2607 &card->tx_dma_handle_card); 2596 &card->tx_dma_handle_card);
2608 if (card->tx_dma_handle_host == NULL) { 2597 if (card->tx_dma_handle_host == NULL) {
2609 printk_err("Could not allocate tx dma buffer\n"); 2598 pr_err("Could not allocate tx dma buffer\n");
2610 fst_disable_intr(card); 2599 fst_disable_intr(card);
2611 pci_release_regions(pdev); 2600 pci_release_regions(pdev);
2612 pci_disable_device(pdev); 2601 pci_disable_device(pdev);
@@ -2678,7 +2667,7 @@ fst_init(void)
2678static void __exit 2667static void __exit
2679fst_cleanup_module(void) 2668fst_cleanup_module(void)
2680{ 2669{
2681 printk_info("FarSync WAN driver unloading\n"); 2670 pr_info("FarSync WAN driver unloading\n");
2682 pci_unregister_driver(&fst_driver); 2671 pci_unregister_driver(&fst_driver);
2683} 2672}
2684 2673
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index 3839662ff201..e4f539ad071b 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -153,7 +153,7 @@ typedef struct {
153 u16 len; /* Data Length */ 153 u16 len; /* Data Length */
154 u8 stat; /* Status */ 154 u8 stat; /* Status */
155 u8 unused; /* pads to 2-byte boundary */ 155 u8 unused; /* pads to 2-byte boundary */
156}__attribute__ ((packed)) pkt_desc; 156}__packed pkt_desc;
157 157
158 158
159/* Packet Descriptor Status bits */ 159/* Packet Descriptor Status bits */
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index ee7083fbea50..b38ffa149aba 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -36,7 +36,7 @@ struct hdlc_header {
36 u8 address; 36 u8 address;
37 u8 control; 37 u8 control;
38 __be16 protocol; 38 __be16 protocol;
39}__attribute__ ((packed)); 39}__packed;
40 40
41 41
42struct cisco_packet { 42struct cisco_packet {
@@ -45,7 +45,7 @@ struct cisco_packet {
45 __be32 par2; 45 __be32 par2;
46 __be16 rel; /* reliability */ 46 __be16 rel; /* reliability */
47 __be32 time; 47 __be32 time;
48}__attribute__ ((packed)); 48}__packed;
49#define CISCO_PACKET_LEN 18 49#define CISCO_PACKET_LEN 18
50#define CISCO_BIG_PACKET_LEN 20 50#define CISCO_BIG_PACKET_LEN 20
51 51
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0e52993e2079..0edb535bb2b5 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -112,7 +112,7 @@ typedef struct {
112 unsigned de: 1; 112 unsigned de: 1;
113 unsigned ea2: 1; 113 unsigned ea2: 1;
114#endif 114#endif
115}__attribute__ ((packed)) fr_hdr; 115}__packed fr_hdr;
116 116
117 117
118typedef struct pvc_device_struct { 118typedef struct pvc_device_struct {
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 43ae6f440bfb..f4125da2762f 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -330,7 +330,7 @@ struct _dlci_stat
330{ 330{
331 short dlci; 331 short dlci;
332 char flags; 332 char flags;
333} __attribute__((packed)); 333} __packed;
334 334
335struct _frad_stat 335struct _frad_stat
336{ 336{
@@ -1211,14 +1211,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
1211 } 1211 }
1212 else 1212 else
1213 { 1213 {
1214 temp = kmalloc(mem.len, GFP_KERNEL); 1214 temp = memdup_user(mem.data, mem.len);
1215 if (!temp) 1215 if (IS_ERR(temp))
1216 return(-ENOMEM); 1216 return PTR_ERR(temp);
1217 if(copy_from_user(temp, mem.data, mem.len))
1218 {
1219 kfree(temp);
1220 return -EFAULT;
1221 }
1222 sdla_write(dev, mem.addr, temp, mem.len); 1217 sdla_write(dev, mem.addr, temp, mem.len);
1223 kfree(temp); 1218 kfree(temp);
1224 } 1219 }
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 746a5ee32f33..eb72c67699ab 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -358,8 +358,10 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
358#endif 358#endif
359 359
360 err = register_netdev(dev); 360 err = register_netdev(dev);
361 if (err) 361 if (err) {
362 free_irq(dev->irq, dev); 362 free_irq(dev->irq, dev);
363 iounmap(ei_status.mem);
364 }
363 return err; 365 return err;
364} 366}
365 367
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index d86e8f31e7fc..9fb03082153a 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -50,7 +50,7 @@
50 * 50 *
51 * ROADMAP 51 * ROADMAP
52 * 52 *
53 * i2400m_dev_initalize() Called by i2400m_dev_start() 53 * i2400m_dev_initialize() Called by i2400m_dev_start()
54 * i2400m_set_init_config() 54 * i2400m_set_init_config()
55 * i2400m_cmd_get_state() 55 * i2400m_cmd_get_state()
56 * i2400m_dev_shutdown() Called by i2400m_dev_stop() 56 * i2400m_dev_shutdown() Called by i2400m_dev_stop()
@@ -848,7 +848,7 @@ struct i2400m_cmd_enter_power_save {
848 struct i2400m_l3l4_hdr hdr; 848 struct i2400m_l3l4_hdr hdr;
849 struct i2400m_tlv_hdr tlv; 849 struct i2400m_tlv_hdr tlv;
850 __le32 val; 850 __le32 val;
851} __attribute__((packed)); 851} __packed;
852 852
853 853
854/* 854/*
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 11491354e5b5..8b55a5b14152 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -651,7 +651,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
651 struct { 651 struct {
652 struct i2400m_bootrom_header cmd; 652 struct i2400m_bootrom_header cmd;
653 u8 cmd_payload[chunk_len]; 653 u8 cmd_payload[chunk_len];
654 } __attribute__((packed)) *buf; 654 } __packed *buf;
655 struct i2400m_bootrom_header ack; 655 struct i2400m_bootrom_header ack;
656 656
657 d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx " 657 d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
@@ -794,7 +794,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
794 struct { 794 struct {
795 struct i2400m_bootrom_header cmd; 795 struct i2400m_bootrom_header cmd;
796 u8 cmd_pl[0]; 796 u8 cmd_pl[0];
797 } __attribute__((packed)) *cmd_buf; 797 } __packed *cmd_buf;
798 size_t signature_block_offset, signature_block_size; 798 size_t signature_block_offset, signature_block_size;
799 799
800 d_fnstart(3, dev, "offset %zu\n", offset); 800 d_fnstart(3, dev, "offset %zu\n", offset);
@@ -1029,7 +1029,7 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
1029 struct { 1029 struct {
1030 struct i2400m_bootrom_header ack; 1030 struct i2400m_bootrom_header ack;
1031 u8 ack_pl[16]; 1031 u8 ack_pl[16];
1032 } __attribute__((packed)) ack_buf; 1032 } __packed ack_buf;
1033 1033
1034 d_fnstart(5, dev, "(i2400m %p)\n", i2400m); 1034 d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
1035 cmd = i2400m->bm_cmd_buf; 1035 cmd = i2400m->bm_cmd_buf;
@@ -1115,7 +1115,7 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m,
1115 struct { 1115 struct {
1116 struct i2400m_bootrom_header cmd; 1116 struct i2400m_bootrom_header cmd;
1117 struct i2400m_bcf_hdr cmd_pl; 1117 struct i2400m_bcf_hdr cmd_pl;
1118 } __attribute__((packed)) *cmd_buf; 1118 } __packed *cmd_buf;
1119 struct i2400m_bootrom_header ack; 1119 struct i2400m_bootrom_header ack;
1120 1120
1121 d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr); 1121 d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 035e4cf3e6ed..9e02b90b0080 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -91,7 +91,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
91 struct { 91 struct {
92 struct i2400m_l3l4_hdr hdr; 92 struct i2400m_l3l4_hdr hdr;
93 struct i2400m_tlv_rf_operation sw_rf; 93 struct i2400m_tlv_rf_operation sw_rf;
94 } __attribute__((packed)) *cmd; 94 } __packed *cmd;
95 char strerr[32]; 95 char strerr[32];
96 96
97 d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state); 97 d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state);
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 880ad9d170c2..a105087af963 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -373,8 +373,8 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
373 pktlen = status & RDES0_STATUS_FL; 373 pktlen = status & RDES0_STATUS_FL;
374 if (pktlen > RX_PKT_SIZE) { 374 if (pktlen > RX_PKT_SIZE) {
375 if (net_ratelimit()) 375 if (net_ratelimit())
376 printk(KERN_DEBUG "%s: frame too long (%d)\n", 376 wiphy_debug(dev->wiphy, "frame too long (%d)\n",
377 wiphy_name(dev->wiphy), pktlen); 377 pktlen);
378 pktlen = RX_PKT_SIZE; 378 pktlen = RX_PKT_SIZE;
379 } 379 }
380 380
@@ -454,10 +454,10 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
454 454
455static irqreturn_t adm8211_interrupt(int irq, void *dev_id) 455static irqreturn_t adm8211_interrupt(int irq, void *dev_id)
456{ 456{
457#define ADM8211_INT(x) \ 457#define ADM8211_INT(x) \
458do { \ 458do { \
459 if (unlikely(stsr & ADM8211_STSR_ ## x)) \ 459 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
460 printk(KERN_DEBUG "%s: " #x "\n", wiphy_name(dev->wiphy)); \ 460 wiphy_debug(dev->wiphy, "%s\n", #x); \
461} while (0) 461} while (0)
462 462
463 struct ieee80211_hw *dev = dev_id; 463 struct ieee80211_hw *dev = dev_id;
@@ -570,9 +570,9 @@ static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
570 } 570 }
571 571
572 if (timeout == 0) { 572 if (timeout == 0) {
573 printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" 573 wiphy_debug(dev->wiphy,
574 " prewrite (reg=0x%08x)\n", 574 "adm8211_write_bbp(%d,%d) failed prewrite (reg=0x%08x)\n",
575 wiphy_name(dev->wiphy), addr, data, reg); 575 addr, data, reg);
576 return -ETIMEDOUT; 576 return -ETIMEDOUT;
577 } 577 }
578 578
@@ -605,9 +605,9 @@ static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
605 if (timeout == 0) { 605 if (timeout == 0) {
606 ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) & 606 ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) &
607 ~ADM8211_BBPCTL_WR); 607 ~ADM8211_BBPCTL_WR);
608 printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" 608 wiphy_debug(dev->wiphy,
609 " postwrite (reg=0x%08x)\n", 609 "adm8211_write_bbp(%d,%d) failed postwrite (reg=0x%08x)\n",
610 wiphy_name(dev->wiphy), addr, data, reg); 610 addr, data, reg);
611 return -ETIMEDOUT; 611 return -ETIMEDOUT;
612 } 612 }
613 613
@@ -675,8 +675,8 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
675 break; 675 break;
676 676
677 default: 677 default:
678 printk(KERN_DEBUG "%s: unsupported transceiver type %d\n", 678 wiphy_debug(dev->wiphy, "unsupported transceiver type %d\n",
679 wiphy_name(dev->wiphy), priv->transceiver_type); 679 priv->transceiver_type);
680 break; 680 break;
681 } 681 }
682 682
@@ -732,8 +732,8 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
732 732
733 /* Nothing to do for ADMtek BBP */ 733 /* Nothing to do for ADMtek BBP */
734 } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) 734 } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
735 printk(KERN_DEBUG "%s: unsupported BBP type %d\n", 735 wiphy_debug(dev->wiphy, "unsupported bbp type %d\n",
736 wiphy_name(dev->wiphy), priv->bbp_type); 736 priv->bbp_type);
737 737
738 ADM8211_RESTORE(); 738 ADM8211_RESTORE();
739 739
@@ -1027,13 +1027,12 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
1027 break; 1027 break;
1028 1028
1029 default: 1029 default:
1030 printk(KERN_DEBUG "%s: unsupported transceiver %d\n", 1030 wiphy_debug(dev->wiphy, "unsupported transceiver %d\n",
1031 wiphy_name(dev->wiphy), priv->transceiver_type); 1031 priv->transceiver_type);
1032 break; 1032 break;
1033 } 1033 }
1034 } else 1034 } else
1035 printk(KERN_DEBUG "%s: unsupported BBP %d\n", 1035 wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type);
1036 wiphy_name(dev->wiphy), priv->bbp_type);
1037 1036
1038 ADM8211_CSR_WRITE(SYNRF, 0); 1037 ADM8211_CSR_WRITE(SYNRF, 0);
1039 1038
@@ -1509,15 +1508,13 @@ static int adm8211_start(struct ieee80211_hw *dev)
1509 /* Power up MAC and RF chips */ 1508 /* Power up MAC and RF chips */
1510 retval = adm8211_hw_reset(dev); 1509 retval = adm8211_hw_reset(dev);
1511 if (retval) { 1510 if (retval) {
1512 printk(KERN_ERR "%s: hardware reset failed\n", 1511 wiphy_err(dev->wiphy, "hardware reset failed\n");
1513 wiphy_name(dev->wiphy));
1514 goto fail; 1512 goto fail;
1515 } 1513 }
1516 1514
1517 retval = adm8211_init_rings(dev); 1515 retval = adm8211_init_rings(dev);
1518 if (retval) { 1516 if (retval) {
1519 printk(KERN_ERR "%s: failed to initialize rings\n", 1517 wiphy_err(dev->wiphy, "failed to initialize rings\n");
1520 wiphy_name(dev->wiphy));
1521 goto fail; 1518 goto fail;
1522 } 1519 }
1523 1520
@@ -1528,8 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
1528 retval = request_irq(priv->pdev->irq, adm8211_interrupt, 1525 retval = request_irq(priv->pdev->irq, adm8211_interrupt,
1529 IRQF_SHARED, "adm8211", dev); 1526 IRQF_SHARED, "adm8211", dev);
1530 if (retval) { 1527 if (retval) {
1531 printk(KERN_ERR "%s: failed to register IRQ handler\n", 1528 wiphy_err(dev->wiphy, "failed to register irq handler\n");
1532 wiphy_name(dev->wiphy));
1533 goto fail; 1529 goto fail;
1534 } 1530 }
1535 1531
@@ -1903,15 +1899,17 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1903 if (err) { 1899 if (err) {
1904 printk(KERN_ERR "%s (adm8211): Cannot register device\n", 1900 printk(KERN_ERR "%s (adm8211): Cannot register device\n",
1905 pci_name(pdev)); 1901 pci_name(pdev));
1906 goto err_free_desc; 1902 goto err_free_eeprom;
1907 } 1903 }
1908 1904
1909 printk(KERN_INFO "%s: hwaddr %pM, Rev 0x%02x\n", 1905 wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n",
1910 wiphy_name(dev->wiphy), dev->wiphy->perm_addr, 1906 dev->wiphy->perm_addr, pdev->revision);
1911 pdev->revision);
1912 1907
1913 return 0; 1908 return 0;
1914 1909
1910 err_free_eeprom:
1911 kfree(priv->eeprom);
1912
1915 err_free_desc: 1913 err_free_desc:
1916 pci_free_consistent(pdev, 1914 pci_free_consistent(pdev,
1917 sizeof(struct adm8211_desc) * priv->rx_ring_size + 1915 sizeof(struct adm8211_desc) * priv->rx_ring_size +
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index b07e4d3a6b4d..bbc10b1cde87 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -80,7 +80,7 @@ struct adm8211_csr {
80 __le32 FEMR; /* 0x104 */ 80 __le32 FEMR; /* 0x104 */
81 __le32 FPSR; /* 0x108 */ 81 __le32 FPSR; /* 0x108 */
82 __le32 FFER; /* 0x10C */ 82 __le32 FFER; /* 0x10C */
83} __attribute__ ((packed)); 83} __packed;
84 84
85/* CSR0 - PAR (PCI Address Register) */ 85/* CSR0 - PAR (PCI Address Register) */
86#define ADM8211_PAR_MWIE (1 << 24) 86#define ADM8211_PAR_MWIE (1 << 24)
@@ -484,7 +484,7 @@ struct adm8211_tx_hdr {
484 u8 entry_control; // huh?? 484 u8 entry_control; // huh??
485 u16 reserved_1; 485 u16 reserved_1;
486 u32 reserved_2; 486 u32 reserved_2;
487} __attribute__ ((packed)); 487} __packed;
488 488
489 489
490#define RX_COPY_BREAK 128 490#define RX_COPY_BREAK 128
@@ -531,7 +531,7 @@ struct adm8211_eeprom {
531 u8 lnags_threshold[14]; /* 0x70 */ 531 u8 lnags_threshold[14]; /* 0x70 */
532 __le16 checksum; /* 0x7E */ 532 __le16 checksum; /* 0x7E */
533 u8 cis_data[0]; /* 0x80, 384 bytes */ 533 u8 cis_data[0]; /* 0x80, 384 bytes */
534} __attribute__ ((packed)); 534} __packed;
535 535
536struct adm8211_priv { 536struct adm8211_priv {
537 struct pci_dev *pdev; 537 struct pci_dev *pdev;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 3b7ab20a5c54..1d05445d4ba3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -506,20 +506,20 @@ struct WepKeyRid {
506 u8 mac[ETH_ALEN]; 506 u8 mac[ETH_ALEN];
507 __le16 klen; 507 __le16 klen;
508 u8 key[16]; 508 u8 key[16];
509} __attribute__ ((packed)); 509} __packed;
510 510
511/* These structures are from the Aironet's PC4500 Developers Manual */ 511/* These structures are from the Aironet's PC4500 Developers Manual */
512typedef struct Ssid Ssid; 512typedef struct Ssid Ssid;
513struct Ssid { 513struct Ssid {
514 __le16 len; 514 __le16 len;
515 u8 ssid[32]; 515 u8 ssid[32];
516} __attribute__ ((packed)); 516} __packed;
517 517
518typedef struct SsidRid SsidRid; 518typedef struct SsidRid SsidRid;
519struct SsidRid { 519struct SsidRid {
520 __le16 len; 520 __le16 len;
521 Ssid ssids[3]; 521 Ssid ssids[3];
522} __attribute__ ((packed)); 522} __packed;
523 523
524typedef struct ModulationRid ModulationRid; 524typedef struct ModulationRid ModulationRid;
525struct ModulationRid { 525struct ModulationRid {
@@ -528,7 +528,7 @@ struct ModulationRid {
528#define MOD_DEFAULT cpu_to_le16(0) 528#define MOD_DEFAULT cpu_to_le16(0)
529#define MOD_CCK cpu_to_le16(1) 529#define MOD_CCK cpu_to_le16(1)
530#define MOD_MOK cpu_to_le16(2) 530#define MOD_MOK cpu_to_le16(2)
531} __attribute__ ((packed)); 531} __packed;
532 532
533typedef struct ConfigRid ConfigRid; 533typedef struct ConfigRid ConfigRid;
534struct ConfigRid { 534struct ConfigRid {
@@ -652,7 +652,7 @@ struct ConfigRid {
652#define MAGIC_STAY_IN_CAM (1<<10) 652#define MAGIC_STAY_IN_CAM (1<<10)
653 u8 magicControl; 653 u8 magicControl;
654 __le16 autoWake; 654 __le16 autoWake;
655} __attribute__ ((packed)); 655} __packed;
656 656
657typedef struct StatusRid StatusRid; 657typedef struct StatusRid StatusRid;
658struct StatusRid { 658struct StatusRid {
@@ -711,20 +711,20 @@ struct StatusRid {
711#define STAT_LEAPFAILED 91 711#define STAT_LEAPFAILED 91
712#define STAT_LEAPTIMEDOUT 92 712#define STAT_LEAPTIMEDOUT 92
713#define STAT_LEAPCOMPLETE 93 713#define STAT_LEAPCOMPLETE 93
714} __attribute__ ((packed)); 714} __packed;
715 715
716typedef struct StatsRid StatsRid; 716typedef struct StatsRid StatsRid;
717struct StatsRid { 717struct StatsRid {
718 __le16 len; 718 __le16 len;
719 __le16 spacer; 719 __le16 spacer;
720 __le32 vals[100]; 720 __le32 vals[100];
721} __attribute__ ((packed)); 721} __packed;
722 722
723typedef struct APListRid APListRid; 723typedef struct APListRid APListRid;
724struct APListRid { 724struct APListRid {
725 __le16 len; 725 __le16 len;
726 u8 ap[4][ETH_ALEN]; 726 u8 ap[4][ETH_ALEN];
727} __attribute__ ((packed)); 727} __packed;
728 728
729typedef struct CapabilityRid CapabilityRid; 729typedef struct CapabilityRid CapabilityRid;
730struct CapabilityRid { 730struct CapabilityRid {
@@ -754,7 +754,7 @@ struct CapabilityRid {
754 __le16 bootBlockVer; 754 __le16 bootBlockVer;
755 __le16 requiredHard; 755 __le16 requiredHard;
756 __le16 extSoftCap; 756 __le16 extSoftCap;
757} __attribute__ ((packed)); 757} __packed;
758 758
759/* Only present on firmware >= 5.30.17 */ 759/* Only present on firmware >= 5.30.17 */
760typedef struct BSSListRidExtra BSSListRidExtra; 760typedef struct BSSListRidExtra BSSListRidExtra;
@@ -762,7 +762,7 @@ struct BSSListRidExtra {
762 __le16 unknown[4]; 762 __le16 unknown[4];
763 u8 fixed[12]; /* WLAN management frame */ 763 u8 fixed[12]; /* WLAN management frame */
764 u8 iep[624]; 764 u8 iep[624];
765} __attribute__ ((packed)); 765} __packed;
766 766
767typedef struct BSSListRid BSSListRid; 767typedef struct BSSListRid BSSListRid;
768struct BSSListRid { 768struct BSSListRid {
@@ -796,7 +796,7 @@ struct BSSListRid {
796 796
797 /* Only present on firmware >= 5.30.17 */ 797 /* Only present on firmware >= 5.30.17 */
798 BSSListRidExtra extra; 798 BSSListRidExtra extra;
799} __attribute__ ((packed)); 799} __packed;
800 800
801typedef struct { 801typedef struct {
802 BSSListRid bss; 802 BSSListRid bss;
@@ -807,13 +807,13 @@ typedef struct tdsRssiEntry tdsRssiEntry;
807struct tdsRssiEntry { 807struct tdsRssiEntry {
808 u8 rssipct; 808 u8 rssipct;
809 u8 rssidBm; 809 u8 rssidBm;
810} __attribute__ ((packed)); 810} __packed;
811 811
812typedef struct tdsRssiRid tdsRssiRid; 812typedef struct tdsRssiRid tdsRssiRid;
813struct tdsRssiRid { 813struct tdsRssiRid {
814 u16 len; 814 u16 len;
815 tdsRssiEntry x[256]; 815 tdsRssiEntry x[256];
816} __attribute__ ((packed)); 816} __packed;
817 817
818typedef struct MICRid MICRid; 818typedef struct MICRid MICRid;
819struct MICRid { 819struct MICRid {
@@ -823,7 +823,7 @@ struct MICRid {
823 u8 multicast[16]; 823 u8 multicast[16];
824 __le16 unicastValid; 824 __le16 unicastValid;
825 u8 unicast[16]; 825 u8 unicast[16];
826} __attribute__ ((packed)); 826} __packed;
827 827
828typedef struct MICBuffer MICBuffer; 828typedef struct MICBuffer MICBuffer;
829struct MICBuffer { 829struct MICBuffer {
@@ -841,7 +841,7 @@ struct MICBuffer {
841 } u; 841 } u;
842 __be32 mic; 842 __be32 mic;
843 __be32 seq; 843 __be32 seq;
844} __attribute__ ((packed)); 844} __packed;
845 845
846typedef struct { 846typedef struct {
847 u8 da[ETH_ALEN]; 847 u8 da[ETH_ALEN];
@@ -996,7 +996,7 @@ struct rx_hdr {
996 u8 rate; 996 u8 rate;
997 u8 freq; 997 u8 freq;
998 __le16 tmp[4]; 998 __le16 tmp[4];
999} __attribute__ ((packed)); 999} __packed;
1000 1000
1001typedef struct { 1001typedef struct {
1002 unsigned int ctl: 15; 1002 unsigned int ctl: 15;
@@ -2931,8 +2931,8 @@ err_out_res:
2931 release_region( dev->base_addr, 64 ); 2931 release_region( dev->base_addr, 64 );
2932err_out_nets: 2932err_out_nets:
2933 airo_networks_free(ai); 2933 airo_networks_free(ai);
2934 del_airo_dev(ai);
2935err_out_free: 2934err_out_free:
2935 del_airo_dev(ai);
2936 free_netdev(dev); 2936 free_netdev(dev);
2937 return NULL; 2937 return NULL;
2938} 2938}
@@ -4657,7 +4657,7 @@ static ssize_t proc_write( struct file *file,
4657 loff_t *offset ) 4657 loff_t *offset )
4658{ 4658{
4659 loff_t pos = *offset; 4659 loff_t pos = *offset;
4660 struct proc_data *priv = (struct proc_data*)file->private_data; 4660 struct proc_data *priv = file->private_data;
4661 4661
4662 if (!priv->wbuffer) 4662 if (!priv->wbuffer)
4663 return -EINVAL; 4663 return -EINVAL;
@@ -4689,7 +4689,7 @@ static int proc_status_open(struct inode *inode, struct file *file)
4689 4689
4690 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 4690 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
4691 return -ENOMEM; 4691 return -ENOMEM;
4692 data = (struct proc_data *)file->private_data; 4692 data = file->private_data;
4693 if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) { 4693 if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
4694 kfree (file->private_data); 4694 kfree (file->private_data);
4695 return -ENOMEM; 4695 return -ENOMEM;
@@ -4772,7 +4772,7 @@ static int proc_stats_rid_open( struct inode *inode,
4772 4772
4773 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 4773 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
4774 return -ENOMEM; 4774 return -ENOMEM;
4775 data = (struct proc_data *)file->private_data; 4775 data = file->private_data;
4776 if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) { 4776 if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) {
4777 kfree (file->private_data); 4777 kfree (file->private_data);
4778 return -ENOMEM; 4778 return -ENOMEM;
@@ -5045,7 +5045,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
5045 5045
5046 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 5046 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
5047 return -ENOMEM; 5047 return -ENOMEM;
5048 data = (struct proc_data *)file->private_data; 5048 data = file->private_data;
5049 if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) { 5049 if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
5050 kfree (file->private_data); 5050 kfree (file->private_data);
5051 return -ENOMEM; 5051 return -ENOMEM;
@@ -5127,7 +5127,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
5127 5127
5128static void proc_SSID_on_close(struct inode *inode, struct file *file) 5128static void proc_SSID_on_close(struct inode *inode, struct file *file)
5129{ 5129{
5130 struct proc_data *data = (struct proc_data *)file->private_data; 5130 struct proc_data *data = file->private_data;
5131 struct proc_dir_entry *dp = PDE(inode); 5131 struct proc_dir_entry *dp = PDE(inode);
5132 struct net_device *dev = dp->data; 5132 struct net_device *dev = dp->data;
5133 struct airo_info *ai = dev->ml_priv; 5133 struct airo_info *ai = dev->ml_priv;
@@ -5163,7 +5163,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
5163} 5163}
5164 5164
5165static void proc_APList_on_close( struct inode *inode, struct file *file ) { 5165static void proc_APList_on_close( struct inode *inode, struct file *file ) {
5166 struct proc_data *data = (struct proc_data *)file->private_data; 5166 struct proc_data *data = file->private_data;
5167 struct proc_dir_entry *dp = PDE(inode); 5167 struct proc_dir_entry *dp = PDE(inode);
5168 struct net_device *dev = dp->data; 5168 struct net_device *dev = dp->data;
5169 struct airo_info *ai = dev->ml_priv; 5169 struct airo_info *ai = dev->ml_priv;
@@ -5309,7 +5309,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5309 5309
5310 memset(key, 0, sizeof(key)); 5310 memset(key, 0, sizeof(key));
5311 5311
5312 data = (struct proc_data *)file->private_data; 5312 data = file->private_data;
5313 if ( !data->writelen ) return; 5313 if ( !data->writelen ) return;
5314 5314
5315 if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' && 5315 if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
@@ -5363,7 +5363,7 @@ static int proc_wepkey_open( struct inode *inode, struct file *file )
5363 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 5363 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
5364 return -ENOMEM; 5364 return -ENOMEM;
5365 memset(&wkr, 0, sizeof(wkr)); 5365 memset(&wkr, 0, sizeof(wkr));
5366 data = (struct proc_data *)file->private_data; 5366 data = file->private_data;
5367 if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) { 5367 if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) {
5368 kfree (file->private_data); 5368 kfree (file->private_data);
5369 return -ENOMEM; 5369 return -ENOMEM;
@@ -5409,7 +5409,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
5409 5409
5410 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 5410 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
5411 return -ENOMEM; 5411 return -ENOMEM;
5412 data = (struct proc_data *)file->private_data; 5412 data = file->private_data;
5413 if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) { 5413 if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
5414 kfree (file->private_data); 5414 kfree (file->private_data);
5415 return -ENOMEM; 5415 return -ENOMEM;
@@ -5453,7 +5453,7 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
5453 5453
5454 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 5454 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
5455 return -ENOMEM; 5455 return -ENOMEM;
5456 data = (struct proc_data *)file->private_data; 5456 data = file->private_data;
5457 if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) { 5457 if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
5458 kfree (file->private_data); 5458 kfree (file->private_data);
5459 return -ENOMEM; 5459 return -ENOMEM;
@@ -5495,7 +5495,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
5495 5495
5496 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 5496 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
5497 return -ENOMEM; 5497 return -ENOMEM;
5498 data = (struct proc_data *)file->private_data; 5498 data = file->private_data;
5499 if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) { 5499 if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) {
5500 kfree (file->private_data); 5500 kfree (file->private_data);
5501 return -ENOMEM; 5501 return -ENOMEM;
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 8a2d4afc74f8..d5140a87f073 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -7,6 +7,7 @@
7 * Copyright (c) 2004 Balint Seeber <n0_5p4m_p13453@hotmail.com> 7 * Copyright (c) 2004 Balint Seeber <n0_5p4m_p13453@hotmail.com>
8 * Copyright (c) 2007 Guido Guenther <agx@sigxcpu.org> 8 * Copyright (c) 2007 Guido Guenther <agx@sigxcpu.org>
9 * Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi> 9 * Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi>
10 * Copyright (c) 2010 Sebastian Smolorz <sesmo@gmx.net>
10 * 11 *
11 * This program is free software; you can redistribute it and/or 12 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as 13 * modify it under the terms of the GNU General Public License as
@@ -88,22 +89,19 @@
88#define DBG_DEFAULTS 0 89#define DBG_DEFAULTS 0
89 90
90/* Use our own dbg macro */ 91/* Use our own dbg macro */
91#define at76_dbg(bits, format, arg...) \ 92#define at76_dbg(bits, format, arg...) \
92 do { \ 93do { \
93 if (at76_debug & (bits)) \ 94 if (at76_debug & (bits)) \
94 printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , \ 95 printk(KERN_DEBUG DRIVER_NAME ": " format "\n", ##arg); \
95 ## arg); \ 96} while (0)
96 } while (0) 97
97 98#define at76_dbg_dump(bits, buf, len, format, arg...) \
98#define at76_dbg_dump(bits, buf, len, format, arg...) \ 99do { \
99 do { \ 100 if (at76_debug & (bits)) { \
100 if (at76_debug & (bits)) { \ 101 printk(KERN_DEBUG DRIVER_NAME ": " format "\n", ##arg); \
101 printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , \ 102 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); \
102 ## arg); \ 103 } \
103 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, \ 104} while (0)
104 buf, len); \
105 } \
106 } while (0)
107 105
108static uint at76_debug = DBG_DEFAULTS; 106static uint at76_debug = DBG_DEFAULTS;
109 107
@@ -305,7 +303,7 @@ struct dfu_status {
305 unsigned char poll_timeout[3]; 303 unsigned char poll_timeout[3];
306 unsigned char state; 304 unsigned char state;
307 unsigned char string; 305 unsigned char string;
308} __attribute__((packed)); 306} __packed;
309 307
310static inline int at76_is_intersil(enum board_type board) 308static inline int at76_is_intersil(enum board_type board)
311{ 309{
@@ -657,8 +655,8 @@ static int at76_get_hw_config(struct at76_priv *priv)
657exit: 655exit:
658 kfree(hwcfg); 656 kfree(hwcfg);
659 if (ret < 0) 657 if (ret < 0)
660 printk(KERN_ERR "%s: cannot get HW Config (error %d)\n", 658 wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n",
661 wiphy_name(priv->hw->wiphy), ret); 659 ret);
662 660
663 return ret; 661 return ret;
664} 662}
@@ -793,8 +791,9 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd)
793 do { 791 do {
794 status = at76_get_cmd_status(priv->udev, cmd); 792 status = at76_get_cmd_status(priv->udev, cmd);
795 if (status < 0) { 793 if (status < 0) {
796 printk(KERN_ERR "%s: at76_get_cmd_status failed: %d\n", 794 wiphy_err(priv->hw->wiphy,
797 wiphy_name(priv->hw->wiphy), status); 795 "at76_get_cmd_status failed: %d\n",
796 status);
798 break; 797 break;
799 } 798 }
800 799
@@ -809,9 +808,8 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd)
809 808
810 schedule_timeout_interruptible(HZ / 10); /* 100 ms */ 809 schedule_timeout_interruptible(HZ / 10); /* 100 ms */
811 if (time_after(jiffies, timeout)) { 810 if (time_after(jiffies, timeout)) {
812 printk(KERN_ERR 811 wiphy_err(priv->hw->wiphy,
813 "%s: completion timeout for command %d\n", 812 "completion timeout for command %d\n", cmd);
814 wiphy_name(priv->hw->wiphy), cmd);
815 status = -ETIMEDOUT; 813 status = -ETIMEDOUT;
816 break; 814 break;
817 } 815 }
@@ -832,9 +830,9 @@ static int at76_set_mib(struct at76_priv *priv, struct set_mib_buffer *buf)
832 830
833 ret = at76_wait_completion(priv, CMD_SET_MIB); 831 ret = at76_wait_completion(priv, CMD_SET_MIB);
834 if (ret != CMD_STATUS_COMPLETE) { 832 if (ret != CMD_STATUS_COMPLETE) {
835 printk(KERN_INFO 833 wiphy_info(priv->hw->wiphy,
836 "%s: set_mib: at76_wait_completion failed " 834 "set_mib: at76_wait_completion failed with %d\n",
837 "with %d\n", wiphy_name(priv->hw->wiphy), ret); 835 ret);
838 ret = -EIO; 836 ret = -EIO;
839 } 837 }
840 838
@@ -854,8 +852,8 @@ static int at76_set_radio(struct at76_priv *priv, int enable)
854 852
855 ret = at76_set_card_command(priv->udev, cmd, NULL, 0); 853 ret = at76_set_card_command(priv->udev, cmd, NULL, 0);
856 if (ret < 0) 854 if (ret < 0)
857 printk(KERN_ERR "%s: at76_set_card_command(%d) failed: %d\n", 855 wiphy_err(priv->hw->wiphy,
858 wiphy_name(priv->hw->wiphy), cmd, ret); 856 "at76_set_card_command(%d) failed: %d\n", cmd, ret);
859 else 857 else
860 ret = 1; 858 ret = 1;
861 859
@@ -875,8 +873,8 @@ static int at76_set_pm_mode(struct at76_priv *priv)
875 873
876 ret = at76_set_mib(priv, &priv->mib_buf); 874 ret = at76_set_mib(priv, &priv->mib_buf);
877 if (ret < 0) 875 if (ret < 0)
878 printk(KERN_ERR "%s: set_mib (pm_mode) failed: %d\n", 876 wiphy_err(priv->hw->wiphy, "set_mib (pm_mode) failed: %d\n",
879 wiphy_name(priv->hw->wiphy), ret); 877 ret);
880 878
881 return ret; 879 return ret;
882} 880}
@@ -892,8 +890,8 @@ static int at76_set_preamble(struct at76_priv *priv, u8 type)
892 890
893 ret = at76_set_mib(priv, &priv->mib_buf); 891 ret = at76_set_mib(priv, &priv->mib_buf);
894 if (ret < 0) 892 if (ret < 0)
895 printk(KERN_ERR "%s: set_mib (preamble) failed: %d\n", 893 wiphy_err(priv->hw->wiphy, "set_mib (preamble) failed: %d\n",
896 wiphy_name(priv->hw->wiphy), ret); 894 ret);
897 895
898 return ret; 896 return ret;
899} 897}
@@ -909,8 +907,8 @@ static int at76_set_frag(struct at76_priv *priv, u16 size)
909 907
910 ret = at76_set_mib(priv, &priv->mib_buf); 908 ret = at76_set_mib(priv, &priv->mib_buf);
911 if (ret < 0) 909 if (ret < 0)
912 printk(KERN_ERR "%s: set_mib (frag threshold) failed: %d\n", 910 wiphy_err(priv->hw->wiphy,
913 wiphy_name(priv->hw->wiphy), ret); 911 "set_mib (frag threshold) failed: %d\n", ret);
914 912
915 return ret; 913 return ret;
916} 914}
@@ -926,8 +924,7 @@ static int at76_set_rts(struct at76_priv *priv, u16 size)
926 924
927 ret = at76_set_mib(priv, &priv->mib_buf); 925 ret = at76_set_mib(priv, &priv->mib_buf);
928 if (ret < 0) 926 if (ret < 0)
929 printk(KERN_ERR "%s: set_mib (rts) failed: %d\n", 927 wiphy_err(priv->hw->wiphy, "set_mib (rts) failed: %d\n", ret);
930 wiphy_name(priv->hw->wiphy), ret);
931 928
932 return ret; 929 return ret;
933} 930}
@@ -943,8 +940,8 @@ static int at76_set_autorate_fallback(struct at76_priv *priv, int onoff)
943 940
944 ret = at76_set_mib(priv, &priv->mib_buf); 941 ret = at76_set_mib(priv, &priv->mib_buf);
945 if (ret < 0) 942 if (ret < 0)
946 printk(KERN_ERR "%s: set_mib (autorate fallback) failed: %d\n", 943 wiphy_err(priv->hw->wiphy,
947 wiphy_name(priv->hw->wiphy), ret); 944 "set_mib (autorate fallback) failed: %d\n", ret);
948 945
949 return ret; 946 return ret;
950} 947}
@@ -962,8 +959,8 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
962 ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, 959 ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m,
963 sizeof(struct mib_mac_addr)); 960 sizeof(struct mib_mac_addr));
964 if (ret < 0) { 961 if (ret < 0) {
965 printk(KERN_ERR "%s: at76_get_mib (MAC_ADDR) failed: %d\n", 962 wiphy_err(priv->hw->wiphy,
966 wiphy_name(priv->hw->wiphy), ret); 963 "at76_get_mib (mac_addr) failed: %d\n", ret);
967 goto exit; 964 goto exit;
968 } 965 }
969 966
@@ -991,8 +988,8 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
991 ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, 988 ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m,
992 sizeof(struct mib_mac_wep)); 989 sizeof(struct mib_mac_wep));
993 if (ret < 0) { 990 if (ret < 0) {
994 printk(KERN_ERR "%s: at76_get_mib (MAC_WEP) failed: %d\n", 991 wiphy_err(priv->hw->wiphy,
995 wiphy_name(priv->hw->wiphy), ret); 992 "at76_get_mib (mac_wep) failed: %d\n", ret);
996 goto exit; 993 goto exit;
997 } 994 }
998 995
@@ -1028,8 +1025,8 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1028 ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, 1025 ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m,
1029 sizeof(struct mib_mac_mgmt)); 1026 sizeof(struct mib_mac_mgmt));
1030 if (ret < 0) { 1027 if (ret < 0) {
1031 printk(KERN_ERR "%s: at76_get_mib (MAC_MGMT) failed: %d\n", 1028 wiphy_err(priv->hw->wiphy,
1032 wiphy_name(priv->hw->wiphy), ret); 1029 "at76_get_mib (mac_mgmt) failed: %d\n", ret);
1033 goto exit; 1030 goto exit;
1034 } 1031 }
1035 1032
@@ -1064,8 +1061,8 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1064 1061
1065 ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); 1062 ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
1066 if (ret < 0) { 1063 if (ret < 0) {
1067 printk(KERN_ERR "%s: at76_get_mib (MAC) failed: %d\n", 1064 wiphy_err(priv->hw->wiphy,
1068 wiphy_name(priv->hw->wiphy), ret); 1065 "at76_get_mib (mac) failed: %d\n", ret);
1069 goto exit; 1066 goto exit;
1070 } 1067 }
1071 1068
@@ -1101,8 +1098,8 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
1101 1098
1102 ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); 1099 ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
1103 if (ret < 0) { 1100 if (ret < 0) {
1104 printk(KERN_ERR "%s: at76_get_mib (PHY) failed: %d\n", 1101 wiphy_err(priv->hw->wiphy,
1105 wiphy_name(priv->hw->wiphy), ret); 1102 "at76_get_mib (phy) failed: %d\n", ret);
1106 goto exit; 1103 goto exit;
1107 } 1104 }
1108 1105
@@ -1134,8 +1131,8 @@ static void at76_dump_mib_local(struct at76_priv *priv)
1134 1131
1135 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); 1132 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
1136 if (ret < 0) { 1133 if (ret < 0) {
1137 printk(KERN_ERR "%s: at76_get_mib (LOCAL) failed: %d\n", 1134 wiphy_err(priv->hw->wiphy,
1138 wiphy_name(priv->hw->wiphy), ret); 1135 "at76_get_mib (local) failed: %d\n", ret);
1139 goto exit; 1136 goto exit;
1140 } 1137 }
1141 1138
@@ -1160,8 +1157,8 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
1160 ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, 1157 ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m,
1161 sizeof(struct mib_mdomain)); 1158 sizeof(struct mib_mdomain));
1162 if (ret < 0) { 1159 if (ret < 0) {
1163 printk(KERN_ERR "%s: at76_get_mib (MDOMAIN) failed: %d\n", 1160 wiphy_err(priv->hw->wiphy,
1164 wiphy_name(priv->hw->wiphy), ret); 1161 "at76_get_mib (mdomain) failed: %d\n", ret);
1165 goto exit; 1162 goto exit;
1166 } 1163 }
1167 1164
@@ -1232,16 +1229,16 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
1232 struct sk_buff *skb = priv->rx_skb; 1229 struct sk_buff *skb = priv->rx_skb;
1233 1230
1234 if (!priv->rx_urb) { 1231 if (!priv->rx_urb) {
1235 printk(KERN_ERR "%s: %s: priv->rx_urb is NULL\n", 1232 wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n",
1236 wiphy_name(priv->hw->wiphy), __func__); 1233 __func__);
1237 return -EFAULT; 1234 return -EFAULT;
1238 } 1235 }
1239 1236
1240 if (!skb) { 1237 if (!skb) {
1241 skb = dev_alloc_skb(sizeof(struct at76_rx_buffer)); 1238 skb = dev_alloc_skb(sizeof(struct at76_rx_buffer));
1242 if (!skb) { 1239 if (!skb) {
1243 printk(KERN_ERR "%s: cannot allocate rx skbuff\n", 1240 wiphy_err(priv->hw->wiphy,
1244 wiphy_name(priv->hw->wiphy)); 1241 "cannot allocate rx skbuff\n");
1245 ret = -ENOMEM; 1242 ret = -ENOMEM;
1246 goto exit; 1243 goto exit;
1247 } 1244 }
@@ -1260,15 +1257,14 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
1260 at76_dbg(DBG_DEVSTART, 1257 at76_dbg(DBG_DEVSTART,
1261 "usb_submit_urb returned -ENODEV"); 1258 "usb_submit_urb returned -ENODEV");
1262 else 1259 else
1263 printk(KERN_ERR "%s: rx, usb_submit_urb failed: %d\n", 1260 wiphy_err(priv->hw->wiphy,
1264 wiphy_name(priv->hw->wiphy), ret); 1261 "rx, usb_submit_urb failed: %d\n", ret);
1265 } 1262 }
1266 1263
1267exit: 1264exit:
1268 if (ret < 0 && ret != -ENODEV) 1265 if (ret < 0 && ret != -ENODEV)
1269 printk(KERN_ERR "%s: cannot submit rx urb - please unload the " 1266 wiphy_err(priv->hw->wiphy,
1270 "driver and/or power cycle the device\n", 1267 "cannot submit rx urb - please unload the driver and/or power cycle the device\n");
1271 wiphy_name(priv->hw->wiphy));
1272 1268
1273 return ret; 1269 return ret;
1274} 1270}
@@ -1437,8 +1433,8 @@ static int at76_startup_device(struct at76_priv *priv)
1437 ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config, 1433 ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config,
1438 sizeof(struct at76_card_config)); 1434 sizeof(struct at76_card_config));
1439 if (ret < 0) { 1435 if (ret < 0) {
1440 printk(KERN_ERR "%s: at76_set_card_command failed: %d\n", 1436 wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n",
1441 wiphy_name(priv->hw->wiphy), ret); 1437 ret);
1442 return ret; 1438 return ret;
1443 } 1439 }
1444 1440
@@ -1503,8 +1499,8 @@ static void at76_work_set_promisc(struct work_struct *work)
1503 1499
1504 ret = at76_set_mib(priv, &priv->mib_buf); 1500 ret = at76_set_mib(priv, &priv->mib_buf);
1505 if (ret < 0) 1501 if (ret < 0)
1506 printk(KERN_ERR "%s: set_mib (promiscuous_mode) failed: %d\n", 1502 wiphy_err(priv->hw->wiphy,
1507 wiphy_name(priv->hw->wiphy), ret); 1503 "set_mib (promiscuous_mode) failed: %d\n", ret);
1508 1504
1509 mutex_unlock(&priv->mtx); 1505 mutex_unlock(&priv->mtx);
1510} 1506}
@@ -1649,6 +1645,58 @@ exit:
1649 return NULL; 1645 return NULL;
1650} 1646}
1651 1647
1648static int at76_join(struct at76_priv *priv)
1649{
1650 struct at76_req_join join;
1651 int ret;
1652
1653 memset(&join, 0, sizeof(struct at76_req_join));
1654 memcpy(join.essid, priv->essid, priv->essid_size);
1655 join.essid_size = priv->essid_size;
1656 memcpy(join.bssid, priv->bssid, ETH_ALEN);
1657 join.bss_type = INFRASTRUCTURE_MODE;
1658 join.channel = priv->channel;
1659 join.timeout = cpu_to_le16(2000);
1660
1661 at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
1662 ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
1663 sizeof(struct at76_req_join));
1664
1665 if (ret < 0) {
1666 wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n",
1667 ret);
1668 return 0;
1669 }
1670
1671 ret = at76_wait_completion(priv, CMD_JOIN);
1672 at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
1673 if (ret != CMD_STATUS_COMPLETE) {
1674 wiphy_err(priv->hw->wiphy, "at76_wait_completion failed: %d\n",
1675 ret);
1676 return 0;
1677 }
1678
1679 at76_set_pm_mode(priv);
1680
1681 return 0;
1682}
1683
1684static void at76_work_join_bssid(struct work_struct *work)
1685{
1686 struct at76_priv *priv = container_of(work, struct at76_priv,
1687 work_join_bssid);
1688
1689 if (priv->device_unplugged)
1690 return;
1691
1692 mutex_lock(&priv->mtx);
1693
1694 if (is_valid_ether_addr(priv->bssid))
1695 at76_join(priv);
1696
1697 mutex_unlock(&priv->mtx);
1698}
1699
1652static void at76_mac80211_tx_callback(struct urb *urb) 1700static void at76_mac80211_tx_callback(struct urb *urb)
1653{ 1701{
1654 struct at76_priv *priv = urb->context; 1702 struct at76_priv *priv = urb->context;
@@ -1686,16 +1734,32 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1686 struct at76_priv *priv = hw->priv; 1734 struct at76_priv *priv = hw->priv;
1687 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; 1735 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
1688 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1736 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1737 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1689 int padding, submit_len, ret; 1738 int padding, submit_len, ret;
1690 1739
1691 at76_dbg(DBG_MAC80211, "%s()", __func__); 1740 at76_dbg(DBG_MAC80211, "%s()", __func__);
1692 1741
1693 if (priv->tx_urb->status == -EINPROGRESS) { 1742 if (priv->tx_urb->status == -EINPROGRESS) {
1694 printk(KERN_ERR "%s: %s called while tx urb is pending\n", 1743 wiphy_err(priv->hw->wiphy,
1695 wiphy_name(priv->hw->wiphy), __func__); 1744 "%s called while tx urb is pending\n", __func__);
1696 return NETDEV_TX_BUSY; 1745 return NETDEV_TX_BUSY;
1697 } 1746 }
1698 1747
1748 /* The following code lines are important when the device is going to
1749 * authenticate with a new bssid. The driver must send CMD_JOIN before
1750 * an authentication frame is transmitted. For this to succeed, the
1751 * correct bssid of the AP must be known. As mac80211 does not inform
1752 * drivers about the bssid prior to the authentication process the
1753 * following workaround is necessary. If the TX frame is an
1754 * authentication frame extract the bssid and send the CMD_JOIN. */
1755 if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
1756 if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
1757 memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
1758 ieee80211_queue_work(hw, &priv->work_join_bssid);
1759 return NETDEV_TX_BUSY;
1760 }
1761 }
1762
1699 ieee80211_stop_queues(hw); 1763 ieee80211_stop_queues(hw);
1700 1764
1701 at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */ 1765 at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */
@@ -1725,13 +1789,12 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1725 submit_len, at76_mac80211_tx_callback, priv); 1789 submit_len, at76_mac80211_tx_callback, priv);
1726 ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC); 1790 ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
1727 if (ret) { 1791 if (ret) {
1728 printk(KERN_ERR "%s: error in tx submit urb: %d\n", 1792 wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret);
1729 wiphy_name(priv->hw->wiphy), ret);
1730 if (ret == -EINVAL) 1793 if (ret == -EINVAL)
1731 printk(KERN_ERR 1794 wiphy_err(priv->hw->wiphy,
1732 "%s: -EINVAL: tx urb %p hcpriv %p complete %p\n", 1795 "-einval: tx urb %p hcpriv %p complete %p\n",
1733 wiphy_name(priv->hw->wiphy), priv->tx_urb, 1796 priv->tx_urb,
1734 priv->tx_urb->hcpriv, priv->tx_urb->complete); 1797 priv->tx_urb->hcpriv, priv->tx_urb->complete);
1735 } 1798 }
1736 1799
1737 return 0; 1800 return 0;
@@ -1748,8 +1811,8 @@ static int at76_mac80211_start(struct ieee80211_hw *hw)
1748 1811
1749 ret = at76_submit_rx_urb(priv); 1812 ret = at76_submit_rx_urb(priv);
1750 if (ret < 0) { 1813 if (ret < 0) {
1751 printk(KERN_ERR "%s: open: submit_rx_urb failed: %d\n", 1814 wiphy_err(priv->hw->wiphy, "open: submit_rx_urb failed: %d\n",
1752 wiphy_name(priv->hw->wiphy), ret); 1815 ret);
1753 goto error; 1816 goto error;
1754 } 1817 }
1755 1818
@@ -1770,6 +1833,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
1770 at76_dbg(DBG_MAC80211, "%s()", __func__); 1833 at76_dbg(DBG_MAC80211, "%s()", __func__);
1771 1834
1772 cancel_delayed_work(&priv->dwork_hw_scan); 1835 cancel_delayed_work(&priv->dwork_hw_scan);
1836 cancel_work_sync(&priv->work_join_bssid);
1773 cancel_work_sync(&priv->work_set_promisc); 1837 cancel_work_sync(&priv->work_set_promisc);
1774 1838
1775 mutex_lock(&priv->mtx); 1839 mutex_lock(&priv->mtx);
@@ -1818,42 +1882,6 @@ static void at76_remove_interface(struct ieee80211_hw *hw,
1818 at76_dbg(DBG_MAC80211, "%s()", __func__); 1882 at76_dbg(DBG_MAC80211, "%s()", __func__);
1819} 1883}
1820 1884
1821static int at76_join(struct at76_priv *priv)
1822{
1823 struct at76_req_join join;
1824 int ret;
1825
1826 memset(&join, 0, sizeof(struct at76_req_join));
1827 memcpy(join.essid, priv->essid, priv->essid_size);
1828 join.essid_size = priv->essid_size;
1829 memcpy(join.bssid, priv->bssid, ETH_ALEN);
1830 join.bss_type = INFRASTRUCTURE_MODE;
1831 join.channel = priv->channel;
1832 join.timeout = cpu_to_le16(2000);
1833
1834 at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
1835 ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
1836 sizeof(struct at76_req_join));
1837
1838 if (ret < 0) {
1839 printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
1840 wiphy_name(priv->hw->wiphy), ret);
1841 return 0;
1842 }
1843
1844 ret = at76_wait_completion(priv, CMD_JOIN);
1845 at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
1846 if (ret != CMD_STATUS_COMPLETE) {
1847 printk(KERN_ERR "%s: at76_wait_completion failed: %d\n",
1848 wiphy_name(priv->hw->wiphy), ret);
1849 return 0;
1850 }
1851
1852 at76_set_pm_mode(priv);
1853
1854 return 0;
1855}
1856
1857static void at76_dwork_hw_scan(struct work_struct *work) 1885static void at76_dwork_hw_scan(struct work_struct *work)
1858{ 1886{
1859 struct at76_priv *priv = container_of(work, struct at76_priv, 1887 struct at76_priv *priv = container_of(work, struct at76_priv,
@@ -2107,6 +2135,7 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
2107 mutex_init(&priv->mtx); 2135 mutex_init(&priv->mtx);
2108 INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc); 2136 INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc);
2109 INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx); 2137 INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx);
2138 INIT_WORK(&priv->work_join_bssid, at76_work_join_bssid);
2110 INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan); 2139 INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan);
2111 2140
2112 tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0); 2141 tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0);
@@ -2281,14 +2310,12 @@ static int at76_init_new_device(struct at76_priv *priv,
2281 2310
2282 priv->mac80211_registered = 1; 2311 priv->mac80211_registered = 1;
2283 2312
2284 printk(KERN_INFO "%s: USB %s, MAC %pM, firmware %d.%d.%d-%d\n", 2313 wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n",
2285 wiphy_name(priv->hw->wiphy), 2314 dev_name(&interface->dev), priv->mac_addr,
2286 dev_name(&interface->dev), priv->mac_addr, 2315 priv->fw_version.major, priv->fw_version.minor,
2287 priv->fw_version.major, priv->fw_version.minor, 2316 priv->fw_version.patch, priv->fw_version.build);
2288 priv->fw_version.patch, priv->fw_version.build); 2317 wiphy_info(priv->hw->wiphy, "regulatory domain 0x%02x: %s\n",
2289 printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n", 2318 priv->regulatory_domain, priv->domain->name);
2290 wiphy_name(priv->hw->wiphy),
2291 priv->regulatory_domain, priv->domain->name);
2292 2319
2293exit: 2320exit:
2294 return ret; 2321 return ret;
@@ -2450,7 +2477,7 @@ static void at76_disconnect(struct usb_interface *interface)
2450 if (!priv) 2477 if (!priv)
2451 return; 2478 return;
2452 2479
2453 printk(KERN_INFO "%s: disconnecting\n", wiphy_name(priv->hw->wiphy)); 2480 wiphy_info(priv->hw->wiphy, "disconnecting\n");
2454 at76_delete_device(priv); 2481 at76_delete_device(priv);
2455 dev_printk(KERN_INFO, &interface->dev, "disconnected\n"); 2482 dev_printk(KERN_INFO, &interface->dev, "disconnected\n");
2456} 2483}
@@ -2508,5 +2535,6 @@ MODULE_AUTHOR("Balint Seeber <n0_5p4m_p13453@hotmail.com>");
2508MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>"); 2535MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>");
2509MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>"); 2536MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>");
2510MODULE_AUTHOR("Kalle Valo <kalle.valo@iki.fi>"); 2537MODULE_AUTHOR("Kalle Valo <kalle.valo@iki.fi>");
2538MODULE_AUTHOR("Sebastian Smolorz <sesmo@gmx.net>");
2511MODULE_DESCRIPTION(DRIVER_DESC); 2539MODULE_DESCRIPTION(DRIVER_DESC);
2512MODULE_LICENSE("GPL"); 2540MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 1ec5ccffdbc0..4a37447dfc01 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -99,7 +99,7 @@ struct hwcfg_r505 {
99 u8 reserved2[14]; 99 u8 reserved2[14];
100 u8 cr15_values[14]; 100 u8 cr15_values[14];
101 u8 reserved3[3]; 101 u8 reserved3[3];
102} __attribute__((packed)); 102} __packed;
103 103
104struct hwcfg_rfmd { 104struct hwcfg_rfmd {
105 u8 cr20_values[14]; 105 u8 cr20_values[14];
@@ -111,7 +111,7 @@ struct hwcfg_rfmd {
111 u8 low_power_values[14]; 111 u8 low_power_values[14];
112 u8 normal_power_values[14]; 112 u8 normal_power_values[14];
113 u8 reserved1[3]; 113 u8 reserved1[3];
114} __attribute__((packed)); 114} __packed;
115 115
116struct hwcfg_intersil { 116struct hwcfg_intersil {
117 u8 mac_addr[ETH_ALEN]; 117 u8 mac_addr[ETH_ALEN];
@@ -120,7 +120,7 @@ struct hwcfg_intersil {
120 u8 pidvid[4]; 120 u8 pidvid[4];
121 u8 regulatory_domain; 121 u8 regulatory_domain;
122 u8 reserved[1]; 122 u8 reserved[1];
123} __attribute__((packed)); 123} __packed;
124 124
125union at76_hwcfg { 125union at76_hwcfg {
126 struct hwcfg_intersil i; 126 struct hwcfg_intersil i;
@@ -149,14 +149,14 @@ struct at76_card_config {
149 u8 ssid_len; 149 u8 ssid_len;
150 u8 short_preamble; 150 u8 short_preamble;
151 __le16 beacon_period; 151 __le16 beacon_period;
152} __attribute__((packed)); 152} __packed;
153 153
154struct at76_command { 154struct at76_command {
155 u8 cmd; 155 u8 cmd;
156 u8 reserved; 156 u8 reserved;
157 __le16 size; 157 __le16 size;
158 u8 data[0]; 158 u8 data[0];
159} __attribute__((packed)); 159} __packed;
160 160
161/* Length of Atmel-specific Rx header before 802.11 frame */ 161/* Length of Atmel-specific Rx header before 802.11 frame */
162#define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet) 162#define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet)
@@ -171,7 +171,7 @@ struct at76_rx_buffer {
171 u8 noise_level; 171 u8 noise_level;
172 __le32 rx_time; 172 __le32 rx_time;
173 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; 173 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
174} __attribute__((packed)); 174} __packed;
175 175
176/* Length of Atmel-specific Tx header before 802.11 frame */ 176/* Length of Atmel-specific Tx header before 802.11 frame */
177#define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet) 177#define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet)
@@ -182,7 +182,7 @@ struct at76_tx_buffer {
182 u8 padding; 182 u8 padding;
183 u8 reserved[4]; 183 u8 reserved[4];
184 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; 184 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
185} __attribute__((packed)); 185} __packed;
186 186
187/* defines for scan_type below */ 187/* defines for scan_type below */
188#define SCAN_TYPE_ACTIVE 0 188#define SCAN_TYPE_ACTIVE 0
@@ -198,7 +198,7 @@ struct at76_req_scan {
198 __le16 max_channel_time; 198 __le16 max_channel_time;
199 u8 essid_size; 199 u8 essid_size;
200 u8 international_scan; 200 u8 international_scan;
201} __attribute__((packed)); 201} __packed;
202 202
203struct at76_req_ibss { 203struct at76_req_ibss {
204 u8 bssid[ETH_ALEN]; 204 u8 bssid[ETH_ALEN];
@@ -207,7 +207,7 @@ struct at76_req_ibss {
207 u8 channel; 207 u8 channel;
208 u8 essid_size; 208 u8 essid_size;
209 u8 reserved[3]; 209 u8 reserved[3];
210} __attribute__((packed)); 210} __packed;
211 211
212struct at76_req_join { 212struct at76_req_join {
213 u8 bssid[ETH_ALEN]; 213 u8 bssid[ETH_ALEN];
@@ -217,7 +217,7 @@ struct at76_req_join {
217 __le16 timeout; 217 __le16 timeout;
218 u8 essid_size; 218 u8 essid_size;
219 u8 reserved; 219 u8 reserved;
220} __attribute__((packed)); 220} __packed;
221 221
222struct set_mib_buffer { 222struct set_mib_buffer {
223 u8 type; 223 u8 type;
@@ -229,7 +229,7 @@ struct set_mib_buffer {
229 __le16 word; 229 __le16 word;
230 u8 addr[ETH_ALEN]; 230 u8 addr[ETH_ALEN];
231 } data; 231 } data;
232} __attribute__((packed)); 232} __packed;
233 233
234struct mib_local { 234struct mib_local {
235 u16 reserved0; 235 u16 reserved0;
@@ -241,14 +241,14 @@ struct mib_local {
241 u16 reserved2; 241 u16 reserved2;
242 u8 preamble_type; 242 u8 preamble_type;
243 u16 reserved3; 243 u16 reserved3;
244} __attribute__((packed)); 244} __packed;
245 245
246struct mib_mac_addr { 246struct mib_mac_addr {
247 u8 mac_addr[ETH_ALEN]; 247 u8 mac_addr[ETH_ALEN];
248 u8 res[2]; /* ??? */ 248 u8 res[2]; /* ??? */
249 u8 group_addr[4][ETH_ALEN]; 249 u8 group_addr[4][ETH_ALEN];
250 u8 group_addr_status[4]; 250 u8 group_addr_status[4];
251} __attribute__((packed)); 251} __packed;
252 252
253struct mib_mac { 253struct mib_mac {
254 __le32 max_tx_msdu_lifetime; 254 __le32 max_tx_msdu_lifetime;
@@ -269,7 +269,7 @@ struct mib_mac {
269 u8 desired_bssid[ETH_ALEN]; 269 u8 desired_bssid[ETH_ALEN];
270 u8 desired_bsstype; /* ad-hoc or infrastructure */ 270 u8 desired_bsstype; /* ad-hoc or infrastructure */
271 u8 reserved2; 271 u8 reserved2;
272} __attribute__((packed)); 272} __packed;
273 273
274struct mib_mac_mgmt { 274struct mib_mac_mgmt {
275 __le16 beacon_period; 275 __le16 beacon_period;
@@ -292,7 +292,7 @@ struct mib_mac_mgmt {
292 u8 multi_domain_capability_enabled; 292 u8 multi_domain_capability_enabled;
293 u8 country_string[3]; 293 u8 country_string[3];
294 u8 reserved[3]; 294 u8 reserved[3];
295} __attribute__((packed)); 295} __packed;
296 296
297struct mib_mac_wep { 297struct mib_mac_wep {
298 u8 privacy_invoked; /* 0 disable encr., 1 enable encr */ 298 u8 privacy_invoked; /* 0 disable encr., 1 enable encr */
@@ -303,7 +303,7 @@ struct mib_mac_wep {
303 __le32 wep_excluded_count; 303 __le32 wep_excluded_count;
304 u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN]; 304 u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
305 u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ 305 u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */
306} __attribute__((packed)); 306} __packed;
307 307
308struct mib_phy { 308struct mib_phy {
309 __le32 ed_threshold; 309 __le32 ed_threshold;
@@ -320,19 +320,19 @@ struct mib_phy {
320 u8 current_cca_mode; 320 u8 current_cca_mode;
321 u8 phy_type; 321 u8 phy_type;
322 u8 current_reg_domain; 322 u8 current_reg_domain;
323} __attribute__((packed)); 323} __packed;
324 324
325struct mib_fw_version { 325struct mib_fw_version {
326 u8 major; 326 u8 major;
327 u8 minor; 327 u8 minor;
328 u8 patch; 328 u8 patch;
329 u8 build; 329 u8 build;
330} __attribute__((packed)); 330} __packed;
331 331
332struct mib_mdomain { 332struct mib_mdomain {
333 u8 tx_powerlevel[14]; 333 u8 tx_powerlevel[14];
334 u8 channel_list[14]; /* 0 for invalid channels */ 334 u8 channel_list[14]; /* 0 for invalid channels */
335} __attribute__((packed)); 335} __packed;
336 336
337struct at76_fw_header { 337struct at76_fw_header {
338 __le32 crc; /* CRC32 of the whole image */ 338 __le32 crc; /* CRC32 of the whole image */
@@ -346,7 +346,7 @@ struct at76_fw_header {
346 __le32 int_fw_len; /* internal firmware image length */ 346 __le32 int_fw_len; /* internal firmware image length */
347 __le32 ext_fw_offset; /* external firmware image offset */ 347 __le32 ext_fw_offset; /* external firmware image offset */
348 __le32 ext_fw_len; /* external firmware image length */ 348 __le32 ext_fw_len; /* external firmware image length */
349} __attribute__((packed)); 349} __packed;
350 350
351/* a description of a regulatory domain and the allowed channels */ 351/* a description of a regulatory domain and the allowed channels */
352struct reg_domain { 352struct reg_domain {
@@ -387,6 +387,7 @@ struct at76_priv {
387 /* work queues */ 387 /* work queues */
388 struct work_struct work_set_promisc; 388 struct work_struct work_set_promisc;
389 struct work_struct work_submit_rx; 389 struct work_struct work_submit_rx;
390 struct work_struct work_join_bssid;
390 struct delayed_work dwork_hw_scan; 391 struct delayed_work dwork_hw_scan;
391 392
392 struct tasklet_struct rx_tasklet; 393 struct tasklet_struct rx_tasklet;
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index cf6f5c4174a6..4604de09a8b2 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
@@ -48,8 +48,7 @@ int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
48 48
49 err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL); 49 err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL);
50 if (err) 50 if (err)
51 printk(KERN_DEBUG "%s: writing memory failed\n", 51 wiphy_debug(ar->hw->wiphy, "writing memory failed\n");
52 wiphy_name(ar->hw->wiphy));
53 return err; 52 return err;
54} 53}
55 54
@@ -67,8 +66,8 @@ int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
67 err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf), 66 err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf),
68 (u8 *) buf, 0, NULL); 67 (u8 *) buf, 0, NULL);
69 if (err) 68 if (err)
70 printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n", 69 wiphy_debug(ar->hw->wiphy, "writing reg %#x (val %#x) failed\n",
71 wiphy_name(ar->hw->wiphy), reg, val); 70 reg, val);
72 return err; 71 return err;
73} 72}
74 73
diff --git a/drivers/net/wireless/ath/ar9170/led.c b/drivers/net/wireless/ath/ar9170/led.c
index 86c4e79f6bc8..832d90087f8a 100644
--- a/drivers/net/wireless/ath/ar9170/led.c
+++ b/drivers/net/wireless/ath/ar9170/led.c
@@ -133,8 +133,8 @@ static int ar9170_register_led(struct ar9170 *ar, int i, char *name,
133 err = led_classdev_register(wiphy_dev(ar->hw->wiphy), 133 err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
134 &ar->leds[i].l); 134 &ar->leds[i].l);
135 if (err) 135 if (err)
136 printk(KERN_ERR "%s: failed to register %s LED (%d).\n", 136 wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n",
137 wiphy_name(ar->hw->wiphy), ar->leds[i].name, err); 137 ar->leds[i].name, err);
138 else 138 else
139 ar->leds[i].registered = true; 139 ar->leds[i].registered = true;
140 140
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 2abc87578994..c67b05f3bcbd 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -198,12 +198,13 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
198 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; 198 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
199 struct ieee80211_hdr *hdr = (void *) txc->frame_data; 199 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
200 200
201 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d " 201 wiphy_debug(ar->hw->wiphy,
202 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n", 202 "=> FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
203 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), 203 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
204 ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr), 204 skb, skb_get_queue_mapping(skb),
205 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control), 205 ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
206 jiffies_to_msecs(arinfo->timeout - jiffies)); 206 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
207 jiffies_to_msecs(arinfo->timeout - jiffies));
207} 208}
208 209
209static void __ar9170_dump_txqueue(struct ar9170 *ar, 210static void __ar9170_dump_txqueue(struct ar9170 *ar,
@@ -213,8 +214,8 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
213 int i = 0; 214 int i = 0;
214 215
215 printk(KERN_DEBUG "---[ cut here ]---\n"); 216 printk(KERN_DEBUG "---[ cut here ]---\n");
216 printk(KERN_DEBUG "%s: %d entries in queue.\n", 217 wiphy_debug(ar->hw->wiphy, "%d entries in queue.\n",
217 wiphy_name(ar->hw->wiphy), skb_queue_len(queue)); 218 skb_queue_len(queue));
218 219
219 skb_queue_walk(queue, skb) { 220 skb_queue_walk(queue, skb) {
220 printk(KERN_DEBUG "index:%d =>\n", i++); 221 printk(KERN_DEBUG "index:%d =>\n", i++);
@@ -244,15 +245,14 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
244{ 245{
245 int i; 246 int i;
246 247
247 printk(KERN_DEBUG "%s: QoS queue stats\n", 248 wiphy_debug(ar->hw->wiphy, "qos queue stats\n");
248 wiphy_name(ar->hw->wiphy));
249 249
250 for (i = 0; i < __AR9170_NUM_TXQ; i++) 250 for (i = 0; i < __AR9170_NUM_TXQ; i++)
251 printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d " 251 wiphy_debug(ar->hw->wiphy,
252 " stopped:%d\n", wiphy_name(ar->hw->wiphy), i, 252 "queue:%d limit:%d len:%d waitack:%d stopped:%d\n",
253 ar->tx_stats[i].limit, ar->tx_stats[i].len, 253 i, ar->tx_stats[i].limit, ar->tx_stats[i].len,
254 skb_queue_len(&ar->tx_status[i]), 254 skb_queue_len(&ar->tx_status[i]),
255 ieee80211_queue_stopped(ar->hw, i)); 255 ieee80211_queue_stopped(ar->hw, i));
256} 256}
257#endif /* AR9170_QUEUE_STOP_DEBUG */ 257#endif /* AR9170_QUEUE_STOP_DEBUG */
258 258
@@ -274,9 +274,9 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
274 274
275 if (time_is_before_jiffies(arinfo->timeout)) { 275 if (time_is_before_jiffies(arinfo->timeout)) {
276#ifdef AR9170_QUEUE_DEBUG 276#ifdef AR9170_QUEUE_DEBUG
277 printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => " 277 wiphy_debug(ar->hw->wiphy,
278 "recycle\n", wiphy_name(ar->hw->wiphy), 278 "[%ld > %ld] frame expired => recycle\n",
279 jiffies, arinfo->timeout); 279 jiffies, arinfo->timeout);
280 ar9170_print_txheader(ar, skb); 280 ar9170_print_txheader(ar, skb);
281#endif /* AR9170_QUEUE_DEBUG */ 281#endif /* AR9170_QUEUE_DEBUG */
282 __skb_unlink(skb, queue); 282 __skb_unlink(skb, queue);
@@ -317,8 +317,8 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
317 break; 317 break;
318 318
319 default: 319 default:
320 printk(KERN_ERR "%s: invalid tx_status response (%x).\n", 320 wiphy_err(ar->hw->wiphy,
321 wiphy_name(ar->hw->wiphy), tx_status); 321 "invalid tx_status response (%x)\n", tx_status);
322 break; 322 break;
323 } 323 }
324 324
@@ -339,8 +339,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
339 339
340 if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) { 340 if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
341#ifdef AR9170_QUEUE_STOP_DEBUG 341#ifdef AR9170_QUEUE_STOP_DEBUG
342 printk(KERN_DEBUG "%s: wake queue %d\n", 342 wiphy_debug(ar->hw->wiphy, "wake queue %d\n", queue);
343 wiphy_name(ar->hw->wiphy), queue);
344 __ar9170_dump_txstats(ar); 343 __ar9170_dump_txstats(ar);
345#endif /* AR9170_QUEUE_STOP_DEBUG */ 344#endif /* AR9170_QUEUE_STOP_DEBUG */
346 ieee80211_wake_queue(ar->hw, queue); 345 ieee80211_wake_queue(ar->hw, queue);
@@ -387,9 +386,9 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
387 386
388 if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) { 387 if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
389#ifdef AR9170_QUEUE_DEBUG 388#ifdef AR9170_QUEUE_DEBUG
390 printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n", 389 wiphy_debug(ar->hw->wiphy,
391 wiphy_name(ar->hw->wiphy), mac, 390 "skip frame => da %pm != %pm\n",
392 ieee80211_get_DA(hdr)); 391 mac, ieee80211_get_DA(hdr));
393 ar9170_print_txheader(ar, skb); 392 ar9170_print_txheader(ar, skb);
394#endif /* AR9170_QUEUE_DEBUG */ 393#endif /* AR9170_QUEUE_DEBUG */
395 continue; 394 continue;
@@ -400,8 +399,8 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
400 399
401 if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) { 400 if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
402#ifdef AR9170_QUEUE_DEBUG 401#ifdef AR9170_QUEUE_DEBUG
403 printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n", 402 wiphy_debug(ar->hw->wiphy,
404 wiphy_name(ar->hw->wiphy), rate, r); 403 "skip frame => rate %d != %d\n", rate, r);
405 ar9170_print_txheader(ar, skb); 404 ar9170_print_txheader(ar, skb);
406#endif /* AR9170_QUEUE_DEBUG */ 405#endif /* AR9170_QUEUE_DEBUG */
407 continue; 406 continue;
@@ -413,9 +412,9 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
413 } 412 }
414 413
415#ifdef AR9170_QUEUE_DEBUG 414#ifdef AR9170_QUEUE_DEBUG
416 printk(KERN_ERR "%s: ESS:[%pM] does not have any " 415 wiphy_err(ar->hw->wiphy,
417 "outstanding frames in queue.\n", 416 "ESS:[%pM] does not have any outstanding frames in queue.\n",
418 wiphy_name(ar->hw->wiphy), mac); 417 mac);
419 __ar9170_dump_txqueue(ar, queue); 418 __ar9170_dump_txqueue(ar, queue);
420#endif /* AR9170_QUEUE_DEBUG */ 419#endif /* AR9170_QUEUE_DEBUG */
421 spin_unlock_irqrestore(&queue->lock, flags); 420 spin_unlock_irqrestore(&queue->lock, flags);
@@ -444,8 +443,8 @@ static void ar9170_tx_janitor(struct work_struct *work)
444 443
445 for (i = 0; i < __AR9170_NUM_TXQ; i++) { 444 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
446#ifdef AR9170_QUEUE_DEBUG 445#ifdef AR9170_QUEUE_DEBUG
447 printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n", 446 wiphy_debug(ar->hw->wiphy, "garbage collector scans queue:%d\n",
448 wiphy_name(ar->hw->wiphy), i); 447 i);
449 ar9170_dump_txqueue(ar, &ar->tx_pending[i]); 448 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
450 ar9170_dump_txqueue(ar, &ar->tx_status[i]); 449 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
451#endif /* AR9170_QUEUE_DEBUG */ 450#endif /* AR9170_QUEUE_DEBUG */
@@ -495,8 +494,9 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
495 u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >> 494 u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
496 AR9170_TX_PHY_QOS_SHIFT; 495 AR9170_TX_PHY_QOS_SHIFT;
497#ifdef AR9170_QUEUE_DEBUG 496#ifdef AR9170_QUEUE_DEBUG
498 printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n", 497 wiphy_debug(ar->hw->wiphy,
499 wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q); 498 "recv tx_status for %pm, p:%08x, q:%d\n",
499 cmd->tx_status.dst, phy, q);
500#endif /* AR9170_QUEUE_DEBUG */ 500#endif /* AR9170_QUEUE_DEBUG */
501 501
502 skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst, 502 skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
@@ -582,7 +582,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
582 break; 582 break;
583 583
584 default: 584 default:
585 printk(KERN_INFO "received unhandled event %x\n", cmd->type); 585 pr_info("received unhandled event %x\n", cmd->type);
586 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len); 586 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
587 break; 587 break;
588 } 588 }
@@ -675,9 +675,9 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
675 /* TODO: update netdevice's RX dropped/errors statistics */ 675 /* TODO: update netdevice's RX dropped/errors statistics */
676 676
677 if (ar9170_nag_limiter(ar)) 677 if (ar9170_nag_limiter(ar))
678 printk(KERN_DEBUG "%s: received frame with " 678 wiphy_debug(ar->hw->wiphy,
679 "suspicious error code (%#x).\n", 679 "received frame with suspicious error code (%#x).\n",
680 wiphy_name(ar->hw->wiphy), error); 680 error);
681 681
682 return -EINVAL; 682 return -EINVAL;
683 } 683 }
@@ -704,9 +704,9 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
704 break; 704 break;
705 default: 705 default:
706 if (ar9170_nag_limiter(ar)) 706 if (ar9170_nag_limiter(ar))
707 printk(KERN_ERR "%s: invalid plcp cck rate " 707 wiphy_err(ar->hw->wiphy,
708 "(%x).\n", wiphy_name(ar->hw->wiphy), 708 "invalid plcp cck rate (%x).\n",
709 head->plcp[0]); 709 head->plcp[0]);
710 return -EINVAL; 710 return -EINVAL;
711 } 711 }
712 break; 712 break;
@@ -740,9 +740,9 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
740 break; 740 break;
741 default: 741 default:
742 if (ar9170_nag_limiter(ar)) 742 if (ar9170_nag_limiter(ar))
743 printk(KERN_ERR "%s: invalid plcp ofdm rate " 743 wiphy_err(ar->hw->wiphy,
744 "(%x).\n", wiphy_name(ar->hw->wiphy), 744 "invalid plcp ofdm rate (%x).\n",
745 head->plcp[0]); 745 head->plcp[0]);
746 return -EINVAL; 746 return -EINVAL;
747 } 747 }
748 if (status->band == IEEE80211_BAND_2GHZ) 748 if (status->band == IEEE80211_BAND_2GHZ)
@@ -761,8 +761,7 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
761 761
762 default: 762 default:
763 if (ar9170_nag_limiter(ar)) 763 if (ar9170_nag_limiter(ar))
764 printk(KERN_ERR "%s: invalid modulation\n", 764 wiphy_err(ar->hw->wiphy, "invalid modulation\n");
765 wiphy_name(ar->hw->wiphy));
766 return -EINVAL; 765 return -EINVAL;
767 } 766 }
768 767
@@ -863,8 +862,8 @@ static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
863 ar->rx_mpdu.has_plcp = true; 862 ar->rx_mpdu.has_plcp = true;
864 } else { 863 } else {
865 if (ar9170_nag_limiter(ar)) 864 if (ar9170_nag_limiter(ar))
866 printk(KERN_ERR "%s: plcp info is clipped.\n", 865 wiphy_err(ar->hw->wiphy,
867 wiphy_name(ar->hw->wiphy)); 866 "plcp info is clipped.\n");
868 return ; 867 return ;
869 } 868 }
870 break; 869 break;
@@ -877,8 +876,8 @@ static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
877 phy = (void *)(buf + mpdu_len); 876 phy = (void *)(buf + mpdu_len);
878 } else { 877 } else {
879 if (ar9170_nag_limiter(ar)) 878 if (ar9170_nag_limiter(ar))
880 printk(KERN_ERR "%s: frame tail is clipped.\n", 879 wiphy_err(ar->hw->wiphy,
881 wiphy_name(ar->hw->wiphy)); 880 "frame tail is clipped.\n");
882 return ; 881 return ;
883 } 882 }
884 883
@@ -888,9 +887,8 @@ static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
888 if (!ar9170_nag_limiter(ar)) 887 if (!ar9170_nag_limiter(ar))
889 return ; 888 return ;
890 889
891 printk(KERN_ERR "%s: rx stream did not start " 890 wiphy_err(ar->hw->wiphy,
892 "with a first_mpdu frame tag.\n", 891 "rx stream did not start with a first_mpdu frame tag.\n");
893 wiphy_name(ar->hw->wiphy));
894 892
895 return ; 893 return ;
896 } 894 }
@@ -954,8 +952,8 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
954 if (!ar->rx_failover_missing) { 952 if (!ar->rx_failover_missing) {
955 /* this is no "short read". */ 953 /* this is no "short read". */
956 if (ar9170_nag_limiter(ar)) { 954 if (ar9170_nag_limiter(ar)) {
957 printk(KERN_ERR "%s: missing tag!\n", 955 wiphy_err(ar->hw->wiphy,
958 wiphy_name(ar->hw->wiphy)); 956 "missing tag!\n");
959 goto err_telluser; 957 goto err_telluser;
960 } else 958 } else
961 goto err_silent; 959 goto err_silent;
@@ -963,9 +961,8 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
963 961
964 if (ar->rx_failover_missing > tlen) { 962 if (ar->rx_failover_missing > tlen) {
965 if (ar9170_nag_limiter(ar)) { 963 if (ar9170_nag_limiter(ar)) {
966 printk(KERN_ERR "%s: possible multi " 964 wiphy_err(ar->hw->wiphy,
967 "stream corruption!\n", 965 "possible multi stream corruption!\n");
968 wiphy_name(ar->hw->wiphy));
969 goto err_telluser; 966 goto err_telluser;
970 } else 967 } else
971 goto err_silent; 968 goto err_silent;
@@ -997,9 +994,8 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
997 if (ar->rx_failover_missing) { 994 if (ar->rx_failover_missing) {
998 /* TODO: handle double stream corruption. */ 995 /* TODO: handle double stream corruption. */
999 if (ar9170_nag_limiter(ar)) { 996 if (ar9170_nag_limiter(ar)) {
1000 printk(KERN_ERR "%s: double rx stream " 997 wiphy_err(ar->hw->wiphy,
1001 "corruption!\n", 998 "double rx stream corruption!\n");
1002 wiphy_name(ar->hw->wiphy));
1003 goto err_telluser; 999 goto err_telluser;
1004 } else 1000 } else
1005 goto err_silent; 1001 goto err_silent;
@@ -1042,9 +1038,9 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
1042 1038
1043 if (tlen) { 1039 if (tlen) {
1044 if (net_ratelimit()) 1040 if (net_ratelimit())
1045 printk(KERN_ERR "%s: %d bytes of unprocessed " 1041 wiphy_err(ar->hw->wiphy,
1046 "data left in rx stream!\n", 1042 "%d bytes of unprocessed data left in rx stream!\n",
1047 wiphy_name(ar->hw->wiphy), tlen); 1043 tlen);
1048 1044
1049 goto err_telluser; 1045 goto err_telluser;
1050 } 1046 }
@@ -1052,10 +1048,9 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
1052 return ; 1048 return ;
1053 1049
1054err_telluser: 1050err_telluser:
1055 printk(KERN_ERR "%s: damaged RX stream data [want:%d, " 1051 wiphy_err(ar->hw->wiphy,
1056 "data:%d, rx:%d, pending:%d ]\n", 1052 "damaged RX stream data [want:%d, data:%d, rx:%d, pending:%d ]\n",
1057 wiphy_name(ar->hw->wiphy), clen, wlen, tlen, 1053 clen, wlen, tlen, ar->rx_failover_missing);
1058 ar->rx_failover_missing);
1059 1054
1060 if (ar->rx_failover_missing) 1055 if (ar->rx_failover_missing)
1061 print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET, 1056 print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
@@ -1065,9 +1060,8 @@ err_telluser:
1065 print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET, 1060 print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
1066 skb->data, skb->len); 1061 skb->data, skb->len);
1067 1062
1068 printk(KERN_ERR "%s: please check your hardware and cables, if " 1063 wiphy_err(ar->hw->wiphy,
1069 "you see this message frequently.\n", 1064 "If you see this message frequently, please check your hardware and cables.\n");
1070 wiphy_name(ar->hw->wiphy));
1071 1065
1072err_silent: 1066err_silent:
1073 if (ar->rx_failover_missing) { 1067 if (ar->rx_failover_missing) {
@@ -1384,10 +1378,10 @@ static void ar9170_tx(struct ar9170 *ar)
1384 1378
1385 if (remaining_space < frames) { 1379 if (remaining_space < frames) {
1386#ifdef AR9170_QUEUE_DEBUG 1380#ifdef AR9170_QUEUE_DEBUG
1387 printk(KERN_DEBUG "%s: tx quota reached queue:%d, " 1381 wiphy_debug(ar->hw->wiphy,
1388 "remaining slots:%d, needed:%d\n", 1382 "tx quota reached queue:%d, "
1389 wiphy_name(ar->hw->wiphy), i, remaining_space, 1383 "remaining slots:%d, needed:%d\n",
1390 frames); 1384 i, remaining_space, frames);
1391#endif /* AR9170_QUEUE_DEBUG */ 1385#endif /* AR9170_QUEUE_DEBUG */
1392 frames = remaining_space; 1386 frames = remaining_space;
1393 } 1387 }
@@ -1396,18 +1390,14 @@ static void ar9170_tx(struct ar9170 *ar)
1396 ar->tx_stats[i].count += frames; 1390 ar->tx_stats[i].count += frames;
1397 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) { 1391 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
1398#ifdef AR9170_QUEUE_DEBUG 1392#ifdef AR9170_QUEUE_DEBUG
1399 printk(KERN_DEBUG "%s: queue %d full\n", 1393 wiphy_debug(ar->hw->wiphy, "queue %d full\n", i);
1400 wiphy_name(ar->hw->wiphy), i); 1394 wiphy_debug(ar->hw->wiphy, "stuck frames: ===>\n");
1401
1402 printk(KERN_DEBUG "%s: stuck frames: ===>\n",
1403 wiphy_name(ar->hw->wiphy));
1404 ar9170_dump_txqueue(ar, &ar->tx_pending[i]); 1395 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1405 ar9170_dump_txqueue(ar, &ar->tx_status[i]); 1396 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
1406#endif /* AR9170_QUEUE_DEBUG */ 1397#endif /* AR9170_QUEUE_DEBUG */
1407 1398
1408#ifdef AR9170_QUEUE_STOP_DEBUG 1399#ifdef AR9170_QUEUE_STOP_DEBUG
1409 printk(KERN_DEBUG "%s: stop queue %d\n", 1400 wiphy_debug(ar->hw->wiphy, "stop queue %d\n", i);
1410 wiphy_name(ar->hw->wiphy), i);
1411 __ar9170_dump_txstats(ar); 1401 __ar9170_dump_txstats(ar);
1412#endif /* AR9170_QUEUE_STOP_DEBUG */ 1402#endif /* AR9170_QUEUE_STOP_DEBUG */
1413 ieee80211_stop_queue(ar->hw, i); 1403 ieee80211_stop_queue(ar->hw, i);
@@ -1435,8 +1425,7 @@ static void ar9170_tx(struct ar9170 *ar)
1435 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1425 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1436 1426
1437#ifdef AR9170_QUEUE_DEBUG 1427#ifdef AR9170_QUEUE_DEBUG
1438 printk(KERN_DEBUG "%s: send frame q:%d =>\n", 1428 wiphy_debug(ar->hw->wiphy, "send frame q:%d =>\n", i);
1439 wiphy_name(ar->hw->wiphy), i);
1440 ar9170_print_txheader(ar, skb); 1429 ar9170_print_txheader(ar, skb);
1441#endif /* AR9170_QUEUE_DEBUG */ 1430#endif /* AR9170_QUEUE_DEBUG */
1442 1431
@@ -1453,26 +1442,25 @@ static void ar9170_tx(struct ar9170 *ar)
1453 } 1442 }
1454 1443
1455#ifdef AR9170_QUEUE_DEBUG 1444#ifdef AR9170_QUEUE_DEBUG
1456 printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n", 1445 wiphy_debug(ar->hw->wiphy,
1457 wiphy_name(ar->hw->wiphy), i); 1446 "ar9170_tx report for queue %d\n", i);
1458 1447
1459 printk(KERN_DEBUG "%s: unprocessed pending frames left:\n", 1448 wiphy_debug(ar->hw->wiphy,
1460 wiphy_name(ar->hw->wiphy)); 1449 "unprocessed pending frames left:\n");
1461 ar9170_dump_txqueue(ar, &ar->tx_pending[i]); 1450 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1462#endif /* AR9170_QUEUE_DEBUG */ 1451#endif /* AR9170_QUEUE_DEBUG */
1463 1452
1464 if (unlikely(frames_failed)) { 1453 if (unlikely(frames_failed)) {
1465#ifdef AR9170_QUEUE_DEBUG 1454#ifdef AR9170_QUEUE_DEBUG
1466 printk(KERN_DEBUG "%s: frames failed %d =>\n", 1455 wiphy_debug(ar->hw->wiphy,
1467 wiphy_name(ar->hw->wiphy), frames_failed); 1456 "frames failed %d =>\n", frames_failed);
1468#endif /* AR9170_QUEUE_DEBUG */ 1457#endif /* AR9170_QUEUE_DEBUG */
1469 1458
1470 spin_lock_irqsave(&ar->tx_stats_lock, flags); 1459 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1471 ar->tx_stats[i].len -= frames_failed; 1460 ar->tx_stats[i].len -= frames_failed;
1472 ar->tx_stats[i].count -= frames_failed; 1461 ar->tx_stats[i].count -= frames_failed;
1473#ifdef AR9170_QUEUE_STOP_DEBUG 1462#ifdef AR9170_QUEUE_STOP_DEBUG
1474 printk(KERN_DEBUG "%s: wake queue %d\n", 1463 wiphy_debug(ar->hw->wiphy, "wake queue %d\n", i);
1475 wiphy_name(ar->hw->wiphy), i);
1476 __ar9170_dump_txstats(ar); 1464 __ar9170_dump_txstats(ar);
1477#endif /* AR9170_QUEUE_STOP_DEBUG */ 1465#endif /* AR9170_QUEUE_STOP_DEBUG */
1478 ieee80211_wake_queue(ar->hw, i); 1466 ieee80211_wake_queue(ar->hw, i);
@@ -1917,6 +1905,24 @@ static int ar9170_get_stats(struct ieee80211_hw *hw,
1917 return 0; 1905 return 0;
1918} 1906}
1919 1907
1908static int ar9170_get_survey(struct ieee80211_hw *hw, int idx,
1909 struct survey_info *survey)
1910{
1911 struct ar9170 *ar = hw->priv;
1912 struct ieee80211_conf *conf = &hw->conf;
1913
1914 if (idx != 0)
1915 return -ENOENT;
1916
1917 /* TODO: update noise value, e.g. call ar9170_set_channel */
1918
1919 survey->channel = conf->channel;
1920 survey->filled = SURVEY_INFO_NOISE_DBM;
1921 survey->noise = ar->noise[0];
1922
1923 return 0;
1924}
1925
1920static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, 1926static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1921 const struct ieee80211_tx_queue_params *param) 1927 const struct ieee80211_tx_queue_params *param)
1922{ 1928{
@@ -1969,6 +1975,7 @@ static const struct ieee80211_ops ar9170_ops = {
1969 .get_tsf = ar9170_op_get_tsf, 1975 .get_tsf = ar9170_op_get_tsf,
1970 .set_key = ar9170_set_key, 1976 .set_key = ar9170_set_key,
1971 .get_stats = ar9170_get_stats, 1977 .get_stats = ar9170_get_stats,
1978 .get_survey = ar9170_get_survey,
1972 .ampdu_action = ar9170_ampdu_action, 1979 .ampdu_action = ar9170_ampdu_action,
1973}; 1980};
1974 1981
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index 45a415ea809a..0dbfcf79ac96 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -670,8 +670,7 @@ static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
670 ar9170_regwrite_finish(); 670 ar9170_regwrite_finish();
671 err = ar9170_regwrite_result(); 671 err = ar9170_regwrite_result();
672 if (err) 672 if (err)
673 printk(KERN_ERR "%s: rf init failed\n", 673 wiphy_err(ar->hw->wiphy, "rf init failed\n");
674 wiphy_name(ar->hw->wiphy));
675 return err; 674 return err;
676} 675}
677 676
@@ -1702,9 +1701,8 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1702 0x200 | ar->phy_heavy_clip); 1701 0x200 | ar->phy_heavy_clip);
1703 if (err) { 1702 if (err) {
1704 if (ar9170_nag_limiter(ar)) 1703 if (ar9170_nag_limiter(ar))
1705 printk(KERN_ERR "%s: failed to set " 1704 wiphy_err(ar->hw->wiphy,
1706 "heavy clip\n", 1705 "failed to set heavy clip\n");
1707 wiphy_name(ar->hw->wiphy));
1708 } 1706 }
1709 } 1707 }
1710 1708
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index cc09595b781a..2242a140e4fe 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -13,5 +13,6 @@ ath5k-y += base.o
13ath5k-y += led.o 13ath5k-y += led.o
14ath5k-y += rfkill.o 14ath5k-y += rfkill.o
15ath5k-y += ani.o 15ath5k-y += ani.o
16ath5k-y += sysfs.o
16ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 17ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
17obj-$(CONFIG_ATH5K) += ath5k.o 18obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index f2311ab35504..26dbe65fedb0 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -74,8 +74,8 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
74 const s8 fr[] = { -78, -80 }; 74 const s8 fr[] = { -78, -80 };
75#endif 75#endif
76 if (level < 0 || level >= ARRAY_SIZE(sz)) { 76 if (level < 0 || level >= ARRAY_SIZE(sz)) {
77 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, 77 ATH5K_ERR(ah->ah_sc, "noise immuniy level %d out of range",
78 "level out of range %d", level); 78 level);
79 return; 79 return;
80 } 80 }
81 81
@@ -106,8 +106,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
106 106
107 if (level < 0 || level >= ARRAY_SIZE(val) || 107 if (level < 0 || level >= ARRAY_SIZE(val) ||
108 level > ah->ah_sc->ani_state.max_spur_level) { 108 level > ah->ah_sc->ani_state.max_spur_level) {
109 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, 109 ATH5K_ERR(ah->ah_sc, "spur immunity level %d out of range",
110 "level out of range %d", level); 110 level);
111 return; 111 return;
112 } 112 }
113 113
@@ -130,8 +130,7 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
130 const int val[] = { 0, 4, 8 }; 130 const int val[] = { 0, 4, 8 };
131 131
132 if (level < 0 || level >= ARRAY_SIZE(val)) { 132 if (level < 0 || level >= ARRAY_SIZE(val)) {
133 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, 133 ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
134 "level out of range %d", level);
135 return; 134 return;
136 } 135 }
137 136
@@ -481,14 +480,15 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
481 struct ath5k_ani_state *as = &ah->ah_sc->ani_state; 480 struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
482 int listen, ofdm_high, ofdm_low, cck_high, cck_low; 481 int listen, ofdm_high, ofdm_low, cck_high, cck_low;
483 482
484 if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
485 return;
486
487 /* get listen time since last call and add it to the counter because we 483 /* get listen time since last call and add it to the counter because we
488 * might not have restarted the "ani period" last time */ 484 * might not have restarted the "ani period" last time.
485 * always do this to calculate the busy time also in manual mode */
489 listen = ath5k_hw_ani_get_listen_time(ah, as); 486 listen = ath5k_hw_ani_get_listen_time(ah, as);
490 as->listen_time += listen; 487 as->listen_time += listen;
491 488
489 if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
490 return;
491
492 ath5k_ani_save_and_clear_phy_errors(ah, as); 492 ath5k_ani_save_and_clear_phy_errors(ah, as);
493 493
494 ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000; 494 ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 2785946f659a..ea6362a8988d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -204,6 +204,7 @@
204#define AR5K_TUNE_TPC_TXPOWER false 204#define AR5K_TUNE_TPC_TXPOWER false
205#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */ 205#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */ 206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
207#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
207 208
208#define AR5K_INIT_CARR_SENSE_EN 1 209#define AR5K_INIT_CARR_SENSE_EN 1
209 210
@@ -565,7 +566,7 @@ enum ath5k_pkt_type {
565) 566)
566 567
567/* 568/*
568 * DMA size definitions (2^n+2) 569 * DMA size definitions (2^(n+2))
569 */ 570 */
570enum ath5k_dmasize { 571enum ath5k_dmasize {
571 AR5K_DMASIZE_4B = 0, 572 AR5K_DMASIZE_4B = 0,
@@ -1118,6 +1119,7 @@ struct ath5k_hw {
1118 /* Calibration timestamp */ 1119 /* Calibration timestamp */
1119 unsigned long ah_cal_next_full; 1120 unsigned long ah_cal_next_full;
1120 unsigned long ah_cal_next_ani; 1121 unsigned long ah_cal_next_ani;
1122 unsigned long ah_cal_next_nf;
1121 1123
1122 /* Calibration mask */ 1124 /* Calibration mask */
1123 u8 ah_cal_mask; 1125 u8 ah_cal_mask;
@@ -1125,15 +1127,10 @@ struct ath5k_hw {
1125 /* 1127 /*
1126 * Function pointers 1128 * Function pointers
1127 */ 1129 */
1128 int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
1129 u32 size, unsigned int flags);
1130 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1130 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1131 unsigned int, unsigned int, int, enum ath5k_pkt_type, 1131 unsigned int, unsigned int, int, enum ath5k_pkt_type,
1132 unsigned int, unsigned int, unsigned int, unsigned int, 1132 unsigned int, unsigned int, unsigned int, unsigned int,
1133 unsigned int, unsigned int, unsigned int, unsigned int); 1133 unsigned int, unsigned int, unsigned int, unsigned int);
1134 int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1135 unsigned int, unsigned int, unsigned int, unsigned int,
1136 unsigned int, unsigned int);
1137 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1134 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1138 struct ath5k_tx_status *); 1135 struct ath5k_tx_status *);
1139 int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1136 int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *,
@@ -1148,6 +1145,9 @@ struct ath5k_hw {
1148int ath5k_hw_attach(struct ath5k_softc *sc); 1145int ath5k_hw_attach(struct ath5k_softc *sc);
1149void ath5k_hw_detach(struct ath5k_hw *ah); 1146void ath5k_hw_detach(struct ath5k_hw *ah);
1150 1147
1148int ath5k_sysfs_register(struct ath5k_softc *sc);
1149void ath5k_sysfs_unregister(struct ath5k_softc *sc);
1150
1151/* LED functions */ 1151/* LED functions */
1152int ath5k_init_leds(struct ath5k_softc *sc); 1152int ath5k_init_leds(struct ath5k_softc *sc);
1153void ath5k_led_enable(struct ath5k_softc *sc); 1153void ath5k_led_enable(struct ath5k_softc *sc);
@@ -1231,6 +1231,11 @@ int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
1231 1231
1232/* Hardware Descriptor Functions */ 1232/* Hardware Descriptor Functions */
1233int ath5k_hw_init_desc_functions(struct ath5k_hw *ah); 1233int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
1234int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
1235 u32 size, unsigned int flags);
1236int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
1237 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
1238 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3);
1234 1239
1235/* GPIO Functions */ 1240/* GPIO Functions */
1236void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); 1241void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
@@ -1270,6 +1275,7 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1270void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah); 1275void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
1271int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, 1276int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1272 struct ieee80211_channel *channel); 1277 struct ieee80211_channel *channel);
1278void ath5k_hw_update_noise_floor(struct ath5k_hw *ah);
1273/* Spur mitigation */ 1279/* Spur mitigation */
1274bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1280bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
1275 struct ieee80211_channel *channel); 1281 struct ieee80211_channel *channel);
@@ -1280,6 +1286,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
1280int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1286int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1281/* Antenna control */ 1287/* Antenna control */
1282void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode); 1288void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
1289void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode);
1283/* TX power setup */ 1290/* TX power setup */
1284int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, 1291int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1285 u8 ee_mode, u8 txpower); 1292 u8 ee_mode, u8 txpower);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 31c008042bfe..b32e28caeee2 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -352,8 +352,6 @@ err_free:
352 */ 352 */
353void ath5k_hw_detach(struct ath5k_hw *ah) 353void ath5k_hw_detach(struct ath5k_hw *ah)
354{ 354{
355 ATH5K_TRACE(ah->ah_sc);
356
357 __set_bit(ATH_STAT_INVALID, ah->ah_sc->status); 355 __set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
358 356
359 if (ah->ah_rf_banks != NULL) 357 if (ah->ah_rf_banks != NULL)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 648972df369d..0d5de2574dd1 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -311,7 +311,8 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
311static int ath5k_txbuf_setup(struct ath5k_softc *sc, 311static int ath5k_txbuf_setup(struct ath5k_softc *sc,
312 struct ath5k_buf *bf, 312 struct ath5k_buf *bf,
313 struct ath5k_txq *txq, int padsize); 313 struct ath5k_txq *txq, int padsize);
314static inline void ath5k_txbuf_free(struct ath5k_softc *sc, 314
315static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
315 struct ath5k_buf *bf) 316 struct ath5k_buf *bf)
316{ 317{
317 BUG_ON(!bf); 318 BUG_ON(!bf);
@@ -321,9 +322,11 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
321 PCI_DMA_TODEVICE); 322 PCI_DMA_TODEVICE);
322 dev_kfree_skb_any(bf->skb); 323 dev_kfree_skb_any(bf->skb);
323 bf->skb = NULL; 324 bf->skb = NULL;
325 bf->skbaddr = 0;
326 bf->desc->ds_data = 0;
324} 327}
325 328
326static inline void ath5k_rxbuf_free(struct ath5k_softc *sc, 329static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
327 struct ath5k_buf *bf) 330 struct ath5k_buf *bf)
328{ 331{
329 struct ath5k_hw *ah = sc->ah; 332 struct ath5k_hw *ah = sc->ah;
@@ -336,6 +339,8 @@ static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
336 PCI_DMA_FROMDEVICE); 339 PCI_DMA_FROMDEVICE);
337 dev_kfree_skb_any(bf->skb); 340 dev_kfree_skb_any(bf->skb);
338 bf->skb = NULL; 341 bf->skb = NULL;
342 bf->skbaddr = 0;
343 bf->desc->ds_data = 0;
339} 344}
340 345
341 346
@@ -352,7 +357,6 @@ static void ath5k_txq_release(struct ath5k_softc *sc);
352static int ath5k_rx_start(struct ath5k_softc *sc); 357static int ath5k_rx_start(struct ath5k_softc *sc);
353static void ath5k_rx_stop(struct ath5k_softc *sc); 358static void ath5k_rx_stop(struct ath5k_softc *sc);
354static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, 359static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
355 struct ath5k_desc *ds,
356 struct sk_buff *skb, 360 struct sk_buff *skb,
357 struct ath5k_rx_status *rs); 361 struct ath5k_rx_status *rs);
358static void ath5k_tasklet_rx(unsigned long data); 362static void ath5k_tasklet_rx(unsigned long data);
@@ -384,7 +388,7 @@ static int ath5k_init(struct ath5k_softc *sc);
384static int ath5k_stop_locked(struct ath5k_softc *sc); 388static int ath5k_stop_locked(struct ath5k_softc *sc);
385static int ath5k_stop_hw(struct ath5k_softc *sc); 389static int ath5k_stop_hw(struct ath5k_softc *sc);
386static irqreturn_t ath5k_intr(int irq, void *dev_id); 390static irqreturn_t ath5k_intr(int irq, void *dev_id);
387static void ath5k_tasklet_reset(unsigned long data); 391static void ath5k_reset_work(struct work_struct *work);
388 392
389static void ath5k_tasklet_calibrate(unsigned long data); 393static void ath5k_tasklet_calibrate(unsigned long data);
390 394
@@ -578,7 +582,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
578 spin_lock_init(&sc->block); 582 spin_lock_init(&sc->block);
579 583
580 /* Set private data */ 584 /* Set private data */
581 pci_set_drvdata(pdev, hw); 585 pci_set_drvdata(pdev, sc);
582 586
583 /* Setup interrupt handler */ 587 /* Setup interrupt handler */
584 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 588 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
@@ -694,25 +698,23 @@ err:
694static void __devexit 698static void __devexit
695ath5k_pci_remove(struct pci_dev *pdev) 699ath5k_pci_remove(struct pci_dev *pdev)
696{ 700{
697 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 701 struct ath5k_softc *sc = pci_get_drvdata(pdev);
698 struct ath5k_softc *sc = hw->priv;
699 702
700 ath5k_debug_finish_device(sc); 703 ath5k_debug_finish_device(sc);
701 ath5k_detach(pdev, hw); 704 ath5k_detach(pdev, sc->hw);
702 ath5k_hw_detach(sc->ah); 705 ath5k_hw_detach(sc->ah);
703 kfree(sc->ah); 706 kfree(sc->ah);
704 free_irq(pdev->irq, sc); 707 free_irq(pdev->irq, sc);
705 pci_iounmap(pdev, sc->iobase); 708 pci_iounmap(pdev, sc->iobase);
706 pci_release_region(pdev, 0); 709 pci_release_region(pdev, 0);
707 pci_disable_device(pdev); 710 pci_disable_device(pdev);
708 ieee80211_free_hw(hw); 711 ieee80211_free_hw(sc->hw);
709} 712}
710 713
711#ifdef CONFIG_PM_SLEEP 714#ifdef CONFIG_PM_SLEEP
712static int ath5k_pci_suspend(struct device *dev) 715static int ath5k_pci_suspend(struct device *dev)
713{ 716{
714 struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev)); 717 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
715 struct ath5k_softc *sc = hw->priv;
716 718
717 ath5k_led_off(sc); 719 ath5k_led_off(sc);
718 return 0; 720 return 0;
@@ -721,8 +723,7 @@ static int ath5k_pci_suspend(struct device *dev)
721static int ath5k_pci_resume(struct device *dev) 723static int ath5k_pci_resume(struct device *dev)
722{ 724{
723 struct pci_dev *pdev = to_pci_dev(dev); 725 struct pci_dev *pdev = to_pci_dev(dev);
724 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 726 struct ath5k_softc *sc = pci_get_drvdata(pdev);
725 struct ath5k_softc *sc = hw->priv;
726 727
727 /* 728 /*
728 * Suspend/Resume resets the PCI configuration space, so we have to 729 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -768,7 +769,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
768 * return false w/o doing anything. MAC's that do 769 * return false w/o doing anything. MAC's that do
769 * support it will return true w/o doing anything. 770 * support it will return true w/o doing anything.
770 */ 771 */
771 ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); 772 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
773
772 if (ret < 0) 774 if (ret < 0)
773 goto err; 775 goto err;
774 if (ret > 0) 776 if (ret > 0)
@@ -829,11 +831,12 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
829 831
830 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); 832 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
831 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); 833 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
832 tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
833 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc); 834 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
834 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); 835 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
835 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc); 836 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
836 837
838 INIT_WORK(&sc->reset_work, ath5k_reset_work);
839
837 ret = ath5k_eeprom_read_mac(ah, mac); 840 ret = ath5k_eeprom_read_mac(ah, mac);
838 if (ret) { 841 if (ret) {
839 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", 842 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
@@ -864,6 +867,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
864 867
865 ath5k_init_leds(sc); 868 ath5k_init_leds(sc);
866 869
870 ath5k_sysfs_register(sc);
871
867 return 0; 872 return 0;
868err_queues: 873err_queues:
869 ath5k_txq_release(sc); 874 ath5k_txq_release(sc);
@@ -899,6 +904,7 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
899 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); 904 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
900 ath5k_unregister_leds(sc); 905 ath5k_unregister_leds(sc);
901 906
907 ath5k_sysfs_unregister(sc);
902 /* 908 /*
903 * NB: can't reclaim these until after ieee80211_ifdetach 909 * NB: can't reclaim these until after ieee80211_ifdetach
904 * returns because we'll get called back to reclaim node 910 * returns because we'll get called back to reclaim node
@@ -1111,8 +1117,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
1111static int 1117static int
1112ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) 1118ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
1113{ 1119{
1114 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n", 1120 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
1115 sc->curchan->center_freq, chan->center_freq); 1121 "channel set, resetting (%u -> %u MHz)\n",
1122 sc->curchan->center_freq, chan->center_freq);
1116 1123
1117 /* 1124 /*
1118 * To switch channels clear any pending DMA operations; 1125 * To switch channels clear any pending DMA operations;
@@ -1228,21 +1235,23 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1228 * not get overrun under high load (as can happen with a 1235 * not get overrun under high load (as can happen with a
1229 * 5212 when ANI processing enables PHY error frames). 1236 * 5212 when ANI processing enables PHY error frames).
1230 * 1237 *
1231 * To insure the last descriptor is self-linked we create 1238 * To ensure the last descriptor is self-linked we create
1232 * each descriptor as self-linked and add it to the end. As 1239 * each descriptor as self-linked and add it to the end. As
1233 * each additional descriptor is added the previous self-linked 1240 * each additional descriptor is added the previous self-linked
1234 * entry is ``fixed'' naturally. This should be safe even 1241 * entry is "fixed" naturally. This should be safe even
1235 * if DMA is happening. When processing RX interrupts we 1242 * if DMA is happening. When processing RX interrupts we
1236 * never remove/process the last, self-linked, entry on the 1243 * never remove/process the last, self-linked, entry on the
1237 * descriptor list. This insures the hardware always has 1244 * descriptor list. This ensures the hardware always has
1238 * someplace to write a new frame. 1245 * someplace to write a new frame.
1239 */ 1246 */
1240 ds = bf->desc; 1247 ds = bf->desc;
1241 ds->ds_link = bf->daddr; /* link to self */ 1248 ds->ds_link = bf->daddr; /* link to self */
1242 ds->ds_data = bf->skbaddr; 1249 ds->ds_data = bf->skbaddr;
1243 ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); 1250 ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
1244 if (ret) 1251 if (ret) {
1252 ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
1245 return ret; 1253 return ret;
1254 }
1246 1255
1247 if (sc->rxlink != NULL) 1256 if (sc->rxlink != NULL)
1248 *sc->rxlink = bf->daddr; 1257 *sc->rxlink = bf->daddr;
@@ -1347,7 +1356,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1347 mrr_tries[i] = info->control.rates[i + 1].count; 1356 mrr_tries[i] = info->control.rates[i + 1].count;
1348 } 1357 }
1349 1358
1350 ah->ah_setup_mrr_tx_desc(ah, ds, 1359 ath5k_hw_setup_mrr_tx_desc(ah, ds,
1351 mrr_rate[0], mrr_tries[0], 1360 mrr_rate[0], mrr_tries[0],
1352 mrr_rate[1], mrr_tries[1], 1361 mrr_rate[1], mrr_tries[1],
1353 mrr_rate[2], mrr_tries[2]); 1362 mrr_rate[2], mrr_tries[2]);
@@ -1443,17 +1452,20 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
1443{ 1452{
1444 struct ath5k_buf *bf; 1453 struct ath5k_buf *bf;
1445 1454
1446 ath5k_txbuf_free(sc, sc->bbuf); 1455 ath5k_txbuf_free_skb(sc, sc->bbuf);
1447 list_for_each_entry(bf, &sc->txbuf, list) 1456 list_for_each_entry(bf, &sc->txbuf, list)
1448 ath5k_txbuf_free(sc, bf); 1457 ath5k_txbuf_free_skb(sc, bf);
1449 list_for_each_entry(bf, &sc->rxbuf, list) 1458 list_for_each_entry(bf, &sc->rxbuf, list)
1450 ath5k_rxbuf_free(sc, bf); 1459 ath5k_rxbuf_free_skb(sc, bf);
1451 1460
1452 /* Free memory associated with all descriptors */ 1461 /* Free memory associated with all descriptors */
1453 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); 1462 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
1463 sc->desc = NULL;
1464 sc->desc_daddr = 0;
1454 1465
1455 kfree(sc->bufptr); 1466 kfree(sc->bufptr);
1456 sc->bufptr = NULL; 1467 sc->bufptr = NULL;
1468 sc->bbuf = NULL;
1457} 1469}
1458 1470
1459 1471
@@ -1602,7 +1614,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1602 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1614 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1603 ath5k_debug_printtxbuf(sc, bf); 1615 ath5k_debug_printtxbuf(sc, bf);
1604 1616
1605 ath5k_txbuf_free(sc, bf); 1617 ath5k_txbuf_free_skb(sc, bf);
1606 1618
1607 spin_lock_bh(&sc->txbuflock); 1619 spin_lock_bh(&sc->txbuflock);
1608 list_move_tail(&bf->list, &sc->txbuf); 1620 list_move_tail(&bf->list, &sc->txbuf);
@@ -1716,13 +1728,11 @@ ath5k_rx_stop(struct ath5k_softc *sc)
1716 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ 1728 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
1717 1729
1718 ath5k_debug_printrxbuffs(sc, ah); 1730 ath5k_debug_printrxbuffs(sc, ah);
1719
1720 sc->rxlink = NULL; /* just in case */
1721} 1731}
1722 1732
1723static unsigned int 1733static unsigned int
1724ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, 1734ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
1725 struct sk_buff *skb, struct ath5k_rx_status *rs) 1735 struct ath5k_rx_status *rs)
1726{ 1736{
1727 struct ath5k_hw *ah = sc->ah; 1737 struct ath5k_hw *ah = sc->ah;
1728 struct ath_common *common = ath5k_hw_common(ah); 1738 struct ath_common *common = ath5k_hw_common(ah);
@@ -1889,9 +1899,138 @@ static int ath5k_remove_padding(struct sk_buff *skb)
1889} 1899}
1890 1900
1891static void 1901static void
1892ath5k_tasklet_rx(unsigned long data) 1902ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1903 struct ath5k_rx_status *rs)
1893{ 1904{
1894 struct ieee80211_rx_status *rxs; 1905 struct ieee80211_rx_status *rxs;
1906
1907 /* The MAC header is padded to have 32-bit boundary if the
1908 * packet payload is non-zero. The general calculation for
1909 * padsize would take into account odd header lengths:
1910 * padsize = (4 - hdrlen % 4) % 4; However, since only
1911 * even-length headers are used, padding can only be 0 or 2
1912 * bytes and we can optimize this a bit. In addition, we must
1913 * not try to remove padding from short control frames that do
1914 * not have payload. */
1915 ath5k_remove_padding(skb);
1916
1917 rxs = IEEE80211_SKB_RXCB(skb);
1918
1919 rxs->flag = 0;
1920 if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
1921 rxs->flag |= RX_FLAG_MMIC_ERROR;
1922
1923 /*
1924 * always extend the mac timestamp, since this information is
1925 * also needed for proper IBSS merging.
1926 *
1927 * XXX: it might be too late to do it here, since rs_tstamp is
1928 * 15bit only. that means TSF extension has to be done within
1929 * 32768usec (about 32ms). it might be necessary to move this to
1930 * the interrupt handler, like it is done in madwifi.
1931 *
1932 * Unfortunately we don't know when the hardware takes the rx
1933 * timestamp (beginning of phy frame, data frame, end of rx?).
1934 * The only thing we know is that it is hardware specific...
1935 * On AR5213 it seems the rx timestamp is at the end of the
1936 * frame, but i'm not sure.
1937 *
1938 * NOTE: mac80211 defines mactime at the beginning of the first
1939 * data symbol. Since we don't have any time references it's
1940 * impossible to comply to that. This affects IBSS merge only
1941 * right now, so it's not too bad...
1942 */
1943 rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
1944 rxs->flag |= RX_FLAG_TSFT;
1945
1946 rxs->freq = sc->curchan->center_freq;
1947 rxs->band = sc->curband->band;
1948
1949 rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
1950
1951 rxs->antenna = rs->rs_antenna;
1952
1953 if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
1954 sc->stats.antenna_rx[rs->rs_antenna]++;
1955 else
1956 sc->stats.antenna_rx[0]++; /* invalid */
1957
1958 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
1959 rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
1960
1961 if (rxs->rate_idx >= 0 && rs->rs_rate ==
1962 sc->curband->bitrates[rxs->rate_idx].hw_value_short)
1963 rxs->flag |= RX_FLAG_SHORTPRE;
1964
1965 ath5k_debug_dump_skb(sc, skb, "RX ", 0);
1966
1967 ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
1968
1969 /* check beacons in IBSS mode */
1970 if (sc->opmode == NL80211_IFTYPE_ADHOC)
1971 ath5k_check_ibss_tsf(sc, skb, rxs);
1972
1973 ieee80211_rx(sc->hw, skb);
1974}
1975
1976/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
1977 *
1978 * Check if we want to further process this frame or not. Also update
1979 * statistics. Return true if we want this frame, false if not.
1980 */
1981static bool
1982ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
1983{
1984 sc->stats.rx_all_count++;
1985
1986 if (unlikely(rs->rs_status)) {
1987 if (rs->rs_status & AR5K_RXERR_CRC)
1988 sc->stats.rxerr_crc++;
1989 if (rs->rs_status & AR5K_RXERR_FIFO)
1990 sc->stats.rxerr_fifo++;
1991 if (rs->rs_status & AR5K_RXERR_PHY) {
1992 sc->stats.rxerr_phy++;
1993 if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
1994 sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
1995 return false;
1996 }
1997 if (rs->rs_status & AR5K_RXERR_DECRYPT) {
1998 /*
1999 * Decrypt error. If the error occurred
2000 * because there was no hardware key, then
2001 * let the frame through so the upper layers
2002 * can process it. This is necessary for 5210
2003 * parts which have no way to setup a ``clear''
2004 * key cache entry.
2005 *
2006 * XXX do key cache faulting
2007 */
2008 sc->stats.rxerr_decrypt++;
2009 if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
2010 !(rs->rs_status & AR5K_RXERR_CRC))
2011 return true;
2012 }
2013 if (rs->rs_status & AR5K_RXERR_MIC) {
2014 sc->stats.rxerr_mic++;
2015 return true;
2016 }
2017
2018 /* let crypto-error packets fall through in MNTR */
2019 if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
2020 sc->opmode != NL80211_IFTYPE_MONITOR)
2021 return false;
2022 }
2023
2024 if (unlikely(rs->rs_more)) {
2025 sc->stats.rxerr_jumbo++;
2026 return false;
2027 }
2028 return true;
2029}
2030
2031static void
2032ath5k_tasklet_rx(unsigned long data)
2033{
1895 struct ath5k_rx_status rs = {}; 2034 struct ath5k_rx_status rs = {};
1896 struct sk_buff *skb, *next_skb; 2035 struct sk_buff *skb, *next_skb;
1897 dma_addr_t next_skb_addr; 2036 dma_addr_t next_skb_addr;
@@ -1901,7 +2040,6 @@ ath5k_tasklet_rx(unsigned long data)
1901 struct ath5k_buf *bf; 2040 struct ath5k_buf *bf;
1902 struct ath5k_desc *ds; 2041 struct ath5k_desc *ds;
1903 int ret; 2042 int ret;
1904 int rx_flag;
1905 2043
1906 spin_lock(&sc->rxbuflock); 2044 spin_lock(&sc->rxbuflock);
1907 if (list_empty(&sc->rxbuf)) { 2045 if (list_empty(&sc->rxbuf)) {
@@ -1909,8 +2047,6 @@ ath5k_tasklet_rx(unsigned long data)
1909 goto unlock; 2047 goto unlock;
1910 } 2048 }
1911 do { 2049 do {
1912 rx_flag = 0;
1913
1914 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); 2050 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
1915 BUG_ON(bf->skb == NULL); 2051 BUG_ON(bf->skb == NULL);
1916 skb = bf->skb; 2052 skb = bf->skb;
@@ -1926,137 +2062,30 @@ ath5k_tasklet_rx(unsigned long data)
1926 else if (unlikely(ret)) { 2062 else if (unlikely(ret)) {
1927 ATH5K_ERR(sc, "error in processing rx descriptor\n"); 2063 ATH5K_ERR(sc, "error in processing rx descriptor\n");
1928 sc->stats.rxerr_proc++; 2064 sc->stats.rxerr_proc++;
1929 spin_unlock(&sc->rxbuflock); 2065 break;
1930 return;
1931 } 2066 }
1932 2067
1933 sc->stats.rx_all_count++; 2068 if (ath5k_receive_frame_ok(sc, &rs)) {
1934 2069 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
1935 if (unlikely(rs.rs_status)) {
1936 if (rs.rs_status & AR5K_RXERR_CRC)
1937 sc->stats.rxerr_crc++;
1938 if (rs.rs_status & AR5K_RXERR_FIFO)
1939 sc->stats.rxerr_fifo++;
1940 if (rs.rs_status & AR5K_RXERR_PHY) {
1941 sc->stats.rxerr_phy++;
1942 if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
1943 sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
1944 goto next;
1945 }
1946 if (rs.rs_status & AR5K_RXERR_DECRYPT) {
1947 /*
1948 * Decrypt error. If the error occurred
1949 * because there was no hardware key, then
1950 * let the frame through so the upper layers
1951 * can process it. This is necessary for 5210
1952 * parts which have no way to setup a ``clear''
1953 * key cache entry.
1954 *
1955 * XXX do key cache faulting
1956 */
1957 sc->stats.rxerr_decrypt++;
1958 if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
1959 !(rs.rs_status & AR5K_RXERR_CRC))
1960 goto accept;
1961 }
1962 if (rs.rs_status & AR5K_RXERR_MIC) {
1963 rx_flag |= RX_FLAG_MMIC_ERROR;
1964 sc->stats.rxerr_mic++;
1965 goto accept;
1966 }
1967 2070
1968 /* let crypto-error packets fall through in MNTR */ 2071 /*
1969 if ((rs.rs_status & 2072 * If we can't replace bf->skb with a new skb under
1970 ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 2073 * memory pressure, just skip this packet
1971 sc->opmode != NL80211_IFTYPE_MONITOR) 2074 */
2075 if (!next_skb)
1972 goto next; 2076 goto next;
1973 }
1974 2077
1975 if (unlikely(rs.rs_more)) { 2078 pci_unmap_single(sc->pdev, bf->skbaddr,
1976 sc->stats.rxerr_jumbo++; 2079 common->rx_bufsize,
1977 goto next; 2080 PCI_DMA_FROMDEVICE);
1978 2081
1979 } 2082 skb_put(skb, rs.rs_datalen);
1980accept:
1981 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
1982 2083
1983 /* 2084 ath5k_receive_frame(sc, skb, &rs);
1984 * If we can't replace bf->skb with a new skb under memory
1985 * pressure, just skip this packet
1986 */
1987 if (!next_skb)
1988 goto next;
1989
1990 pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
1991 PCI_DMA_FROMDEVICE);
1992 skb_put(skb, rs.rs_datalen);
1993
1994 /* The MAC header is padded to have 32-bit boundary if the
1995 * packet payload is non-zero. The general calculation for
1996 * padsize would take into account odd header lengths:
1997 * padsize = (4 - hdrlen % 4) % 4; However, since only
1998 * even-length headers are used, padding can only be 0 or 2
1999 * bytes and we can optimize this a bit. In addition, we must
2000 * not try to remove padding from short control frames that do
2001 * not have payload. */
2002 ath5k_remove_padding(skb);
2003
2004 rxs = IEEE80211_SKB_RXCB(skb);
2005
2006 /*
2007 * always extend the mac timestamp, since this information is
2008 * also needed for proper IBSS merging.
2009 *
2010 * XXX: it might be too late to do it here, since rs_tstamp is
2011 * 15bit only. that means TSF extension has to be done within
2012 * 32768usec (about 32ms). it might be necessary to move this to
2013 * the interrupt handler, like it is done in madwifi.
2014 *
2015 * Unfortunately we don't know when the hardware takes the rx
2016 * timestamp (beginning of phy frame, data frame, end of rx?).
2017 * The only thing we know is that it is hardware specific...
2018 * On AR5213 it seems the rx timestamp is at the end of the
2019 * frame, but i'm not sure.
2020 *
2021 * NOTE: mac80211 defines mactime at the beginning of the first
2022 * data symbol. Since we don't have any time references it's
2023 * impossible to comply to that. This affects IBSS merge only
2024 * right now, so it's not too bad...
2025 */
2026 rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp);
2027 rxs->flag = rx_flag | RX_FLAG_TSFT;
2028
2029 rxs->freq = sc->curchan->center_freq;
2030 rxs->band = sc->curband->band;
2031
2032 rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
2033
2034 rxs->antenna = rs.rs_antenna;
2035
2036 if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
2037 sc->stats.antenna_rx[rs.rs_antenna]++;
2038 else
2039 sc->stats.antenna_rx[0]++; /* invalid */
2040
2041 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
2042 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
2043 2085
2044 if (rxs->rate_idx >= 0 && rs.rs_rate == 2086 bf->skb = next_skb;
2045 sc->curband->bitrates[rxs->rate_idx].hw_value_short) 2087 bf->skbaddr = next_skb_addr;
2046 rxs->flag |= RX_FLAG_SHORTPRE; 2088 }
2047
2048 ath5k_debug_dump_skb(sc, skb, "RX ", 0);
2049
2050 ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
2051
2052 /* check beacons in IBSS mode */
2053 if (sc->opmode == NL80211_IFTYPE_ADHOC)
2054 ath5k_check_ibss_tsf(sc, skb, rxs);
2055
2056 ieee80211_rx(sc->hw, skb);
2057
2058 bf->skb = next_skb;
2059 bf->skbaddr = next_skb_addr;
2060next: 2089next:
2061 list_move_tail(&bf->list, &sc->rxbuf); 2090 list_move_tail(&bf->list, &sc->rxbuf);
2062 } while (ath5k_rxbuf_setup(sc, bf) == 0); 2091 } while (ath5k_rxbuf_setup(sc, bf) == 0);
@@ -2065,8 +2094,6 @@ unlock:
2065} 2094}
2066 2095
2067 2096
2068
2069
2070/*************\ 2097/*************\
2071* TX Handling * 2098* TX Handling *
2072\*************/ 2099\*************/
@@ -2266,8 +2293,8 @@ err_unmap:
2266 * frame contents are done as needed and the slot time is 2293 * frame contents are done as needed and the slot time is
2267 * also adjusted based on current state. 2294 * also adjusted based on current state.
2268 * 2295 *
2269 * This is called from software irq context (beacontq or restq 2296 * This is called from software irq context (beacontq tasklets)
2270 * tasklets) or user context from ath5k_beacon_config. 2297 * or user context from ath5k_beacon_config.
2271 */ 2298 */
2272static void 2299static void
2273ath5k_beacon_send(struct ath5k_softc *sc) 2300ath5k_beacon_send(struct ath5k_softc *sc)
@@ -2298,7 +2325,9 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2298 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 2325 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
2299 "stuck beacon time (%u missed)\n", 2326 "stuck beacon time (%u missed)\n",
2300 sc->bmisscount); 2327 sc->bmisscount);
2301 tasklet_schedule(&sc->restq); 2328 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2329 "stuck beacon, resetting\n");
2330 ieee80211_queue_work(sc->hw, &sc->reset_work);
2302 } 2331 }
2303 return; 2332 return;
2304 } 2333 }
@@ -2602,12 +2631,20 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2602 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2631 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2603 ath5k_rx_stop(sc); 2632 ath5k_rx_stop(sc);
2604 ath5k_hw_phy_disable(ah); 2633 ath5k_hw_phy_disable(ah);
2605 } else 2634 }
2606 sc->rxlink = NULL;
2607 2635
2608 return 0; 2636 return 0;
2609} 2637}
2610 2638
2639static void stop_tasklets(struct ath5k_softc *sc)
2640{
2641 tasklet_kill(&sc->rxtq);
2642 tasklet_kill(&sc->txtq);
2643 tasklet_kill(&sc->calib);
2644 tasklet_kill(&sc->beacontq);
2645 tasklet_kill(&sc->ani_tasklet);
2646}
2647
2611/* 2648/*
2612 * Stop the device, grabbing the top-level lock to protect 2649 * Stop the device, grabbing the top-level lock to protect
2613 * against concurrent entry through ath5k_init (which can happen 2650 * against concurrent entry through ath5k_init (which can happen
@@ -2647,17 +2684,12 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2647 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 2684 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2648 "putting device to sleep\n"); 2685 "putting device to sleep\n");
2649 } 2686 }
2650 ath5k_txbuf_free(sc, sc->bbuf); 2687 ath5k_txbuf_free_skb(sc, sc->bbuf);
2651 2688
2652 mmiowb(); 2689 mmiowb();
2653 mutex_unlock(&sc->lock); 2690 mutex_unlock(&sc->lock);
2654 2691
2655 tasklet_kill(&sc->rxtq); 2692 stop_tasklets(sc);
2656 tasklet_kill(&sc->txtq);
2657 tasklet_kill(&sc->restq);
2658 tasklet_kill(&sc->calib);
2659 tasklet_kill(&sc->beacontq);
2660 tasklet_kill(&sc->ani_tasklet);
2661 2693
2662 ath5k_rfkill_hw_stop(sc->ah); 2694 ath5k_rfkill_hw_stop(sc->ah);
2663 2695
@@ -2705,7 +2737,9 @@ ath5k_intr(int irq, void *dev_id)
2705 * Fatal errors are unrecoverable. 2737 * Fatal errors are unrecoverable.
2706 * Typically these are caused by DMA errors. 2738 * Typically these are caused by DMA errors.
2707 */ 2739 */
2708 tasklet_schedule(&sc->restq); 2740 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2741 "fatal int, resetting\n");
2742 ieee80211_queue_work(sc->hw, &sc->reset_work);
2709 } else if (unlikely(status & AR5K_INT_RXORN)) { 2743 } else if (unlikely(status & AR5K_INT_RXORN)) {
2710 /* 2744 /*
2711 * Receive buffers are full. Either the bus is busy or 2745 * Receive buffers are full. Either the bus is busy or
@@ -2717,8 +2751,11 @@ ath5k_intr(int irq, void *dev_id)
2717 * this guess is copied from the HAL. 2751 * this guess is copied from the HAL.
2718 */ 2752 */
2719 sc->stats.rxorn_intr++; 2753 sc->stats.rxorn_intr++;
2720 if (ah->ah_mac_srev < AR5K_SREV_AR5212) 2754 if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
2721 tasklet_schedule(&sc->restq); 2755 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2756 "rx overrun, resetting\n");
2757 ieee80211_queue_work(sc->hw, &sc->reset_work);
2758 }
2722 else 2759 else
2723 tasklet_schedule(&sc->rxtq); 2760 tasklet_schedule(&sc->rxtq);
2724 } else { 2761 } else {
@@ -2731,7 +2768,7 @@ ath5k_intr(int irq, void *dev_id)
2731 * RXE bit is written, but it doesn't work at 2768 * RXE bit is written, but it doesn't work at
2732 * least on older hardware revs. 2769 * least on older hardware revs.
2733 */ 2770 */
2734 sc->rxlink = NULL; 2771 sc->stats.rxeol_intr++;
2735 } 2772 }
2736 if (status & AR5K_INT_TXURN) { 2773 if (status & AR5K_INT_TXURN) {
2737 /* bump tx trigger level */ 2774 /* bump tx trigger level */
@@ -2764,14 +2801,6 @@ ath5k_intr(int irq, void *dev_id)
2764 return IRQ_HANDLED; 2801 return IRQ_HANDLED;
2765} 2802}
2766 2803
2767static void
2768ath5k_tasklet_reset(unsigned long data)
2769{
2770 struct ath5k_softc *sc = (void *)data;
2771
2772 ath5k_reset(sc, sc->curchan);
2773}
2774
2775/* 2804/*
2776 * Periodically recalibrate the PHY to account 2805 * Periodically recalibrate the PHY to account
2777 * for temperature/environment changes. 2806 * for temperature/environment changes.
@@ -2785,10 +2814,6 @@ ath5k_tasklet_calibrate(unsigned long data)
2785 /* Only full calibration for now */ 2814 /* Only full calibration for now */
2786 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; 2815 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2787 2816
2788 /* Stop queues so that calibration
2789 * doesn't interfere with tx */
2790 ieee80211_stop_queues(sc->hw);
2791
2792 ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", 2817 ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
2793 ieee80211_frequency_to_channel(sc->curchan->center_freq), 2818 ieee80211_frequency_to_channel(sc->curchan->center_freq),
2794 sc->curchan->hw_value); 2819 sc->curchan->hw_value);
@@ -2799,15 +2824,23 @@ ath5k_tasklet_calibrate(unsigned long data)
2799 * to load new gain values. 2824 * to load new gain values.
2800 */ 2825 */
2801 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); 2826 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2802 ath5k_reset(sc, sc->curchan); 2827 ieee80211_queue_work(sc->hw, &sc->reset_work);
2803 } 2828 }
2804 if (ath5k_hw_phy_calibrate(ah, sc->curchan)) 2829 if (ath5k_hw_phy_calibrate(ah, sc->curchan))
2805 ATH5K_ERR(sc, "calibration of channel %u failed\n", 2830 ATH5K_ERR(sc, "calibration of channel %u failed\n",
2806 ieee80211_frequency_to_channel( 2831 ieee80211_frequency_to_channel(
2807 sc->curchan->center_freq)); 2832 sc->curchan->center_freq));
2808 2833
2809 /* Wake queues */ 2834 /* Noise floor calibration interrupts rx/tx path while I/Q calibration
2810 ieee80211_wake_queues(sc->hw); 2835 * doesn't. We stop the queues so that calibration doesn't interfere
2836 * with TX and don't run it as often */
2837 if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
2838 ah->ah_cal_next_nf = jiffies +
2839 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
2840 ieee80211_stop_queues(sc->hw);
2841 ath5k_hw_update_noise_floor(ah);
2842 ieee80211_wake_queues(sc->hw);
2843 }
2811 2844
2812 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; 2845 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2813} 2846}
@@ -2895,6 +2928,8 @@ drop_packet:
2895/* 2928/*
2896 * Reset the hardware. If chan is not NULL, then also pause rx/tx 2929 * Reset the hardware. If chan is not NULL, then also pause rx/tx
2897 * and change to the given channel. 2930 * and change to the given channel.
2931 *
2932 * This should be called with sc->lock.
2898 */ 2933 */
2899static int 2934static int
2900ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) 2935ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
@@ -2904,8 +2939,11 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2904 2939
2905 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); 2940 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2906 2941
2942 ath5k_hw_set_imr(ah, 0);
2943 synchronize_irq(sc->pdev->irq);
2944 stop_tasklets(sc);
2945
2907 if (chan) { 2946 if (chan) {
2908 ath5k_hw_set_imr(ah, 0);
2909 ath5k_txq_cleanup(sc); 2947 ath5k_txq_cleanup(sc);
2910 ath5k_rx_stop(sc); 2948 ath5k_rx_stop(sc);
2911 2949
@@ -2926,6 +2964,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2926 2964
2927 ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode); 2965 ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
2928 2966
2967 ah->ah_cal_next_full = jiffies;
2968 ah->ah_cal_next_ani = jiffies;
2969 ah->ah_cal_next_nf = jiffies;
2970
2929 /* 2971 /*
2930 * Change channels and update the h/w rate map if we're switching; 2972 * Change channels and update the h/w rate map if we're switching;
2931 * e.g. 11a to 11b/g. 2973 * e.g. 11a to 11b/g.
@@ -2947,6 +2989,16 @@ err:
2947 return ret; 2989 return ret;
2948} 2990}
2949 2991
2992static void ath5k_reset_work(struct work_struct *work)
2993{
2994 struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
2995 reset_work);
2996
2997 mutex_lock(&sc->lock);
2998 ath5k_reset(sc, sc->curchan);
2999 mutex_unlock(&sc->lock);
3000}
3001
2950static int ath5k_start(struct ieee80211_hw *hw) 3002static int ath5k_start(struct ieee80211_hw *hw)
2951{ 3003{
2952 return ath5k_init(hw->priv); 3004 return ath5k_init(hw->priv);
@@ -3360,7 +3412,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
3360 3412
3361 ath5k_debug_dump_skb(sc, skb, "BC ", 1); 3413 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3362 3414
3363 ath5k_txbuf_free(sc, sc->bbuf); 3415 ath5k_txbuf_free_skb(sc, sc->bbuf);
3364 sc->bbuf->skb = skb; 3416 sc->bbuf->skb = skb;
3365 ret = ath5k_beacon_setup(sc, sc->bbuf); 3417 ret = ath5k_beacon_setup(sc, sc->bbuf);
3366 if (ret) 3418 if (ret)
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 56221bc7c8cd..dc1241f9c4e8 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -47,6 +47,7 @@
47#include <linux/if_ether.h> 47#include <linux/if_ether.h>
48#include <linux/leds.h> 48#include <linux/leds.h>
49#include <linux/rfkill.h> 49#include <linux/rfkill.h>
50#include <linux/workqueue.h>
50 51
51#include "ath5k.h" 52#include "ath5k.h"
52#include "debug.h" 53#include "debug.h"
@@ -136,6 +137,7 @@ struct ath5k_statistics {
136 137
137 unsigned int mib_intr; 138 unsigned int mib_intr;
138 unsigned int rxorn_intr; 139 unsigned int rxorn_intr;
140 unsigned int rxeol_intr;
139}; 141};
140 142
141#if CHAN_DEBUG 143#if CHAN_DEBUG
@@ -189,7 +191,7 @@ struct ath5k_softc {
189 unsigned int led_pin, /* GPIO pin for driving LED */ 191 unsigned int led_pin, /* GPIO pin for driving LED */
190 led_on; /* pin setting for LED on */ 192 led_on; /* pin setting for LED on */
191 193
192 struct tasklet_struct restq; /* reset tasklet */ 194 struct work_struct reset_work; /* deferred chip reset */
193 195
194 unsigned int rxbufsize; /* rx size based on mtu */ 196 unsigned int rxbufsize; /* rx size based on mtu */
195 struct list_head rxbuf; /* receive buffer */ 197 struct list_head rxbuf; /* receive buffer */
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 74f007126f41..beae519aa735 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -34,7 +34,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
34{ 34{
35 u16 ee_header; 35 u16 ee_header;
36 36
37 ATH5K_TRACE(ah->ah_sc);
38 /* Capabilities stored in the EEPROM */ 37 /* Capabilities stored in the EEPROM */
39 ee_header = ah->ah_capabilities.cap_eeprom.ee_header; 38 ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
40 39
@@ -123,8 +122,6 @@ int ath5k_hw_get_capability(struct ath5k_hw *ah,
123 enum ath5k_capability_type cap_type, 122 enum ath5k_capability_type cap_type,
124 u32 capability, u32 *result) 123 u32 capability, u32 *result)
125{ 124{
126 ATH5K_TRACE(ah->ah_sc);
127
128 switch (cap_type) { 125 switch (cap_type) {
129 case AR5K_CAP_NUM_TXQUEUES: 126 case AR5K_CAP_NUM_TXQUEUES:
130 if (result) { 127 if (result) {
@@ -173,8 +170,6 @@ yes:
173int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, 170int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
174 u16 assoc_id) 171 u16 assoc_id)
175{ 172{
176 ATH5K_TRACE(ah->ah_sc);
177
178 if (ah->ah_version == AR5K_AR5210) { 173 if (ah->ah_version == AR5K_AR5210) {
179 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, 174 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
180 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA); 175 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
@@ -186,8 +181,6 @@ int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
186 181
187int ath5k_hw_disable_pspoll(struct ath5k_hw *ah) 182int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
188{ 183{
189 ATH5K_TRACE(ah->ah_sc);
190
191 if (ah->ah_version == AR5K_AR5210) { 184 if (ah->ah_version == AR5K_AR5210) {
192 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, 185 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
193 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA); 186 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 6fb5c5ffa5b1..4cccc29964f6 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -239,6 +239,9 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
239 "TSF\t\t0x%016llx\tTU: %08x\n", 239 "TSF\t\t0x%016llx\tTU: %08x\n",
240 (unsigned long long)tsf, TSF_TO_TU(tsf)); 240 (unsigned long long)tsf, TSF_TO_TU(tsf));
241 241
242 if (len > sizeof(buf))
243 len = sizeof(buf);
244
242 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 245 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
243} 246}
244 247
@@ -278,7 +281,8 @@ static ssize_t write_file_reset(struct file *file,
278 size_t count, loff_t *ppos) 281 size_t count, loff_t *ppos)
279{ 282{
280 struct ath5k_softc *sc = file->private_data; 283 struct ath5k_softc *sc = file->private_data;
281 tasklet_schedule(&sc->restq); 284 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
285 ieee80211_queue_work(sc->hw, &sc->reset_work);
282 return count; 286 return count;
283} 287}
284 288
@@ -307,7 +311,6 @@ static const struct {
307 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, 311 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
308 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 312 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
309 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 313 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
310 { ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
311 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, 314 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
312 { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, 315 { ATH5K_DEBUG_ANY, "all", "show all debug levels" },
313}; 316};
@@ -334,6 +337,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
334 sc->debug.level == dbg_info[i].level ? '+' : ' ', 337 sc->debug.level == dbg_info[i].level ? '+' : ' ',
335 dbg_info[i].level, dbg_info[i].desc); 338 dbg_info[i].level, dbg_info[i].desc);
336 339
340 if (len > sizeof(buf))
341 len = sizeof(buf);
342
337 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 343 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
338} 344}
339 345
@@ -426,6 +432,16 @@ static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
426 "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n", 432 "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
427 (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0); 433 (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
428 434
435 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
436 len += snprintf(buf+len, sizeof(buf)-len,
437 "\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
438 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
439 len += snprintf(buf+len, sizeof(buf)-len,
440 "AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
441
442 if (len > sizeof(buf))
443 len = sizeof(buf);
444
429 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 445 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
430} 446}
431 447
@@ -535,6 +551,9 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
535 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n", 551 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
536 st->tx_all_count); 552 st->tx_all_count);
537 553
554 if (len > sizeof(buf))
555 len = sizeof(buf);
556
538 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 557 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
539} 558}
540 559
@@ -674,6 +693,9 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
674 ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - 693 ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
675 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2))); 694 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
676 695
696 if (len > sizeof(buf))
697 len = sizeof(buf);
698
677 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 699 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
678} 700}
679 701
@@ -729,6 +751,69 @@ static const struct file_operations fops_ani = {
729}; 751};
730 752
731 753
754/* debugfs: queues etc */
755
756static ssize_t read_file_queue(struct file *file, char __user *user_buf,
757 size_t count, loff_t *ppos)
758{
759 struct ath5k_softc *sc = file->private_data;
760 char buf[700];
761 unsigned int len = 0;
762
763 struct ath5k_txq *txq;
764 struct ath5k_buf *bf, *bf0;
765 int i, n = 0;
766
767 len += snprintf(buf+len, sizeof(buf)-len,
768 "available txbuffers: %d\n", sc->txbuf_len);
769
770 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
771 txq = &sc->txqs[i];
772
773 len += snprintf(buf+len, sizeof(buf)-len,
774 "%02d: %ssetup\n", i, txq->setup ? "" : "not ");
775
776 if (!txq->setup)
777 continue;
778
779 list_for_each_entry_safe(bf, bf0, &txq->q, list)
780 n++;
781 len += snprintf(buf+len, sizeof(buf)-len, " len: %d\n", n);
782 }
783
784 if (len > sizeof(buf))
785 len = sizeof(buf);
786
787 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
788}
789
790static ssize_t write_file_queue(struct file *file,
791 const char __user *userbuf,
792 size_t count, loff_t *ppos)
793{
794 struct ath5k_softc *sc = file->private_data;
795 char buf[20];
796
797 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
798 return -EFAULT;
799
800 if (strncmp(buf, "start", 5) == 0)
801 ieee80211_wake_queues(sc->hw);
802 else if (strncmp(buf, "stop", 4) == 0)
803 ieee80211_stop_queues(sc->hw);
804
805 return count;
806}
807
808
809static const struct file_operations fops_queue = {
810 .read = read_file_queue,
811 .write = write_file_queue,
812 .open = ath5k_debugfs_open,
813 .owner = THIS_MODULE,
814};
815
816
732/* init */ 817/* init */
733 818
734void 819void
@@ -772,6 +857,11 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
772 S_IWUSR | S_IRUSR, 857 S_IWUSR | S_IRUSR,
773 sc->debug.debugfs_phydir, sc, 858 sc->debug.debugfs_phydir, sc,
774 &fops_ani); 859 &fops_ani);
860
861 sc->debug.debugfs_queue = debugfs_create_file("queue",
862 S_IWUSR | S_IRUSR,
863 sc->debug.debugfs_phydir, sc,
864 &fops_queue);
775} 865}
776 866
777void 867void
@@ -790,6 +880,7 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
790 debugfs_remove(sc->debug.debugfs_antenna); 880 debugfs_remove(sc->debug.debugfs_antenna);
791 debugfs_remove(sc->debug.debugfs_frameerrors); 881 debugfs_remove(sc->debug.debugfs_frameerrors);
792 debugfs_remove(sc->debug.debugfs_ani); 882 debugfs_remove(sc->debug.debugfs_ani);
883 debugfs_remove(sc->debug.debugfs_queue);
793 debugfs_remove(sc->debug.debugfs_phydir); 884 debugfs_remove(sc->debug.debugfs_phydir);
794} 885}
795 886
@@ -852,7 +943,7 @@ ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
852 ds, (unsigned long long)bf->daddr, 943 ds, (unsigned long long)bf->daddr,
853 ds->ds_link, ds->ds_data, 944 ds->ds_link, ds->ds_data,
854 rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1, 945 rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
855 rd->u.rx_stat.rx_status_0, rd->u.rx_stat.rx_status_0, 946 rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
856 !done ? ' ' : (rs->rs_status == 0) ? '*' : '!'); 947 !done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
857} 948}
858 949
@@ -867,7 +958,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
867 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) 958 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
868 return; 959 return;
869 960
870 printk(KERN_DEBUG "rx queue %x, link %p\n", 961 printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
871 ath5k_hw_get_rxdp(ah), sc->rxlink); 962 ath5k_hw_get_rxdp(ah), sc->rxlink);
872 963
873 spin_lock_bh(&sc->rxbuflock); 964 spin_lock_bh(&sc->rxbuflock);
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index ddd5b3a99e8d..606ae94a9157 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -77,6 +77,7 @@ struct ath5k_dbg_info {
77 struct dentry *debugfs_antenna; 77 struct dentry *debugfs_antenna;
78 struct dentry *debugfs_frameerrors; 78 struct dentry *debugfs_frameerrors;
79 struct dentry *debugfs_ani; 79 struct dentry *debugfs_ani;
80 struct dentry *debugfs_queue;
80}; 81};
81 82
82/** 83/**
@@ -115,18 +116,12 @@ enum ath5k_debug_level {
115 ATH5K_DEBUG_DUMP_RX = 0x00000100, 116 ATH5K_DEBUG_DUMP_RX = 0x00000100,
116 ATH5K_DEBUG_DUMP_TX = 0x00000200, 117 ATH5K_DEBUG_DUMP_TX = 0x00000200,
117 ATH5K_DEBUG_DUMPBANDS = 0x00000400, 118 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
118 ATH5K_DEBUG_TRACE = 0x00001000,
119 ATH5K_DEBUG_ANI = 0x00002000, 119 ATH5K_DEBUG_ANI = 0x00002000,
120 ATH5K_DEBUG_ANY = 0xffffffff 120 ATH5K_DEBUG_ANY = 0xffffffff
121}; 121};
122 122
123#ifdef CONFIG_ATH5K_DEBUG 123#ifdef CONFIG_ATH5K_DEBUG
124 124
125#define ATH5K_TRACE(_sc) do { \
126 if (unlikely((_sc)->debug.level & ATH5K_DEBUG_TRACE)) \
127 printk(KERN_DEBUG "ath5k trace %s:%d\n", __func__, __LINE__); \
128 } while (0)
129
130#define ATH5K_DBG(_sc, _m, _fmt, ...) do { \ 125#define ATH5K_DBG(_sc, _m, _fmt, ...) do { \
131 if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \ 126 if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \
132 ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \ 127 ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \
@@ -168,8 +163,6 @@ ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
168 163
169#include <linux/compiler.h> 164#include <linux/compiler.h>
170 165
171#define ATH5K_TRACE(_sc) typecheck(struct ath5k_softc *, (_sc))
172
173static inline void __attribute__ ((format (printf, 3, 4))) 166static inline void __attribute__ ((format (printf, 3, 4)))
174ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {} 167ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {}
175 168
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7d7b646ab65a..43244382f213 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -91,14 +91,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
91 tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN; 91 tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
92 92
93 /* 93 /*
94 * Verify and set header length 94 * Verify and set header length (only 5210)
95 * XXX: I only found that on 5210 code, does it work on 5211 ?
96 */ 95 */
97 if (ah->ah_version == AR5K_AR5210) { 96 if (ah->ah_version == AR5K_AR5210) {
98 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN) 97 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
99 return -EINVAL; 98 return -EINVAL;
100 tx_ctl->tx_control_0 |= 99 tx_ctl->tx_control_0 |=
101 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN); 100 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
102 } 101 }
103 102
104 /*Differences between 5210-5211*/ 103 /*Differences between 5210-5211*/
@@ -110,11 +109,11 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
110 case AR5K_PKT_TYPE_PIFS: 109 case AR5K_PKT_TYPE_PIFS:
111 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS; 110 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
112 default: 111 default:
113 frame_type = type /*<< 2 ?*/; 112 frame_type = type;
114 } 113 }
115 114
116 tx_ctl->tx_control_0 |= 115 tx_ctl->tx_control_0 |=
117 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) | 116 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
118 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE); 117 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
119 118
120 } else { 119 } else {
@@ -123,21 +122,30 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
123 AR5K_REG_SM(antenna_mode, 122 AR5K_REG_SM(antenna_mode,
124 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT); 123 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
125 tx_ctl->tx_control_1 |= 124 tx_ctl->tx_control_1 |=
126 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE); 125 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
127 } 126 }
127
128#define _TX_FLAGS(_c, _flag) \ 128#define _TX_FLAGS(_c, _flag) \
129 if (flags & AR5K_TXDESC_##_flag) { \ 129 if (flags & AR5K_TXDESC_##_flag) { \
130 tx_ctl->tx_control_##_c |= \ 130 tx_ctl->tx_control_##_c |= \
131 AR5K_2W_TX_DESC_CTL##_c##_##_flag; \ 131 AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
132 } 132 }
133 133#define _TX_FLAGS_5211(_c, _flag) \
134 if (flags & AR5K_TXDESC_##_flag) { \
135 tx_ctl->tx_control_##_c |= \
136 AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \
137 }
134 _TX_FLAGS(0, CLRDMASK); 138 _TX_FLAGS(0, CLRDMASK);
135 _TX_FLAGS(0, VEOL);
136 _TX_FLAGS(0, INTREQ); 139 _TX_FLAGS(0, INTREQ);
137 _TX_FLAGS(0, RTSENA); 140 _TX_FLAGS(0, RTSENA);
138 _TX_FLAGS(1, NOACK); 141
142 if (ah->ah_version == AR5K_AR5211) {
143 _TX_FLAGS_5211(0, VEOL);
144 _TX_FLAGS_5211(1, NOACK);
145 }
139 146
140#undef _TX_FLAGS 147#undef _TX_FLAGS
148#undef _TX_FLAGS_5211
141 149
142 /* 150 /*
143 * WEP crap 151 * WEP crap
@@ -147,7 +155,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
147 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; 155 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
148 tx_ctl->tx_control_1 |= 156 tx_ctl->tx_control_1 |=
149 AR5K_REG_SM(key_index, 157 AR5K_REG_SM(key_index,
150 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); 158 AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
151 } 159 }
152 160
153 /* 161 /*
@@ -156,7 +164,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
156 if ((ah->ah_version == AR5K_AR5210) && 164 if ((ah->ah_version == AR5K_AR5210) &&
157 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA))) 165 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
158 tx_ctl->tx_control_1 |= rtscts_duration & 166 tx_ctl->tx_control_1 |= rtscts_duration &
159 AR5K_2W_TX_DESC_CTL1_RTS_DURATION; 167 AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
160 168
161 return 0; 169 return 0;
162} 170}
@@ -176,7 +184,6 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
176 struct ath5k_hw_4w_tx_ctl *tx_ctl; 184 struct ath5k_hw_4w_tx_ctl *tx_ctl;
177 unsigned int frame_len; 185 unsigned int frame_len;
178 186
179 ATH5K_TRACE(ah->ah_sc);
180 tx_ctl = &desc->ud.ds_tx5212.tx_ctl; 187 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
181 188
182 /* 189 /*
@@ -256,7 +263,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
256 if (key_index != AR5K_TXKEYIX_INVALID) { 263 if (key_index != AR5K_TXKEYIX_INVALID) {
257 tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; 264 tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
258 tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index, 265 tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
259 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); 266 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
260 } 267 }
261 268
262 /* 269 /*
@@ -278,13 +285,17 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
278/* 285/*
279 * Initialize a 4-word multi rate retry tx control descriptor on 5212 286 * Initialize a 4-word multi rate retry tx control descriptor on 5212
280 */ 287 */
281static int 288int
282ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 289ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
283 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, 290 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
284 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) 291 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
285{ 292{
286 struct ath5k_hw_4w_tx_ctl *tx_ctl; 293 struct ath5k_hw_4w_tx_ctl *tx_ctl;
287 294
295 /* no mrr support for cards older than 5212 */
296 if (ah->ah_version < AR5K_AR5212)
297 return 0;
298
288 /* 299 /*
289 * Rates can be 0 as long as the retry count is 0 too. 300 * Rates can be 0 as long as the retry count is 0 too.
290 * A zero rate and nonzero retry count will put the HW into a mode where 301 * A zero rate and nonzero retry count will put the HW into a mode where
@@ -324,15 +335,6 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
324 return 0; 335 return 0;
325} 336}
326 337
327/* no mrr support for cards older than 5212 */
328static int
329ath5k_hw_setup_no_mrr(struct ath5k_hw *ah, struct ath5k_desc *desc,
330 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
331 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
332{
333 return 0;
334}
335
336/* 338/*
337 * Proccess the tx status descriptor on 5210/5211 339 * Proccess the tx status descriptor on 5210/5211
338 */ 340 */
@@ -342,8 +344,6 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
342 struct ath5k_hw_2w_tx_ctl *tx_ctl; 344 struct ath5k_hw_2w_tx_ctl *tx_ctl;
343 struct ath5k_hw_tx_status *tx_status; 345 struct ath5k_hw_tx_status *tx_status;
344 346
345 ATH5K_TRACE(ah->ah_sc);
346
347 tx_ctl = &desc->ud.ds_tx5210.tx_ctl; 347 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
348 tx_status = &desc->ud.ds_tx5210.tx_stat; 348 tx_status = &desc->ud.ds_tx5210.tx_stat;
349 349
@@ -396,8 +396,6 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
396 struct ath5k_hw_4w_tx_ctl *tx_ctl; 396 struct ath5k_hw_4w_tx_ctl *tx_ctl;
397 struct ath5k_hw_tx_status *tx_status; 397 struct ath5k_hw_tx_status *tx_status;
398 398
399 ATH5K_TRACE(ah->ah_sc);
400
401 tx_ctl = &desc->ud.ds_tx5212.tx_ctl; 399 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
402 tx_status = &desc->ud.ds_tx5212.tx_stat; 400 tx_status = &desc->ud.ds_tx5212.tx_stat;
403 401
@@ -419,11 +417,11 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
419 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, 417 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
420 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); 418 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
421 ts->ts_antenna = (tx_status->tx_status_1 & 419 ts->ts_antenna = (tx_status->tx_status_1 &
422 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1; 420 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
423 ts->ts_status = 0; 421 ts->ts_status = 0;
424 422
425 ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1, 423 ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1,
426 AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX); 424 AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
427 425
428 /* The longretry counter has the number of un-acked retries 426 /* The longretry counter has the number of un-acked retries
429 * for the final rate. To get the total number of retries 427 * for the final rate. To get the total number of retries
@@ -485,12 +483,11 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
485/* 483/*
486 * Initialize an rx control descriptor 484 * Initialize an rx control descriptor
487 */ 485 */
488static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 486int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
489 u32 size, unsigned int flags) 487 u32 size, unsigned int flags)
490{ 488{
491 struct ath5k_hw_rx_ctl *rx_ctl; 489 struct ath5k_hw_rx_ctl *rx_ctl;
492 490
493 ATH5K_TRACE(ah->ah_sc);
494 rx_ctl = &desc->ud.ds_rx.rx_ctl; 491 rx_ctl = &desc->ud.ds_rx.rx_ctl;
495 492
496 /* 493 /*
@@ -502,10 +499,11 @@ static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
502 */ 499 */
503 memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc)); 500 memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
504 501
502 if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
503 return -EINVAL;
504
505 /* Setup descriptor */ 505 /* Setup descriptor */
506 rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN; 506 rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
507 if (unlikely(rx_ctl->rx_control_1 != size))
508 return -EINVAL;
509 507
510 if (flags & AR5K_RXDESC_INTREQ) 508 if (flags & AR5K_RXDESC_INTREQ)
511 rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ; 509 rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
@@ -521,13 +519,15 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
521{ 519{
522 struct ath5k_hw_rx_status *rx_status; 520 struct ath5k_hw_rx_status *rx_status;
523 521
524 rx_status = &desc->ud.ds_rx.u.rx_stat; 522 rx_status = &desc->ud.ds_rx.rx_stat;
525 523
526 /* No frame received / not ready */ 524 /* No frame received / not ready */
527 if (unlikely(!(rx_status->rx_status_1 & 525 if (unlikely(!(rx_status->rx_status_1 &
528 AR5K_5210_RX_DESC_STATUS1_DONE))) 526 AR5K_5210_RX_DESC_STATUS1_DONE)))
529 return -EINPROGRESS; 527 return -EINPROGRESS;
530 528
529 memset(rs, 0, sizeof(struct ath5k_rx_status));
530
531 /* 531 /*
532 * Frame receive status 532 * Frame receive status
533 */ 533 */
@@ -537,15 +537,23 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
537 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL); 537 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
538 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 538 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
539 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE); 539 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
540 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
541 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
542 rs->rs_more = !!(rx_status->rx_status_0 & 540 rs->rs_more = !!(rx_status->rx_status_0 &
543 AR5K_5210_RX_DESC_STATUS0_MORE); 541 AR5K_5210_RX_DESC_STATUS0_MORE);
544 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */ 542 /* TODO: this timestamp is 13 bit, later on we assume 15 bit!
543 * also the HAL code for 5210 says the timestamp is bits [10..22] of the
544 * TSF, and extends the timestamp here to 15 bit.
545 * we need to check on 5210...
546 */
545 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 547 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
546 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 548 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
547 rs->rs_status = 0; 549
548 rs->rs_phyerr = 0; 550 if (ah->ah_version == AR5K_AR5211)
551 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
552 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
553 else
554 rs->rs_antenna = (rx_status->rx_status_0 &
555 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
556 ? 2 : 1;
549 557
550 /* 558 /*
551 * Key table status 559 * Key table status
@@ -560,19 +568,21 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
560 * Receive/descriptor errors 568 * Receive/descriptor errors
561 */ 569 */
562 if (!(rx_status->rx_status_1 & 570 if (!(rx_status->rx_status_1 &
563 AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { 571 AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
564 if (rx_status->rx_status_1 & 572 if (rx_status->rx_status_1 &
565 AR5K_5210_RX_DESC_STATUS1_CRC_ERROR) 573 AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
566 rs->rs_status |= AR5K_RXERR_CRC; 574 rs->rs_status |= AR5K_RXERR_CRC;
567 575
568 if (rx_status->rx_status_1 & 576 /* only on 5210 */
569 AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN) 577 if ((ah->ah_version == AR5K_AR5210) &&
578 (rx_status->rx_status_1 &
579 AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
570 rs->rs_status |= AR5K_RXERR_FIFO; 580 rs->rs_status |= AR5K_RXERR_FIFO;
571 581
572 if (rx_status->rx_status_1 & 582 if (rx_status->rx_status_1 &
573 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { 583 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
574 rs->rs_status |= AR5K_RXERR_PHY; 584 rs->rs_status |= AR5K_RXERR_PHY;
575 rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1, 585 rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
576 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); 586 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
577 } 587 }
578 588
@@ -588,22 +598,20 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
588 * Proccess the rx status descriptor on 5212 598 * Proccess the rx status descriptor on 5212
589 */ 599 */
590static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, 600static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
591 struct ath5k_desc *desc, struct ath5k_rx_status *rs) 601 struct ath5k_desc *desc,
602 struct ath5k_rx_status *rs)
592{ 603{
593 struct ath5k_hw_rx_status *rx_status; 604 struct ath5k_hw_rx_status *rx_status;
594 struct ath5k_hw_rx_error *rx_err;
595 605
596 ATH5K_TRACE(ah->ah_sc); 606 rx_status = &desc->ud.ds_rx.rx_stat;
597 rx_status = &desc->ud.ds_rx.u.rx_stat;
598
599 /* Overlay on error */
600 rx_err = &desc->ud.ds_rx.u.rx_err;
601 607
602 /* No frame received / not ready */ 608 /* No frame received / not ready */
603 if (unlikely(!(rx_status->rx_status_1 & 609 if (unlikely(!(rx_status->rx_status_1 &
604 AR5K_5212_RX_DESC_STATUS1_DONE))) 610 AR5K_5212_RX_DESC_STATUS1_DONE)))
605 return -EINPROGRESS; 611 return -EINPROGRESS;
606 612
613 memset(rs, 0, sizeof(struct ath5k_rx_status));
614
607 /* 615 /*
608 * Frame receive status 616 * Frame receive status
609 */ 617 */
@@ -619,15 +627,13 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
619 AR5K_5212_RX_DESC_STATUS0_MORE); 627 AR5K_5212_RX_DESC_STATUS0_MORE);
620 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 628 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
621 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 629 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
622 rs->rs_status = 0;
623 rs->rs_phyerr = 0;
624 630
625 /* 631 /*
626 * Key table status 632 * Key table status
627 */ 633 */
628 if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID) 634 if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
629 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, 635 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
630 AR5K_5212_RX_DESC_STATUS1_KEY_INDEX); 636 AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
631 else 637 else
632 rs->rs_keyix = AR5K_RXKEYIX_INVALID; 638 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
633 639
@@ -635,7 +641,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
635 * Receive/descriptor errors 641 * Receive/descriptor errors
636 */ 642 */
637 if (!(rx_status->rx_status_1 & 643 if (!(rx_status->rx_status_1 &
638 AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { 644 AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
639 if (rx_status->rx_status_1 & 645 if (rx_status->rx_status_1 &
640 AR5K_5212_RX_DESC_STATUS1_CRC_ERROR) 646 AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
641 rs->rs_status |= AR5K_RXERR_CRC; 647 rs->rs_status |= AR5K_RXERR_CRC;
@@ -643,9 +649,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
643 if (rx_status->rx_status_1 & 649 if (rx_status->rx_status_1 &
644 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { 650 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
645 rs->rs_status |= AR5K_RXERR_PHY; 651 rs->rs_status |= AR5K_RXERR_PHY;
646 rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, 652 rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
647 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); 653 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
648 ath5k_ani_phy_error_report(ah, rs->rs_phyerr); 654 if (!ah->ah_capabilities.cap_has_phyerr_counters)
655 ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
649 } 656 }
650 657
651 if (rx_status->rx_status_1 & 658 if (rx_status->rx_status_1 &
@@ -656,7 +663,6 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
656 AR5K_5212_RX_DESC_STATUS1_MIC_ERROR) 663 AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
657 rs->rs_status |= AR5K_RXERR_MIC; 664 rs->rs_status |= AR5K_RXERR_MIC;
658 } 665 }
659
660 return 0; 666 return 0;
661} 667}
662 668
@@ -665,29 +671,15 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
665 */ 671 */
666int ath5k_hw_init_desc_functions(struct ath5k_hw *ah) 672int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
667{ 673{
668
669 if (ah->ah_version != AR5K_AR5210 &&
670 ah->ah_version != AR5K_AR5211 &&
671 ah->ah_version != AR5K_AR5212)
672 return -ENOTSUPP;
673
674 if (ah->ah_version == AR5K_AR5212) { 674 if (ah->ah_version == AR5K_AR5212) {
675 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
676 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; 675 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
677 ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
678 ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status; 676 ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
679 } else { 677 ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
680 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc; 678 } else if (ah->ah_version <= AR5K_AR5211) {
681 ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc; 679 ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
682 ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_no_mrr;
683 ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status; 680 ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
684 }
685
686 if (ah->ah_version == AR5K_AR5212)
687 ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
688 else if (ah->ah_version <= AR5K_AR5211)
689 ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status; 681 ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
690 682 } else
683 return -ENOTSUPP;
691 return 0; 684 return 0;
692} 685}
693
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 64538fbe4167..b2adb2a281c2 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -17,28 +17,24 @@
17 */ 17 */
18 18
19/* 19/*
20 * Internal RX/TX descriptor structures 20 * RX/TX descriptor structures
21 * (rX: reserved fields possibily used by future versions of the ar5k chipset)
22 */ 21 */
23 22
24/* 23/*
25 * common hardware RX control descriptor 24 * Common hardware RX control descriptor
26 */ 25 */
27struct ath5k_hw_rx_ctl { 26struct ath5k_hw_rx_ctl {
28 u32 rx_control_0; /* RX control word 0 */ 27 u32 rx_control_0; /* RX control word 0 */
29 u32 rx_control_1; /* RX control word 1 */ 28 u32 rx_control_1; /* RX control word 1 */
30} __packed; 29} __packed;
31 30
32/* RX control word 0 field/sflags */
33#define AR5K_DESC_RX_CTL0 0x00000000
34
35/* RX control word 1 fields/flags */ 31/* RX control word 1 fields/flags */
36#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff 32#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
37#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 33#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
38 34
39/* 35/*
40 * common hardware RX status descriptor 36 * Common hardware RX status descriptor
41 * 5210/11 and 5212 differ only in the flags defined below 37 * 5210, 5211 and 5212 differ only in the fields and flags defined below
42 */ 38 */
43struct ath5k_hw_rx_status { 39struct ath5k_hw_rx_status {
44 u32 rx_status_0; /* RX status word 0 */ 40 u32 rx_status_0; /* RX status word 0 */
@@ -47,81 +43,69 @@ struct ath5k_hw_rx_status {
47 43
48/* 5210/5211 */ 44/* 5210/5211 */
49/* RX status word 0 fields/flags */ 45/* RX status word 0 fields/flags */
50#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff 46#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
51#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 47#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
52#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 48#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210 0x00004000 /* [5210] receive on ant 1 */
49#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 /* reception rate */
53#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15 50#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15
54#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 51#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 /* rssi */
55#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19 52#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
56#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000 53#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211 0x38000000 /* [5211] receive antenna */
57#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27 54#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211_S 27
58 55
59/* RX status word 1 fields/flags */ 56/* RX status word 1 fields/flags */
60#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 57#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
61#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 58#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* reception success */
62#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 59#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
63#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN 0x00000008 60#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210 0x00000008 /* [5210] FIFO overrun */
64#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 61#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decyption CRC failure */
65#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 62#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 /* PHY error */
66#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5 63#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5
67#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 64#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
68#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 65#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decyption key index */
69#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9 66#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9
70#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 67#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 /* 13 bit of TSF */
71#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15 68#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
72#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 69#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 /* key cache miss */
73 70
74/* 5212 */ 71/* 5212 */
75/* RX status word 0 fields/flags */ 72/* RX status word 0 fields/flags */
76#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff 73#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
77#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 74#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
78#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 75#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 /* decompression CRC error */
79#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 76#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 /* reception rate */
80#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15 77#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15
81#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 78#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 /* rssi */
82#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20 79#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
83#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 80#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 /* receive antenna */
84#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28 81#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
85 82
86/* RX status word 1 fields/flags */ 83/* RX status word 1 fields/flags */
87#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 84#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
88#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 85#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* frame reception success */
89#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 86#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
90#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 87#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 /* decryption CRC failure */
91#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 88#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 /* PHY error */
92#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 89#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 /* MIC decrypt error */
93#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 90#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
94#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 91#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 /* decryption key index */
95#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9 92#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9
96#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 93#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 /* first 15bit of the TSF */
97#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16 94#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16
98#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 95#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 /* key cache miss */
99 96#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE 0x0000ff00 /* phy error code overlays key index and valid fields */
100/* 97#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE_S 8
101 * common hardware RX error descriptor
102 */
103struct ath5k_hw_rx_error {
104 u32 rx_error_0; /* RX status word 0 */
105 u32 rx_error_1; /* RX status word 1 */
106} __packed;
107
108/* RX error word 0 fields/flags */
109#define AR5K_RX_DESC_ERROR0 0x00000000
110
111/* RX error word 1 fields/flags */
112#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
113#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
114 98
115/** 99/**
116 * enum ath5k_phy_error_code - PHY Error codes 100 * enum ath5k_phy_error_code - PHY Error codes
117 */ 101 */
118enum ath5k_phy_error_code { 102enum ath5k_phy_error_code {
119 AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */ 103 AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
120 AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */ 104 AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
121 AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */ 105 AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
122 AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */ 106 AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
123 AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */ 107 AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
124 AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */ 108 AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
125 AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */ 109 AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
126 AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */ 110 AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
127 /* these are specific to the 5212 */ 111 /* these are specific to the 5212 */
@@ -148,112 +132,111 @@ struct ath5k_hw_2w_tx_ctl {
148} __packed; 132} __packed;
149 133
150/* TX control word 0 fields/flags */ 134/* TX control word 0 fields/flags */
151#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff 135#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
152#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/ 136#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210 0x0003f000 /* [5210] header length */
153#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12 137#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210_S 12
154#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 138#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 /* tx rate */
155#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18 139#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18
156#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 140#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
157#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 141#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET_5210 0x00800000 /* [5210] long packet */
158#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET 0x00800000 /*[5210]*/ 142#define AR5K_2W_TX_DESC_CTL0_VEOL_5211 0x00800000 /* [5211] virtual end-of-list */
159#define AR5K_2W_TX_DESC_CTL0_VEOL 0x00800000 /*[5211]*/ 143#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
160#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE 0x1c000000 /*[5210]*/ 144#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 /* [5210] antenna selection */
161#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26 145#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 /* [5211] antenna selection */
162#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000
163#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000
164
165#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \ 146#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \
166 (ah->ah_version == AR5K_AR5210 ? \ 147 (ah->ah_version == AR5K_AR5210 ? \
167 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \ 148 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
168 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211) 149 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
169
170#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 150#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
171#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 151#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210 0x1c000000 /* [5210] frame type */
172#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 152#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210_S 26
153#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
154#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* key is valid */
173 155
174/* TX control word 1 fields/flags */ 156/* TX control word 1 fields/flags */
175#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff 157#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
176#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 158#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
177#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000 159#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 0x0007e000 /* [5210] key table index */
178#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000 160#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211 0x000fe000 /* [5211] key table index */
179 161#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX \
180#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX \
181 (ah->ah_version == AR5K_AR5210 ? \ 162 (ah->ah_version == AR5K_AR5210 ? \
182 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \ 163 AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 : \
183 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211) 164 AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211)
184 165#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_S 13
185#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 166#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211 0x00700000 /* [5211] frame type */
186#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/ 167#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211_S 20
187#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20 168#define AR5K_2W_TX_DESC_CTL1_NOACK_5211 0x00800000 /* [5211] no ACK */
188#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/ 169#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210 0xfff80000 /* [5210] lower 13 bit of duration */
189#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/
190 170
191/* Frame types */ 171/* Frame types */
192#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00 172#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0
193#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04 173#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 1
194#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08 174#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 2
195#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c 175#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 3
196#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 0x10 176#define AR5K_AR5211_TX_DESC_FRAME_TYPE_BEACON 3
177#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
178#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
197 179
198/* 180/*
199 * 5212 hardware 4-word TX control descriptor 181 * 5212 hardware 4-word TX control descriptor
200 */ 182 */
201struct ath5k_hw_4w_tx_ctl { 183struct ath5k_hw_4w_tx_ctl {
202 u32 tx_control_0; /* TX control word 0 */ 184 u32 tx_control_0; /* TX control word 0 */
185 u32 tx_control_1; /* TX control word 1 */
186 u32 tx_control_2; /* TX control word 2 */
187 u32 tx_control_3; /* TX control word 3 */
188} __packed;
203 189
204#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff 190/* TX control word 0 fields/flags */
205#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 191#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
192#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 /* transmit power */
206#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16 193#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16
207#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 194#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
208#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 195#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 /* virtual end-of-list */
209#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 196#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
210#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 197#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 /* TX antenna selection */
211#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 198#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
212#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 199#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
213#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 200#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* destination index valid */
214#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 201#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 /* precede frame with CTS */
215
216 u32 tx_control_1; /* TX control word 1 */
217 202
218#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff 203/* TX control word 1 fields/flags */
219#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 204#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
220#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX 0x000fe000 205#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
221#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 206#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX 0x000fe000 /* destination table index */
222#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 207#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX_S 13
208#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 /* frame type */
223#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20 209#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20
224#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 210#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 /* no ACK */
225#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 211#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 /* compression processing */
226#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25 212#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25
227#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 213#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 /* length of frame IV */
228#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27 214#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27
229#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 215#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 /* length of frame ICV */
230#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29 216#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29
231 217
232 u32 tx_control_2; /* TX control word 2 */ 218/* TX control word 2 fields/flags */
233 219#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff /* RTS/CTS duration */
234#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff 220#define AR5K_4W_TX_DESC_CTL2_DURATION_UPD_EN 0x00008000 /* frame duration update */
235#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE 0x00008000 221#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 /* series 0 max attempts */
236#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 222#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16
237#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16 223#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 /* series 1 max attempts */
238#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 224#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20
239#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20 225#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 /* series 2 max attempts */
240#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 226#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24
241#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24 227#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 /* series 3 max attempts */
242#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 228#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28
243#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28 229
244 230/* TX control word 3 fields/flags */
245 u32 tx_control_3; /* TX control word 3 */ 231#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f /* series 0 tx rate */
246 232#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 /* series 1 tx rate */
247#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f
248#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0
249#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5 233#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5
250#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 234#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 /* series 2 tx rate */
251#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10 235#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10
252#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 236#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 /* series 3 tx rate */
253#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15 237#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15
254#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 238#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
255#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20 239#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
256} __packed;
257 240
258/* 241/*
259 * Common TX status descriptor 242 * Common TX status descriptor
@@ -264,37 +247,34 @@ struct ath5k_hw_tx_status {
264} __packed; 247} __packed;
265 248
266/* TX status word 0 fields/flags */ 249/* TX status word 0 fields/flags */
267#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 250#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
268#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 251#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 /* excessive retries */
269#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 252#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 /* FIFO underrun */
270#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 253#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 /* TX filter indication */
271/*??? 254/* according to the HAL sources the spec has short/long retry counts reversed.
272#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT 0x000000f0 255 * we have it reversed to the HAL sources as well, for 5210 and 5211.
273#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S 4 256 * For 5212 these fields are defined as RTS_FAIL_COUNT and DATA_FAIL_COUNT,
274*/ 257 * but used respectively as SHORT and LONG retry count in the code later. This
275#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 258 * is consistent with the definitions here... TODO: check */
259#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 /* short retry count */
276#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4 260#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4
277/*??? 261#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 /* long retry count */
278#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT 0x00000f00
279#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S 8
280*/
281#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00
282#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8 262#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8
283#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT 0x0000f000 263#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211 0x0000f000 /* [5211+] virtual collision count */
284#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S 12 264#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5212_S 12
285#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 265#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 /* TX timestamp */
286#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16 266#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16
287 267
288/* TX status word 1 fields/flags */ 268/* TX status word 1 fields/flags */
289#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 269#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 /* descriptor complete */
290#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe 270#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe /* TX sequence number */
291#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1 271#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1
292#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 272#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 /* signal strength of ACK */
293#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13 273#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13
294#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX 0x00600000 274#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212 0x00600000 /* [5212] final TX attempt series ix */
295#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21 275#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212_S 21
296#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000 276#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
297#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000 277#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
298 278
299/* 279/*
300 * 5210/5211 hardware TX descriptor 280 * 5210/5211 hardware TX descriptor
@@ -313,18 +293,15 @@ struct ath5k_hw_5212_tx_desc {
313} __packed; 293} __packed;
314 294
315/* 295/*
316 * common hardware RX descriptor 296 * Common hardware RX descriptor
317 */ 297 */
318struct ath5k_hw_all_rx_desc { 298struct ath5k_hw_all_rx_desc {
319 struct ath5k_hw_rx_ctl rx_ctl; 299 struct ath5k_hw_rx_ctl rx_ctl;
320 union { 300 struct ath5k_hw_rx_status rx_stat;
321 struct ath5k_hw_rx_status rx_stat;
322 struct ath5k_hw_rx_error rx_err;
323 } u;
324} __packed; 301} __packed;
325 302
326/* 303/*
327 * Atheros hardware descriptor 304 * Atheros hardware DMA descriptor
328 * This is read and written to by the hardware 305 * This is read and written to by the hardware
329 */ 306 */
330struct ath5k_desc { 307struct ath5k_desc {
@@ -346,4 +323,3 @@ struct ath5k_desc {
346#define AR5K_TXDESC_CTSENA 0x0008 323#define AR5K_TXDESC_CTSENA 0x0008
347#define AR5K_TXDESC_INTREQ 0x0010 324#define AR5K_TXDESC_INTREQ 0x0010
348#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/ 325#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
349
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 941b51130a6f..484f31870ba8 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -48,7 +48,6 @@
48 */ 48 */
49void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) 49void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
50{ 50{
51 ATH5K_TRACE(ah->ah_sc);
52 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); 51 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
53 ath5k_hw_reg_read(ah, AR5K_CR); 52 ath5k_hw_reg_read(ah, AR5K_CR);
54} 53}
@@ -62,7 +61,6 @@ int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
62{ 61{
63 unsigned int i; 62 unsigned int i;
64 63
65 ATH5K_TRACE(ah->ah_sc);
66 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR); 64 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
67 65
68 /* 66 /*
@@ -96,8 +94,6 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
96 */ 94 */
97void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) 95void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
98{ 96{
99 ATH5K_TRACE(ah->ah_sc);
100
101 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP); 97 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
102} 98}
103 99
@@ -125,7 +121,6 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
125{ 121{
126 u32 tx_queue; 122 u32 tx_queue;
127 123
128 ATH5K_TRACE(ah->ah_sc);
129 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 124 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
130 125
131 /* Return if queue is declared inactive */ 126 /* Return if queue is declared inactive */
@@ -186,7 +181,6 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
186 unsigned int i = 40; 181 unsigned int i = 40;
187 u32 tx_queue, pending; 182 u32 tx_queue, pending;
188 183
189 ATH5K_TRACE(ah->ah_sc);
190 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 184 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
191 185
192 /* Return if queue is declared inactive */ 186 /* Return if queue is declared inactive */
@@ -297,7 +291,6 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
297{ 291{
298 u16 tx_reg; 292 u16 tx_reg;
299 293
300 ATH5K_TRACE(ah->ah_sc);
301 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 294 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
302 295
303 /* 296 /*
@@ -340,7 +333,6 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
340{ 333{
341 u16 tx_reg; 334 u16 tx_reg;
342 335
343 ATH5K_TRACE(ah->ah_sc);
344 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 336 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
345 337
346 /* 338 /*
@@ -400,8 +392,6 @@ int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
400 u32 trigger_level, imr; 392 u32 trigger_level, imr;
401 int ret = -EIO; 393 int ret = -EIO;
402 394
403 ATH5K_TRACE(ah->ah_sc);
404
405 /* 395 /*
406 * Disable interrupts by setting the mask 396 * Disable interrupts by setting the mask
407 */ 397 */
@@ -451,7 +441,6 @@ done:
451 */ 441 */
452bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) 442bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
453{ 443{
454 ATH5K_TRACE(ah->ah_sc);
455 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; 444 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
456} 445}
457 446
@@ -475,8 +464,6 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
475{ 464{
476 u32 data; 465 u32 data;
477 466
478 ATH5K_TRACE(ah->ah_sc);
479
480 /* 467 /*
481 * Read interrupt status from the Interrupt Status register 468 * Read interrupt status from the Interrupt Status register
482 * on 5210 469 * on 5210
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index ed0263672d6d..ae316fec4a6a 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -35,7 +35,6 @@ static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
35{ 35{
36 u32 status, timeout; 36 u32 status, timeout;
37 37
38 ATH5K_TRACE(ah->ah_sc);
39 /* 38 /*
40 * Initialize EEPROM access 39 * Initialize EEPROM access
41 */ 40 */
@@ -715,7 +714,7 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
715 714
716 /* Only one curve for RF5111 715 /* Only one curve for RF5111
717 * find out which one and place 716 * find out which one and place
718 * in in pd_curves. 717 * in pd_curves.
719 * Note: ee_x_gain is reversed here */ 718 * Note: ee_x_gain is reversed here */
720 for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) { 719 for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
721 720
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 64a27e73d02e..bc90503f4b7a 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -34,8 +34,6 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
34 /*5210 has different led mode handling*/ 34 /*5210 has different led mode handling*/
35 u32 led_5210; 35 u32 led_5210;
36 36
37 ATH5K_TRACE(ah->ah_sc);
38
39 /*Reset led status*/ 37 /*Reset led status*/
40 if (ah->ah_version != AR5K_AR5210) 38 if (ah->ah_version != AR5K_AR5210)
41 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, 39 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
@@ -82,7 +80,6 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
82 */ 80 */
83int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) 81int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
84{ 82{
85 ATH5K_TRACE(ah->ah_sc);
86 if (gpio >= AR5K_NUM_GPIO) 83 if (gpio >= AR5K_NUM_GPIO)
87 return -EINVAL; 84 return -EINVAL;
88 85
@@ -98,7 +95,6 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
98 */ 95 */
99int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) 96int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
100{ 97{
101 ATH5K_TRACE(ah->ah_sc);
102 if (gpio >= AR5K_NUM_GPIO) 98 if (gpio >= AR5K_NUM_GPIO)
103 return -EINVAL; 99 return -EINVAL;
104 100
@@ -114,7 +110,6 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
114 */ 110 */
115u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) 111u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
116{ 112{
117 ATH5K_TRACE(ah->ah_sc);
118 if (gpio >= AR5K_NUM_GPIO) 113 if (gpio >= AR5K_NUM_GPIO)
119 return 0xffffffff; 114 return 0xffffffff;
120 115
@@ -129,7 +124,6 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
129int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) 124int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
130{ 125{
131 u32 data; 126 u32 data;
132 ATH5K_TRACE(ah->ah_sc);
133 127
134 if (gpio >= AR5K_NUM_GPIO) 128 if (gpio >= AR5K_NUM_GPIO)
135 return -EINVAL; 129 return -EINVAL;
@@ -153,7 +147,6 @@ void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
153{ 147{
154 u32 data; 148 u32 data;
155 149
156 ATH5K_TRACE(ah->ah_sc);
157 if (gpio >= AR5K_NUM_GPIO) 150 if (gpio >= AR5K_NUM_GPIO)
158 return; 151 return;
159 152
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 5212e275f1c7..86fdb6ddfaaa 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -59,8 +59,6 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
59 59
60 beacon_reg = 0; 60 beacon_reg = 0;
61 61
62 ATH5K_TRACE(ah->ah_sc);
63
64 switch (op_mode) { 62 switch (op_mode) {
65 case NL80211_IFTYPE_ADHOC: 63 case NL80211_IFTYPE_ADHOC:
66 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE; 64 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
@@ -173,7 +171,6 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
173 */ 171 */
174static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) 172static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
175{ 173{
176 ATH5K_TRACE(ah->ah_sc);
177 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) 174 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
178 <= timeout) 175 <= timeout)
179 return -EINVAL; 176 return -EINVAL;
@@ -192,7 +189,6 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
192 */ 189 */
193static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) 190static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
194{ 191{
195 ATH5K_TRACE(ah->ah_sc);
196 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) 192 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
197 <= timeout) 193 <= timeout)
198 return -EINVAL; 194 return -EINVAL;
@@ -297,7 +293,6 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
297 u32 low_id, high_id; 293 u32 low_id, high_id;
298 u32 pcu_reg; 294 u32 pcu_reg;
299 295
300 ATH5K_TRACE(ah->ah_sc);
301 /* Set new station ID */ 296 /* Set new station ID */
302 memcpy(common->macaddr, mac, ETH_ALEN); 297 memcpy(common->macaddr, mac, ETH_ALEN);
303 298
@@ -357,7 +352,6 @@ void ath5k_hw_set_associd(struct ath5k_hw *ah)
357void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) 352void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
358{ 353{
359 struct ath_common *common = ath5k_hw_common(ah); 354 struct ath_common *common = ath5k_hw_common(ah);
360 ATH5K_TRACE(ah->ah_sc);
361 355
362 /* Cache bssid mask so that we can restore it 356 /* Cache bssid mask so that we can restore it
363 * on reset */ 357 * on reset */
@@ -382,7 +376,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
382 */ 376 */
383void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) 377void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
384{ 378{
385 ATH5K_TRACE(ah->ah_sc);
386 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); 379 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
387} 380}
388 381
@@ -397,7 +390,6 @@ void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
397 */ 390 */
398void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) 391void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
399{ 392{
400 ATH5K_TRACE(ah->ah_sc);
401 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); 393 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
402} 394}
403 395
@@ -406,8 +398,6 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
406 */ 398 */
407void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) 399void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
408{ 400{
409 ATH5K_TRACE(ah->ah_sc);
410 /* Set the multicat filter */
411 ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); 401 ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
412 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); 402 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
413} 403}
@@ -427,7 +417,6 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
427{ 417{
428 u32 data, filter = 0; 418 u32 data, filter = 0;
429 419
430 ATH5K_TRACE(ah->ah_sc);
431 filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER); 420 filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
432 421
433 /*Radar detection for 5212*/ 422 /*Radar detection for 5212*/
@@ -457,8 +446,6 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
457{ 446{
458 u32 data = 0; 447 u32 data = 0;
459 448
460 ATH5K_TRACE(ah->ah_sc);
461
462 /* Set PHY error filter register on 5212*/ 449 /* Set PHY error filter register on 5212*/
463 if (ah->ah_version == AR5K_AR5212) { 450 if (ah->ah_version == AR5K_AR5212) {
464 if (filter & AR5K_RX_FILTER_RADARERR) 451 if (filter & AR5K_RX_FILTER_RADARERR)
@@ -533,8 +520,6 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
533 520
534 WARN_ON( i == ATH5K_MAX_TSF_READ ); 521 WARN_ON( i == ATH5K_MAX_TSF_READ );
535 522
536 ATH5K_TRACE(ah->ah_sc);
537
538 return (((u64)tsf_upper1 << 32) | tsf_lower); 523 return (((u64)tsf_upper1 << 32) | tsf_lower);
539} 524}
540 525
@@ -548,8 +533,6 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
548 */ 533 */
549void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) 534void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
550{ 535{
551 ATH5K_TRACE(ah->ah_sc);
552
553 ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); 536 ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
554 ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); 537 ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
555} 538}
@@ -565,8 +548,6 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
565{ 548{
566 u32 val; 549 u32 val;
567 550
568 ATH5K_TRACE(ah->ah_sc);
569
570 val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF; 551 val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
571 552
572 /* 553 /*
@@ -586,7 +567,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
586{ 567{
587 u32 timer1, timer2, timer3; 568 u32 timer1, timer2, timer3;
588 569
589 ATH5K_TRACE(ah->ah_sc);
590 /* 570 /*
591 * Set the additional timers by mode 571 * Set the additional timers by mode
592 */ 572 */
@@ -674,7 +654,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
674 unsigned int i, type; 654 unsigned int i, type;
675 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET; 655 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
676 656
677 ATH5K_TRACE(ah->ah_sc);
678 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); 657 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
679 658
680 type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry)); 659 type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry));
@@ -749,8 +728,6 @@ int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
749 bool is_tkip; 728 bool is_tkip;
750 const u8 *key_ptr; 729 const u8 *key_ptr;
751 730
752 ATH5K_TRACE(ah->ah_sc);
753
754 is_tkip = (key->alg == ALG_TKIP); 731 is_tkip = (key->alg == ALG_TKIP);
755 732
756 /* 733 /*
@@ -836,7 +813,6 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
836{ 813{
837 u32 low_id, high_id; 814 u32 low_id, high_id;
838 815
839 ATH5K_TRACE(ah->ah_sc);
840 /* Invalid entry (key table overflow) */ 816 /* Invalid entry (key table overflow) */
841 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); 817 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
842 818
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 492cbb15720d..6284c389ba18 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -378,8 +378,6 @@ enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
378 u32 data, type; 378 u32 data, type;
379 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 379 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
380 380
381 ATH5K_TRACE(ah->ah_sc);
382
383 if (ah->ah_rf_banks == NULL || 381 if (ah->ah_rf_banks == NULL ||
384 ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE) 382 ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE)
385 return AR5K_RFGAIN_INACTIVE; 383 return AR5K_RFGAIN_INACTIVE;
@@ -1167,7 +1165,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1167 * The median of the values in the history is then loaded into the 1165 * The median of the values in the history is then loaded into the
1168 * hardware for its own use for RSSI and CCA measurements. 1166 * hardware for its own use for RSSI and CCA measurements.
1169 */ 1167 */
1170static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) 1168void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1171{ 1169{
1172 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1170 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1173 u32 val; 1171 u32 val;
@@ -1248,7 +1246,6 @@ static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1248/* 1246/*
1249 * Perform a PHY calibration on RF5110 1247 * Perform a PHY calibration on RF5110
1250 * -Fix BPSK/QAM Constellation (I/Q correction) 1248 * -Fix BPSK/QAM Constellation (I/Q correction)
1251 * -Calculate Noise Floor
1252 */ 1249 */
1253static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, 1250static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1254 struct ieee80211_channel *channel) 1251 struct ieee80211_channel *channel)
@@ -1335,8 +1332,6 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1335 return ret; 1332 return ret;
1336 } 1333 }
1337 1334
1338 ath5k_hw_update_noise_floor(ah);
1339
1340 /* 1335 /*
1341 * Re-enable RX/TX and beacons 1336 * Re-enable RX/TX and beacons
1342 */ 1337 */
@@ -1348,22 +1343,20 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1348} 1343}
1349 1344
1350/* 1345/*
1351 * Perform a PHY calibration on RF5111/5112 and newer chips 1346 * Perform I/Q calibration on RF5111/5112 and newer chips
1352 */ 1347 */
1353static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah, 1348static int
1354 struct ieee80211_channel *channel) 1349ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
1355{ 1350{
1356 u32 i_pwr, q_pwr; 1351 u32 i_pwr, q_pwr;
1357 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; 1352 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
1358 int i; 1353 int i;
1359 ATH5K_TRACE(ah->ah_sc);
1360 1354
1361 if (!ah->ah_calibration || 1355 if (!ah->ah_calibration ||
1362 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) 1356 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
1363 goto done; 1357 return 0;
1364 1358
1365 /* Calibration has finished, get the results and re-run */ 1359 /* Calibration has finished, get the results and re-run */
1366
1367 /* work around empty results which can apparently happen on 5212 */ 1360 /* work around empty results which can apparently happen on 5212 */
1368 for (i = 0; i <= 10; i++) { 1361 for (i = 0; i <= 10; i++) {
1369 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR); 1362 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
@@ -1384,7 +1377,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1384 1377
1385 /* protect against divide by 0 and loss of sign bits */ 1378 /* protect against divide by 0 and loss of sign bits */
1386 if (i_coffd == 0 || q_coffd < 2) 1379 if (i_coffd == 0 || q_coffd < 2)
1387 goto done; 1380 return -1;
1388 1381
1389 i_coff = (-iq_corr) / i_coffd; 1382 i_coff = (-iq_corr) / i_coffd;
1390 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ 1383 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1410,17 +1403,6 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1410 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); 1403 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
1411 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN); 1404 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
1412 1405
1413done:
1414
1415 /* TODO: Separate noise floor calibration from I/Q calibration
1416 * since noise floor calibration interrupts rx path while I/Q
1417 * calibration doesn't. We don't need to run noise floor calibration
1418 * as often as I/Q calibration.*/
1419 ath5k_hw_update_noise_floor(ah);
1420
1421 /* Initiate a gain_F calibration */
1422 ath5k_hw_request_rfgain_probe(ah);
1423
1424 return 0; 1406 return 0;
1425} 1407}
1426 1408
@@ -1434,8 +1416,10 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1434 1416
1435 if (ah->ah_radio == AR5K_RF5110) 1417 if (ah->ah_radio == AR5K_RF5110)
1436 ret = ath5k_hw_rf5110_calibrate(ah, channel); 1418 ret = ath5k_hw_rf5110_calibrate(ah, channel);
1437 else 1419 else {
1438 ret = ath5k_hw_rf511x_calibrate(ah, channel); 1420 ret = ath5k_hw_rf511x_iq_calibrate(ah);
1421 ath5k_hw_request_rfgain_probe(ah);
1422 }
1439 1423
1440 return ret; 1424 return ret;
1441} 1425}
@@ -1693,7 +1677,6 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1693 1677
1694int ath5k_hw_phy_disable(struct ath5k_hw *ah) 1678int ath5k_hw_phy_disable(struct ath5k_hw *ah)
1695{ 1679{
1696 ATH5K_TRACE(ah->ah_sc);
1697 /*Just a try M.F.*/ 1680 /*Just a try M.F.*/
1698 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); 1681 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
1699 1682
@@ -1709,8 +1692,6 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
1709 u32 srev; 1692 u32 srev;
1710 u16 ret; 1693 u16 ret;
1711 1694
1712 ATH5K_TRACE(ah->ah_sc);
1713
1714 /* 1695 /*
1715 * Set the radio chip access register 1696 * Set the radio chip access register
1716 */ 1697 */
@@ -1755,8 +1736,6 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
1755static void /*TODO:Boundary check*/ 1736static void /*TODO:Boundary check*/
1756ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) 1737ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
1757{ 1738{
1758 ATH5K_TRACE(ah->ah_sc);
1759
1760 if (ah->ah_version != AR5K_AR5210) 1739 if (ah->ah_version != AR5K_AR5210)
1761 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); 1740 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
1762} 1741}
@@ -1789,19 +1768,50 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
1789 1768
1790 if (enable) { 1769 if (enable) {
1791 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART, 1770 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
1792 AR5K_PHY_RESTART_DIV_GC, 0xc); 1771 AR5K_PHY_RESTART_DIV_GC, 4);
1793 1772
1794 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV, 1773 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
1795 AR5K_PHY_FAST_ANT_DIV_EN); 1774 AR5K_PHY_FAST_ANT_DIV_EN);
1796 } else { 1775 } else {
1797 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART, 1776 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
1798 AR5K_PHY_RESTART_DIV_GC, 0x8); 1777 AR5K_PHY_RESTART_DIV_GC, 0);
1799 1778
1800 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV, 1779 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
1801 AR5K_PHY_FAST_ANT_DIV_EN); 1780 AR5K_PHY_FAST_ANT_DIV_EN);
1802 } 1781 }
1803} 1782}
1804 1783
1784void
1785ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
1786{
1787 u8 ant0, ant1;
1788
1789 /*
1790 * In case a fixed antenna was set as default
1791 * use the same switch table twice.
1792 */
1793 if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
1794 ant0 = ant1 = AR5K_ANT_SWTABLE_A;
1795 else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
1796 ant0 = ant1 = AR5K_ANT_SWTABLE_B;
1797 else {
1798 ant0 = AR5K_ANT_SWTABLE_A;
1799 ant1 = AR5K_ANT_SWTABLE_B;
1800 }
1801
1802 /* Set antenna idle switch table */
1803 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
1804 AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
1805 (ah->ah_ant_ctl[ee_mode][AR5K_ANT_CTL] |
1806 AR5K_PHY_ANT_CTL_TXRX_EN));
1807
1808 /* Set antenna switch tables */
1809 ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant0],
1810 AR5K_PHY_ANT_SWITCH_TABLE_0);
1811 ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant1],
1812 AR5K_PHY_ANT_SWITCH_TABLE_1);
1813}
1814
1805/* 1815/*
1806 * Set antenna operating mode 1816 * Set antenna operating mode
1807 */ 1817 */
@@ -1823,8 +1833,6 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
1823 1833
1824 def_ant = ah->ah_def_ant; 1834 def_ant = ah->ah_def_ant;
1825 1835
1826 ATH5K_TRACE(ah->ah_sc);
1827
1828 switch (channel->hw_value & CHANNEL_MODES) { 1836 switch (channel->hw_value & CHANNEL_MODES) {
1829 case CHANNEL_A: 1837 case CHANNEL_A:
1830 case CHANNEL_T: 1838 case CHANNEL_T:
@@ -1923,6 +1931,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
1923 if (sta_id1) 1931 if (sta_id1)
1924 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1); 1932 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1);
1925 1933
1934 ath5k_hw_set_antenna_switch(ah, ee_mode);
1926 /* Note: set diversity before default antenna 1935 /* Note: set diversity before default antenna
1927 * because it won't work correctly */ 1936 * because it won't work correctly */
1928 ath5k_hw_set_fast_div(ah, ee_mode, fast_div); 1937 ath5k_hw_set_fast_div(ah, ee_mode, fast_div);
@@ -2988,7 +2997,6 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
2988 u8 type; 2997 u8 type;
2989 int ret; 2998 int ret;
2990 2999
2991 ATH5K_TRACE(ah->ah_sc);
2992 if (txpower > AR5K_TUNE_MAX_TXPOWER) { 3000 if (txpower > AR5K_TUNE_MAX_TXPOWER) {
2993 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); 3001 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
2994 return -EINVAL; 3002 return -EINVAL;
@@ -3084,8 +3092,6 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
3084 struct ieee80211_channel *channel = ah->ah_current_channel; 3092 struct ieee80211_channel *channel = ah->ah_current_channel;
3085 u8 ee_mode; 3093 u8 ee_mode;
3086 3094
3087 ATH5K_TRACE(ah->ah_sc);
3088
3089 switch (channel->hw_value & CHANNEL_MODES) { 3095 switch (channel->hw_value & CHANNEL_MODES) {
3090 case CHANNEL_A: 3096 case CHANNEL_A:
3091 case CHANNEL_T: 3097 case CHANNEL_T:
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index f5831da33f7b..4186ff4c6e9c 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -31,7 +31,6 @@ Queue Control Unit, DFS Control Unit Functions
31int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, 31int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
32 struct ath5k_txq_info *queue_info) 32 struct ath5k_txq_info *queue_info)
33{ 33{
34 ATH5K_TRACE(ah->ah_sc);
35 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); 34 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
36 return 0; 35 return 0;
37} 36}
@@ -42,7 +41,6 @@ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
42int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, 41int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
43 const struct ath5k_txq_info *queue_info) 42 const struct ath5k_txq_info *queue_info)
44{ 43{
45 ATH5K_TRACE(ah->ah_sc);
46 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 44 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
47 45
48 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) 46 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
@@ -69,8 +67,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
69 unsigned int queue; 67 unsigned int queue;
70 int ret; 68 int ret;
71 69
72 ATH5K_TRACE(ah->ah_sc);
73
74 /* 70 /*
75 * Get queue by type 71 * Get queue by type
76 */ 72 */
@@ -149,7 +145,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
149u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) 145u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
150{ 146{
151 u32 pending; 147 u32 pending;
152 ATH5K_TRACE(ah->ah_sc);
153 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 148 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
154 149
155 /* Return if queue is declared inactive */ 150 /* Return if queue is declared inactive */
@@ -177,7 +172,6 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
177 */ 172 */
178void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) 173void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
179{ 174{
180 ATH5K_TRACE(ah->ah_sc);
181 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) 175 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
182 return; 176 return;
183 177
@@ -195,7 +189,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
195 u32 cw_min, cw_max, retry_lg, retry_sh; 189 u32 cw_min, cw_max, retry_lg, retry_sh;
196 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; 190 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
197 191
198 ATH5K_TRACE(ah->ah_sc);
199 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 192 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
200 193
201 tq = &ah->ah_txq[queue]; 194 tq = &ah->ah_txq[queue];
@@ -523,8 +516,6 @@ int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
523{ 516{
524 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); 517 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
525 518
526 ATH5K_TRACE(ah->ah_sc);
527
528 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) 519 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
529 return -EINVAL; 520 return -EINVAL;
530 521
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 307f80e83f94..498aa28ea9e6 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -201,8 +201,6 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
201 int ret; 201 int ret;
202 u32 mask = val ? val : ~0U; 202 u32 mask = val ? val : ~0U;
203 203
204 ATH5K_TRACE(ah->ah_sc);
205
206 /* Read-and-clear RX Descriptor Pointer*/ 204 /* Read-and-clear RX Descriptor Pointer*/
207 ath5k_hw_reg_read(ah, AR5K_RXDP); 205 ath5k_hw_reg_read(ah, AR5K_RXDP);
208 206
@@ -246,7 +244,6 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
246 unsigned int i; 244 unsigned int i;
247 u32 staid, data; 245 u32 staid, data;
248 246
249 ATH5K_TRACE(ah->ah_sc);
250 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1); 247 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
251 248
252 switch (mode) { 249 switch (mode) {
@@ -393,8 +390,6 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
393 mode = 0; 390 mode = 0;
394 clock = 0; 391 clock = 0;
395 392
396 ATH5K_TRACE(ah->ah_sc);
397
398 /* Wakeup the device */ 393 /* Wakeup the device */
399 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); 394 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
400 if (ret) { 395 if (ret) {
@@ -734,7 +729,7 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
734} 729}
735 730
736static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, 731static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
737 struct ieee80211_channel *channel, u8 *ant, u8 ee_mode) 732 struct ieee80211_channel *channel, u8 ee_mode)
738{ 733{
739 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 734 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
740 s16 cck_ofdm_pwr_delta; 735 s16 cck_ofdm_pwr_delta;
@@ -768,17 +763,9 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
768 ee->ee_cck_ofdm_gain_delta; 763 ee->ee_cck_ofdm_gain_delta;
769 } 764 }
770 765
771 /* Set antenna idle switch table */ 766 /* XXX: necessary here? is called from ath5k_hw_set_antenna_mode()
772 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL, 767 * too */
773 AR5K_PHY_ANT_CTL_SWTABLE_IDLE, 768 ath5k_hw_set_antenna_switch(ah, ee_mode);
774 (ah->ah_ant_ctl[ee_mode][0] |
775 AR5K_PHY_ANT_CTL_TXRX_EN));
776
777 /* Set antenna switch tables */
778 ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[0]],
779 AR5K_PHY_ANT_SWITCH_TABLE_0);
780 ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[1]],
781 AR5K_PHY_ANT_SWITCH_TABLE_1);
782 769
783 /* Noise floor threshold */ 770 /* Noise floor threshold */
784 ath5k_hw_reg_write(ah, 771 ath5k_hw_reg_write(ah,
@@ -855,7 +842,6 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
855 AR5K_PHY_NF_THRESH62, 842 AR5K_PHY_NF_THRESH62,
856 ee->ee_thr_62[ee_mode]); 843 ee->ee_thr_62[ee_mode]);
857 844
858
859 /* False detect backoff for channels 845 /* False detect backoff for channels
860 * that have spur noise. Write the new 846 * that have spur noise. Write the new
861 * cyclic power RSSI threshold. */ 847 * cyclic power RSSI threshold. */
@@ -891,14 +877,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
891 struct ieee80211_channel *channel, bool change_channel) 877 struct ieee80211_channel *channel, bool change_channel)
892{ 878{
893 struct ath_common *common = ath5k_hw_common(ah); 879 struct ath_common *common = ath5k_hw_common(ah);
894 u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo; 880 u32 s_seq[10], s_led[3], staid1_flags, tsf_up, tsf_lo;
895 u32 phy_tst1; 881 u32 phy_tst1;
896 u8 mode, freq, ee_mode, ant[2]; 882 u8 mode, freq, ee_mode;
897 int i, ret; 883 int i, ret;
898 884
899 ATH5K_TRACE(ah->ah_sc);
900
901 s_ant = 0;
902 ee_mode = 0; 885 ee_mode = 0;
903 staid1_flags = 0; 886 staid1_flags = 0;
904 tsf_up = 0; 887 tsf_up = 0;
@@ -995,9 +978,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
995 } 978 }
996 } 979 }
997 980
998 /* Save default antenna */
999 s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
1000
1001 if (ah->ah_version == AR5K_AR5212) { 981 if (ah->ah_version == AR5K_AR5212) {
1002 /* Restore normal 32/40MHz clock operation 982 /* Restore normal 32/40MHz clock operation
1003 * to avoid register access delay on certain 983 * to avoid register access delay on certain
@@ -1094,22 +1074,17 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1094 /* Write OFDM timings on 5212*/ 1074 /* Write OFDM timings on 5212*/
1095 if (ah->ah_version == AR5K_AR5212 && 1075 if (ah->ah_version == AR5K_AR5212 &&
1096 channel->hw_value & CHANNEL_OFDM) { 1076 channel->hw_value & CHANNEL_OFDM) {
1097 struct ath5k_eeprom_info *ee =
1098 &ah->ah_capabilities.cap_eeprom;
1099 1077
1100 ret = ath5k_hw_write_ofdm_timings(ah, channel); 1078 ret = ath5k_hw_write_ofdm_timings(ah, channel);
1101 if (ret) 1079 if (ret)
1102 return ret; 1080 return ret;
1103 1081
1104 /* Note: According to docs we can have a newer 1082 /* Spur info is available only from EEPROM versions
1105 * EEPROM on old hardware, so we need to verify 1083 * bigger than 5.3 but but the EEPOM routines will use
1106 * that our hardware is new enough to have spur 1084 * static values for older versions */
1107 * mitigation registers (delta phase etc) */ 1085 if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
1108 if (ah->ah_mac_srev >= AR5K_SREV_AR5424 ||
1109 (ah->ah_mac_srev >= AR5K_SREV_AR5424 &&
1110 ee->ee_version >= AR5K_EEPROM_VERSION_5_3))
1111 ath5k_hw_set_spur_mitigation_filter(ah, 1086 ath5k_hw_set_spur_mitigation_filter(ah,
1112 channel); 1087 channel);
1113 } 1088 }
1114 1089
1115 /*Enable/disable 802.11b mode on 5111 1090 /*Enable/disable 802.11b mode on 5111
@@ -1123,21 +1098,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1123 AR5K_TXCFG_B_MODE); 1098 AR5K_TXCFG_B_MODE);
1124 } 1099 }
1125 1100
1126 /*
1127 * In case a fixed antenna was set as default
1128 * use the same switch table twice.
1129 */
1130 if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
1131 ant[0] = ant[1] = AR5K_ANT_SWTABLE_A;
1132 else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
1133 ant[0] = ant[1] = AR5K_ANT_SWTABLE_B;
1134 else {
1135 ant[0] = AR5K_ANT_SWTABLE_A;
1136 ant[1] = AR5K_ANT_SWTABLE_B;
1137 }
1138
1139 /* Commit values from EEPROM */ 1101 /* Commit values from EEPROM */
1140 ath5k_hw_commit_eeprom_settings(ah, channel, ant, ee_mode); 1102 ath5k_hw_commit_eeprom_settings(ah, channel, ee_mode);
1141 1103
1142 } else { 1104 } else {
1143 /* 1105 /*
@@ -1175,8 +1137,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1175 ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32); 1137 ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
1176 } 1138 }
1177 } 1139 }
1178
1179 ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
1180 } 1140 }
1181 1141
1182 /* Ledstate */ 1142 /* Ledstate */
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
new file mode 100644
index 000000000000..90757de7bf59
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -0,0 +1,116 @@
1#include <linux/device.h>
2#include <linux/pci.h>
3
4#include "base.h"
5#include "ath5k.h"
6#include "reg.h"
7
8#define SIMPLE_SHOW_STORE(name, get, set) \
9static ssize_t ath5k_attr_show_##name(struct device *dev, \
10 struct device_attribute *attr, \
11 char *buf) \
12{ \
13 struct ath5k_softc *sc = dev_get_drvdata(dev); \
14 return snprintf(buf, PAGE_SIZE, "%d\n", get); \
15} \
16 \
17static ssize_t ath5k_attr_store_##name(struct device *dev, \
18 struct device_attribute *attr, \
19 const char *buf, size_t count) \
20{ \
21 struct ath5k_softc *sc = dev_get_drvdata(dev); \
22 int val; \
23 \
24 val = (int)simple_strtoul(buf, NULL, 10); \
25 set(sc->ah, val); \
26 return count; \
27} \
28static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
29 ath5k_attr_show_##name, ath5k_attr_store_##name)
30
31#define SIMPLE_SHOW(name, get) \
32static ssize_t ath5k_attr_show_##name(struct device *dev, \
33 struct device_attribute *attr, \
34 char *buf) \
35{ \
36 struct ath5k_softc *sc = dev_get_drvdata(dev); \
37 return snprintf(buf, PAGE_SIZE, "%d\n", get); \
38} \
39static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
40
41/*** ANI ***/
42
43SIMPLE_SHOW_STORE(ani_mode, sc->ani_state.ani_mode, ath5k_ani_init);
44SIMPLE_SHOW_STORE(noise_immunity_level, sc->ani_state.noise_imm_level,
45 ath5k_ani_set_noise_immunity_level);
46SIMPLE_SHOW_STORE(spur_level, sc->ani_state.spur_level,
47 ath5k_ani_set_spur_immunity_level);
48SIMPLE_SHOW_STORE(firstep_level, sc->ani_state.firstep_level,
49 ath5k_ani_set_firstep_level);
50SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, sc->ani_state.ofdm_weak_sig,
51 ath5k_ani_set_ofdm_weak_signal_detection);
52SIMPLE_SHOW_STORE(cck_weak_signal_detection, sc->ani_state.cck_weak_sig,
53 ath5k_ani_set_cck_weak_signal_detection);
54SIMPLE_SHOW(spur_level_max, sc->ani_state.max_spur_level);
55
56static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
57 struct device_attribute *attr,
58 char *buf)
59{
60 return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
61}
62static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO,
63 ath5k_attr_show_noise_immunity_level_max, NULL);
64
65static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev,
66 struct device_attribute *attr,
67 char *buf)
68{
69 return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
70}
71static DEVICE_ATTR(firstep_level_max, S_IRUGO,
72 ath5k_attr_show_firstep_level_max, NULL);
73
74static struct attribute *ath5k_sysfs_entries_ani[] = {
75 &dev_attr_ani_mode.attr,
76 &dev_attr_noise_immunity_level.attr,
77 &dev_attr_spur_level.attr,
78 &dev_attr_firstep_level.attr,
79 &dev_attr_ofdm_weak_signal_detection.attr,
80 &dev_attr_cck_weak_signal_detection.attr,
81 &dev_attr_noise_immunity_level_max.attr,
82 &dev_attr_spur_level_max.attr,
83 &dev_attr_firstep_level_max.attr,
84 NULL
85};
86
87static struct attribute_group ath5k_attribute_group_ani = {
88 .name = "ani",
89 .attrs = ath5k_sysfs_entries_ani,
90};
91
92
93/*** register / unregister ***/
94
95int
96ath5k_sysfs_register(struct ath5k_softc *sc)
97{
98 struct device *dev = &sc->pdev->dev;
99 int err;
100
101 err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
102 if (err) {
103 ATH5K_ERR(sc, "failed to create sysfs group\n");
104 return err;
105 }
106
107 return 0;
108}
109
110void
111ath5k_sysfs_unregister(struct ath5k_softc *sc)
112{
113 struct device *dev = &sc->pdev->dev;
114
115 sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
116}
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index dd112be218ab..973ae4f49f35 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -32,7 +32,8 @@ ath9k_hw-y:= \
32 mac.o \ 32 mac.o \
33 ar9002_mac.o \ 33 ar9002_mac.o \
34 ar9003_mac.o \ 34 ar9003_mac.o \
35 ar9003_eeprom.o 35 ar9003_eeprom.o \
36 ar9003_paprd.o
36 37
37obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o 38obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
38 39
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 85fdd26039c8..1a984b02e9e5 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -131,11 +131,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
131 131
132 ah = sc->sc_ah; 132 ah = sc->sc_ah;
133 ath9k_hw_name(ah, hw_name, sizeof(hw_name)); 133 ath9k_hw_name(ah, hw_name, sizeof(hw_name));
134 printk(KERN_INFO 134 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
135 "%s: %s mem=0x%lx, irq=%d\n", 135 hw_name, (unsigned long)mem, irq);
136 wiphy_name(hw->wiphy),
137 hw_name,
138 (unsigned long)mem, irq);
139 136
140 return 0; 137 return 0;
141 138
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ba8b20f01594..cc648b6ae31c 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +17,99 @@
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h" 18#include "hw-ops.h"
19 19
20static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, 20struct ani_ofdm_level_entry {
21 struct ath9k_channel *chan) 21 int spur_immunity_level;
22 int fir_step_level;
23 int ofdm_weak_signal_on;
24};
25
26/* values here are relative to the INI */
27
28/*
29 * Legend:
30 *
31 * SI: Spur immunity
32 * FS: FIR Step
33 * WS: OFDM / CCK Weak Signal detection
34 * MRC-CCK: Maximal Ratio Combining for CCK
35 */
36
37static const struct ani_ofdm_level_entry ofdm_level_table[] = {
38 /* SI FS WS */
39 { 0, 0, 1 }, /* lvl 0 */
40 { 1, 1, 1 }, /* lvl 1 */
41 { 2, 2, 1 }, /* lvl 2 */
42 { 3, 2, 1 }, /* lvl 3 (default) */
43 { 4, 3, 1 }, /* lvl 4 */
44 { 5, 4, 1 }, /* lvl 5 */
45 { 6, 5, 1 }, /* lvl 6 */
46 { 7, 6, 1 }, /* lvl 7 */
47 { 7, 7, 1 }, /* lvl 8 */
48 { 7, 8, 0 } /* lvl 9 */
49};
50#define ATH9K_ANI_OFDM_NUM_LEVEL \
51 (sizeof(ofdm_level_table)/sizeof(ofdm_level_table[0]))
52#define ATH9K_ANI_OFDM_MAX_LEVEL \
53 (ATH9K_ANI_OFDM_NUM_LEVEL-1)
54#define ATH9K_ANI_OFDM_DEF_LEVEL \
55 3 /* default level - matches the INI settings */
56
57/*
58 * MRC (Maximal Ratio Combining) has always been used with multi-antenna ofdm.
59 * With OFDM for single stream you just add up all antenna inputs, you're
60 * only interested in what you get after FFT. Signal aligment is also not
61 * required for OFDM because any phase difference adds up in the frequency
62 * domain.
63 *
64 * MRC requires extra work for use with CCK. You need to align the antenna
65 * signals from the different antenna before you can add the signals together.
66 * You need aligment of signals as CCK is in time domain, so addition can cancel
67 * your signal completely if phase is 180 degrees (think of adding sine waves).
68 * You also need to remove noise before the addition and this is where ANI
69 * MRC CCK comes into play. One of the antenna inputs may be stronger but
70 * lower SNR, so just adding after alignment can be dangerous.
71 *
72 * Regardless of alignment in time, the antenna signals add constructively after
73 * FFT and improve your reception. For more information:
74 *
75 * http://en.wikipedia.org/wiki/Maximal-ratio_combining
76 */
77
78struct ani_cck_level_entry {
79 int fir_step_level;
80 int mrc_cck_on;
81};
82
83static const struct ani_cck_level_entry cck_level_table[] = {
84 /* FS MRC-CCK */
85 { 0, 1 }, /* lvl 0 */
86 { 1, 1 }, /* lvl 1 */
87 { 2, 1 }, /* lvl 2 (default) */
88 { 3, 1 }, /* lvl 3 */
89 { 4, 0 }, /* lvl 4 */
90 { 5, 0 }, /* lvl 5 */
91 { 6, 0 }, /* lvl 6 */
92 { 7, 0 }, /* lvl 7 (only for high rssi) */
93 { 8, 0 } /* lvl 8 (only for high rssi) */
94};
95
96#define ATH9K_ANI_CCK_NUM_LEVEL \
97 (sizeof(cck_level_table)/sizeof(cck_level_table[0]))
98#define ATH9K_ANI_CCK_MAX_LEVEL \
99 (ATH9K_ANI_CCK_NUM_LEVEL-1)
100#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \
101 (ATH9K_ANI_CCK_NUM_LEVEL-3)
102#define ATH9K_ANI_CCK_DEF_LEVEL \
103 2 /* default level - matches the INI settings */
104
105/* Private to ani.c */
106static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
107{
108 ath9k_hw_private_ops(ah)->ani_lower_immunity(ah);
109}
110
111int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
112 struct ath9k_channel *chan)
22{ 113{
23 int i; 114 int i;
24 115
@@ -48,7 +139,7 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
48 stats->beacons += REG_READ(ah, AR_BEACON_CNT); 139 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
49} 140}
50 141
51static void ath9k_ani_restart(struct ath_hw *ah) 142static void ath9k_ani_restart_old(struct ath_hw *ah)
52{ 143{
53 struct ar5416AniState *aniState; 144 struct ar5416AniState *aniState;
54 struct ath_common *common = ath9k_hw_common(ah); 145 struct ath_common *common = ath9k_hw_common(ah);
@@ -96,7 +187,42 @@ static void ath9k_ani_restart(struct ath_hw *ah)
96 aniState->cckPhyErrCount = 0; 187 aniState->cckPhyErrCount = 0;
97} 188}
98 189
99static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) 190static void ath9k_ani_restart_new(struct ath_hw *ah)
191{
192 struct ar5416AniState *aniState;
193 struct ath_common *common = ath9k_hw_common(ah);
194
195 if (!DO_ANI(ah))
196 return;
197
198 aniState = ah->curani;
199 aniState->listenTime = 0;
200
201 aniState->ofdmPhyErrBase = 0;
202 aniState->cckPhyErrBase = 0;
203
204 ath_print(common, ATH_DBG_ANI,
205 "Writing ofdmbase=%08x cckbase=%08x\n",
206 aniState->ofdmPhyErrBase,
207 aniState->cckPhyErrBase);
208
209 ENABLE_REGWRITE_BUFFER(ah);
210
211 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
212 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
213 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
214 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
215
216 REGWRITE_BUFFER_FLUSH(ah);
217 DISABLE_REGWRITE_BUFFER(ah);
218
219 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
220
221 aniState->ofdmPhyErrCount = 0;
222 aniState->cckPhyErrCount = 0;
223}
224
225static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
100{ 226{
101 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 227 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
102 struct ar5416AniState *aniState; 228 struct ar5416AniState *aniState;
@@ -168,7 +294,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
168 } 294 }
169} 295}
170 296
171static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) 297static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
172{ 298{
173 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 299 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
174 struct ar5416AniState *aniState; 300 struct ar5416AniState *aniState;
@@ -206,7 +332,125 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
206 } 332 }
207} 333}
208 334
209static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) 335/* Adjust the OFDM Noise Immunity Level */
336static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
337{
338 struct ar5416AniState *aniState = ah->curani;
339 struct ath_common *common = ath9k_hw_common(ah);
340 const struct ani_ofdm_level_entry *entry_ofdm;
341 const struct ani_cck_level_entry *entry_cck;
342
343 aniState->noiseFloor = BEACON_RSSI(ah);
344
345 ath_print(common, ATH_DBG_ANI,
346 "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
347 aniState->ofdmNoiseImmunityLevel,
348 immunityLevel, aniState->noiseFloor,
349 aniState->rssiThrLow, aniState->rssiThrHigh);
350
351 aniState->ofdmNoiseImmunityLevel = immunityLevel;
352
353 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
354 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
355
356 if (aniState->spurImmunityLevel != entry_ofdm->spur_immunity_level)
357 ath9k_hw_ani_control(ah,
358 ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
359 entry_ofdm->spur_immunity_level);
360
361 if (aniState->firstepLevel != entry_ofdm->fir_step_level &&
362 entry_ofdm->fir_step_level >= entry_cck->fir_step_level)
363 ath9k_hw_ani_control(ah,
364 ATH9K_ANI_FIRSTEP_LEVEL,
365 entry_ofdm->fir_step_level);
366
367 if ((ah->opmode != NL80211_IFTYPE_STATION &&
368 ah->opmode != NL80211_IFTYPE_ADHOC) ||
369 aniState->noiseFloor <= aniState->rssiThrHigh) {
370 if (aniState->ofdmWeakSigDetectOff)
371 /* force on ofdm weak sig detect */
372 ath9k_hw_ani_control(ah,
373 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
374 true);
375 else if (aniState->ofdmWeakSigDetectOff ==
376 entry_ofdm->ofdm_weak_signal_on)
377 ath9k_hw_ani_control(ah,
378 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
379 entry_ofdm->ofdm_weak_signal_on);
380 }
381}
382
383static void ath9k_hw_ani_ofdm_err_trigger_new(struct ath_hw *ah)
384{
385 struct ar5416AniState *aniState;
386
387 if (!DO_ANI(ah))
388 return;
389
390 aniState = ah->curani;
391
392 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
393 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1);
394}
395
396/*
397 * Set the ANI settings to match an CCK level.
398 */
399static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
400{
401 struct ar5416AniState *aniState = ah->curani;
402 struct ath_common *common = ath9k_hw_common(ah);
403 const struct ani_ofdm_level_entry *entry_ofdm;
404 const struct ani_cck_level_entry *entry_cck;
405
406 aniState->noiseFloor = BEACON_RSSI(ah);
407 ath_print(common, ATH_DBG_ANI,
408 "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
409 aniState->cckNoiseImmunityLevel, immunityLevel,
410 aniState->noiseFloor, aniState->rssiThrLow,
411 aniState->rssiThrHigh);
412
413 if ((ah->opmode == NL80211_IFTYPE_STATION ||
414 ah->opmode == NL80211_IFTYPE_ADHOC) &&
415 aniState->noiseFloor <= aniState->rssiThrLow &&
416 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
417 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
418
419 aniState->cckNoiseImmunityLevel = immunityLevel;
420
421 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
422 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
423
424 if (aniState->firstepLevel != entry_cck->fir_step_level &&
425 entry_cck->fir_step_level >= entry_ofdm->fir_step_level)
426 ath9k_hw_ani_control(ah,
427 ATH9K_ANI_FIRSTEP_LEVEL,
428 entry_cck->fir_step_level);
429
430 /* Skip MRC CCK for pre AR9003 families */
431 if (!AR_SREV_9300_20_OR_LATER(ah))
432 return;
433
434 if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
435 ath9k_hw_ani_control(ah,
436 ATH9K_ANI_MRC_CCK,
437 entry_cck->mrc_cck_on);
438}
439
440static void ath9k_hw_ani_cck_err_trigger_new(struct ath_hw *ah)
441{
442 struct ar5416AniState *aniState;
443
444 if (!DO_ANI(ah))
445 return;
446
447 aniState = ah->curani;
448
449 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
450 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1);
451}
452
453static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
210{ 454{
211 struct ar5416AniState *aniState; 455 struct ar5416AniState *aniState;
212 int32_t rssi; 456 int32_t rssi;
@@ -259,9 +503,53 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
259 } 503 }
260} 504}
261 505
506/*
507 * only lower either OFDM or CCK errors per turn
508 * we lower the other one next time
509 */
510static void ath9k_hw_ani_lower_immunity_new(struct ath_hw *ah)
511{
512 struct ar5416AniState *aniState;
513
514 aniState = ah->curani;
515
516 /* lower OFDM noise immunity */
517 if (aniState->ofdmNoiseImmunityLevel > 0 &&
518 (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
519 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1);
520 return;
521 }
522
523 /* lower CCK noise immunity */
524 if (aniState->cckNoiseImmunityLevel > 0)
525 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1);
526}
527
528static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
529{
530 struct ath9k_channel *chan = ah->curchan;
531 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
532 u8 clockrate; /* in MHz */
533
534 if (!ah->curchan) /* should really check for CCK instead */
535 clockrate = ATH9K_CLOCK_RATE_CCK;
536 else if (conf->channel->band == IEEE80211_BAND_2GHZ)
537 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
538 else if (IS_CHAN_A_FAST_CLOCK(ah, chan))
539 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
540 else
541 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
542
543 if (conf_is_ht40(conf))
544 return clockrate * 2;
545
546 return clockrate * 2;
547}
548
262static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) 549static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
263{ 550{
264 struct ar5416AniState *aniState; 551 struct ar5416AniState *aniState;
552 struct ath_common *common = ath9k_hw_common(ah);
265 u32 txFrameCount, rxFrameCount, cycleCount; 553 u32 txFrameCount, rxFrameCount, cycleCount;
266 int32_t listenTime; 554 int32_t listenTime;
267 555
@@ -271,15 +559,31 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
271 559
272 aniState = ah->curani; 560 aniState = ah->curani;
273 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) { 561 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
274
275 listenTime = 0; 562 listenTime = 0;
276 ah->stats.ast_ani_lzero++; 563 ah->stats.ast_ani_lzero++;
564 ath_print(common, ATH_DBG_ANI,
565 "1st call: aniState->cycleCount=%d\n",
566 aniState->cycleCount);
277 } else { 567 } else {
278 int32_t ccdelta = cycleCount - aniState->cycleCount; 568 int32_t ccdelta = cycleCount - aniState->cycleCount;
279 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount; 569 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
280 int32_t tfdelta = txFrameCount - aniState->txFrameCount; 570 int32_t tfdelta = txFrameCount - aniState->txFrameCount;
281 listenTime = (ccdelta - rfdelta - tfdelta) / 44000; 571 int32_t clock_rate;
572
573 /*
574 * convert HW counter values to ms using mode
575 * specifix clock rate
576 */
577 clock_rate = ath9k_hw_chan_2_clockrate_mhz(ah) * 1000;;
578
579 listenTime = (ccdelta - rfdelta - tfdelta) / clock_rate;
580
581 ath_print(common, ATH_DBG_ANI,
582 "cyclecount=%d, rfcount=%d, "
583 "tfcount=%d, listenTime=%d CLOCK_RATE=%d\n",
584 ccdelta, rfdelta, tfdelta, listenTime, clock_rate);
282 } 585 }
586
283 aniState->cycleCount = cycleCount; 587 aniState->cycleCount = cycleCount;
284 aniState->txFrameCount = txFrameCount; 588 aniState->txFrameCount = txFrameCount;
285 aniState->rxFrameCount = rxFrameCount; 589 aniState->rxFrameCount = rxFrameCount;
@@ -287,7 +591,7 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
287 return listenTime; 591 return listenTime;
288} 592}
289 593
290void ath9k_ani_reset(struct ath_hw *ah) 594static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
291{ 595{
292 struct ar5416AniState *aniState; 596 struct ar5416AniState *aniState;
293 struct ath9k_channel *chan = ah->curchan; 597 struct ath9k_channel *chan = ah->curchan;
@@ -340,7 +644,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
340 ah->curani->cckTrigLow = 644 ah->curani->cckTrigLow =
341 ah->config.cck_trig_low; 645 ah->config.cck_trig_low;
342 } 646 }
343 ath9k_ani_restart(ah); 647 ath9k_ani_restart_old(ah);
344 return; 648 return;
345 } 649 }
346 650
@@ -362,7 +666,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
362 666
363 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) & 667 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
364 ~ATH9K_RX_FILTER_PHYERR); 668 ~ATH9K_RX_FILTER_PHYERR);
365 ath9k_ani_restart(ah); 669 ath9k_ani_restart_old(ah);
366 670
367 ENABLE_REGWRITE_BUFFER(ah); 671 ENABLE_REGWRITE_BUFFER(ah);
368 672
@@ -373,8 +677,102 @@ void ath9k_ani_reset(struct ath_hw *ah)
373 DISABLE_REGWRITE_BUFFER(ah); 677 DISABLE_REGWRITE_BUFFER(ah);
374} 678}
375 679
376void ath9k_hw_ani_monitor(struct ath_hw *ah, 680/*
377 struct ath9k_channel *chan) 681 * Restore the ANI parameters in the HAL and reset the statistics.
682 * This routine should be called for every hardware reset and for
683 * every channel change.
684 */
685static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning)
686{
687 struct ar5416AniState *aniState = ah->curani;
688 struct ath9k_channel *chan = ah->curchan;
689 struct ath_common *common = ath9k_hw_common(ah);
690
691 if (!DO_ANI(ah))
692 return;
693
694 BUG_ON(aniState == NULL);
695 ah->stats.ast_ani_reset++;
696
697 /* only allow a subset of functions in AP mode */
698 if (ah->opmode == NL80211_IFTYPE_AP) {
699 if (IS_CHAN_2GHZ(chan)) {
700 ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
701 ATH9K_ANI_FIRSTEP_LEVEL);
702 if (AR_SREV_9300_20_OR_LATER(ah))
703 ah->ani_function |= ATH9K_ANI_MRC_CCK;
704 } else
705 ah->ani_function = 0;
706 }
707
708 /* always allow mode (on/off) to be controlled */
709 ah->ani_function |= ATH9K_ANI_MODE;
710
711 if (is_scanning ||
712 (ah->opmode != NL80211_IFTYPE_STATION &&
713 ah->opmode != NL80211_IFTYPE_ADHOC)) {
714 /*
715 * If we're scanning or in AP mode, the defaults (ini)
716 * should be in place. For an AP we assume the historical
717 * levels for this channel are probably outdated so start
718 * from defaults instead.
719 */
720 if (aniState->ofdmNoiseImmunityLevel !=
721 ATH9K_ANI_OFDM_DEF_LEVEL ||
722 aniState->cckNoiseImmunityLevel !=
723 ATH9K_ANI_CCK_DEF_LEVEL) {
724 ath_print(common, ATH_DBG_ANI,
725 "Restore defaults: opmode %u "
726 "chan %d Mhz/0x%x is_scanning=%d "
727 "ofdm:%d cck:%d\n",
728 ah->opmode,
729 chan->channel,
730 chan->channelFlags,
731 is_scanning,
732 aniState->ofdmNoiseImmunityLevel,
733 aniState->cckNoiseImmunityLevel);
734
735 ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
736 ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
737 }
738 } else {
739 /*
740 * restore historical levels for this channel
741 */
742 ath_print(common, ATH_DBG_ANI,
743 "Restore history: opmode %u "
744 "chan %d Mhz/0x%x is_scanning=%d "
745 "ofdm:%d cck:%d\n",
746 ah->opmode,
747 chan->channel,
748 chan->channelFlags,
749 is_scanning,
750 aniState->ofdmNoiseImmunityLevel,
751 aniState->cckNoiseImmunityLevel);
752
753 ath9k_hw_set_ofdm_nil(ah,
754 aniState->ofdmNoiseImmunityLevel);
755 ath9k_hw_set_cck_nil(ah,
756 aniState->cckNoiseImmunityLevel);
757 }
758
759 /*
760 * enable phy counters if hw supports or if not, enable phy
761 * interrupts (so we can count each one)
762 */
763 ath9k_ani_restart_new(ah);
764
765 ENABLE_REGWRITE_BUFFER(ah);
766
767 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
768 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
769
770 REGWRITE_BUFFER_FLUSH(ah);
771 DISABLE_REGWRITE_BUFFER(ah);
772}
773
774static void ath9k_hw_ani_monitor_old(struct ath_hw *ah,
775 struct ath9k_channel *chan)
378{ 776{
379 struct ar5416AniState *aniState; 777 struct ar5416AniState *aniState;
380 struct ath_common *common = ath9k_hw_common(ah); 778 struct ath_common *common = ath9k_hw_common(ah);
@@ -390,7 +788,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
390 listenTime = ath9k_hw_ani_get_listen_time(ah); 788 listenTime = ath9k_hw_ani_get_listen_time(ah);
391 if (listenTime < 0) { 789 if (listenTime < 0) {
392 ah->stats.ast_ani_lneg++; 790 ah->stats.ast_ani_lneg++;
393 ath9k_ani_restart(ah); 791 ath9k_ani_restart_old(ah);
394 return; 792 return;
395 } 793 }
396 794
@@ -444,21 +842,166 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
444 aniState->cckPhyErrCount <= aniState->listenTime * 842 aniState->cckPhyErrCount <= aniState->listenTime *
445 aniState->cckTrigLow / 1000) 843 aniState->cckTrigLow / 1000)
446 ath9k_hw_ani_lower_immunity(ah); 844 ath9k_hw_ani_lower_immunity(ah);
447 ath9k_ani_restart(ah); 845 ath9k_ani_restart_old(ah);
448 } else if (aniState->listenTime > ah->aniperiod) { 846 } else if (aniState->listenTime > ah->aniperiod) {
449 if (aniState->ofdmPhyErrCount > aniState->listenTime * 847 if (aniState->ofdmPhyErrCount > aniState->listenTime *
450 aniState->ofdmTrigHigh / 1000) { 848 aniState->ofdmTrigHigh / 1000) {
451 ath9k_hw_ani_ofdm_err_trigger(ah); 849 ath9k_hw_ani_ofdm_err_trigger_old(ah);
452 ath9k_ani_restart(ah); 850 ath9k_ani_restart_old(ah);
453 } else if (aniState->cckPhyErrCount > 851 } else if (aniState->cckPhyErrCount >
454 aniState->listenTime * aniState->cckTrigHigh / 852 aniState->listenTime * aniState->cckTrigHigh /
455 1000) { 853 1000) {
456 ath9k_hw_ani_cck_err_trigger(ah); 854 ath9k_hw_ani_cck_err_trigger_old(ah);
457 ath9k_ani_restart(ah); 855 ath9k_ani_restart_old(ah);
856 }
857 }
858}
859
860static void ath9k_hw_ani_monitor_new(struct ath_hw *ah,
861 struct ath9k_channel *chan)
862{
863 struct ar5416AniState *aniState;
864 struct ath_common *common = ath9k_hw_common(ah);
865 int32_t listenTime;
866 u32 phyCnt1, phyCnt2;
867 u32 ofdmPhyErrCnt, cckPhyErrCnt;
868 u32 ofdmPhyErrRate, cckPhyErrRate;
869
870 if (!DO_ANI(ah))
871 return;
872
873 aniState = ah->curani;
874 if (WARN_ON(!aniState))
875 return;
876
877 listenTime = ath9k_hw_ani_get_listen_time(ah);
878 if (listenTime <= 0) {
879 ah->stats.ast_ani_lneg++;
880 /* restart ANI period if listenTime is invalid */
881 ath_print(common, ATH_DBG_ANI,
882 "listenTime=%d - on new ani monitor\n",
883 listenTime);
884 ath9k_ani_restart_new(ah);
885 return;
886 }
887
888 aniState->listenTime += listenTime;
889
890 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
891
892 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
893 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
894
895 if (phyCnt1 < aniState->ofdmPhyErrBase ||
896 phyCnt2 < aniState->cckPhyErrBase) {
897 if (phyCnt1 < aniState->ofdmPhyErrBase) {
898 ath_print(common, ATH_DBG_ANI,
899 "phyCnt1 0x%x, resetting "
900 "counter value to 0x%x\n",
901 phyCnt1,
902 aniState->ofdmPhyErrBase);
903 REG_WRITE(ah, AR_PHY_ERR_1,
904 aniState->ofdmPhyErrBase);
905 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
906 AR_PHY_ERR_OFDM_TIMING);
907 }
908 if (phyCnt2 < aniState->cckPhyErrBase) {
909 ath_print(common, ATH_DBG_ANI,
910 "phyCnt2 0x%x, resetting "
911 "counter value to 0x%x\n",
912 phyCnt2,
913 aniState->cckPhyErrBase);
914 REG_WRITE(ah, AR_PHY_ERR_2,
915 aniState->cckPhyErrBase);
916 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
917 AR_PHY_ERR_CCK_TIMING);
918 }
919 return;
920 }
921
922 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
923 ah->stats.ast_ani_ofdmerrs +=
924 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
925 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
926
927 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
928 ah->stats.ast_ani_cckerrs +=
929 cckPhyErrCnt - aniState->cckPhyErrCount;
930 aniState->cckPhyErrCount = cckPhyErrCnt;
931
932 ath_print(common, ATH_DBG_ANI,
933 "Errors: OFDM=0x%08x-0x%08x=%d "
934 "CCK=0x%08x-0x%08x=%d\n",
935 phyCnt1,
936 aniState->ofdmPhyErrBase,
937 ofdmPhyErrCnt,
938 phyCnt2,
939 aniState->cckPhyErrBase,
940 cckPhyErrCnt);
941
942 ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 /
943 aniState->listenTime;
944 cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
945 aniState->listenTime;
946
947 ath_print(common, ATH_DBG_ANI,
948 "listenTime=%d OFDM:%d errs=%d/s CCK:%d "
949 "errs=%d/s ofdm_turn=%d\n",
950 listenTime, aniState->ofdmNoiseImmunityLevel,
951 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
952 cckPhyErrRate, aniState->ofdmsTurn);
953
954 if (aniState->listenTime > 5 * ah->aniperiod) {
955 if (ofdmPhyErrRate <= aniState->ofdmTrigLow &&
956 cckPhyErrRate <= aniState->cckTrigLow) {
957 ath_print(common, ATH_DBG_ANI,
958 "1. listenTime=%d OFDM:%d errs=%d/s(<%d) "
959 "CCK:%d errs=%d/s(<%d) -> "
960 "ath9k_hw_ani_lower_immunity()\n",
961 aniState->listenTime,
962 aniState->ofdmNoiseImmunityLevel,
963 ofdmPhyErrRate,
964 aniState->ofdmTrigLow,
965 aniState->cckNoiseImmunityLevel,
966 cckPhyErrRate,
967 aniState->cckTrigLow);
968 ath9k_hw_ani_lower_immunity(ah);
969 aniState->ofdmsTurn = !aniState->ofdmsTurn;
970 }
971 ath_print(common, ATH_DBG_ANI,
972 "1 listenTime=%d ofdm=%d/s cck=%d/s - "
973 "calling ath9k_ani_restart_new()\n",
974 aniState->listenTime, ofdmPhyErrRate, cckPhyErrRate);
975 ath9k_ani_restart_new(ah);
976 } else if (aniState->listenTime > ah->aniperiod) {
977 /* check to see if need to raise immunity */
978 if (ofdmPhyErrRate > aniState->ofdmTrigHigh &&
979 (cckPhyErrRate <= aniState->cckTrigHigh ||
980 aniState->ofdmsTurn)) {
981 ath_print(common, ATH_DBG_ANI,
982 "2 listenTime=%d OFDM:%d errs=%d/s(>%d) -> "
983 "ath9k_hw_ani_ofdm_err_trigger_new()\n",
984 aniState->listenTime,
985 aniState->ofdmNoiseImmunityLevel,
986 ofdmPhyErrRate,
987 aniState->ofdmTrigHigh);
988 ath9k_hw_ani_ofdm_err_trigger_new(ah);
989 ath9k_ani_restart_new(ah);
990 aniState->ofdmsTurn = false;
991 } else if (cckPhyErrRate > aniState->cckTrigHigh) {
992 ath_print(common, ATH_DBG_ANI,
993 "3 listenTime=%d CCK:%d errs=%d/s(>%d) -> "
994 "ath9k_hw_ani_cck_err_trigger_new()\n",
995 aniState->listenTime,
996 aniState->cckNoiseImmunityLevel,
997 cckPhyErrRate,
998 aniState->cckTrigHigh);
999 ath9k_hw_ani_cck_err_trigger_new(ah);
1000 ath9k_ani_restart_new(ah);
1001 aniState->ofdmsTurn = true;
458 } 1002 }
459 } 1003 }
460} 1004}
461EXPORT_SYMBOL(ath9k_hw_ani_monitor);
462 1005
463void ath9k_enable_mib_counters(struct ath_hw *ah) 1006void ath9k_enable_mib_counters(struct ath_hw *ah)
464{ 1007{
@@ -495,6 +1038,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
495 REG_WRITE(ah, AR_FILT_OFDM, 0); 1038 REG_WRITE(ah, AR_FILT_OFDM, 0);
496 REG_WRITE(ah, AR_FILT_CCK, 0); 1039 REG_WRITE(ah, AR_FILT_CCK, 0);
497} 1040}
1041EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
498 1042
499u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, 1043u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
500 u32 *rxc_pcnt, 1044 u32 *rxc_pcnt,
@@ -542,7 +1086,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
542 * any of the MIB counters overflow/trigger so don't assume we're 1086 * any of the MIB counters overflow/trigger so don't assume we're
543 * here because a PHY error counter triggered. 1087 * here because a PHY error counter triggered.
544 */ 1088 */
545void ath9k_hw_procmibevent(struct ath_hw *ah) 1089static void ath9k_hw_proc_mib_event_old(struct ath_hw *ah)
546{ 1090{
547 u32 phyCnt1, phyCnt2; 1091 u32 phyCnt1, phyCnt2;
548 1092
@@ -555,8 +1099,15 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
555 /* Clear the mib counters and save them in the stats */ 1099 /* Clear the mib counters and save them in the stats */
556 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 1100 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
557 1101
558 if (!DO_ANI(ah)) 1102 if (!DO_ANI(ah)) {
1103 /*
1104 * We must always clear the interrupt cause by
1105 * resetting the phy error regs.
1106 */
1107 REG_WRITE(ah, AR_PHY_ERR_1, 0);
1108 REG_WRITE(ah, AR_PHY_ERR_2, 0);
559 return; 1109 return;
1110 }
560 1111
561 /* NB: these are not reset-on-read */ 1112 /* NB: these are not reset-on-read */
562 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); 1113 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
@@ -584,14 +1135,51 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
584 * check will never be true. 1135 * check will never be true.
585 */ 1136 */
586 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh) 1137 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
587 ath9k_hw_ani_ofdm_err_trigger(ah); 1138 ath9k_hw_ani_ofdm_err_trigger_new(ah);
588 if (aniState->cckPhyErrCount > aniState->cckTrigHigh) 1139 if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
589 ath9k_hw_ani_cck_err_trigger(ah); 1140 ath9k_hw_ani_cck_err_trigger_old(ah);
590 /* NB: always restart to insure the h/w counters are reset */ 1141 /* NB: always restart to insure the h/w counters are reset */
591 ath9k_ani_restart(ah); 1142 ath9k_ani_restart_old(ah);
592 } 1143 }
593} 1144}
594EXPORT_SYMBOL(ath9k_hw_procmibevent); 1145
1146/*
1147 * Process a MIB interrupt. We may potentially be invoked because
1148 * any of the MIB counters overflow/trigger so don't assume we're
1149 * here because a PHY error counter triggered.
1150 */
1151static void ath9k_hw_proc_mib_event_new(struct ath_hw *ah)
1152{
1153 u32 phyCnt1, phyCnt2;
1154
1155 /* Reset these counters regardless */
1156 REG_WRITE(ah, AR_FILT_OFDM, 0);
1157 REG_WRITE(ah, AR_FILT_CCK, 0);
1158 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
1159 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
1160
1161 /* Clear the mib counters and save them in the stats */
1162 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
1163
1164 if (!DO_ANI(ah)) {
1165 /*
1166 * We must always clear the interrupt cause by
1167 * resetting the phy error regs.
1168 */
1169 REG_WRITE(ah, AR_PHY_ERR_1, 0);
1170 REG_WRITE(ah, AR_PHY_ERR_2, 0);
1171 return;
1172 }
1173
1174 /* NB: these are not reset-on-read */
1175 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
1176 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
1177
1178 /* NB: always restart to insure the h/w counters are reset */
1179 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
1180 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK))
1181 ath9k_ani_restart_new(ah);
1182}
595 1183
596void ath9k_hw_ani_setup(struct ath_hw *ah) 1184void ath9k_hw_ani_setup(struct ath_hw *ah)
597{ 1185{
@@ -619,22 +1207,70 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
619 1207
620 memset(ah->ani, 0, sizeof(ah->ani)); 1208 memset(ah->ani, 0, sizeof(ah->ani));
621 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) { 1209 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
622 ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH; 1210 if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
623 ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW; 1211 ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
624 ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH; 1212 ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
625 ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW; 1213
1214 ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_NEW;
1215 ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_NEW;
1216
1217 ah->ani[i].spurImmunityLevel =
1218 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
1219
1220 ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
1221
1222 ah->ani[i].ofdmPhyErrBase = 0;
1223 ah->ani[i].cckPhyErrBase = 0;
1224
1225 if (AR_SREV_9300_20_OR_LATER(ah))
1226 ah->ani[i].mrcCCKOff =
1227 !ATH9K_ANI_ENABLE_MRC_CCK;
1228 else
1229 ah->ani[i].mrcCCKOff = true;
1230
1231 ah->ani[i].ofdmsTurn = true;
1232 } else {
1233 ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
1234 ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
1235
1236 ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
1237 ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_OLD;
1238
1239 ah->ani[i].spurImmunityLevel =
1240 ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
1241 ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
1242
1243 ah->ani[i].ofdmPhyErrBase =
1244 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
1245 ah->ani[i].cckPhyErrBase =
1246 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH_OLD;
1247 ah->ani[i].cckWeakSigThreshold =
1248 ATH9K_ANI_CCK_WEAK_SIG_THR;
1249 }
1250
626 ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; 1251 ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
627 ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; 1252 ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
628 ah->ani[i].ofdmWeakSigDetectOff = 1253 ah->ani[i].ofdmWeakSigDetectOff =
629 !ATH9K_ANI_USE_OFDM_WEAK_SIG; 1254 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
630 ah->ani[i].cckWeakSigThreshold = 1255 ah->ani[i].cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
631 ATH9K_ANI_CCK_WEAK_SIG_THR; 1256 }
632 ah->ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; 1257
633 ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL; 1258 /*
634 ah->ani[i].ofdmPhyErrBase = 1259 * since we expect some ongoing maintenance on the tables, let's sanity
635 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH; 1260 * check here default level should not modify INI setting.
636 ah->ani[i].cckPhyErrBase = 1261 */
637 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH; 1262 if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
1263 const struct ani_ofdm_level_entry *entry_ofdm;
1264 const struct ani_cck_level_entry *entry_cck;
1265
1266 entry_ofdm = &ofdm_level_table[ATH9K_ANI_OFDM_DEF_LEVEL];
1267 entry_cck = &cck_level_table[ATH9K_ANI_CCK_DEF_LEVEL];
1268
1269 ah->aniperiod = ATH9K_ANI_PERIOD_NEW;
1270 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
1271 } else {
1272 ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
1273 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
638 } 1274 }
639 1275
640 ath_print(common, ATH_DBG_ANI, 1276 ath_print(common, ATH_DBG_ANI,
@@ -653,7 +1289,34 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
653 1289
654 ath9k_enable_mib_counters(ah); 1290 ath9k_enable_mib_counters(ah);
655 1291
656 ah->aniperiod = ATH9K_ANI_PERIOD;
657 if (ah->config.enable_ani) 1292 if (ah->config.enable_ani)
658 ah->proc_phyerr |= HAL_PROCESS_ANI; 1293 ah->proc_phyerr |= HAL_PROCESS_ANI;
659} 1294}
1295
1296void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah)
1297{
1298 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1299 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
1300
1301 priv_ops->ani_reset = ath9k_ani_reset_old;
1302 priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_old;
1303
1304 ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_old;
1305 ops->ani_monitor = ath9k_hw_ani_monitor_old;
1306
1307 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v1\n");
1308}
1309
1310void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah)
1311{
1312 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1313 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
1314
1315 priv_ops->ani_reset = ath9k_ani_reset_new;
1316 priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_new;
1317
1318 ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_new;
1319 ops->ani_monitor = ath9k_hw_ani_monitor_new;
1320
1321 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v2\n");
1322}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 3356762ea384..f4d0a4d48b37 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -23,23 +23,55 @@
23 23
24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) 24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
25 25
26#define ATH9K_ANI_OFDM_TRIG_HIGH 500 26/* units are errors per second */
27#define ATH9K_ANI_OFDM_TRIG_LOW 200 27#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
28#define ATH9K_ANI_CCK_TRIG_HIGH 200 28#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 1000
29#define ATH9K_ANI_CCK_TRIG_LOW 100 29
30/* units are errors per second */
31#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
32#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
33
34/* units are errors per second */
35#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
36#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600
37
38/* units are errors per second */
39#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
40#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300
41
30#define ATH9K_ANI_NOISE_IMMUNE_LVL 4 42#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
31#define ATH9K_ANI_USE_OFDM_WEAK_SIG true 43#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
32#define ATH9K_ANI_CCK_WEAK_SIG_THR false 44#define ATH9K_ANI_CCK_WEAK_SIG_THR false
33#define ATH9K_ANI_SPUR_IMMUNE_LVL 7 45
34#define ATH9K_ANI_FIRSTEP_LVL 0 46#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7
47#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3
48
49#define ATH9K_ANI_FIRSTEP_LVL_OLD 0
50#define ATH9K_ANI_FIRSTEP_LVL_NEW 2
51
35#define ATH9K_ANI_RSSI_THR_HIGH 40 52#define ATH9K_ANI_RSSI_THR_HIGH 40
36#define ATH9K_ANI_RSSI_THR_LOW 7 53#define ATH9K_ANI_RSSI_THR_LOW 7
37#define ATH9K_ANI_PERIOD 100 54
55#define ATH9K_ANI_PERIOD_OLD 100
56#define ATH9K_ANI_PERIOD_NEW 1000
57
58/* in ms */
59#define ATH9K_ANI_POLLINTERVAL_OLD 100
60#define ATH9K_ANI_POLLINTERVAL_NEW 1000
38 61
39#define HAL_NOISE_IMMUNE_MAX 4 62#define HAL_NOISE_IMMUNE_MAX 4
40#define HAL_SPUR_IMMUNE_MAX 7 63#define HAL_SPUR_IMMUNE_MAX 7
41#define HAL_FIRST_STEP_MAX 2 64#define HAL_FIRST_STEP_MAX 2
42 65
66#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0
67#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20
68#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
69#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
70
71#define ATH9K_ANI_ENABLE_MRC_CCK true
72
73/* values here are relative to the INI */
74
43enum ath9k_ani_cmd { 75enum ath9k_ani_cmd {
44 ATH9K_ANI_PRESENT = 0x1, 76 ATH9K_ANI_PRESENT = 0x1,
45 ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2, 77 ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
@@ -49,7 +81,8 @@ enum ath9k_ani_cmd {
49 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20, 81 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
50 ATH9K_ANI_MODE = 0x40, 82 ATH9K_ANI_MODE = 0x40,
51 ATH9K_ANI_PHYERR_RESET = 0x80, 83 ATH9K_ANI_PHYERR_RESET = 0x80,
52 ATH9K_ANI_ALL = 0xff 84 ATH9K_ANI_MRC_CCK = 0x100,
85 ATH9K_ANI_ALL = 0xfff
53}; 86};
54 87
55struct ath9k_mib_stats { 88struct ath9k_mib_stats {
@@ -60,9 +93,31 @@ struct ath9k_mib_stats {
60 u32 beacons; 93 u32 beacons;
61}; 94};
62 95
96/* INI default values for ANI registers */
97struct ath9k_ani_default {
98 u16 m1ThreshLow;
99 u16 m2ThreshLow;
100 u16 m1Thresh;
101 u16 m2Thresh;
102 u16 m2CountThr;
103 u16 m2CountThrLow;
104 u16 m1ThreshLowExt;
105 u16 m2ThreshLowExt;
106 u16 m1ThreshExt;
107 u16 m2ThreshExt;
108 u16 firstep;
109 u16 firstepLow;
110 u16 cycpwrThr1;
111 u16 cycpwrThr1Ext;
112};
113
63struct ar5416AniState { 114struct ar5416AniState {
64 struct ath9k_channel *c; 115 struct ath9k_channel *c;
65 u8 noiseImmunityLevel; 116 u8 noiseImmunityLevel;
117 u8 ofdmNoiseImmunityLevel;
118 u8 cckNoiseImmunityLevel;
119 bool ofdmsTurn;
120 u8 mrcCCKOff;
66 u8 spurImmunityLevel; 121 u8 spurImmunityLevel;
67 u8 firstepLevel; 122 u8 firstepLevel;
68 u8 ofdmWeakSigDetectOff; 123 u8 ofdmWeakSigDetectOff;
@@ -85,6 +140,7 @@ struct ar5416AniState {
85 int16_t pktRssi[2]; 140 int16_t pktRssi[2];
86 int16_t ofdmErrRssi[2]; 141 int16_t ofdmErrRssi[2];
87 int16_t cckErrRssi[2]; 142 int16_t cckErrRssi[2];
143 struct ath9k_ani_default iniDef;
88}; 144};
89 145
90struct ar5416Stats { 146struct ar5416Stats {
@@ -108,15 +164,13 @@ struct ar5416Stats {
108}; 164};
109#define ah_mibStats stats.ast_mibstats 165#define ah_mibStats stats.ast_mibstats
110 166
111void ath9k_ani_reset(struct ath_hw *ah);
112void ath9k_hw_ani_monitor(struct ath_hw *ah,
113 struct ath9k_channel *chan);
114void ath9k_enable_mib_counters(struct ath_hw *ah); 167void ath9k_enable_mib_counters(struct ath_hw *ah);
115void ath9k_hw_disable_mib_counters(struct ath_hw *ah); 168void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
116u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt, 169u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
117 u32 *rxf_pcnt, u32 *txf_pcnt); 170 u32 *rxf_pcnt, u32 *txf_pcnt);
118void ath9k_hw_procmibevent(struct ath_hw *ah);
119void ath9k_hw_ani_setup(struct ath_hw *ah); 171void ath9k_hw_ani_setup(struct ath_hw *ah);
120void ath9k_hw_ani_init(struct ath_hw *ah); 172void ath9k_hw_ani_init(struct ath_hw *ah);
173int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
174 struct ath9k_channel *chan);
121 175
122#endif /* ANI_H */ 176#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
index 025c31ac6146..36f7d0639db3 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -14,729 +14,660 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#ifndef INITVALS_AR5008_H
18#define INITVALS_AR5008_H
19
20static const u32 ar5416Modes[][6] = { 17static const u32 ar5416Modes[][6] = {
21 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 18 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
22 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 19 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
23 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 20 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
24 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, 21 {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008},
25 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 22 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
26 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, 23 {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf},
27 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, 24 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
28 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a }, 25 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
29 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 26 {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
30 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 27 {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
31 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 28 {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
32 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 29 {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
33 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 30 {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
34 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 31 {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
35 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 }, 32 {0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0},
36 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 33 {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
37 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 34 {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
38 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 35 {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
39 { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de }, 36 {0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de},
40 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, 37 {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
41 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e }, 38 {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
42 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 }, 39 {0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18},
43 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 40 {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
44 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 }, 41 {0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190},
45 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, 42 {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
46 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 43 {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
47 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 }, 44 {0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134},
48 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b }, 45 {0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b},
49 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 }, 46 {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
50 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 47 {0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80},
51 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 48 {0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80},
52 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 49 {0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80},
53 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 }, 50 {0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120},
54 { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 }, 51 {0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00},
55 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, 52 {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
56 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 53 {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
57 { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c }, 54 {0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c},
58 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 55 {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
59 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 56 {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
60 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 57 {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
61 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 58 {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
62 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, 59 {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
63 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, 60 {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788},
64 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, 61 {0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120},
65 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, 62 {0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120},
66 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, 63 {0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120},
67 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 64 {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
68 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 65 {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
69 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, 66 {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa},
70 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, 67 {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
71 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, 68 {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402},
72 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, 69 {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06},
73 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, 70 {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b},
74 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, 71 {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b},
75 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, 72 {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a},
76 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, 73 {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf},
77 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, 74 {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f},
78 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, 75 {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f},
79 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, 76 {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f},
80 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, 77 {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000},
81 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 78 {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
82 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 79 {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
83 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 80 {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
84}; 81};
85 82
86static const u32 ar5416Common[][2] = { 83static const u32 ar5416Common[][2] = {
87 { 0x0000000c, 0x00000000 }, 84 /* Addr allmodes */
88 { 0x00000030, 0x00020015 }, 85 {0x0000000c, 0x00000000},
89 { 0x00000034, 0x00000005 }, 86 {0x00000030, 0x00020015},
90 { 0x00000040, 0x00000000 }, 87 {0x00000034, 0x00000005},
91 { 0x00000044, 0x00000008 }, 88 {0x00000040, 0x00000000},
92 { 0x00000048, 0x00000008 }, 89 {0x00000044, 0x00000008},
93 { 0x0000004c, 0x00000010 }, 90 {0x00000048, 0x00000008},
94 { 0x00000050, 0x00000000 }, 91 {0x0000004c, 0x00000010},
95 { 0x00000054, 0x0000001f }, 92 {0x00000050, 0x00000000},
96 { 0x00000800, 0x00000000 }, 93 {0x00000054, 0x0000001f},
97 { 0x00000804, 0x00000000 }, 94 {0x00000800, 0x00000000},
98 { 0x00000808, 0x00000000 }, 95 {0x00000804, 0x00000000},
99 { 0x0000080c, 0x00000000 }, 96 {0x00000808, 0x00000000},
100 { 0x00000810, 0x00000000 }, 97 {0x0000080c, 0x00000000},
101 { 0x00000814, 0x00000000 }, 98 {0x00000810, 0x00000000},
102 { 0x00000818, 0x00000000 }, 99 {0x00000814, 0x00000000},
103 { 0x0000081c, 0x00000000 }, 100 {0x00000818, 0x00000000},
104 { 0x00000820, 0x00000000 }, 101 {0x0000081c, 0x00000000},
105 { 0x00000824, 0x00000000 }, 102 {0x00000820, 0x00000000},
106 { 0x00001040, 0x002ffc0f }, 103 {0x00000824, 0x00000000},
107 { 0x00001044, 0x002ffc0f }, 104 {0x00001040, 0x002ffc0f},
108 { 0x00001048, 0x002ffc0f }, 105 {0x00001044, 0x002ffc0f},
109 { 0x0000104c, 0x002ffc0f }, 106 {0x00001048, 0x002ffc0f},
110 { 0x00001050, 0x002ffc0f }, 107 {0x0000104c, 0x002ffc0f},
111 { 0x00001054, 0x002ffc0f }, 108 {0x00001050, 0x002ffc0f},
112 { 0x00001058, 0x002ffc0f }, 109 {0x00001054, 0x002ffc0f},
113 { 0x0000105c, 0x002ffc0f }, 110 {0x00001058, 0x002ffc0f},
114 { 0x00001060, 0x002ffc0f }, 111 {0x0000105c, 0x002ffc0f},
115 { 0x00001064, 0x002ffc0f }, 112 {0x00001060, 0x002ffc0f},
116 { 0x00001230, 0x00000000 }, 113 {0x00001064, 0x002ffc0f},
117 { 0x00001270, 0x00000000 }, 114 {0x00001230, 0x00000000},
118 { 0x00001038, 0x00000000 }, 115 {0x00001270, 0x00000000},
119 { 0x00001078, 0x00000000 }, 116 {0x00001038, 0x00000000},
120 { 0x000010b8, 0x00000000 }, 117 {0x00001078, 0x00000000},
121 { 0x000010f8, 0x00000000 }, 118 {0x000010b8, 0x00000000},
122 { 0x00001138, 0x00000000 }, 119 {0x000010f8, 0x00000000},
123 { 0x00001178, 0x00000000 }, 120 {0x00001138, 0x00000000},
124 { 0x000011b8, 0x00000000 }, 121 {0x00001178, 0x00000000},
125 { 0x000011f8, 0x00000000 }, 122 {0x000011b8, 0x00000000},
126 { 0x00001238, 0x00000000 }, 123 {0x000011f8, 0x00000000},
127 { 0x00001278, 0x00000000 }, 124 {0x00001238, 0x00000000},
128 { 0x000012b8, 0x00000000 }, 125 {0x00001278, 0x00000000},
129 { 0x000012f8, 0x00000000 }, 126 {0x000012b8, 0x00000000},
130 { 0x00001338, 0x00000000 }, 127 {0x000012f8, 0x00000000},
131 { 0x00001378, 0x00000000 }, 128 {0x00001338, 0x00000000},
132 { 0x000013b8, 0x00000000 }, 129 {0x00001378, 0x00000000},
133 { 0x000013f8, 0x00000000 }, 130 {0x000013b8, 0x00000000},
134 { 0x00001438, 0x00000000 }, 131 {0x000013f8, 0x00000000},
135 { 0x00001478, 0x00000000 }, 132 {0x00001438, 0x00000000},
136 { 0x000014b8, 0x00000000 }, 133 {0x00001478, 0x00000000},
137 { 0x000014f8, 0x00000000 }, 134 {0x000014b8, 0x00000000},
138 { 0x00001538, 0x00000000 }, 135 {0x000014f8, 0x00000000},
139 { 0x00001578, 0x00000000 }, 136 {0x00001538, 0x00000000},
140 { 0x000015b8, 0x00000000 }, 137 {0x00001578, 0x00000000},
141 { 0x000015f8, 0x00000000 }, 138 {0x000015b8, 0x00000000},
142 { 0x00001638, 0x00000000 }, 139 {0x000015f8, 0x00000000},
143 { 0x00001678, 0x00000000 }, 140 {0x00001638, 0x00000000},
144 { 0x000016b8, 0x00000000 }, 141 {0x00001678, 0x00000000},
145 { 0x000016f8, 0x00000000 }, 142 {0x000016b8, 0x00000000},
146 { 0x00001738, 0x00000000 }, 143 {0x000016f8, 0x00000000},
147 { 0x00001778, 0x00000000 }, 144 {0x00001738, 0x00000000},
148 { 0x000017b8, 0x00000000 }, 145 {0x00001778, 0x00000000},
149 { 0x000017f8, 0x00000000 }, 146 {0x000017b8, 0x00000000},
150 { 0x0000103c, 0x00000000 }, 147 {0x000017f8, 0x00000000},
151 { 0x0000107c, 0x00000000 }, 148 {0x0000103c, 0x00000000},
152 { 0x000010bc, 0x00000000 }, 149 {0x0000107c, 0x00000000},
153 { 0x000010fc, 0x00000000 }, 150 {0x000010bc, 0x00000000},
154 { 0x0000113c, 0x00000000 }, 151 {0x000010fc, 0x00000000},
155 { 0x0000117c, 0x00000000 }, 152 {0x0000113c, 0x00000000},
156 { 0x000011bc, 0x00000000 }, 153 {0x0000117c, 0x00000000},
157 { 0x000011fc, 0x00000000 }, 154 {0x000011bc, 0x00000000},
158 { 0x0000123c, 0x00000000 }, 155 {0x000011fc, 0x00000000},
159 { 0x0000127c, 0x00000000 }, 156 {0x0000123c, 0x00000000},
160 { 0x000012bc, 0x00000000 }, 157 {0x0000127c, 0x00000000},
161 { 0x000012fc, 0x00000000 }, 158 {0x000012bc, 0x00000000},
162 { 0x0000133c, 0x00000000 }, 159 {0x000012fc, 0x00000000},
163 { 0x0000137c, 0x00000000 }, 160 {0x0000133c, 0x00000000},
164 { 0x000013bc, 0x00000000 }, 161 {0x0000137c, 0x00000000},
165 { 0x000013fc, 0x00000000 }, 162 {0x000013bc, 0x00000000},
166 { 0x0000143c, 0x00000000 }, 163 {0x000013fc, 0x00000000},
167 { 0x0000147c, 0x00000000 }, 164 {0x0000143c, 0x00000000},
168 { 0x00004030, 0x00000002 }, 165 {0x0000147c, 0x00000000},
169 { 0x0000403c, 0x00000002 }, 166 {0x00004030, 0x00000002},
170 { 0x00007010, 0x00000000 }, 167 {0x0000403c, 0x00000002},
171 { 0x00007038, 0x000004c2 }, 168 {0x00007010, 0x00000000},
172 { 0x00008004, 0x00000000 }, 169 {0x00007038, 0x000004c2},
173 { 0x00008008, 0x00000000 }, 170 {0x00008004, 0x00000000},
174 { 0x0000800c, 0x00000000 }, 171 {0x00008008, 0x00000000},
175 { 0x00008018, 0x00000700 }, 172 {0x0000800c, 0x00000000},
176 { 0x00008020, 0x00000000 }, 173 {0x00008018, 0x00000700},
177 { 0x00008038, 0x00000000 }, 174 {0x00008020, 0x00000000},
178 { 0x0000803c, 0x00000000 }, 175 {0x00008038, 0x00000000},
179 { 0x00008048, 0x40000000 }, 176 {0x0000803c, 0x00000000},
180 { 0x00008054, 0x00000000 }, 177 {0x00008048, 0x40000000},
181 { 0x00008058, 0x00000000 }, 178 {0x00008054, 0x00000000},
182 { 0x0000805c, 0x000fc78f }, 179 {0x00008058, 0x00000000},
183 { 0x00008060, 0x0000000f }, 180 {0x0000805c, 0x000fc78f},
184 { 0x00008064, 0x00000000 }, 181 {0x00008060, 0x0000000f},
185 { 0x000080c0, 0x2a82301a }, 182 {0x00008064, 0x00000000},
186 { 0x000080c4, 0x05dc01e0 }, 183 {0x000080c0, 0x2a82301a},
187 { 0x000080c8, 0x1f402710 }, 184 {0x000080c4, 0x05dc01e0},
188 { 0x000080cc, 0x01f40000 }, 185 {0x000080c8, 0x1f402710},
189 { 0x000080d0, 0x00001e00 }, 186 {0x000080cc, 0x01f40000},
190 { 0x000080d4, 0x00000000 }, 187 {0x000080d0, 0x00001e00},
191 { 0x000080d8, 0x00400000 }, 188 {0x000080d4, 0x00000000},
192 { 0x000080e0, 0xffffffff }, 189 {0x000080d8, 0x00400000},
193 { 0x000080e4, 0x0000ffff }, 190 {0x000080e0, 0xffffffff},
194 { 0x000080e8, 0x003f3f3f }, 191 {0x000080e4, 0x0000ffff},
195 { 0x000080ec, 0x00000000 }, 192 {0x000080e8, 0x003f3f3f},
196 { 0x000080f0, 0x00000000 }, 193 {0x000080ec, 0x00000000},
197 { 0x000080f4, 0x00000000 }, 194 {0x000080f0, 0x00000000},
198 { 0x000080f8, 0x00000000 }, 195 {0x000080f4, 0x00000000},
199 { 0x000080fc, 0x00020000 }, 196 {0x000080f8, 0x00000000},
200 { 0x00008100, 0x00020000 }, 197 {0x000080fc, 0x00020000},
201 { 0x00008104, 0x00000001 }, 198 {0x00008100, 0x00020000},
202 { 0x00008108, 0x00000052 }, 199 {0x00008104, 0x00000001},
203 { 0x0000810c, 0x00000000 }, 200 {0x00008108, 0x00000052},
204 { 0x00008110, 0x00000168 }, 201 {0x0000810c, 0x00000000},
205 { 0x00008118, 0x000100aa }, 202 {0x00008110, 0x00000168},
206 { 0x0000811c, 0x00003210 }, 203 {0x00008118, 0x000100aa},
207 { 0x00008124, 0x00000000 }, 204 {0x0000811c, 0x00003210},
208 { 0x00008128, 0x00000000 }, 205 {0x00008124, 0x00000000},
209 { 0x0000812c, 0x00000000 }, 206 {0x00008128, 0x00000000},
210 { 0x00008130, 0x00000000 }, 207 {0x0000812c, 0x00000000},
211 { 0x00008134, 0x00000000 }, 208 {0x00008130, 0x00000000},
212 { 0x00008138, 0x00000000 }, 209 {0x00008134, 0x00000000},
213 { 0x0000813c, 0x00000000 }, 210 {0x00008138, 0x00000000},
214 { 0x00008144, 0xffffffff }, 211 {0x0000813c, 0x00000000},
215 { 0x00008168, 0x00000000 }, 212 {0x00008144, 0xffffffff},
216 { 0x0000816c, 0x00000000 }, 213 {0x00008168, 0x00000000},
217 { 0x00008170, 0x32143320 }, 214 {0x0000816c, 0x00000000},
218 { 0x00008174, 0xfaa4fa50 }, 215 {0x00008170, 0x32143320},
219 { 0x00008178, 0x00000100 }, 216 {0x00008174, 0xfaa4fa50},
220 { 0x0000817c, 0x00000000 }, 217 {0x00008178, 0x00000100},
221 { 0x000081c4, 0x00000000 }, 218 {0x0000817c, 0x00000000},
222 { 0x000081ec, 0x00000000 }, 219 {0x000081c4, 0x00000000},
223 { 0x000081f0, 0x00000000 }, 220 {0x000081ec, 0x00000000},
224 { 0x000081f4, 0x00000000 }, 221 {0x000081f0, 0x00000000},
225 { 0x000081f8, 0x00000000 }, 222 {0x000081f4, 0x00000000},
226 { 0x000081fc, 0x00000000 }, 223 {0x000081f8, 0x00000000},
227 { 0x00008200, 0x00000000 }, 224 {0x000081fc, 0x00000000},
228 { 0x00008204, 0x00000000 }, 225 {0x00008200, 0x00000000},
229 { 0x00008208, 0x00000000 }, 226 {0x00008204, 0x00000000},
230 { 0x0000820c, 0x00000000 }, 227 {0x00008208, 0x00000000},
231 { 0x00008210, 0x00000000 }, 228 {0x0000820c, 0x00000000},
232 { 0x00008214, 0x00000000 }, 229 {0x00008210, 0x00000000},
233 { 0x00008218, 0x00000000 }, 230 {0x00008214, 0x00000000},
234 { 0x0000821c, 0x00000000 }, 231 {0x00008218, 0x00000000},
235 { 0x00008220, 0x00000000 }, 232 {0x0000821c, 0x00000000},
236 { 0x00008224, 0x00000000 }, 233 {0x00008220, 0x00000000},
237 { 0x00008228, 0x00000000 }, 234 {0x00008224, 0x00000000},
238 { 0x0000822c, 0x00000000 }, 235 {0x00008228, 0x00000000},
239 { 0x00008230, 0x00000000 }, 236 {0x0000822c, 0x00000000},
240 { 0x00008234, 0x00000000 }, 237 {0x00008230, 0x00000000},
241 { 0x00008238, 0x00000000 }, 238 {0x00008234, 0x00000000},
242 { 0x0000823c, 0x00000000 }, 239 {0x00008238, 0x00000000},
243 { 0x00008240, 0x00100000 }, 240 {0x0000823c, 0x00000000},
244 { 0x00008244, 0x0010f400 }, 241 {0x00008240, 0x00100000},
245 { 0x00008248, 0x00000100 }, 242 {0x00008244, 0x0010f400},
246 { 0x0000824c, 0x0001e800 }, 243 {0x00008248, 0x00000100},
247 { 0x00008250, 0x00000000 }, 244 {0x0000824c, 0x0001e800},
248 { 0x00008254, 0x00000000 }, 245 {0x00008250, 0x00000000},
249 { 0x00008258, 0x00000000 }, 246 {0x00008254, 0x00000000},
250 { 0x0000825c, 0x400000ff }, 247 {0x00008258, 0x00000000},
251 { 0x00008260, 0x00080922 }, 248 {0x0000825c, 0x400000ff},
252 { 0x00008264, 0x88000010 }, 249 {0x00008260, 0x00080922},
253 { 0x00008270, 0x00000000 }, 250 {0x00008264, 0x88000010},
254 { 0x00008274, 0x40000000 }, 251 {0x00008270, 0x00000000},
255 { 0x00008278, 0x003e4180 }, 252 {0x00008274, 0x40000000},
256 { 0x0000827c, 0x00000000 }, 253 {0x00008278, 0x003e4180},
257 { 0x00008284, 0x0000002c }, 254 {0x0000827c, 0x00000000},
258 { 0x00008288, 0x0000002c }, 255 {0x00008284, 0x0000002c},
259 { 0x0000828c, 0x00000000 }, 256 {0x00008288, 0x0000002c},
260 { 0x00008294, 0x00000000 }, 257 {0x0000828c, 0x00000000},
261 { 0x00008298, 0x00000000 }, 258 {0x00008294, 0x00000000},
262 { 0x00008300, 0x00000000 }, 259 {0x00008298, 0x00000000},
263 { 0x00008304, 0x00000000 }, 260 {0x00008300, 0x00000000},
264 { 0x00008308, 0x00000000 }, 261 {0x00008304, 0x00000000},
265 { 0x0000830c, 0x00000000 }, 262 {0x00008308, 0x00000000},
266 { 0x00008310, 0x00000000 }, 263 {0x0000830c, 0x00000000},
267 { 0x00008314, 0x00000000 }, 264 {0x00008310, 0x00000000},
268 { 0x00008318, 0x00000000 }, 265 {0x00008314, 0x00000000},
269 { 0x00008328, 0x00000000 }, 266 {0x00008318, 0x00000000},
270 { 0x0000832c, 0x00000007 }, 267 {0x00008328, 0x00000000},
271 { 0x00008330, 0x00000302 }, 268 {0x0000832c, 0x00000007},
272 { 0x00008334, 0x00000e00 }, 269 {0x00008330, 0x00000302},
273 { 0x00008338, 0x00070000 }, 270 {0x00008334, 0x00000e00},
274 { 0x0000833c, 0x00000000 }, 271 {0x00008338, 0x00070000},
275 { 0x00008340, 0x000107ff }, 272 {0x0000833c, 0x00000000},
276 { 0x00009808, 0x00000000 }, 273 {0x00008340, 0x000107ff},
277 { 0x0000980c, 0xad848e19 }, 274 {0x00009808, 0x00000000},
278 { 0x00009810, 0x7d14e000 }, 275 {0x0000980c, 0xad848e19},
279 { 0x00009814, 0x9c0a9f6b }, 276 {0x00009810, 0x7d14e000},
280 { 0x0000981c, 0x00000000 }, 277 {0x00009814, 0x9c0a9f6b},
281 { 0x0000982c, 0x0000a000 }, 278 {0x0000981c, 0x00000000},
282 { 0x00009830, 0x00000000 }, 279 {0x0000982c, 0x0000a000},
283 { 0x0000983c, 0x00200400 }, 280 {0x00009830, 0x00000000},
284 { 0x00009840, 0x206a002e }, 281 {0x0000983c, 0x00200400},
285 { 0x0000984c, 0x1284233c }, 282 {0x00009840, 0x206a002e},
286 { 0x00009854, 0x00000859 }, 283 {0x0000984c, 0x1284233c},
287 { 0x00009900, 0x00000000 }, 284 {0x00009854, 0x00000859},
288 { 0x00009904, 0x00000000 }, 285 {0x00009900, 0x00000000},
289 { 0x00009908, 0x00000000 }, 286 {0x00009904, 0x00000000},
290 { 0x0000990c, 0x00000000 }, 287 {0x00009908, 0x00000000},
291 { 0x0000991c, 0x10000fff }, 288 {0x0000990c, 0x00000000},
292 { 0x00009920, 0x05100000 }, 289 {0x0000991c, 0x10000fff},
293 { 0x0000a920, 0x05100000 }, 290 {0x00009920, 0x05100000},
294 { 0x0000b920, 0x05100000 }, 291 {0x0000a920, 0x05100000},
295 { 0x00009928, 0x00000001 }, 292 {0x0000b920, 0x05100000},
296 { 0x0000992c, 0x00000004 }, 293 {0x00009928, 0x00000001},
297 { 0x00009934, 0x1e1f2022 }, 294 {0x0000992c, 0x00000004},
298 { 0x00009938, 0x0a0b0c0d }, 295 {0x00009934, 0x1e1f2022},
299 { 0x0000993c, 0x00000000 }, 296 {0x00009938, 0x0a0b0c0d},
300 { 0x00009948, 0x9280b212 }, 297 {0x0000993c, 0x00000000},
301 { 0x0000994c, 0x00020028 }, 298 {0x00009948, 0x9280b212},
302 { 0x00009954, 0x5d50e188 }, 299 {0x0000994c, 0x00020028},
303 { 0x00009958, 0x00081fff }, 300 {0x00009954, 0x5d50e188},
304 { 0x0000c95c, 0x004b6a8e }, 301 {0x00009958, 0x00081fff},
305 { 0x0000c968, 0x000003ce }, 302 {0x0000c95c, 0x004b6a8e},
306 { 0x00009970, 0x190fb515 }, 303 {0x0000c968, 0x000003ce},
307 { 0x00009974, 0x00000000 }, 304 {0x00009970, 0x190fb515},
308 { 0x00009978, 0x00000001 }, 305 {0x00009974, 0x00000000},
309 { 0x0000997c, 0x00000000 }, 306 {0x00009978, 0x00000001},
310 { 0x00009980, 0x00000000 }, 307 {0x0000997c, 0x00000000},
311 { 0x00009984, 0x00000000 }, 308 {0x00009980, 0x00000000},
312 { 0x00009988, 0x00000000 }, 309 {0x00009984, 0x00000000},
313 { 0x0000998c, 0x00000000 }, 310 {0x00009988, 0x00000000},
314 { 0x00009990, 0x00000000 }, 311 {0x0000998c, 0x00000000},
315 { 0x00009994, 0x00000000 }, 312 {0x00009990, 0x00000000},
316 { 0x00009998, 0x00000000 }, 313 {0x00009994, 0x00000000},
317 { 0x0000999c, 0x00000000 }, 314 {0x00009998, 0x00000000},
318 { 0x000099a0, 0x00000000 }, 315 {0x0000999c, 0x00000000},
319 { 0x000099a4, 0x00000001 }, 316 {0x000099a0, 0x00000000},
320 { 0x000099a8, 0x001fff00 }, 317 {0x000099a4, 0x00000001},
321 { 0x000099ac, 0x00000000 }, 318 {0x000099a8, 0x001fff00},
322 { 0x000099b0, 0x03051000 }, 319 {0x000099ac, 0x00000000},
323 { 0x000099dc, 0x00000000 }, 320 {0x000099b0, 0x03051000},
324 { 0x000099e0, 0x00000200 }, 321 {0x000099dc, 0x00000000},
325 { 0x000099e4, 0xaaaaaaaa }, 322 {0x000099e0, 0x00000200},
326 { 0x000099e8, 0x3c466478 }, 323 {0x000099e4, 0xaaaaaaaa},
327 { 0x000099ec, 0x000000aa }, 324 {0x000099e8, 0x3c466478},
328 { 0x000099fc, 0x00001042 }, 325 {0x000099ec, 0x000000aa},
329 { 0x00009b00, 0x00000000 }, 326 {0x000099fc, 0x00001042},
330 { 0x00009b04, 0x00000001 }, 327 {0x00009b00, 0x00000000},
331 { 0x00009b08, 0x00000002 }, 328 {0x00009b04, 0x00000001},
332 { 0x00009b0c, 0x00000003 }, 329 {0x00009b08, 0x00000002},
333 { 0x00009b10, 0x00000004 }, 330 {0x00009b0c, 0x00000003},
334 { 0x00009b14, 0x00000005 }, 331 {0x00009b10, 0x00000004},
335 { 0x00009b18, 0x00000008 }, 332 {0x00009b14, 0x00000005},
336 { 0x00009b1c, 0x00000009 }, 333 {0x00009b18, 0x00000008},
337 { 0x00009b20, 0x0000000a }, 334 {0x00009b1c, 0x00000009},
338 { 0x00009b24, 0x0000000b }, 335 {0x00009b20, 0x0000000a},
339 { 0x00009b28, 0x0000000c }, 336 {0x00009b24, 0x0000000b},
340 { 0x00009b2c, 0x0000000d }, 337 {0x00009b28, 0x0000000c},
341 { 0x00009b30, 0x00000010 }, 338 {0x00009b2c, 0x0000000d},
342 { 0x00009b34, 0x00000011 }, 339 {0x00009b30, 0x00000010},
343 { 0x00009b38, 0x00000012 }, 340 {0x00009b34, 0x00000011},
344 { 0x00009b3c, 0x00000013 }, 341 {0x00009b38, 0x00000012},
345 { 0x00009b40, 0x00000014 }, 342 {0x00009b3c, 0x00000013},
346 { 0x00009b44, 0x00000015 }, 343 {0x00009b40, 0x00000014},
347 { 0x00009b48, 0x00000018 }, 344 {0x00009b44, 0x00000015},
348 { 0x00009b4c, 0x00000019 }, 345 {0x00009b48, 0x00000018},
349 { 0x00009b50, 0x0000001a }, 346 {0x00009b4c, 0x00000019},
350 { 0x00009b54, 0x0000001b }, 347 {0x00009b50, 0x0000001a},
351 { 0x00009b58, 0x0000001c }, 348 {0x00009b54, 0x0000001b},
352 { 0x00009b5c, 0x0000001d }, 349 {0x00009b58, 0x0000001c},
353 { 0x00009b60, 0x00000020 }, 350 {0x00009b5c, 0x0000001d},
354 { 0x00009b64, 0x00000021 }, 351 {0x00009b60, 0x00000020},
355 { 0x00009b68, 0x00000022 }, 352 {0x00009b64, 0x00000021},
356 { 0x00009b6c, 0x00000023 }, 353 {0x00009b68, 0x00000022},
357 { 0x00009b70, 0x00000024 }, 354 {0x00009b6c, 0x00000023},
358 { 0x00009b74, 0x00000025 }, 355 {0x00009b70, 0x00000024},
359 { 0x00009b78, 0x00000028 }, 356 {0x00009b74, 0x00000025},
360 { 0x00009b7c, 0x00000029 }, 357 {0x00009b78, 0x00000028},
361 { 0x00009b80, 0x0000002a }, 358 {0x00009b7c, 0x00000029},
362 { 0x00009b84, 0x0000002b }, 359 {0x00009b80, 0x0000002a},
363 { 0x00009b88, 0x0000002c }, 360 {0x00009b84, 0x0000002b},
364 { 0x00009b8c, 0x0000002d }, 361 {0x00009b88, 0x0000002c},
365 { 0x00009b90, 0x00000030 }, 362 {0x00009b8c, 0x0000002d},
366 { 0x00009b94, 0x00000031 }, 363 {0x00009b90, 0x00000030},
367 { 0x00009b98, 0x00000032 }, 364 {0x00009b94, 0x00000031},
368 { 0x00009b9c, 0x00000033 }, 365 {0x00009b98, 0x00000032},
369 { 0x00009ba0, 0x00000034 }, 366 {0x00009b9c, 0x00000033},
370 { 0x00009ba4, 0x00000035 }, 367 {0x00009ba0, 0x00000034},
371 { 0x00009ba8, 0x00000035 }, 368 {0x00009ba4, 0x00000035},
372 { 0x00009bac, 0x00000035 }, 369 {0x00009ba8, 0x00000035},
373 { 0x00009bb0, 0x00000035 }, 370 {0x00009bac, 0x00000035},
374 { 0x00009bb4, 0x00000035 }, 371 {0x00009bb0, 0x00000035},
375 { 0x00009bb8, 0x00000035 }, 372 {0x00009bb4, 0x00000035},
376 { 0x00009bbc, 0x00000035 }, 373 {0x00009bb8, 0x00000035},
377 { 0x00009bc0, 0x00000035 }, 374 {0x00009bbc, 0x00000035},
378 { 0x00009bc4, 0x00000035 }, 375 {0x00009bc0, 0x00000035},
379 { 0x00009bc8, 0x00000035 }, 376 {0x00009bc4, 0x00000035},
380 { 0x00009bcc, 0x00000035 }, 377 {0x00009bc8, 0x00000035},
381 { 0x00009bd0, 0x00000035 }, 378 {0x00009bcc, 0x00000035},
382 { 0x00009bd4, 0x00000035 }, 379 {0x00009bd0, 0x00000035},
383 { 0x00009bd8, 0x00000035 }, 380 {0x00009bd4, 0x00000035},
384 { 0x00009bdc, 0x00000035 }, 381 {0x00009bd8, 0x00000035},
385 { 0x00009be0, 0x00000035 }, 382 {0x00009bdc, 0x00000035},
386 { 0x00009be4, 0x00000035 }, 383 {0x00009be0, 0x00000035},
387 { 0x00009be8, 0x00000035 }, 384 {0x00009be4, 0x00000035},
388 { 0x00009bec, 0x00000035 }, 385 {0x00009be8, 0x00000035},
389 { 0x00009bf0, 0x00000035 }, 386 {0x00009bec, 0x00000035},
390 { 0x00009bf4, 0x00000035 }, 387 {0x00009bf0, 0x00000035},
391 { 0x00009bf8, 0x00000010 }, 388 {0x00009bf4, 0x00000035},
392 { 0x00009bfc, 0x0000001a }, 389 {0x00009bf8, 0x00000010},
393 { 0x0000a210, 0x40806333 }, 390 {0x00009bfc, 0x0000001a},
394 { 0x0000a214, 0x00106c10 }, 391 {0x0000a210, 0x40806333},
395 { 0x0000a218, 0x009c4060 }, 392 {0x0000a214, 0x00106c10},
396 { 0x0000a220, 0x018830c6 }, 393 {0x0000a218, 0x009c4060},
397 { 0x0000a224, 0x00000400 }, 394 {0x0000a220, 0x018830c6},
398 { 0x0000a228, 0x00000bb5 }, 395 {0x0000a224, 0x00000400},
399 { 0x0000a22c, 0x00000011 }, 396 {0x0000a228, 0x00000bb5},
400 { 0x0000a234, 0x20202020 }, 397 {0x0000a22c, 0x00000011},
401 { 0x0000a238, 0x20202020 }, 398 {0x0000a234, 0x20202020},
402 { 0x0000a23c, 0x13c889af }, 399 {0x0000a238, 0x20202020},
403 { 0x0000a240, 0x38490a20 }, 400 {0x0000a23c, 0x13c889af},
404 { 0x0000a244, 0x00007bb6 }, 401 {0x0000a240, 0x38490a20},
405 { 0x0000a248, 0x0fff3ffc }, 402 {0x0000a244, 0x00007bb6},
406 { 0x0000a24c, 0x00000001 }, 403 {0x0000a248, 0x0fff3ffc},
407 { 0x0000a250, 0x0000a000 }, 404 {0x0000a24c, 0x00000001},
408 { 0x0000a254, 0x00000000 }, 405 {0x0000a250, 0x0000a000},
409 { 0x0000a258, 0x0cc75380 }, 406 {0x0000a254, 0x00000000},
410 { 0x0000a25c, 0x0f0f0f01 }, 407 {0x0000a258, 0x0cc75380},
411 { 0x0000a260, 0xdfa91f01 }, 408 {0x0000a25c, 0x0f0f0f01},
412 { 0x0000a268, 0x00000000 }, 409 {0x0000a260, 0xdfa91f01},
413 { 0x0000a26c, 0x0e79e5c6 }, 410 {0x0000a268, 0x00000000},
414 { 0x0000b26c, 0x0e79e5c6 }, 411 {0x0000a26c, 0x0e79e5c6},
415 { 0x0000c26c, 0x0e79e5c6 }, 412 {0x0000b26c, 0x0e79e5c6},
416 { 0x0000d270, 0x00820820 }, 413 {0x0000c26c, 0x0e79e5c6},
417 { 0x0000a278, 0x1ce739ce }, 414 {0x0000d270, 0x00820820},
418 { 0x0000a27c, 0x051701ce }, 415 {0x0000a278, 0x1ce739ce},
419 { 0x0000a338, 0x00000000 }, 416 {0x0000a27c, 0x051701ce},
420 { 0x0000a33c, 0x00000000 }, 417 {0x0000a338, 0x00000000},
421 { 0x0000a340, 0x00000000 }, 418 {0x0000a33c, 0x00000000},
422 { 0x0000a344, 0x00000000 }, 419 {0x0000a340, 0x00000000},
423 { 0x0000a348, 0x3fffffff }, 420 {0x0000a344, 0x00000000},
424 { 0x0000a34c, 0x3fffffff }, 421 {0x0000a348, 0x3fffffff},
425 { 0x0000a350, 0x3fffffff }, 422 {0x0000a34c, 0x3fffffff},
426 { 0x0000a354, 0x0003ffff }, 423 {0x0000a350, 0x3fffffff},
427 { 0x0000a358, 0x79a8aa1f }, 424 {0x0000a354, 0x0003ffff},
428 { 0x0000d35c, 0x07ffffef }, 425 {0x0000a358, 0x79a8aa1f},
429 { 0x0000d360, 0x0fffffe7 }, 426 {0x0000d35c, 0x07ffffef},
430 { 0x0000d364, 0x17ffffe5 }, 427 {0x0000d360, 0x0fffffe7},
431 { 0x0000d368, 0x1fffffe4 }, 428 {0x0000d364, 0x17ffffe5},
432 { 0x0000d36c, 0x37ffffe3 }, 429 {0x0000d368, 0x1fffffe4},
433 { 0x0000d370, 0x3fffffe3 }, 430 {0x0000d36c, 0x37ffffe3},
434 { 0x0000d374, 0x57ffffe3 }, 431 {0x0000d370, 0x3fffffe3},
435 { 0x0000d378, 0x5fffffe2 }, 432 {0x0000d374, 0x57ffffe3},
436 { 0x0000d37c, 0x7fffffe2 }, 433 {0x0000d378, 0x5fffffe2},
437 { 0x0000d380, 0x7f3c7bba }, 434 {0x0000d37c, 0x7fffffe2},
438 { 0x0000d384, 0xf3307ff0 }, 435 {0x0000d380, 0x7f3c7bba},
439 { 0x0000a388, 0x08000000 }, 436 {0x0000d384, 0xf3307ff0},
440 { 0x0000a38c, 0x20202020 }, 437 {0x0000a388, 0x08000000},
441 { 0x0000a390, 0x20202020 }, 438 {0x0000a38c, 0x20202020},
442 { 0x0000a394, 0x1ce739ce }, 439 {0x0000a390, 0x20202020},
443 { 0x0000a398, 0x000001ce }, 440 {0x0000a394, 0x1ce739ce},
444 { 0x0000a39c, 0x00000001 }, 441 {0x0000a398, 0x000001ce},
445 { 0x0000a3a0, 0x00000000 }, 442 {0x0000a39c, 0x00000001},
446 { 0x0000a3a4, 0x00000000 }, 443 {0x0000a3a0, 0x00000000},
447 { 0x0000a3a8, 0x00000000 }, 444 {0x0000a3a4, 0x00000000},
448 { 0x0000a3ac, 0x00000000 }, 445 {0x0000a3a8, 0x00000000},
449 { 0x0000a3b0, 0x00000000 }, 446 {0x0000a3ac, 0x00000000},
450 { 0x0000a3b4, 0x00000000 }, 447 {0x0000a3b0, 0x00000000},
451 { 0x0000a3b8, 0x00000000 }, 448 {0x0000a3b4, 0x00000000},
452 { 0x0000a3bc, 0x00000000 }, 449 {0x0000a3b8, 0x00000000},
453 { 0x0000a3c0, 0x00000000 }, 450 {0x0000a3bc, 0x00000000},
454 { 0x0000a3c4, 0x00000000 }, 451 {0x0000a3c0, 0x00000000},
455 { 0x0000a3c8, 0x00000246 }, 452 {0x0000a3c4, 0x00000000},
456 { 0x0000a3cc, 0x20202020 }, 453 {0x0000a3c8, 0x00000246},
457 { 0x0000a3d0, 0x20202020 }, 454 {0x0000a3cc, 0x20202020},
458 { 0x0000a3d4, 0x20202020 }, 455 {0x0000a3d0, 0x20202020},
459 { 0x0000a3dc, 0x1ce739ce }, 456 {0x0000a3d4, 0x20202020},
460 { 0x0000a3e0, 0x000001ce }, 457 {0x0000a3dc, 0x1ce739ce},
458 {0x0000a3e0, 0x000001ce},
461}; 459};
462 460
463static const u32 ar5416Bank0[][2] = { 461static const u32 ar5416Bank0[][2] = {
464 { 0x000098b0, 0x1e5795e5 }, 462 /* Addr allmodes */
465 { 0x000098e0, 0x02008020 }, 463 {0x000098b0, 0x1e5795e5},
464 {0x000098e0, 0x02008020},
466}; 465};
467 466
468static const u32 ar5416BB_RfGain[][3] = { 467static const u32 ar5416BB_RfGain[][3] = {
469 { 0x00009a00, 0x00000000, 0x00000000 }, 468 /* Addr 5G_HT20 5G_HT40 */
470 { 0x00009a04, 0x00000040, 0x00000040 }, 469 {0x00009a00, 0x00000000, 0x00000000},
471 { 0x00009a08, 0x00000080, 0x00000080 }, 470 {0x00009a04, 0x00000040, 0x00000040},
472 { 0x00009a0c, 0x000001a1, 0x00000141 }, 471 {0x00009a08, 0x00000080, 0x00000080},
473 { 0x00009a10, 0x000001e1, 0x00000181 }, 472 {0x00009a0c, 0x000001a1, 0x00000141},
474 { 0x00009a14, 0x00000021, 0x000001c1 }, 473 {0x00009a10, 0x000001e1, 0x00000181},
475 { 0x00009a18, 0x00000061, 0x00000001 }, 474 {0x00009a14, 0x00000021, 0x000001c1},
476 { 0x00009a1c, 0x00000168, 0x00000041 }, 475 {0x00009a18, 0x00000061, 0x00000001},
477 { 0x00009a20, 0x000001a8, 0x000001a8 }, 476 {0x00009a1c, 0x00000168, 0x00000041},
478 { 0x00009a24, 0x000001e8, 0x000001e8 }, 477 {0x00009a20, 0x000001a8, 0x000001a8},
479 { 0x00009a28, 0x00000028, 0x00000028 }, 478 {0x00009a24, 0x000001e8, 0x000001e8},
480 { 0x00009a2c, 0x00000068, 0x00000068 }, 479 {0x00009a28, 0x00000028, 0x00000028},
481 { 0x00009a30, 0x00000189, 0x000000a8 }, 480 {0x00009a2c, 0x00000068, 0x00000068},
482 { 0x00009a34, 0x000001c9, 0x00000169 }, 481 {0x00009a30, 0x00000189, 0x000000a8},
483 { 0x00009a38, 0x00000009, 0x000001a9 }, 482 {0x00009a34, 0x000001c9, 0x00000169},
484 { 0x00009a3c, 0x00000049, 0x000001e9 }, 483 {0x00009a38, 0x00000009, 0x000001a9},
485 { 0x00009a40, 0x00000089, 0x00000029 }, 484 {0x00009a3c, 0x00000049, 0x000001e9},
486 { 0x00009a44, 0x00000170, 0x00000069 }, 485 {0x00009a40, 0x00000089, 0x00000029},
487 { 0x00009a48, 0x000001b0, 0x00000190 }, 486 {0x00009a44, 0x00000170, 0x00000069},
488 { 0x00009a4c, 0x000001f0, 0x000001d0 }, 487 {0x00009a48, 0x000001b0, 0x00000190},
489 { 0x00009a50, 0x00000030, 0x00000010 }, 488 {0x00009a4c, 0x000001f0, 0x000001d0},
490 { 0x00009a54, 0x00000070, 0x00000050 }, 489 {0x00009a50, 0x00000030, 0x00000010},
491 { 0x00009a58, 0x00000191, 0x00000090 }, 490 {0x00009a54, 0x00000070, 0x00000050},
492 { 0x00009a5c, 0x000001d1, 0x00000151 }, 491 {0x00009a58, 0x00000191, 0x00000090},
493 { 0x00009a60, 0x00000011, 0x00000191 }, 492 {0x00009a5c, 0x000001d1, 0x00000151},
494 { 0x00009a64, 0x00000051, 0x000001d1 }, 493 {0x00009a60, 0x00000011, 0x00000191},
495 { 0x00009a68, 0x00000091, 0x00000011 }, 494 {0x00009a64, 0x00000051, 0x000001d1},
496 { 0x00009a6c, 0x000001b8, 0x00000051 }, 495 {0x00009a68, 0x00000091, 0x00000011},
497 { 0x00009a70, 0x000001f8, 0x00000198 }, 496 {0x00009a6c, 0x000001b8, 0x00000051},
498 { 0x00009a74, 0x00000038, 0x000001d8 }, 497 {0x00009a70, 0x000001f8, 0x00000198},
499 { 0x00009a78, 0x00000078, 0x00000018 }, 498 {0x00009a74, 0x00000038, 0x000001d8},
500 { 0x00009a7c, 0x00000199, 0x00000058 }, 499 {0x00009a78, 0x00000078, 0x00000018},
501 { 0x00009a80, 0x000001d9, 0x00000098 }, 500 {0x00009a7c, 0x00000199, 0x00000058},
502 { 0x00009a84, 0x00000019, 0x00000159 }, 501 {0x00009a80, 0x000001d9, 0x00000098},
503 { 0x00009a88, 0x00000059, 0x00000199 }, 502 {0x00009a84, 0x00000019, 0x00000159},
504 { 0x00009a8c, 0x00000099, 0x000001d9 }, 503 {0x00009a88, 0x00000059, 0x00000199},
505 { 0x00009a90, 0x000000d9, 0x00000019 }, 504 {0x00009a8c, 0x00000099, 0x000001d9},
506 { 0x00009a94, 0x000000f9, 0x00000059 }, 505 {0x00009a90, 0x000000d9, 0x00000019},
507 { 0x00009a98, 0x000000f9, 0x00000099 }, 506 {0x00009a94, 0x000000f9, 0x00000059},
508 { 0x00009a9c, 0x000000f9, 0x000000d9 }, 507 {0x00009a98, 0x000000f9, 0x00000099},
509 { 0x00009aa0, 0x000000f9, 0x000000f9 }, 508 {0x00009a9c, 0x000000f9, 0x000000d9},
510 { 0x00009aa4, 0x000000f9, 0x000000f9 }, 509 {0x00009aa0, 0x000000f9, 0x000000f9},
511 { 0x00009aa8, 0x000000f9, 0x000000f9 }, 510 {0x00009aa4, 0x000000f9, 0x000000f9},
512 { 0x00009aac, 0x000000f9, 0x000000f9 }, 511 {0x00009aa8, 0x000000f9, 0x000000f9},
513 { 0x00009ab0, 0x000000f9, 0x000000f9 }, 512 {0x00009aac, 0x000000f9, 0x000000f9},
514 { 0x00009ab4, 0x000000f9, 0x000000f9 }, 513 {0x00009ab0, 0x000000f9, 0x000000f9},
515 { 0x00009ab8, 0x000000f9, 0x000000f9 }, 514 {0x00009ab4, 0x000000f9, 0x000000f9},
516 { 0x00009abc, 0x000000f9, 0x000000f9 }, 515 {0x00009ab8, 0x000000f9, 0x000000f9},
517 { 0x00009ac0, 0x000000f9, 0x000000f9 }, 516 {0x00009abc, 0x000000f9, 0x000000f9},
518 { 0x00009ac4, 0x000000f9, 0x000000f9 }, 517 {0x00009ac0, 0x000000f9, 0x000000f9},
519 { 0x00009ac8, 0x000000f9, 0x000000f9 }, 518 {0x00009ac4, 0x000000f9, 0x000000f9},
520 { 0x00009acc, 0x000000f9, 0x000000f9 }, 519 {0x00009ac8, 0x000000f9, 0x000000f9},
521 { 0x00009ad0, 0x000000f9, 0x000000f9 }, 520 {0x00009acc, 0x000000f9, 0x000000f9},
522 { 0x00009ad4, 0x000000f9, 0x000000f9 }, 521 {0x00009ad0, 0x000000f9, 0x000000f9},
523 { 0x00009ad8, 0x000000f9, 0x000000f9 }, 522 {0x00009ad4, 0x000000f9, 0x000000f9},
524 { 0x00009adc, 0x000000f9, 0x000000f9 }, 523 {0x00009ad8, 0x000000f9, 0x000000f9},
525 { 0x00009ae0, 0x000000f9, 0x000000f9 }, 524 {0x00009adc, 0x000000f9, 0x000000f9},
526 { 0x00009ae4, 0x000000f9, 0x000000f9 }, 525 {0x00009ae0, 0x000000f9, 0x000000f9},
527 { 0x00009ae8, 0x000000f9, 0x000000f9 }, 526 {0x00009ae4, 0x000000f9, 0x000000f9},
528 { 0x00009aec, 0x000000f9, 0x000000f9 }, 527 {0x00009ae8, 0x000000f9, 0x000000f9},
529 { 0x00009af0, 0x000000f9, 0x000000f9 }, 528 {0x00009aec, 0x000000f9, 0x000000f9},
530 { 0x00009af4, 0x000000f9, 0x000000f9 }, 529 {0x00009af0, 0x000000f9, 0x000000f9},
531 { 0x00009af8, 0x000000f9, 0x000000f9 }, 530 {0x00009af4, 0x000000f9, 0x000000f9},
532 { 0x00009afc, 0x000000f9, 0x000000f9 }, 531 {0x00009af8, 0x000000f9, 0x000000f9},
532 {0x00009afc, 0x000000f9, 0x000000f9},
533}; 533};
534 534
535static const u32 ar5416Bank1[][2] = { 535static const u32 ar5416Bank1[][2] = {
536 { 0x000098b0, 0x02108421 }, 536 /* Addr allmodes */
537 { 0x000098ec, 0x00000008 }, 537 {0x000098b0, 0x02108421},
538 {0x000098ec, 0x00000008},
538}; 539};
539 540
540static const u32 ar5416Bank2[][2] = { 541static const u32 ar5416Bank2[][2] = {
541 { 0x000098b0, 0x0e73ff17 }, 542 /* Addr allmodes */
542 { 0x000098e0, 0x00000420 }, 543 {0x000098b0, 0x0e73ff17},
544 {0x000098e0, 0x00000420},
543}; 545};
544 546
545static const u32 ar5416Bank3[][3] = { 547static const u32 ar5416Bank3[][3] = {
546 { 0x000098f0, 0x01400018, 0x01c00018 }, 548 /* Addr 5G_HT20 5G_HT40 */
549 {0x000098f0, 0x01400018, 0x01c00018},
547}; 550};
548 551
549static const u32 ar5416Bank6[][3] = { 552static const u32 ar5416Bank6[][3] = {
550 553 /* Addr 5G_HT20 5G_HT40 */
551 { 0x0000989c, 0x00000000, 0x00000000 }, 554 {0x0000989c, 0x00000000, 0x00000000},
552 { 0x0000989c, 0x00000000, 0x00000000 }, 555 {0x0000989c, 0x00000000, 0x00000000},
553 { 0x0000989c, 0x00000000, 0x00000000 }, 556 {0x0000989c, 0x00000000, 0x00000000},
554 { 0x0000989c, 0x00e00000, 0x00e00000 }, 557 {0x0000989c, 0x00e00000, 0x00e00000},
555 { 0x0000989c, 0x005e0000, 0x005e0000 }, 558 {0x0000989c, 0x005e0000, 0x005e0000},
556 { 0x0000989c, 0x00120000, 0x00120000 }, 559 {0x0000989c, 0x00120000, 0x00120000},
557 { 0x0000989c, 0x00620000, 0x00620000 }, 560 {0x0000989c, 0x00620000, 0x00620000},
558 { 0x0000989c, 0x00020000, 0x00020000 }, 561 {0x0000989c, 0x00020000, 0x00020000},
559 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 562 {0x0000989c, 0x00ff0000, 0x00ff0000},
560 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 563 {0x0000989c, 0x00ff0000, 0x00ff0000},
561 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 564 {0x0000989c, 0x00ff0000, 0x00ff0000},
562 { 0x0000989c, 0x40ff0000, 0x40ff0000 }, 565 {0x0000989c, 0x40ff0000, 0x40ff0000},
563 { 0x0000989c, 0x005f0000, 0x005f0000 }, 566 {0x0000989c, 0x005f0000, 0x005f0000},
564 { 0x0000989c, 0x00870000, 0x00870000 }, 567 {0x0000989c, 0x00870000, 0x00870000},
565 { 0x0000989c, 0x00f90000, 0x00f90000 }, 568 {0x0000989c, 0x00f90000, 0x00f90000},
566 { 0x0000989c, 0x007b0000, 0x007b0000 }, 569 {0x0000989c, 0x007b0000, 0x007b0000},
567 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 570 {0x0000989c, 0x00ff0000, 0x00ff0000},
568 { 0x0000989c, 0x00f50000, 0x00f50000 }, 571 {0x0000989c, 0x00f50000, 0x00f50000},
569 { 0x0000989c, 0x00dc0000, 0x00dc0000 }, 572 {0x0000989c, 0x00dc0000, 0x00dc0000},
570 { 0x0000989c, 0x00110000, 0x00110000 }, 573 {0x0000989c, 0x00110000, 0x00110000},
571 { 0x0000989c, 0x006100a8, 0x006100a8 }, 574 {0x0000989c, 0x006100a8, 0x006100a8},
572 { 0x0000989c, 0x004210a2, 0x004210a2 }, 575 {0x0000989c, 0x004210a2, 0x004210a2},
573 { 0x0000989c, 0x0014008f, 0x0014008f }, 576 {0x0000989c, 0x0014008f, 0x0014008f},
574 { 0x0000989c, 0x00c40003, 0x00c40003 }, 577 {0x0000989c, 0x00c40003, 0x00c40003},
575 { 0x0000989c, 0x003000f2, 0x003000f2 }, 578 {0x0000989c, 0x003000f2, 0x003000f2},
576 { 0x0000989c, 0x00440016, 0x00440016 }, 579 {0x0000989c, 0x00440016, 0x00440016},
577 { 0x0000989c, 0x00410040, 0x00410040 }, 580 {0x0000989c, 0x00410040, 0x00410040},
578 { 0x0000989c, 0x0001805e, 0x0001805e }, 581 {0x0000989c, 0x0001805e, 0x0001805e},
579 { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, 582 {0x0000989c, 0x0000c0ab, 0x0000c0ab},
580 { 0x0000989c, 0x000000f1, 0x000000f1 }, 583 {0x0000989c, 0x000000f1, 0x000000f1},
581 { 0x0000989c, 0x00002081, 0x00002081 }, 584 {0x0000989c, 0x00002081, 0x00002081},
582 { 0x0000989c, 0x000000d4, 0x000000d4 }, 585 {0x0000989c, 0x000000d4, 0x000000d4},
583 { 0x000098d0, 0x0000000f, 0x0010000f }, 586 {0x000098d0, 0x0000000f, 0x0010000f},
584}; 587};
585 588
586static const u32 ar5416Bank6TPC[][3] = { 589static const u32 ar5416Bank6TPC[][3] = {
587 { 0x0000989c, 0x00000000, 0x00000000 }, 590 /* Addr 5G_HT20 5G_HT40 */
588 { 0x0000989c, 0x00000000, 0x00000000 }, 591 {0x0000989c, 0x00000000, 0x00000000},
589 { 0x0000989c, 0x00000000, 0x00000000 }, 592 {0x0000989c, 0x00000000, 0x00000000},
590 { 0x0000989c, 0x00e00000, 0x00e00000 }, 593 {0x0000989c, 0x00000000, 0x00000000},
591 { 0x0000989c, 0x005e0000, 0x005e0000 }, 594 {0x0000989c, 0x00e00000, 0x00e00000},
592 { 0x0000989c, 0x00120000, 0x00120000 }, 595 {0x0000989c, 0x005e0000, 0x005e0000},
593 { 0x0000989c, 0x00620000, 0x00620000 }, 596 {0x0000989c, 0x00120000, 0x00120000},
594 { 0x0000989c, 0x00020000, 0x00020000 }, 597 {0x0000989c, 0x00620000, 0x00620000},
595 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 598 {0x0000989c, 0x00020000, 0x00020000},
596 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 599 {0x0000989c, 0x00ff0000, 0x00ff0000},
597 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 600 {0x0000989c, 0x00ff0000, 0x00ff0000},
598 { 0x0000989c, 0x40ff0000, 0x40ff0000 }, 601 {0x0000989c, 0x00ff0000, 0x00ff0000},
599 { 0x0000989c, 0x005f0000, 0x005f0000 }, 602 {0x0000989c, 0x40ff0000, 0x40ff0000},
600 { 0x0000989c, 0x00870000, 0x00870000 }, 603 {0x0000989c, 0x005f0000, 0x005f0000},
601 { 0x0000989c, 0x00f90000, 0x00f90000 }, 604 {0x0000989c, 0x00870000, 0x00870000},
602 { 0x0000989c, 0x007b0000, 0x007b0000 }, 605 {0x0000989c, 0x00f90000, 0x00f90000},
603 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 606 {0x0000989c, 0x007b0000, 0x007b0000},
604 { 0x0000989c, 0x00f50000, 0x00f50000 }, 607 {0x0000989c, 0x00ff0000, 0x00ff0000},
605 { 0x0000989c, 0x00dc0000, 0x00dc0000 }, 608 {0x0000989c, 0x00f50000, 0x00f50000},
606 { 0x0000989c, 0x00110000, 0x00110000 }, 609 {0x0000989c, 0x00dc0000, 0x00dc0000},
607 { 0x0000989c, 0x006100a8, 0x006100a8 }, 610 {0x0000989c, 0x00110000, 0x00110000},
608 { 0x0000989c, 0x00423022, 0x00423022 }, 611 {0x0000989c, 0x006100a8, 0x006100a8},
609 { 0x0000989c, 0x201400df, 0x201400df }, 612 {0x0000989c, 0x00423022, 0x00423022},
610 { 0x0000989c, 0x00c40002, 0x00c40002 }, 613 {0x0000989c, 0x201400df, 0x201400df},
611 { 0x0000989c, 0x003000f2, 0x003000f2 }, 614 {0x0000989c, 0x00c40002, 0x00c40002},
612 { 0x0000989c, 0x00440016, 0x00440016 }, 615 {0x0000989c, 0x003000f2, 0x003000f2},
613 { 0x0000989c, 0x00410040, 0x00410040 }, 616 {0x0000989c, 0x00440016, 0x00440016},
614 { 0x0000989c, 0x0001805e, 0x0001805e }, 617 {0x0000989c, 0x00410040, 0x00410040},
615 { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, 618 {0x0000989c, 0x0001805e, 0x0001805e},
616 { 0x0000989c, 0x000000e1, 0x000000e1 }, 619 {0x0000989c, 0x0000c0ab, 0x0000c0ab},
617 { 0x0000989c, 0x00007081, 0x00007081 }, 620 {0x0000989c, 0x000000e1, 0x000000e1},
618 { 0x0000989c, 0x000000d4, 0x000000d4 }, 621 {0x0000989c, 0x00007081, 0x00007081},
619 { 0x000098d0, 0x0000000f, 0x0010000f }, 622 {0x0000989c, 0x000000d4, 0x000000d4},
623 {0x000098d0, 0x0000000f, 0x0010000f},
620}; 624};
621 625
622static const u32 ar5416Bank7[][2] = { 626static const u32 ar5416Bank7[][2] = {
623 { 0x0000989c, 0x00000500 }, 627 /* Addr allmodes */
624 { 0x0000989c, 0x00000800 }, 628 {0x0000989c, 0x00000500},
625 { 0x000098cc, 0x0000000e }, 629 {0x0000989c, 0x00000800},
630 {0x000098cc, 0x0000000e},
626}; 631};
627 632
628static const u32 ar5416Addac[][2] = { 633static const u32 ar5416Addac[][2] = {
629 {0x0000989c, 0x00000000 }, 634 /* Addr allmodes */
630 {0x0000989c, 0x00000003 }, 635 {0x0000989c, 0x00000000},
631 {0x0000989c, 0x00000000 }, 636 {0x0000989c, 0x00000003},
632 {0x0000989c, 0x0000000c }, 637 {0x0000989c, 0x00000000},
633 {0x0000989c, 0x00000000 }, 638 {0x0000989c, 0x0000000c},
634 {0x0000989c, 0x00000030 }, 639 {0x0000989c, 0x00000000},
635 {0x0000989c, 0x00000000 }, 640 {0x0000989c, 0x00000030},
636 {0x0000989c, 0x00000000 }, 641 {0x0000989c, 0x00000000},
637 {0x0000989c, 0x00000000 }, 642 {0x0000989c, 0x00000000},
638 {0x0000989c, 0x00000000 }, 643 {0x0000989c, 0x00000000},
639 {0x0000989c, 0x00000000 }, 644 {0x0000989c, 0x00000000},
640 {0x0000989c, 0x00000000 }, 645 {0x0000989c, 0x00000000},
641 {0x0000989c, 0x00000000 }, 646 {0x0000989c, 0x00000000},
642 {0x0000989c, 0x00000000 }, 647 {0x0000989c, 0x00000000},
643 {0x0000989c, 0x00000000 }, 648 {0x0000989c, 0x00000000},
644 {0x0000989c, 0x00000000 }, 649 {0x0000989c, 0x00000000},
645 {0x0000989c, 0x00000000 }, 650 {0x0000989c, 0x00000000},
646 {0x0000989c, 0x00000000 }, 651 {0x0000989c, 0x00000000},
647 {0x0000989c, 0x00000060 }, 652 {0x0000989c, 0x00000000},
648 {0x0000989c, 0x00000000 }, 653 {0x0000989c, 0x00000060},
649 {0x0000989c, 0x00000000 }, 654 {0x0000989c, 0x00000000},
650 {0x0000989c, 0x00000000 }, 655 {0x0000989c, 0x00000000},
651 {0x0000989c, 0x00000000 }, 656 {0x0000989c, 0x00000000},
652 {0x0000989c, 0x00000000 }, 657 {0x0000989c, 0x00000000},
653 {0x0000989c, 0x00000000 }, 658 {0x0000989c, 0x00000000},
654 {0x0000989c, 0x00000000 }, 659 {0x0000989c, 0x00000000},
655 {0x0000989c, 0x00000000 }, 660 {0x0000989c, 0x00000000},
656 {0x0000989c, 0x00000000 }, 661 {0x0000989c, 0x00000000},
657 {0x0000989c, 0x00000000 }, 662 {0x0000989c, 0x00000000},
658 {0x0000989c, 0x00000000 }, 663 {0x0000989c, 0x00000000},
659 {0x0000989c, 0x00000000 }, 664 {0x0000989c, 0x00000000},
660 {0x0000989c, 0x00000058 }, 665 {0x0000989c, 0x00000000},
661 {0x0000989c, 0x00000000 }, 666 {0x0000989c, 0x00000058},
662 {0x0000989c, 0x00000000 }, 667 {0x0000989c, 0x00000000},
663 {0x0000989c, 0x00000000 }, 668 {0x0000989c, 0x00000000},
664 {0x0000989c, 0x00000000 }, 669 {0x0000989c, 0x00000000},
665 {0x000098cc, 0x00000000 }, 670 {0x0000989c, 0x00000000},
666}; 671 {0x000098cc, 0x00000000},
667
668static const u32 ar5416Modes_9100[][6] = {
669 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
670 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
671 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
672 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
673 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
674 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
675 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
676 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
677 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
678 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
679 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
680 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
681 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
682 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
683 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
684 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
685 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
686 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
687 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
688 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
689 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
690 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
691 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
692 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
693 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
694 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
695 { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
696 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
697 { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
698 { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
699#ifdef TB243
700 { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
701 { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
702 { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
703 { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
704#else
705 { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
706 { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
707 { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
708 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
709#endif
710 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
711 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
712 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
713 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
714 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
715 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
716 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
717 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
718 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
719 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
720 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
721 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
722 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
723 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
724 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
725 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
726 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
727 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
728 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
729 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
730 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
731 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
732 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
733 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
734 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
735 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
736 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
737 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
738 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
739 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
740}; 672};
741 673
742#endif /* INITVALS_AR5008_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index b2c17c98bb38..3d2c8679bc85 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -19,7 +19,30 @@
19#include "../regd.h" 19#include "../regd.h"
20#include "ar9002_phy.h" 20#include "ar9002_phy.h"
21 21
22/* All code below is for non single-chip solutions */ 22/* All code below is for AR5008, AR9001, AR9002 */
23
24static const int firstep_table[] =
25/* level: 0 1 2 3 4 5 6 7 8 */
26 { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
27
28static const int cycpwrThr1_table[] =
29/* level: 0 1 2 3 4 5 6 7 8 */
30 { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
31
32/*
33 * register values to turn OFDM weak signal detection OFF
34 */
35static const int m1ThreshLow_off = 127;
36static const int m2ThreshLow_off = 127;
37static const int m1Thresh_off = 127;
38static const int m2Thresh_off = 127;
39static const int m2CountThr_off = 31;
40static const int m2CountThrLow_off = 63;
41static const int m1ThreshLowExt_off = 127;
42static const int m2ThreshLowExt_off = 127;
43static const int m1ThreshExt_off = 127;
44static const int m2ThreshExt_off = 127;
45
23 46
24/** 47/**
25 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters 48 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
@@ -742,17 +765,6 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
742 return -EINVAL; 765 return -EINVAL;
743 } 766 }
744 767
745 if (AR_SREV_9287_12_OR_LATER(ah)) {
746 /* Enable ASYNC FIFO */
747 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
748 AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
749 REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
750 REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
751 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
752 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
753 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
754 }
755
756 /* 768 /*
757 * Set correct baseband to analog shift setting to 769 * Set correct baseband to analog shift setting to
758 * access analog chips. 770 * access analog chips.
@@ -1037,8 +1049,9 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
1037 return pll; 1049 return pll;
1038} 1050}
1039 1051
1040static bool ar5008_hw_ani_control(struct ath_hw *ah, 1052static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1041 enum ath9k_ani_cmd cmd, int param) 1053 enum ath9k_ani_cmd cmd,
1054 int param)
1042{ 1055{
1043 struct ar5416AniState *aniState = ah->curani; 1056 struct ar5416AniState *aniState = ah->curani;
1044 struct ath_common *common = ath9k_hw_common(ah); 1057 struct ath_common *common = ath9k_hw_common(ah);
@@ -1220,129 +1233,377 @@ static bool ar5008_hw_ani_control(struct ath_hw *ah,
1220 return true; 1233 return true;
1221} 1234}
1222 1235
1236static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1237 enum ath9k_ani_cmd cmd,
1238 int param)
1239{
1240 struct ar5416AniState *aniState = ah->curani;
1241 struct ath_common *common = ath9k_hw_common(ah);
1242 struct ath9k_channel *chan = ah->curchan;
1243 s32 value, value2;
1244
1245 switch (cmd & ah->ani_function) {
1246 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
1247 /*
1248 * on == 1 means ofdm weak signal detection is ON
1249 * on == 1 is the default, for less noise immunity
1250 *
1251 * on == 0 means ofdm weak signal detection is OFF
1252 * on == 0 means more noise imm
1253 */
1254 u32 on = param ? 1 : 0;
1255 /*
1256 * make register setting for default
1257 * (weak sig detect ON) come from INI file
1258 */
1259 int m1ThreshLow = on ?
1260 aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
1261 int m2ThreshLow = on ?
1262 aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
1263 int m1Thresh = on ?
1264 aniState->iniDef.m1Thresh : m1Thresh_off;
1265 int m2Thresh = on ?
1266 aniState->iniDef.m2Thresh : m2Thresh_off;
1267 int m2CountThr = on ?
1268 aniState->iniDef.m2CountThr : m2CountThr_off;
1269 int m2CountThrLow = on ?
1270 aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
1271 int m1ThreshLowExt = on ?
1272 aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
1273 int m2ThreshLowExt = on ?
1274 aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
1275 int m1ThreshExt = on ?
1276 aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
1277 int m2ThreshExt = on ?
1278 aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
1279
1280 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1281 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
1282 m1ThreshLow);
1283 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1284 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
1285 m2ThreshLow);
1286 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1287 AR_PHY_SFCORR_M1_THRESH, m1Thresh);
1288 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1289 AR_PHY_SFCORR_M2_THRESH, m2Thresh);
1290 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1291 AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
1292 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1293 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
1294 m2CountThrLow);
1295
1296 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1297 AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
1298 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1299 AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
1300 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1301 AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
1302 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1303 AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
1304
1305 if (on)
1306 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
1307 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1308 else
1309 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
1310 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1311
1312 if (!on != aniState->ofdmWeakSigDetectOff) {
1313 ath_print(common, ATH_DBG_ANI,
1314 "** ch %d: ofdm weak signal: %s=>%s\n",
1315 chan->channel,
1316 !aniState->ofdmWeakSigDetectOff ?
1317 "on" : "off",
1318 on ? "on" : "off");
1319 if (on)
1320 ah->stats.ast_ani_ofdmon++;
1321 else
1322 ah->stats.ast_ani_ofdmoff++;
1323 aniState->ofdmWeakSigDetectOff = !on;
1324 }
1325 break;
1326 }
1327 case ATH9K_ANI_FIRSTEP_LEVEL:{
1328 u32 level = param;
1329
1330 if (level >= ARRAY_SIZE(firstep_table)) {
1331 ath_print(common, ATH_DBG_ANI,
1332 "ATH9K_ANI_FIRSTEP_LEVEL: level "
1333 "out of range (%u > %u)\n",
1334 level,
1335 (unsigned) ARRAY_SIZE(firstep_table));
1336 return false;
1337 }
1338
1339 /*
1340 * make register setting relative to default
1341 * from INI file & cap value
1342 */
1343 value = firstep_table[level] -
1344 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
1345 aniState->iniDef.firstep;
1346 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1347 value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
1348 if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
1349 value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
1350 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
1351 AR_PHY_FIND_SIG_FIRSTEP,
1352 value);
1353 /*
1354 * we need to set first step low register too
1355 * make register setting relative to default
1356 * from INI file & cap value
1357 */
1358 value2 = firstep_table[level] -
1359 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
1360 aniState->iniDef.firstepLow;
1361 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1362 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
1363 if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
1364 value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
1365
1366 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
1367 AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
1368
1369 if (level != aniState->firstepLevel) {
1370 ath_print(common, ATH_DBG_ANI,
1371 "** ch %d: level %d=>%d[def:%d] "
1372 "firstep[level]=%d ini=%d\n",
1373 chan->channel,
1374 aniState->firstepLevel,
1375 level,
1376 ATH9K_ANI_FIRSTEP_LVL_NEW,
1377 value,
1378 aniState->iniDef.firstep);
1379 ath_print(common, ATH_DBG_ANI,
1380 "** ch %d: level %d=>%d[def:%d] "
1381 "firstep_low[level]=%d ini=%d\n",
1382 chan->channel,
1383 aniState->firstepLevel,
1384 level,
1385 ATH9K_ANI_FIRSTEP_LVL_NEW,
1386 value2,
1387 aniState->iniDef.firstepLow);
1388 if (level > aniState->firstepLevel)
1389 ah->stats.ast_ani_stepup++;
1390 else if (level < aniState->firstepLevel)
1391 ah->stats.ast_ani_stepdown++;
1392 aniState->firstepLevel = level;
1393 }
1394 break;
1395 }
1396 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
1397 u32 level = param;
1398
1399 if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
1400 ath_print(common, ATH_DBG_ANI,
1401 "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
1402 "out of range (%u > %u)\n",
1403 level,
1404 (unsigned) ARRAY_SIZE(cycpwrThr1_table));
1405 return false;
1406 }
1407 /*
1408 * make register setting relative to default
1409 * from INI file & cap value
1410 */
1411 value = cycpwrThr1_table[level] -
1412 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
1413 aniState->iniDef.cycpwrThr1;
1414 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1415 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
1416 if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
1417 value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
1418 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
1419 AR_PHY_TIMING5_CYCPWR_THR1,
1420 value);
1421
1422 /*
1423 * set AR_PHY_EXT_CCA for extension channel
1424 * make register setting relative to default
1425 * from INI file & cap value
1426 */
1427 value2 = cycpwrThr1_table[level] -
1428 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
1429 aniState->iniDef.cycpwrThr1Ext;
1430 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1431 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
1432 if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
1433 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
1434 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
1435 AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
1436
1437 if (level != aniState->spurImmunityLevel) {
1438 ath_print(common, ATH_DBG_ANI,
1439 "** ch %d: level %d=>%d[def:%d] "
1440 "cycpwrThr1[level]=%d ini=%d\n",
1441 chan->channel,
1442 aniState->spurImmunityLevel,
1443 level,
1444 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
1445 value,
1446 aniState->iniDef.cycpwrThr1);
1447 ath_print(common, ATH_DBG_ANI,
1448 "** ch %d: level %d=>%d[def:%d] "
1449 "cycpwrThr1Ext[level]=%d ini=%d\n",
1450 chan->channel,
1451 aniState->spurImmunityLevel,
1452 level,
1453 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
1454 value2,
1455 aniState->iniDef.cycpwrThr1Ext);
1456 if (level > aniState->spurImmunityLevel)
1457 ah->stats.ast_ani_spurup++;
1458 else if (level < aniState->spurImmunityLevel)
1459 ah->stats.ast_ani_spurdown++;
1460 aniState->spurImmunityLevel = level;
1461 }
1462 break;
1463 }
1464 case ATH9K_ANI_MRC_CCK:
1465 /*
1466 * You should not see this as AR5008, AR9001, AR9002
1467 * does not have hardware support for MRC CCK.
1468 */
1469 WARN_ON(1);
1470 break;
1471 case ATH9K_ANI_PRESENT:
1472 break;
1473 default:
1474 ath_print(common, ATH_DBG_ANI,
1475 "invalid cmd %u\n", cmd);
1476 return false;
1477 }
1478
1479 ath_print(common, ATH_DBG_ANI,
1480 "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
1481 "MRCcck=%s listenTime=%d CC=%d listen=%d "
1482 "ofdmErrs=%d cckErrs=%d\n",
1483 aniState->spurImmunityLevel,
1484 !aniState->ofdmWeakSigDetectOff ? "on" : "off",
1485 aniState->firstepLevel,
1486 !aniState->mrcCCKOff ? "on" : "off",
1487 aniState->listenTime,
1488 aniState->cycleCount,
1489 aniState->listenTime,
1490 aniState->ofdmPhyErrCount,
1491 aniState->cckPhyErrCount);
1492 return true;
1493}
1494
1223static void ar5008_hw_do_getnf(struct ath_hw *ah, 1495static void ar5008_hw_do_getnf(struct ath_hw *ah,
1224 int16_t nfarray[NUM_NF_READINGS]) 1496 int16_t nfarray[NUM_NF_READINGS])
1225{ 1497{
1226 struct ath_common *common = ath9k_hw_common(ah);
1227 int16_t nf; 1498 int16_t nf;
1228 1499
1229 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR); 1500 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1230 if (nf & 0x100) 1501 nfarray[0] = sign_extend(nf, 9);
1231 nf = 0 - ((nf ^ 0x1ff) + 1);
1232 ath_print(common, ATH_DBG_CALIBRATE,
1233 "NF calibrated [ctl] [chain 0] is %d\n", nf);
1234 nfarray[0] = nf;
1235 1502
1236 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR); 1503 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
1237 if (nf & 0x100) 1504 nfarray[1] = sign_extend(nf, 9);
1238 nf = 0 - ((nf ^ 0x1ff) + 1);
1239 ath_print(common, ATH_DBG_CALIBRATE,
1240 "NF calibrated [ctl] [chain 1] is %d\n", nf);
1241 nfarray[1] = nf;
1242 1505
1243 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR); 1506 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
1244 if (nf & 0x100) 1507 nfarray[2] = sign_extend(nf, 9);
1245 nf = 0 - ((nf ^ 0x1ff) + 1); 1508
1246 ath_print(common, ATH_DBG_CALIBRATE, 1509 if (!IS_CHAN_HT40(ah->curchan))
1247 "NF calibrated [ctl] [chain 2] is %d\n", nf); 1510 return;
1248 nfarray[2] = nf;
1249 1511
1250 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1512 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
1251 if (nf & 0x100) 1513 nfarray[3] = sign_extend(nf, 9);
1252 nf = 0 - ((nf ^ 0x1ff) + 1);
1253 ath_print(common, ATH_DBG_CALIBRATE,
1254 "NF calibrated [ext] [chain 0] is %d\n", nf);
1255 nfarray[3] = nf;
1256 1514
1257 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR); 1515 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
1258 if (nf & 0x100) 1516 nfarray[4] = sign_extend(nf, 9);
1259 nf = 0 - ((nf ^ 0x1ff) + 1);
1260 ath_print(common, ATH_DBG_CALIBRATE,
1261 "NF calibrated [ext] [chain 1] is %d\n", nf);
1262 nfarray[4] = nf;
1263 1517
1264 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR); 1518 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
1265 if (nf & 0x100) 1519 nfarray[5] = sign_extend(nf, 9);
1266 nf = 0 - ((nf ^ 0x1ff) + 1);
1267 ath_print(common, ATH_DBG_CALIBRATE,
1268 "NF calibrated [ext] [chain 2] is %d\n", nf);
1269 nfarray[5] = nf;
1270} 1520}
1271 1521
1272static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) 1522/*
1523 * Initialize the ANI register values with default (ini) values.
1524 * This routine is called during a (full) hardware reset after
1525 * all the registers are initialised from the INI.
1526 */
1527static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1273{ 1528{
1274 struct ath9k_nfcal_hist *h; 1529 struct ar5416AniState *aniState;
1275 int i, j; 1530 struct ath_common *common = ath9k_hw_common(ah);
1276 int32_t val; 1531 struct ath9k_channel *chan = ah->curchan;
1277 const u32 ar5416_cca_regs[6] = { 1532 struct ath9k_ani_default *iniDef;
1278 AR_PHY_CCA, 1533 int index;
1279 AR_PHY_CH1_CCA, 1534 u32 val;
1280 AR_PHY_CH2_CCA,
1281 AR_PHY_EXT_CCA,
1282 AR_PHY_CH1_EXT_CCA,
1283 AR_PHY_CH2_EXT_CCA
1284 };
1285 u8 chainmask, rx_chain_status;
1286
1287 rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
1288 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
1289 chainmask = 0x9;
1290 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
1291 if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
1292 chainmask = 0x1B;
1293 else
1294 chainmask = 0x09;
1295 } else {
1296 if (rx_chain_status & 0x4)
1297 chainmask = 0x3F;
1298 else if (rx_chain_status & 0x2)
1299 chainmask = 0x1B;
1300 else
1301 chainmask = 0x09;
1302 }
1303
1304 h = ah->nfCalHist;
1305
1306 for (i = 0; i < NUM_NF_READINGS; i++) {
1307 if (chainmask & (1 << i)) {
1308 val = REG_READ(ah, ar5416_cca_regs[i]);
1309 val &= 0xFFFFFE00;
1310 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
1311 REG_WRITE(ah, ar5416_cca_regs[i], val);
1312 }
1313 }
1314
1315 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1316 AR_PHY_AGC_CONTROL_ENABLE_NF);
1317 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1318 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1319 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1320
1321 for (j = 0; j < 5; j++) {
1322 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
1323 AR_PHY_AGC_CONTROL_NF) == 0)
1324 break;
1325 udelay(50);
1326 }
1327 1535
1328 ENABLE_REGWRITE_BUFFER(ah); 1536 index = ath9k_hw_get_ani_channel_idx(ah, chan);
1537 aniState = &ah->ani[index];
1538 ah->curani = aniState;
1539 iniDef = &aniState->iniDef;
1329 1540
1330 for (i = 0; i < NUM_NF_READINGS; i++) { 1541 ath_print(common, ATH_DBG_ANI,
1331 if (chainmask & (1 << i)) { 1542 "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
1332 val = REG_READ(ah, ar5416_cca_regs[i]); 1543 ah->hw_version.macVersion,
1333 val &= 0xFFFFFE00; 1544 ah->hw_version.macRev,
1334 val |= (((u32) (-50) << 1) & 0x1ff); 1545 ah->opmode,
1335 REG_WRITE(ah, ar5416_cca_regs[i], val); 1546 chan->channel,
1336 } 1547 chan->channelFlags);
1337 } 1548
1549 val = REG_READ(ah, AR_PHY_SFCORR);
1550 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
1551 iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
1552 iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
1553
1554 val = REG_READ(ah, AR_PHY_SFCORR_LOW);
1555 iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
1556 iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
1557 iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
1558
1559 val = REG_READ(ah, AR_PHY_SFCORR_EXT);
1560 iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
1561 iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
1562 iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
1563 iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
1564 iniDef->firstep = REG_READ_FIELD(ah,
1565 AR_PHY_FIND_SIG,
1566 AR_PHY_FIND_SIG_FIRSTEP);
1567 iniDef->firstepLow = REG_READ_FIELD(ah,
1568 AR_PHY_FIND_SIG_LOW,
1569 AR_PHY_FIND_SIG_FIRSTEP_LOW);
1570 iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
1571 AR_PHY_TIMING5,
1572 AR_PHY_TIMING5_CYCPWR_THR1);
1573 iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
1574 AR_PHY_EXT_CCA,
1575 AR_PHY_EXT_TIMING5_CYCPWR_THR1);
1576
1577 /* these levels just got reset to defaults by the INI */
1578 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
1579 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
1580 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
1581 aniState->mrcCCKOff = true; /* not available on pre AR9003 */
1582
1583 aniState->cycleCount = 0;
1584}
1338 1585
1339 REGWRITE_BUFFER_FLUSH(ah); 1586static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
1340 DISABLE_REGWRITE_BUFFER(ah); 1587{
1588 ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ;
1589 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ;
1590 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_5416_2GHZ;
1591 ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ;
1592 ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ;
1593 ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ;
1341} 1594}
1342 1595
1343void ar5008_hw_attach_phy_ops(struct ath_hw *ah) 1596void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1344{ 1597{
1345 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1598 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1599 const u32 ar5416_cca_regs[6] = {
1600 AR_PHY_CCA,
1601 AR_PHY_CH1_CCA,
1602 AR_PHY_CH2_CCA,
1603 AR_PHY_EXT_CCA,
1604 AR_PHY_CH1_EXT_CCA,
1605 AR_PHY_CH2_EXT_CCA
1606 };
1346 1607
1347 priv_ops->rf_set_freq = ar5008_hw_set_channel; 1608 priv_ops->rf_set_freq = ar5008_hw_set_channel;
1348 priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate; 1609 priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
@@ -1361,9 +1622,13 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1361 priv_ops->enable_rfkill = ar5008_hw_enable_rfkill; 1622 priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
1362 priv_ops->restore_chainmask = ar5008_restore_chainmask; 1623 priv_ops->restore_chainmask = ar5008_restore_chainmask;
1363 priv_ops->set_diversity = ar5008_set_diversity; 1624 priv_ops->set_diversity = ar5008_set_diversity;
1364 priv_ops->ani_control = ar5008_hw_ani_control;
1365 priv_ops->do_getnf = ar5008_hw_do_getnf; 1625 priv_ops->do_getnf = ar5008_hw_do_getnf;
1366 priv_ops->loadnf = ar5008_hw_loadnf; 1626
1627 if (modparam_force_new_ani) {
1628 priv_ops->ani_control = ar5008_hw_ani_control_new;
1629 priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
1630 } else
1631 priv_ops->ani_control = ar5008_hw_ani_control_old;
1367 1632
1368 if (AR_SREV_9100(ah)) 1633 if (AR_SREV_9100(ah))
1369 priv_ops->compute_pll_control = ar9100_hw_compute_pll_control; 1634 priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
@@ -1371,4 +1636,7 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1371 priv_ops->compute_pll_control = ar9160_hw_compute_pll_control; 1636 priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
1372 else 1637 else
1373 priv_ops->compute_pll_control = ar5008_hw_compute_pll_control; 1638 priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
1639
1640 ar5008_hw_set_nf_limits(ah);
1641 memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
1374} 1642}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index 0b94bd385b0a..69a94c7e45cb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -1,1254 +1,1357 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17static const u32 ar5416Modes_9100[][6] = {
18 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
19 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
20 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
21 {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008},
22 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
23 {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf},
24 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
25 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
26 {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
27 {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
28 {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
29 {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
30 {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
31 {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
32 {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0},
33 {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
34 {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
35 {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
36 {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2, 0x6c48b0e2},
37 {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
38 {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
39 {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18},
40 {0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
41 {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
42 {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
43 {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
44 {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
45 {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
46 {0x00009940, 0x00750604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204},
47 {0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020},
48 {0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e},
49 {0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff},
50 {0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
51 {0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
52 {0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
53 {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
54 {0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00},
55 {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
56 {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
57 {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
58 {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
59 {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
60 {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
61 {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
62 {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
63 {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788},
64 {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
65 {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
66 {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
67 {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
68 {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
69 {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa},
70 {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
71 {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402},
72 {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06},
73 {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b},
74 {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b},
75 {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a},
76 {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf},
77 {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f},
78 {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f},
79 {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f},
80 {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000},
81 {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
82 {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
83 {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
84};
1 85
2static const u32 ar5416Common_9100[][2] = { 86static const u32 ar5416Common_9100[][2] = {
3 { 0x0000000c, 0x00000000 }, 87 /* Addr allmodes */
4 { 0x00000030, 0x00020015 }, 88 {0x0000000c, 0x00000000},
5 { 0x00000034, 0x00000005 }, 89 {0x00000030, 0x00020015},
6 { 0x00000040, 0x00000000 }, 90 {0x00000034, 0x00000005},
7 { 0x00000044, 0x00000008 }, 91 {0x00000040, 0x00000000},
8 { 0x00000048, 0x00000008 }, 92 {0x00000044, 0x00000008},
9 { 0x0000004c, 0x00000010 }, 93 {0x00000048, 0x00000008},
10 { 0x00000050, 0x00000000 }, 94 {0x0000004c, 0x00000010},
11 { 0x00000054, 0x0000001f }, 95 {0x00000050, 0x00000000},
12 { 0x00000800, 0x00000000 }, 96 {0x00000054, 0x0000001f},
13 { 0x00000804, 0x00000000 }, 97 {0x00000800, 0x00000000},
14 { 0x00000808, 0x00000000 }, 98 {0x00000804, 0x00000000},
15 { 0x0000080c, 0x00000000 }, 99 {0x00000808, 0x00000000},
16 { 0x00000810, 0x00000000 }, 100 {0x0000080c, 0x00000000},
17 { 0x00000814, 0x00000000 }, 101 {0x00000810, 0x00000000},
18 { 0x00000818, 0x00000000 }, 102 {0x00000814, 0x00000000},
19 { 0x0000081c, 0x00000000 }, 103 {0x00000818, 0x00000000},
20 { 0x00000820, 0x00000000 }, 104 {0x0000081c, 0x00000000},
21 { 0x00000824, 0x00000000 }, 105 {0x00000820, 0x00000000},
22 { 0x00001040, 0x002ffc0f }, 106 {0x00000824, 0x00000000},
23 { 0x00001044, 0x002ffc0f }, 107 {0x00001040, 0x002ffc0f},
24 { 0x00001048, 0x002ffc0f }, 108 {0x00001044, 0x002ffc0f},
25 { 0x0000104c, 0x002ffc0f }, 109 {0x00001048, 0x002ffc0f},
26 { 0x00001050, 0x002ffc0f }, 110 {0x0000104c, 0x002ffc0f},
27 { 0x00001054, 0x002ffc0f }, 111 {0x00001050, 0x002ffc0f},
28 { 0x00001058, 0x002ffc0f }, 112 {0x00001054, 0x002ffc0f},
29 { 0x0000105c, 0x002ffc0f }, 113 {0x00001058, 0x002ffc0f},
30 { 0x00001060, 0x002ffc0f }, 114 {0x0000105c, 0x002ffc0f},
31 { 0x00001064, 0x002ffc0f }, 115 {0x00001060, 0x002ffc0f},
32 { 0x00001230, 0x00000000 }, 116 {0x00001064, 0x002ffc0f},
33 { 0x00001270, 0x00000000 }, 117 {0x00001230, 0x00000000},
34 { 0x00001038, 0x00000000 }, 118 {0x00001270, 0x00000000},
35 { 0x00001078, 0x00000000 }, 119 {0x00001038, 0x00000000},
36 { 0x000010b8, 0x00000000 }, 120 {0x00001078, 0x00000000},
37 { 0x000010f8, 0x00000000 }, 121 {0x000010b8, 0x00000000},
38 { 0x00001138, 0x00000000 }, 122 {0x000010f8, 0x00000000},
39 { 0x00001178, 0x00000000 }, 123 {0x00001138, 0x00000000},
40 { 0x000011b8, 0x00000000 }, 124 {0x00001178, 0x00000000},
41 { 0x000011f8, 0x00000000 }, 125 {0x000011b8, 0x00000000},
42 { 0x00001238, 0x00000000 }, 126 {0x000011f8, 0x00000000},
43 { 0x00001278, 0x00000000 }, 127 {0x00001238, 0x00000000},
44 { 0x000012b8, 0x00000000 }, 128 {0x00001278, 0x00000000},
45 { 0x000012f8, 0x00000000 }, 129 {0x000012b8, 0x00000000},
46 { 0x00001338, 0x00000000 }, 130 {0x000012f8, 0x00000000},
47 { 0x00001378, 0x00000000 }, 131 {0x00001338, 0x00000000},
48 { 0x000013b8, 0x00000000 }, 132 {0x00001378, 0x00000000},
49 { 0x000013f8, 0x00000000 }, 133 {0x000013b8, 0x00000000},
50 { 0x00001438, 0x00000000 }, 134 {0x000013f8, 0x00000000},
51 { 0x00001478, 0x00000000 }, 135 {0x00001438, 0x00000000},
52 { 0x000014b8, 0x00000000 }, 136 {0x00001478, 0x00000000},
53 { 0x000014f8, 0x00000000 }, 137 {0x000014b8, 0x00000000},
54 { 0x00001538, 0x00000000 }, 138 {0x000014f8, 0x00000000},
55 { 0x00001578, 0x00000000 }, 139 {0x00001538, 0x00000000},
56 { 0x000015b8, 0x00000000 }, 140 {0x00001578, 0x00000000},
57 { 0x000015f8, 0x00000000 }, 141 {0x000015b8, 0x00000000},
58 { 0x00001638, 0x00000000 }, 142 {0x000015f8, 0x00000000},
59 { 0x00001678, 0x00000000 }, 143 {0x00001638, 0x00000000},
60 { 0x000016b8, 0x00000000 }, 144 {0x00001678, 0x00000000},
61 { 0x000016f8, 0x00000000 }, 145 {0x000016b8, 0x00000000},
62 { 0x00001738, 0x00000000 }, 146 {0x000016f8, 0x00000000},
63 { 0x00001778, 0x00000000 }, 147 {0x00001738, 0x00000000},
64 { 0x000017b8, 0x00000000 }, 148 {0x00001778, 0x00000000},
65 { 0x000017f8, 0x00000000 }, 149 {0x000017b8, 0x00000000},
66 { 0x0000103c, 0x00000000 }, 150 {0x000017f8, 0x00000000},
67 { 0x0000107c, 0x00000000 }, 151 {0x0000103c, 0x00000000},
68 { 0x000010bc, 0x00000000 }, 152 {0x0000107c, 0x00000000},
69 { 0x000010fc, 0x00000000 }, 153 {0x000010bc, 0x00000000},
70 { 0x0000113c, 0x00000000 }, 154 {0x000010fc, 0x00000000},
71 { 0x0000117c, 0x00000000 }, 155 {0x0000113c, 0x00000000},
72 { 0x000011bc, 0x00000000 }, 156 {0x0000117c, 0x00000000},
73 { 0x000011fc, 0x00000000 }, 157 {0x000011bc, 0x00000000},
74 { 0x0000123c, 0x00000000 }, 158 {0x000011fc, 0x00000000},
75 { 0x0000127c, 0x00000000 }, 159 {0x0000123c, 0x00000000},
76 { 0x000012bc, 0x00000000 }, 160 {0x0000127c, 0x00000000},
77 { 0x000012fc, 0x00000000 }, 161 {0x000012bc, 0x00000000},
78 { 0x0000133c, 0x00000000 }, 162 {0x000012fc, 0x00000000},
79 { 0x0000137c, 0x00000000 }, 163 {0x0000133c, 0x00000000},
80 { 0x000013bc, 0x00000000 }, 164 {0x0000137c, 0x00000000},
81 { 0x000013fc, 0x00000000 }, 165 {0x000013bc, 0x00000000},
82 { 0x0000143c, 0x00000000 }, 166 {0x000013fc, 0x00000000},
83 { 0x0000147c, 0x00000000 }, 167 {0x0000143c, 0x00000000},
84 { 0x00020010, 0x00000003 }, 168 {0x0000147c, 0x00000000},
85 { 0x00020038, 0x000004c2 }, 169 {0x00020010, 0x00000003},
86 { 0x00008004, 0x00000000 }, 170 {0x00020038, 0x000004c2},
87 { 0x00008008, 0x00000000 }, 171 {0x00008004, 0x00000000},
88 { 0x0000800c, 0x00000000 }, 172 {0x00008008, 0x00000000},
89 { 0x00008018, 0x00000700 }, 173 {0x0000800c, 0x00000000},
90 { 0x00008020, 0x00000000 }, 174 {0x00008018, 0x00000700},
91 { 0x00008038, 0x00000000 }, 175 {0x00008020, 0x00000000},
92 { 0x0000803c, 0x00000000 }, 176 {0x00008038, 0x00000000},
93 { 0x00008048, 0x40000000 }, 177 {0x0000803c, 0x00000000},
94 { 0x00008054, 0x00004000 }, 178 {0x00008048, 0x40000000},
95 { 0x00008058, 0x00000000 }, 179 {0x00008054, 0x00004000},
96 { 0x0000805c, 0x000fc78f }, 180 {0x00008058, 0x00000000},
97 { 0x00008060, 0x0000000f }, 181 {0x0000805c, 0x000fc78f},
98 { 0x00008064, 0x00000000 }, 182 {0x00008060, 0x0000000f},
99 { 0x000080c0, 0x2a82301a }, 183 {0x00008064, 0x00000000},
100 { 0x000080c4, 0x05dc01e0 }, 184 {0x000080c0, 0x2a82301a},
101 { 0x000080c8, 0x1f402710 }, 185 {0x000080c4, 0x05dc01e0},
102 { 0x000080cc, 0x01f40000 }, 186 {0x000080c8, 0x1f402710},
103 { 0x000080d0, 0x00001e00 }, 187 {0x000080cc, 0x01f40000},
104 { 0x000080d4, 0x00000000 }, 188 {0x000080d0, 0x00001e00},
105 { 0x000080d8, 0x00400000 }, 189 {0x000080d4, 0x00000000},
106 { 0x000080e0, 0xffffffff }, 190 {0x000080d8, 0x00400000},
107 { 0x000080e4, 0x0000ffff }, 191 {0x000080e0, 0xffffffff},
108 { 0x000080e8, 0x003f3f3f }, 192 {0x000080e4, 0x0000ffff},
109 { 0x000080ec, 0x00000000 }, 193 {0x000080e8, 0x003f3f3f},
110 { 0x000080f0, 0x00000000 }, 194 {0x000080ec, 0x00000000},
111 { 0x000080f4, 0x00000000 }, 195 {0x000080f0, 0x00000000},
112 { 0x000080f8, 0x00000000 }, 196 {0x000080f4, 0x00000000},
113 { 0x000080fc, 0x00020000 }, 197 {0x000080f8, 0x00000000},
114 { 0x00008100, 0x00020000 }, 198 {0x000080fc, 0x00020000},
115 { 0x00008104, 0x00000001 }, 199 {0x00008100, 0x00020000},
116 { 0x00008108, 0x00000052 }, 200 {0x00008104, 0x00000001},
117 { 0x0000810c, 0x00000000 }, 201 {0x00008108, 0x00000052},
118 { 0x00008110, 0x00000168 }, 202 {0x0000810c, 0x00000000},
119 { 0x00008118, 0x000100aa }, 203 {0x00008110, 0x00000168},
120 { 0x0000811c, 0x00003210 }, 204 {0x00008118, 0x000100aa},
121 { 0x00008120, 0x08f04800 }, 205 {0x0000811c, 0x00003210},
122 { 0x00008124, 0x00000000 }, 206 {0x00008120, 0x08f04800},
123 { 0x00008128, 0x00000000 }, 207 {0x00008124, 0x00000000},
124 { 0x0000812c, 0x00000000 }, 208 {0x00008128, 0x00000000},
125 { 0x00008130, 0x00000000 }, 209 {0x0000812c, 0x00000000},
126 { 0x00008134, 0x00000000 }, 210 {0x00008130, 0x00000000},
127 { 0x00008138, 0x00000000 }, 211 {0x00008134, 0x00000000},
128 { 0x0000813c, 0x00000000 }, 212 {0x00008138, 0x00000000},
129 { 0x00008144, 0x00000000 }, 213 {0x0000813c, 0x00000000},
130 { 0x00008168, 0x00000000 }, 214 {0x00008144, 0x00000000},
131 { 0x0000816c, 0x00000000 }, 215 {0x00008168, 0x00000000},
132 { 0x00008170, 0x32143320 }, 216 {0x0000816c, 0x00000000},
133 { 0x00008174, 0xfaa4fa50 }, 217 {0x00008170, 0x32143320},
134 { 0x00008178, 0x00000100 }, 218 {0x00008174, 0xfaa4fa50},
135 { 0x0000817c, 0x00000000 }, 219 {0x00008178, 0x00000100},
136 { 0x000081c4, 0x00000000 }, 220 {0x0000817c, 0x00000000},
137 { 0x000081d0, 0x00003210 }, 221 {0x000081c4, 0x00000000},
138 { 0x000081ec, 0x00000000 }, 222 {0x000081d0, 0x00003210},
139 { 0x000081f0, 0x00000000 }, 223 {0x000081ec, 0x00000000},
140 { 0x000081f4, 0x00000000 }, 224 {0x000081f0, 0x00000000},
141 { 0x000081f8, 0x00000000 }, 225 {0x000081f4, 0x00000000},
142 { 0x000081fc, 0x00000000 }, 226 {0x000081f8, 0x00000000},
143 { 0x00008200, 0x00000000 }, 227 {0x000081fc, 0x00000000},
144 { 0x00008204, 0x00000000 }, 228 {0x00008200, 0x00000000},
145 { 0x00008208, 0x00000000 }, 229 {0x00008204, 0x00000000},
146 { 0x0000820c, 0x00000000 }, 230 {0x00008208, 0x00000000},
147 { 0x00008210, 0x00000000 }, 231 {0x0000820c, 0x00000000},
148 { 0x00008214, 0x00000000 }, 232 {0x00008210, 0x00000000},
149 { 0x00008218, 0x00000000 }, 233 {0x00008214, 0x00000000},
150 { 0x0000821c, 0x00000000 }, 234 {0x00008218, 0x00000000},
151 { 0x00008220, 0x00000000 }, 235 {0x0000821c, 0x00000000},
152 { 0x00008224, 0x00000000 }, 236 {0x00008220, 0x00000000},
153 { 0x00008228, 0x00000000 }, 237 {0x00008224, 0x00000000},
154 { 0x0000822c, 0x00000000 }, 238 {0x00008228, 0x00000000},
155 { 0x00008230, 0x00000000 }, 239 {0x0000822c, 0x00000000},
156 { 0x00008234, 0x00000000 }, 240 {0x00008230, 0x00000000},
157 { 0x00008238, 0x00000000 }, 241 {0x00008234, 0x00000000},
158 { 0x0000823c, 0x00000000 }, 242 {0x00008238, 0x00000000},
159 { 0x00008240, 0x00100000 }, 243 {0x0000823c, 0x00000000},
160 { 0x00008244, 0x0010f400 }, 244 {0x00008240, 0x00100000},
161 { 0x00008248, 0x00000100 }, 245 {0x00008244, 0x0010f400},
162 { 0x0000824c, 0x0001e800 }, 246 {0x00008248, 0x00000100},
163 { 0x00008250, 0x00000000 }, 247 {0x0000824c, 0x0001e800},
164 { 0x00008254, 0x00000000 }, 248 {0x00008250, 0x00000000},
165 { 0x00008258, 0x00000000 }, 249 {0x00008254, 0x00000000},
166 { 0x0000825c, 0x400000ff }, 250 {0x00008258, 0x00000000},
167 { 0x00008260, 0x00080922 }, 251 {0x0000825c, 0x400000ff},
168 { 0x00008270, 0x00000000 }, 252 {0x00008260, 0x00080922},
169 { 0x00008274, 0x40000000 }, 253 {0x00008270, 0x00000000},
170 { 0x00008278, 0x003e4180 }, 254 {0x00008274, 0x40000000},
171 { 0x0000827c, 0x00000000 }, 255 {0x00008278, 0x003e4180},
172 { 0x00008284, 0x0000002c }, 256 {0x0000827c, 0x00000000},
173 { 0x00008288, 0x0000002c }, 257 {0x00008284, 0x0000002c},
174 { 0x0000828c, 0x00000000 }, 258 {0x00008288, 0x0000002c},
175 { 0x00008294, 0x00000000 }, 259 {0x0000828c, 0x00000000},
176 { 0x00008298, 0x00000000 }, 260 {0x00008294, 0x00000000},
177 { 0x00008300, 0x00000000 }, 261 {0x00008298, 0x00000000},
178 { 0x00008304, 0x00000000 }, 262 {0x00008300, 0x00000000},
179 { 0x00008308, 0x00000000 }, 263 {0x00008304, 0x00000000},
180 { 0x0000830c, 0x00000000 }, 264 {0x00008308, 0x00000000},
181 { 0x00008310, 0x00000000 }, 265 {0x0000830c, 0x00000000},
182 { 0x00008314, 0x00000000 }, 266 {0x00008310, 0x00000000},
183 { 0x00008318, 0x00000000 }, 267 {0x00008314, 0x00000000},
184 { 0x00008328, 0x00000000 }, 268 {0x00008318, 0x00000000},
185 { 0x0000832c, 0x00000007 }, 269 {0x00008328, 0x00000000},
186 { 0x00008330, 0x00000302 }, 270 {0x0000832c, 0x00000007},
187 { 0x00008334, 0x00000e00 }, 271 {0x00008330, 0x00000302},
188 { 0x00008338, 0x00000000 }, 272 {0x00008334, 0x00000e00},
189 { 0x0000833c, 0x00000000 }, 273 {0x00008338, 0x00000000},
190 { 0x00008340, 0x000107ff }, 274 {0x0000833c, 0x00000000},
191 { 0x00009808, 0x00000000 }, 275 {0x00008340, 0x000107ff},
192 { 0x0000980c, 0xad848e19 }, 276 {0x00009808, 0x00000000},
193 { 0x00009810, 0x7d14e000 }, 277 {0x0000980c, 0xad848e19},
194 { 0x00009814, 0x9c0a9f6b }, 278 {0x00009810, 0x7d14e000},
195 { 0x0000981c, 0x00000000 }, 279 {0x00009814, 0x9c0a9f6b},
196 { 0x0000982c, 0x0000a000 }, 280 {0x0000981c, 0x00000000},
197 { 0x00009830, 0x00000000 }, 281 {0x0000982c, 0x0000a000},
198 { 0x0000983c, 0x00200400 }, 282 {0x00009830, 0x00000000},
199 { 0x00009840, 0x206a01ae }, 283 {0x0000983c, 0x00200400},
200 { 0x0000984c, 0x1284233c }, 284 {0x00009840, 0x206a01ae},
201 { 0x00009854, 0x00000859 }, 285 {0x0000984c, 0x1284233c},
202 { 0x00009900, 0x00000000 }, 286 {0x00009854, 0x00000859},
203 { 0x00009904, 0x00000000 }, 287 {0x00009900, 0x00000000},
204 { 0x00009908, 0x00000000 }, 288 {0x00009904, 0x00000000},
205 { 0x0000990c, 0x00000000 }, 289 {0x00009908, 0x00000000},
206 { 0x0000991c, 0x10000fff }, 290 {0x0000990c, 0x00000000},
207 { 0x00009920, 0x05100000 }, 291 {0x0000991c, 0x10000fff},
208 { 0x0000a920, 0x05100000 }, 292 {0x00009920, 0x05100000},
209 { 0x0000b920, 0x05100000 }, 293 {0x0000a920, 0x05100000},
210 { 0x00009928, 0x00000001 }, 294 {0x0000b920, 0x05100000},
211 { 0x0000992c, 0x00000004 }, 295 {0x00009928, 0x00000001},
212 { 0x00009934, 0x1e1f2022 }, 296 {0x0000992c, 0x00000004},
213 { 0x00009938, 0x0a0b0c0d }, 297 {0x00009934, 0x1e1f2022},
214 { 0x0000993c, 0x00000000 }, 298 {0x00009938, 0x0a0b0c0d},
215 { 0x00009948, 0x9280b212 }, 299 {0x0000993c, 0x00000000},
216 { 0x0000994c, 0x00020028 }, 300 {0x00009948, 0x9280b212},
217 { 0x0000c95c, 0x004b6a8e }, 301 {0x0000994c, 0x00020028},
218 { 0x0000c968, 0x000003ce }, 302 {0x0000c95c, 0x004b6a8e},
219 { 0x00009970, 0x190fb515 }, 303 {0x0000c968, 0x000003ce},
220 { 0x00009974, 0x00000000 }, 304 {0x00009970, 0x190fb515},
221 { 0x00009978, 0x00000001 }, 305 {0x00009974, 0x00000000},
222 { 0x0000997c, 0x00000000 }, 306 {0x00009978, 0x00000001},
223 { 0x00009980, 0x00000000 }, 307 {0x0000997c, 0x00000000},
224 { 0x00009984, 0x00000000 }, 308 {0x00009980, 0x00000000},
225 { 0x00009988, 0x00000000 }, 309 {0x00009984, 0x00000000},
226 { 0x0000998c, 0x00000000 }, 310 {0x00009988, 0x00000000},
227 { 0x00009990, 0x00000000 }, 311 {0x0000998c, 0x00000000},
228 { 0x00009994, 0x00000000 }, 312 {0x00009990, 0x00000000},
229 { 0x00009998, 0x00000000 }, 313 {0x00009994, 0x00000000},
230 { 0x0000999c, 0x00000000 }, 314 {0x00009998, 0x00000000},
231 { 0x000099a0, 0x00000000 }, 315 {0x0000999c, 0x00000000},
232 { 0x000099a4, 0x00000001 }, 316 {0x000099a0, 0x00000000},
233 { 0x000099a8, 0x201fff00 }, 317 {0x000099a4, 0x00000001},
234 { 0x000099ac, 0x006f0000 }, 318 {0x000099a8, 0x201fff00},
235 { 0x000099b0, 0x03051000 }, 319 {0x000099ac, 0x006f0000},
236 { 0x000099dc, 0x00000000 }, 320 {0x000099b0, 0x03051000},
237 { 0x000099e0, 0x00000200 }, 321 {0x000099dc, 0x00000000},
238 { 0x000099e4, 0xaaaaaaaa }, 322 {0x000099e0, 0x00000200},
239 { 0x000099e8, 0x3c466478 }, 323 {0x000099e4, 0xaaaaaaaa},
240 { 0x000099ec, 0x0cc80caa }, 324 {0x000099e8, 0x3c466478},
241 { 0x000099fc, 0x00001042 }, 325 {0x000099ec, 0x0cc80caa},
242 { 0x00009b00, 0x00000000 }, 326 {0x000099fc, 0x00001042},
243 { 0x00009b04, 0x00000001 }, 327 {0x00009b00, 0x00000000},
244 { 0x00009b08, 0x00000002 }, 328 {0x00009b04, 0x00000001},
245 { 0x00009b0c, 0x00000003 }, 329 {0x00009b08, 0x00000002},
246 { 0x00009b10, 0x00000004 }, 330 {0x00009b0c, 0x00000003},
247 { 0x00009b14, 0x00000005 }, 331 {0x00009b10, 0x00000004},
248 { 0x00009b18, 0x00000008 }, 332 {0x00009b14, 0x00000005},
249 { 0x00009b1c, 0x00000009 }, 333 {0x00009b18, 0x00000008},
250 { 0x00009b20, 0x0000000a }, 334 {0x00009b1c, 0x00000009},
251 { 0x00009b24, 0x0000000b }, 335 {0x00009b20, 0x0000000a},
252 { 0x00009b28, 0x0000000c }, 336 {0x00009b24, 0x0000000b},
253 { 0x00009b2c, 0x0000000d }, 337 {0x00009b28, 0x0000000c},
254 { 0x00009b30, 0x00000010 }, 338 {0x00009b2c, 0x0000000d},
255 { 0x00009b34, 0x00000011 }, 339 {0x00009b30, 0x00000010},
256 { 0x00009b38, 0x00000012 }, 340 {0x00009b34, 0x00000011},
257 { 0x00009b3c, 0x00000013 }, 341 {0x00009b38, 0x00000012},
258 { 0x00009b40, 0x00000014 }, 342 {0x00009b3c, 0x00000013},
259 { 0x00009b44, 0x00000015 }, 343 {0x00009b40, 0x00000014},
260 { 0x00009b48, 0x00000018 }, 344 {0x00009b44, 0x00000015},
261 { 0x00009b4c, 0x00000019 }, 345 {0x00009b48, 0x00000018},
262 { 0x00009b50, 0x0000001a }, 346 {0x00009b4c, 0x00000019},
263 { 0x00009b54, 0x0000001b }, 347 {0x00009b50, 0x0000001a},
264 { 0x00009b58, 0x0000001c }, 348 {0x00009b54, 0x0000001b},
265 { 0x00009b5c, 0x0000001d }, 349 {0x00009b58, 0x0000001c},
266 { 0x00009b60, 0x00000020 }, 350 {0x00009b5c, 0x0000001d},
267 { 0x00009b64, 0x00000021 }, 351 {0x00009b60, 0x00000020},
268 { 0x00009b68, 0x00000022 }, 352 {0x00009b64, 0x00000021},
269 { 0x00009b6c, 0x00000023 }, 353 {0x00009b68, 0x00000022},
270 { 0x00009b70, 0x00000024 }, 354 {0x00009b6c, 0x00000023},
271 { 0x00009b74, 0x00000025 }, 355 {0x00009b70, 0x00000024},
272 { 0x00009b78, 0x00000028 }, 356 {0x00009b74, 0x00000025},
273 { 0x00009b7c, 0x00000029 }, 357 {0x00009b78, 0x00000028},
274 { 0x00009b80, 0x0000002a }, 358 {0x00009b7c, 0x00000029},
275 { 0x00009b84, 0x0000002b }, 359 {0x00009b80, 0x0000002a},
276 { 0x00009b88, 0x0000002c }, 360 {0x00009b84, 0x0000002b},
277 { 0x00009b8c, 0x0000002d }, 361 {0x00009b88, 0x0000002c},
278 { 0x00009b90, 0x00000030 }, 362 {0x00009b8c, 0x0000002d},
279 { 0x00009b94, 0x00000031 }, 363 {0x00009b90, 0x00000030},
280 { 0x00009b98, 0x00000032 }, 364 {0x00009b94, 0x00000031},
281 { 0x00009b9c, 0x00000033 }, 365 {0x00009b98, 0x00000032},
282 { 0x00009ba0, 0x00000034 }, 366 {0x00009b9c, 0x00000033},
283 { 0x00009ba4, 0x00000035 }, 367 {0x00009ba0, 0x00000034},
284 { 0x00009ba8, 0x00000035 }, 368 {0x00009ba4, 0x00000035},
285 { 0x00009bac, 0x00000035 }, 369 {0x00009ba8, 0x00000035},
286 { 0x00009bb0, 0x00000035 }, 370 {0x00009bac, 0x00000035},
287 { 0x00009bb4, 0x00000035 }, 371 {0x00009bb0, 0x00000035},
288 { 0x00009bb8, 0x00000035 }, 372 {0x00009bb4, 0x00000035},
289 { 0x00009bbc, 0x00000035 }, 373 {0x00009bb8, 0x00000035},
290 { 0x00009bc0, 0x00000035 }, 374 {0x00009bbc, 0x00000035},
291 { 0x00009bc4, 0x00000035 }, 375 {0x00009bc0, 0x00000035},
292 { 0x00009bc8, 0x00000035 }, 376 {0x00009bc4, 0x00000035},
293 { 0x00009bcc, 0x00000035 }, 377 {0x00009bc8, 0x00000035},
294 { 0x00009bd0, 0x00000035 }, 378 {0x00009bcc, 0x00000035},
295 { 0x00009bd4, 0x00000035 }, 379 {0x00009bd0, 0x00000035},
296 { 0x00009bd8, 0x00000035 }, 380 {0x00009bd4, 0x00000035},
297 { 0x00009bdc, 0x00000035 }, 381 {0x00009bd8, 0x00000035},
298 { 0x00009be0, 0x00000035 }, 382 {0x00009bdc, 0x00000035},
299 { 0x00009be4, 0x00000035 }, 383 {0x00009be0, 0x00000035},
300 { 0x00009be8, 0x00000035 }, 384 {0x00009be4, 0x00000035},
301 { 0x00009bec, 0x00000035 }, 385 {0x00009be8, 0x00000035},
302 { 0x00009bf0, 0x00000035 }, 386 {0x00009bec, 0x00000035},
303 { 0x00009bf4, 0x00000035 }, 387 {0x00009bf0, 0x00000035},
304 { 0x00009bf8, 0x00000010 }, 388 {0x00009bf4, 0x00000035},
305 { 0x00009bfc, 0x0000001a }, 389 {0x00009bf8, 0x00000010},
306 { 0x0000a210, 0x40806333 }, 390 {0x00009bfc, 0x0000001a},
307 { 0x0000a214, 0x00106c10 }, 391 {0x0000a210, 0x40806333},
308 { 0x0000a218, 0x009c4060 }, 392 {0x0000a214, 0x00106c10},
309 { 0x0000a220, 0x018830c6 }, 393 {0x0000a218, 0x009c4060},
310 { 0x0000a224, 0x00000400 }, 394 {0x0000a220, 0x018830c6},
311 { 0x0000a228, 0x001a0bb5 }, 395 {0x0000a224, 0x00000400},
312 { 0x0000a22c, 0x00000000 }, 396 {0x0000a228, 0x001a0bb5},
313 { 0x0000a234, 0x20202020 }, 397 {0x0000a22c, 0x00000000},
314 { 0x0000a238, 0x20202020 }, 398 {0x0000a234, 0x20202020},
315 { 0x0000a23c, 0x13c889ae }, 399 {0x0000a238, 0x20202020},
316 { 0x0000a240, 0x38490a20 }, 400 {0x0000a23c, 0x13c889af},
317 { 0x0000a244, 0x00007bb6 }, 401 {0x0000a240, 0x38490a20},
318 { 0x0000a248, 0x0fff3ffc }, 402 {0x0000a244, 0x00007bb6},
319 { 0x0000a24c, 0x00000001 }, 403 {0x0000a248, 0x0fff3ffc},
320 { 0x0000a250, 0x0000a000 }, 404 {0x0000a24c, 0x00000001},
321 { 0x0000a254, 0x00000000 }, 405 {0x0000a250, 0x0000e000},
322 { 0x0000a258, 0x0cc75380 }, 406 {0x0000a254, 0x00000000},
323 { 0x0000a25c, 0x0f0f0f01 }, 407 {0x0000a258, 0x0cc75380},
324 { 0x0000a260, 0xdfa91f01 }, 408 {0x0000a25c, 0x0f0f0f01},
325 { 0x0000a268, 0x00000001 }, 409 {0x0000a260, 0xdfa91f01},
326 { 0x0000a26c, 0x0ebae9c6 }, 410 {0x0000a268, 0x00000001},
327 { 0x0000b26c, 0x0ebae9c6 }, 411 {0x0000a26c, 0x0ebae9c6},
328 { 0x0000c26c, 0x0ebae9c6 }, 412 {0x0000b26c, 0x0ebae9c6},
329 { 0x0000d270, 0x00820820 }, 413 {0x0000c26c, 0x0ebae9c6},
330 { 0x0000a278, 0x1ce739ce }, 414 {0x0000d270, 0x00820820},
331 { 0x0000a27c, 0x050701ce }, 415 {0x0000a278, 0x1ce739ce},
332 { 0x0000a338, 0x00000000 }, 416 {0x0000a27c, 0x050701ce},
333 { 0x0000a33c, 0x00000000 }, 417 {0x0000a338, 0x00000000},
334 { 0x0000a340, 0x00000000 }, 418 {0x0000a33c, 0x00000000},
335 { 0x0000a344, 0x00000000 }, 419 {0x0000a340, 0x00000000},
336 { 0x0000a348, 0x3fffffff }, 420 {0x0000a344, 0x00000000},
337 { 0x0000a34c, 0x3fffffff }, 421 {0x0000a348, 0x3fffffff},
338 { 0x0000a350, 0x3fffffff }, 422 {0x0000a34c, 0x3fffffff},
339 { 0x0000a354, 0x0003ffff }, 423 {0x0000a350, 0x3fffffff},
340 { 0x0000a358, 0x79a8aa33 }, 424 {0x0000a354, 0x0003ffff},
341 { 0x0000d35c, 0x07ffffef }, 425 {0x0000a358, 0x79a8aa33},
342 { 0x0000d360, 0x0fffffe7 }, 426 {0x0000d35c, 0x07ffffef},
343 { 0x0000d364, 0x17ffffe5 }, 427 {0x0000d360, 0x0fffffe7},
344 { 0x0000d368, 0x1fffffe4 }, 428 {0x0000d364, 0x17ffffe5},
345 { 0x0000d36c, 0x37ffffe3 }, 429 {0x0000d368, 0x1fffffe4},
346 { 0x0000d370, 0x3fffffe3 }, 430 {0x0000d36c, 0x37ffffe3},
347 { 0x0000d374, 0x57ffffe3 }, 431 {0x0000d370, 0x3fffffe3},
348 { 0x0000d378, 0x5fffffe2 }, 432 {0x0000d374, 0x57ffffe3},
349 { 0x0000d37c, 0x7fffffe2 }, 433 {0x0000d378, 0x5fffffe2},
350 { 0x0000d380, 0x7f3c7bba }, 434 {0x0000d37c, 0x7fffffe2},
351 { 0x0000d384, 0xf3307ff0 }, 435 {0x0000d380, 0x7f3c7bba},
352 { 0x0000a388, 0x0c000000 }, 436 {0x0000d384, 0xf3307ff0},
353 { 0x0000a38c, 0x20202020 }, 437 {0x0000a388, 0x0c000000},
354 { 0x0000a390, 0x20202020 }, 438 {0x0000a38c, 0x20202020},
355 { 0x0000a394, 0x1ce739ce }, 439 {0x0000a390, 0x20202020},
356 { 0x0000a398, 0x000001ce }, 440 {0x0000a394, 0x1ce739ce},
357 { 0x0000a39c, 0x00000001 }, 441 {0x0000a398, 0x000001ce},
358 { 0x0000a3a0, 0x00000000 }, 442 {0x0000a39c, 0x00000001},
359 { 0x0000a3a4, 0x00000000 }, 443 {0x0000a3a0, 0x00000000},
360 { 0x0000a3a8, 0x00000000 }, 444 {0x0000a3a4, 0x00000000},
361 { 0x0000a3ac, 0x00000000 }, 445 {0x0000a3a8, 0x00000000},
362 { 0x0000a3b0, 0x00000000 }, 446 {0x0000a3ac, 0x00000000},
363 { 0x0000a3b4, 0x00000000 }, 447 {0x0000a3b0, 0x00000000},
364 { 0x0000a3b8, 0x00000000 }, 448 {0x0000a3b4, 0x00000000},
365 { 0x0000a3bc, 0x00000000 }, 449 {0x0000a3b8, 0x00000000},
366 { 0x0000a3c0, 0x00000000 }, 450 {0x0000a3bc, 0x00000000},
367 { 0x0000a3c4, 0x00000000 }, 451 {0x0000a3c0, 0x00000000},
368 { 0x0000a3c8, 0x00000246 }, 452 {0x0000a3c4, 0x00000000},
369 { 0x0000a3cc, 0x20202020 }, 453 {0x0000a3c8, 0x00000246},
370 { 0x0000a3d0, 0x20202020 }, 454 {0x0000a3cc, 0x20202020},
371 { 0x0000a3d4, 0x20202020 }, 455 {0x0000a3d0, 0x20202020},
372 { 0x0000a3dc, 0x1ce739ce }, 456 {0x0000a3d4, 0x20202020},
373 { 0x0000a3e0, 0x000001ce }, 457 {0x0000a3dc, 0x1ce739ce},
458 {0x0000a3e0, 0x000001ce},
374}; 459};
375 460
376static const u32 ar5416Bank0_9100[][2] = { 461static const u32 ar5416Bank0_9100[][2] = {
377 { 0x000098b0, 0x1e5795e5 }, 462 /* Addr allmodes */
378 { 0x000098e0, 0x02008020 }, 463 {0x000098b0, 0x1e5795e5},
464 {0x000098e0, 0x02008020},
379}; 465};
380 466
381static const u32 ar5416BB_RfGain_9100[][3] = { 467static const u32 ar5416BB_RfGain_9100[][3] = {
382 { 0x00009a00, 0x00000000, 0x00000000 }, 468 /* Addr 5G_HT20 5G_HT40 */
383 { 0x00009a04, 0x00000040, 0x00000040 }, 469 {0x00009a00, 0x00000000, 0x00000000},
384 { 0x00009a08, 0x00000080, 0x00000080 }, 470 {0x00009a04, 0x00000040, 0x00000040},
385 { 0x00009a0c, 0x000001a1, 0x00000141 }, 471 {0x00009a08, 0x00000080, 0x00000080},
386 { 0x00009a10, 0x000001e1, 0x00000181 }, 472 {0x00009a0c, 0x000001a1, 0x00000141},
387 { 0x00009a14, 0x00000021, 0x000001c1 }, 473 {0x00009a10, 0x000001e1, 0x00000181},
388 { 0x00009a18, 0x00000061, 0x00000001 }, 474 {0x00009a14, 0x00000021, 0x000001c1},
389 { 0x00009a1c, 0x00000168, 0x00000041 }, 475 {0x00009a18, 0x00000061, 0x00000001},
390 { 0x00009a20, 0x000001a8, 0x000001a8 }, 476 {0x00009a1c, 0x00000168, 0x00000041},
391 { 0x00009a24, 0x000001e8, 0x000001e8 }, 477 {0x00009a20, 0x000001a8, 0x000001a8},
392 { 0x00009a28, 0x00000028, 0x00000028 }, 478 {0x00009a24, 0x000001e8, 0x000001e8},
393 { 0x00009a2c, 0x00000068, 0x00000068 }, 479 {0x00009a28, 0x00000028, 0x00000028},
394 { 0x00009a30, 0x00000189, 0x000000a8 }, 480 {0x00009a2c, 0x00000068, 0x00000068},
395 { 0x00009a34, 0x000001c9, 0x00000169 }, 481 {0x00009a30, 0x00000189, 0x000000a8},
396 { 0x00009a38, 0x00000009, 0x000001a9 }, 482 {0x00009a34, 0x000001c9, 0x00000169},
397 { 0x00009a3c, 0x00000049, 0x000001e9 }, 483 {0x00009a38, 0x00000009, 0x000001a9},
398 { 0x00009a40, 0x00000089, 0x00000029 }, 484 {0x00009a3c, 0x00000049, 0x000001e9},
399 { 0x00009a44, 0x00000170, 0x00000069 }, 485 {0x00009a40, 0x00000089, 0x00000029},
400 { 0x00009a48, 0x000001b0, 0x00000190 }, 486 {0x00009a44, 0x00000170, 0x00000069},
401 { 0x00009a4c, 0x000001f0, 0x000001d0 }, 487 {0x00009a48, 0x000001b0, 0x00000190},
402 { 0x00009a50, 0x00000030, 0x00000010 }, 488 {0x00009a4c, 0x000001f0, 0x000001d0},
403 { 0x00009a54, 0x00000070, 0x00000050 }, 489 {0x00009a50, 0x00000030, 0x00000010},
404 { 0x00009a58, 0x00000191, 0x00000090 }, 490 {0x00009a54, 0x00000070, 0x00000050},
405 { 0x00009a5c, 0x000001d1, 0x00000151 }, 491 {0x00009a58, 0x00000191, 0x00000090},
406 { 0x00009a60, 0x00000011, 0x00000191 }, 492 {0x00009a5c, 0x000001d1, 0x00000151},
407 { 0x00009a64, 0x00000051, 0x000001d1 }, 493 {0x00009a60, 0x00000011, 0x00000191},
408 { 0x00009a68, 0x00000091, 0x00000011 }, 494 {0x00009a64, 0x00000051, 0x000001d1},
409 { 0x00009a6c, 0x000001b8, 0x00000051 }, 495 {0x00009a68, 0x00000091, 0x00000011},
410 { 0x00009a70, 0x000001f8, 0x00000198 }, 496 {0x00009a6c, 0x000001b8, 0x00000051},
411 { 0x00009a74, 0x00000038, 0x000001d8 }, 497 {0x00009a70, 0x000001f8, 0x00000198},
412 { 0x00009a78, 0x00000078, 0x00000018 }, 498 {0x00009a74, 0x00000038, 0x000001d8},
413 { 0x00009a7c, 0x00000199, 0x00000058 }, 499 {0x00009a78, 0x00000078, 0x00000018},
414 { 0x00009a80, 0x000001d9, 0x00000098 }, 500 {0x00009a7c, 0x00000199, 0x00000058},
415 { 0x00009a84, 0x00000019, 0x00000159 }, 501 {0x00009a80, 0x000001d9, 0x00000098},
416 { 0x00009a88, 0x00000059, 0x00000199 }, 502 {0x00009a84, 0x00000019, 0x00000159},
417 { 0x00009a8c, 0x00000099, 0x000001d9 }, 503 {0x00009a88, 0x00000059, 0x00000199},
418 { 0x00009a90, 0x000000d9, 0x00000019 }, 504 {0x00009a8c, 0x00000099, 0x000001d9},
419 { 0x00009a94, 0x000000f9, 0x00000059 }, 505 {0x00009a90, 0x000000d9, 0x00000019},
420 { 0x00009a98, 0x000000f9, 0x00000099 }, 506 {0x00009a94, 0x000000f9, 0x00000059},
421 { 0x00009a9c, 0x000000f9, 0x000000d9 }, 507 {0x00009a98, 0x000000f9, 0x00000099},
422 { 0x00009aa0, 0x000000f9, 0x000000f9 }, 508 {0x00009a9c, 0x000000f9, 0x000000d9},
423 { 0x00009aa4, 0x000000f9, 0x000000f9 }, 509 {0x00009aa0, 0x000000f9, 0x000000f9},
424 { 0x00009aa8, 0x000000f9, 0x000000f9 }, 510 {0x00009aa4, 0x000000f9, 0x000000f9},
425 { 0x00009aac, 0x000000f9, 0x000000f9 }, 511 {0x00009aa8, 0x000000f9, 0x000000f9},
426 { 0x00009ab0, 0x000000f9, 0x000000f9 }, 512 {0x00009aac, 0x000000f9, 0x000000f9},
427 { 0x00009ab4, 0x000000f9, 0x000000f9 }, 513 {0x00009ab0, 0x000000f9, 0x000000f9},
428 { 0x00009ab8, 0x000000f9, 0x000000f9 }, 514 {0x00009ab4, 0x000000f9, 0x000000f9},
429 { 0x00009abc, 0x000000f9, 0x000000f9 }, 515 {0x00009ab8, 0x000000f9, 0x000000f9},
430 { 0x00009ac0, 0x000000f9, 0x000000f9 }, 516 {0x00009abc, 0x000000f9, 0x000000f9},
431 { 0x00009ac4, 0x000000f9, 0x000000f9 }, 517 {0x00009ac0, 0x000000f9, 0x000000f9},
432 { 0x00009ac8, 0x000000f9, 0x000000f9 }, 518 {0x00009ac4, 0x000000f9, 0x000000f9},
433 { 0x00009acc, 0x000000f9, 0x000000f9 }, 519 {0x00009ac8, 0x000000f9, 0x000000f9},
434 { 0x00009ad0, 0x000000f9, 0x000000f9 }, 520 {0x00009acc, 0x000000f9, 0x000000f9},
435 { 0x00009ad4, 0x000000f9, 0x000000f9 }, 521 {0x00009ad0, 0x000000f9, 0x000000f9},
436 { 0x00009ad8, 0x000000f9, 0x000000f9 }, 522 {0x00009ad4, 0x000000f9, 0x000000f9},
437 { 0x00009adc, 0x000000f9, 0x000000f9 }, 523 {0x00009ad8, 0x000000f9, 0x000000f9},
438 { 0x00009ae0, 0x000000f9, 0x000000f9 }, 524 {0x00009adc, 0x000000f9, 0x000000f9},
439 { 0x00009ae4, 0x000000f9, 0x000000f9 }, 525 {0x00009ae0, 0x000000f9, 0x000000f9},
440 { 0x00009ae8, 0x000000f9, 0x000000f9 }, 526 {0x00009ae4, 0x000000f9, 0x000000f9},
441 { 0x00009aec, 0x000000f9, 0x000000f9 }, 527 {0x00009ae8, 0x000000f9, 0x000000f9},
442 { 0x00009af0, 0x000000f9, 0x000000f9 }, 528 {0x00009aec, 0x000000f9, 0x000000f9},
443 { 0x00009af4, 0x000000f9, 0x000000f9 }, 529 {0x00009af0, 0x000000f9, 0x000000f9},
444 { 0x00009af8, 0x000000f9, 0x000000f9 }, 530 {0x00009af4, 0x000000f9, 0x000000f9},
445 { 0x00009afc, 0x000000f9, 0x000000f9 }, 531 {0x00009af8, 0x000000f9, 0x000000f9},
532 {0x00009afc, 0x000000f9, 0x000000f9},
446}; 533};
447 534
448static const u32 ar5416Bank1_9100[][2] = { 535static const u32 ar5416Bank1_9100[][2] = {
449 { 0x000098b0, 0x02108421}, 536 /* Addr allmodes */
450 { 0x000098ec, 0x00000008}, 537 {0x000098b0, 0x02108421},
538 {0x000098ec, 0x00000008},
451}; 539};
452 540
453static const u32 ar5416Bank2_9100[][2] = { 541static const u32 ar5416Bank2_9100[][2] = {
454 { 0x000098b0, 0x0e73ff17}, 542 /* Addr allmodes */
455 { 0x000098e0, 0x00000420}, 543 {0x000098b0, 0x0e73ff17},
544 {0x000098e0, 0x00000420},
456}; 545};
457 546
458static const u32 ar5416Bank3_9100[][3] = { 547static const u32 ar5416Bank3_9100[][3] = {
459 { 0x000098f0, 0x01400018, 0x01c00018 }, 548 /* Addr 5G_HT20 5G_HT40 */
549 {0x000098f0, 0x01400018, 0x01c00018},
460}; 550};
461 551
462static const u32 ar5416Bank6_9100[][3] = { 552static const u32 ar5416Bank6_9100[][3] = {
463 553 /* Addr 5G_HT20 5G_HT40 */
464 { 0x0000989c, 0x00000000, 0x00000000 }, 554 {0x0000989c, 0x00000000, 0x00000000},
465 { 0x0000989c, 0x00000000, 0x00000000 }, 555 {0x0000989c, 0x00000000, 0x00000000},
466 { 0x0000989c, 0x00000000, 0x00000000 }, 556 {0x0000989c, 0x00000000, 0x00000000},
467 { 0x0000989c, 0x00e00000, 0x00e00000 }, 557 {0x0000989c, 0x00e00000, 0x00e00000},
468 { 0x0000989c, 0x005e0000, 0x005e0000 }, 558 {0x0000989c, 0x005e0000, 0x005e0000},
469 { 0x0000989c, 0x00120000, 0x00120000 }, 559 {0x0000989c, 0x00120000, 0x00120000},
470 { 0x0000989c, 0x00620000, 0x00620000 }, 560 {0x0000989c, 0x00620000, 0x00620000},
471 { 0x0000989c, 0x00020000, 0x00020000 }, 561 {0x0000989c, 0x00020000, 0x00020000},
472 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 562 {0x0000989c, 0x00ff0000, 0x00ff0000},
473 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 563 {0x0000989c, 0x00ff0000, 0x00ff0000},
474 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 564 {0x0000989c, 0x00ff0000, 0x00ff0000},
475 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 565 {0x0000989c, 0x00ff0000, 0x00ff0000},
476 { 0x0000989c, 0x005f0000, 0x005f0000 }, 566 {0x0000989c, 0x005f0000, 0x005f0000},
477 { 0x0000989c, 0x00870000, 0x00870000 }, 567 {0x0000989c, 0x00870000, 0x00870000},
478 { 0x0000989c, 0x00f90000, 0x00f90000 }, 568 {0x0000989c, 0x00f90000, 0x00f90000},
479 { 0x0000989c, 0x007b0000, 0x007b0000 }, 569 {0x0000989c, 0x007b0000, 0x007b0000},
480 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 570 {0x0000989c, 0x00ff0000, 0x00ff0000},
481 { 0x0000989c, 0x00f50000, 0x00f50000 }, 571 {0x0000989c, 0x00f50000, 0x00f50000},
482 { 0x0000989c, 0x00dc0000, 0x00dc0000 }, 572 {0x0000989c, 0x00dc0000, 0x00dc0000},
483 { 0x0000989c, 0x00110000, 0x00110000 }, 573 {0x0000989c, 0x00110000, 0x00110000},
484 { 0x0000989c, 0x006100a8, 0x006100a8 }, 574 {0x0000989c, 0x006100a8, 0x006100a8},
485 { 0x0000989c, 0x004210a2, 0x004210a2 }, 575 {0x0000989c, 0x004210a2, 0x004210a2},
486 { 0x0000989c, 0x0014000f, 0x0014000f }, 576 {0x0000989c, 0x0014000f, 0x0014000f},
487 { 0x0000989c, 0x00c40002, 0x00c40002 }, 577 {0x0000989c, 0x00c40002, 0x00c40002},
488 { 0x0000989c, 0x003000f2, 0x003000f2 }, 578 {0x0000989c, 0x003000f2, 0x003000f2},
489 { 0x0000989c, 0x00440016, 0x00440016 }, 579 {0x0000989c, 0x00440016, 0x00440016},
490 { 0x0000989c, 0x00410040, 0x00410040 }, 580 {0x0000989c, 0x00410040, 0x00410040},
491 { 0x0000989c, 0x000180d6, 0x000180d6 }, 581 {0x0000989c, 0x000180d6, 0x000180d6},
492 { 0x0000989c, 0x0000c0aa, 0x0000c0aa }, 582 {0x0000989c, 0x0000c0aa, 0x0000c0aa},
493 { 0x0000989c, 0x000000b1, 0x000000b1 }, 583 {0x0000989c, 0x000000b1, 0x000000b1},
494 { 0x0000989c, 0x00002000, 0x00002000 }, 584 {0x0000989c, 0x00002000, 0x00002000},
495 { 0x0000989c, 0x000000d4, 0x000000d4 }, 585 {0x0000989c, 0x000000d4, 0x000000d4},
496 { 0x000098d0, 0x0000000f, 0x0010000f }, 586 {0x000098d0, 0x0000000f, 0x0010000f},
497}; 587};
498 588
499
500static const u32 ar5416Bank6TPC_9100[][3] = { 589static const u32 ar5416Bank6TPC_9100[][3] = {
501 590 /* Addr 5G_HT20 5G_HT40 */
502 { 0x0000989c, 0x00000000, 0x00000000 }, 591 {0x0000989c, 0x00000000, 0x00000000},
503 { 0x0000989c, 0x00000000, 0x00000000 }, 592 {0x0000989c, 0x00000000, 0x00000000},
504 { 0x0000989c, 0x00000000, 0x00000000 }, 593 {0x0000989c, 0x00000000, 0x00000000},
505 { 0x0000989c, 0x00e00000, 0x00e00000 }, 594 {0x0000989c, 0x00e00000, 0x00e00000},
506 { 0x0000989c, 0x005e0000, 0x005e0000 }, 595 {0x0000989c, 0x005e0000, 0x005e0000},
507 { 0x0000989c, 0x00120000, 0x00120000 }, 596 {0x0000989c, 0x00120000, 0x00120000},
508 { 0x0000989c, 0x00620000, 0x00620000 }, 597 {0x0000989c, 0x00620000, 0x00620000},
509 { 0x0000989c, 0x00020000, 0x00020000 }, 598 {0x0000989c, 0x00020000, 0x00020000},
510 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 599 {0x0000989c, 0x00ff0000, 0x00ff0000},
511 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 600 {0x0000989c, 0x00ff0000, 0x00ff0000},
512 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 601 {0x0000989c, 0x00ff0000, 0x00ff0000},
513 { 0x0000989c, 0x40ff0000, 0x40ff0000 }, 602 {0x0000989c, 0x40ff0000, 0x40ff0000},
514 { 0x0000989c, 0x005f0000, 0x005f0000 }, 603 {0x0000989c, 0x005f0000, 0x005f0000},
515 { 0x0000989c, 0x00870000, 0x00870000 }, 604 {0x0000989c, 0x00870000, 0x00870000},
516 { 0x0000989c, 0x00f90000, 0x00f90000 }, 605 {0x0000989c, 0x00f90000, 0x00f90000},
517 { 0x0000989c, 0x007b0000, 0x007b0000 }, 606 {0x0000989c, 0x007b0000, 0x007b0000},
518 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 607 {0x0000989c, 0x00ff0000, 0x00ff0000},
519 { 0x0000989c, 0x00f50000, 0x00f50000 }, 608 {0x0000989c, 0x00f50000, 0x00f50000},
520 { 0x0000989c, 0x00dc0000, 0x00dc0000 }, 609 {0x0000989c, 0x00dc0000, 0x00dc0000},
521 { 0x0000989c, 0x00110000, 0x00110000 }, 610 {0x0000989c, 0x00110000, 0x00110000},
522 { 0x0000989c, 0x006100a8, 0x006100a8 }, 611 {0x0000989c, 0x006100a8, 0x006100a8},
523 { 0x0000989c, 0x00423022, 0x00423022 }, 612 {0x0000989c, 0x00423022, 0x00423022},
524 { 0x0000989c, 0x2014008f, 0x2014008f }, 613 {0x0000989c, 0x2014008f, 0x2014008f},
525 { 0x0000989c, 0x00c40002, 0x00c40002 }, 614 {0x0000989c, 0x00c40002, 0x00c40002},
526 { 0x0000989c, 0x003000f2, 0x003000f2 }, 615 {0x0000989c, 0x003000f2, 0x003000f2},
527 { 0x0000989c, 0x00440016, 0x00440016 }, 616 {0x0000989c, 0x00440016, 0x00440016},
528 { 0x0000989c, 0x00410040, 0x00410040 }, 617 {0x0000989c, 0x00410040, 0x00410040},
529 { 0x0000989c, 0x0001805e, 0x0001805e }, 618 {0x0000989c, 0x0001805e, 0x0001805e},
530 { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, 619 {0x0000989c, 0x0000c0ab, 0x0000c0ab},
531 { 0x0000989c, 0x000000e1, 0x000000e1 }, 620 {0x0000989c, 0x000000e1, 0x000000e1},
532 { 0x0000989c, 0x00007080, 0x00007080 }, 621 {0x0000989c, 0x00007080, 0x00007080},
533 { 0x0000989c, 0x000000d4, 0x000000d4 }, 622 {0x0000989c, 0x000000d4, 0x000000d4},
534 { 0x000098d0, 0x0000000f, 0x0010000f }, 623 {0x000098d0, 0x0000000f, 0x0010000f},
535}; 624};
536 625
537static const u32 ar5416Bank7_9100[][2] = { 626static const u32 ar5416Bank7_9100[][2] = {
538 { 0x0000989c, 0x00000500 }, 627 /* Addr allmodes */
539 { 0x0000989c, 0x00000800 }, 628 {0x0000989c, 0x00000500},
540 { 0x000098cc, 0x0000000e }, 629 {0x0000989c, 0x00000800},
630 {0x000098cc, 0x0000000e},
541}; 631};
542 632
543static const u32 ar5416Addac_9100[][2] = { 633static const u32 ar5416Addac_9100[][2] = {
544 {0x0000989c, 0x00000000 }, 634 /* Addr allmodes */
545 {0x0000989c, 0x00000000 }, 635 {0x0000989c, 0x00000000},
546 {0x0000989c, 0x00000000 }, 636 {0x0000989c, 0x00000000},
547 {0x0000989c, 0x00000000 }, 637 {0x0000989c, 0x00000000},
548 {0x0000989c, 0x00000000 }, 638 {0x0000989c, 0x00000000},
549 {0x0000989c, 0x00000000 }, 639 {0x0000989c, 0x00000000},
550 {0x0000989c, 0x00000000 }, 640 {0x0000989c, 0x00000000},
551 {0x0000989c, 0x00000010 }, 641 {0x0000989c, 0x00000000},
552 {0x0000989c, 0x00000000 }, 642 {0x0000989c, 0x00000010},
553 {0x0000989c, 0x00000000 }, 643 {0x0000989c, 0x00000000},
554 {0x0000989c, 0x00000000 }, 644 {0x0000989c, 0x00000000},
555 {0x0000989c, 0x00000000 }, 645 {0x0000989c, 0x00000000},
556 {0x0000989c, 0x00000000 }, 646 {0x0000989c, 0x00000000},
557 {0x0000989c, 0x00000000 }, 647 {0x0000989c, 0x00000000},
558 {0x0000989c, 0x00000000 }, 648 {0x0000989c, 0x00000000},
559 {0x0000989c, 0x00000000 }, 649 {0x0000989c, 0x00000000},
560 {0x0000989c, 0x00000000 }, 650 {0x0000989c, 0x00000000},
561 {0x0000989c, 0x00000000 }, 651 {0x0000989c, 0x00000000},
562 {0x0000989c, 0x00000000 }, 652 {0x0000989c, 0x00000000},
563 {0x0000989c, 0x00000000 }, 653 {0x0000989c, 0x00000000},
564 {0x0000989c, 0x00000000 }, 654 {0x0000989c, 0x00000000},
565 {0x0000989c, 0x000000c0 }, 655 {0x0000989c, 0x00000000},
566 {0x0000989c, 0x00000015 }, 656 {0x0000989c, 0x000000c0},
567 {0x0000989c, 0x00000000 }, 657 {0x0000989c, 0x00000015},
568 {0x0000989c, 0x00000000 }, 658 {0x0000989c, 0x00000000},
569 {0x0000989c, 0x00000000 }, 659 {0x0000989c, 0x00000000},
570 {0x0000989c, 0x00000000 }, 660 {0x0000989c, 0x00000000},
571 {0x0000989c, 0x00000000 }, 661 {0x0000989c, 0x00000000},
572 {0x0000989c, 0x00000000 }, 662 {0x0000989c, 0x00000000},
573 {0x0000989c, 0x00000000 }, 663 {0x0000989c, 0x00000000},
574 {0x0000989c, 0x00000000 }, 664 {0x0000989c, 0x00000000},
575 {0x000098cc, 0x00000000 }, 665 {0x0000989c, 0x00000000},
666 {0x000098cc, 0x00000000},
576}; 667};
577 668
578static const u32 ar5416Modes_9160[][6] = { 669static const u32 ar5416Modes_9160[][6] = {
579 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 670 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
580 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 671 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
581 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 672 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
582 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, 673 {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008},
583 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 674 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
584 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, 675 {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf},
585 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 676 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
586 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 677 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
587 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 678 {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
588 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 679 {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
589 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 680 {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
590 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 681 {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
591 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, 682 {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
592 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 683 {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
593 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 684 {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0},
594 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 685 {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
595 { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 }, 686 {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
596 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, 687 {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
597 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e }, 688 {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2, 0x6c48b0e2},
598 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, 689 {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
599 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 690 {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
600 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, 691 {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18},
601 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, 692 {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
602 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 693 {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
603 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 694 {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
604 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, 695 {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
605 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 }, 696 {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
606 { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, 697 {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
607 { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, 698 {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
608 { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, 699 {0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
609 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, 700 {0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
610 { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce }, 701 {0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
611 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 }, 702 {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
612 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, 703 {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce},
613 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 704 {0x000099bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00},
614 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, 705 {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
615 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 706 {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
616 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 707 {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
617 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 708 {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
618 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 709 {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
619 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, 710 {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
620 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, 711 {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
621 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, 712 {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
622 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, 713 {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788},
623 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, 714 {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
624 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 715 {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
625 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 716 {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
626 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, 717 {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
627 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, 718 {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
628 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, 719 {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa},
629 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, 720 {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
630 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, 721 {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402},
631 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, 722 {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06},
632 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, 723 {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b},
633 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, 724 {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b},
634 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, 725 {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a},
635 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, 726 {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf},
636 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, 727 {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f},
637 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, 728 {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f},
638 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 729 {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f},
639 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 730 {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000},
640 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 731 {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
732 {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
733 {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
641}; 734};
642 735
643static const u32 ar5416Common_9160[][2] = { 736static const u32 ar5416Common_9160[][2] = {
644 { 0x0000000c, 0x00000000 }, 737 /* Addr allmodes */
645 { 0x00000030, 0x00020015 }, 738 {0x0000000c, 0x00000000},
646 { 0x00000034, 0x00000005 }, 739 {0x00000030, 0x00020015},
647 { 0x00000040, 0x00000000 }, 740 {0x00000034, 0x00000005},
648 { 0x00000044, 0x00000008 }, 741 {0x00000040, 0x00000000},
649 { 0x00000048, 0x00000008 }, 742 {0x00000044, 0x00000008},
650 { 0x0000004c, 0x00000010 }, 743 {0x00000048, 0x00000008},
651 { 0x00000050, 0x00000000 }, 744 {0x0000004c, 0x00000010},
652 { 0x00000054, 0x0000001f }, 745 {0x00000050, 0x00000000},
653 { 0x00000800, 0x00000000 }, 746 {0x00000054, 0x0000001f},
654 { 0x00000804, 0x00000000 }, 747 {0x00000800, 0x00000000},
655 { 0x00000808, 0x00000000 }, 748 {0x00000804, 0x00000000},
656 { 0x0000080c, 0x00000000 }, 749 {0x00000808, 0x00000000},
657 { 0x00000810, 0x00000000 }, 750 {0x0000080c, 0x00000000},
658 { 0x00000814, 0x00000000 }, 751 {0x00000810, 0x00000000},
659 { 0x00000818, 0x00000000 }, 752 {0x00000814, 0x00000000},
660 { 0x0000081c, 0x00000000 }, 753 {0x00000818, 0x00000000},
661 { 0x00000820, 0x00000000 }, 754 {0x0000081c, 0x00000000},
662 { 0x00000824, 0x00000000 }, 755 {0x00000820, 0x00000000},
663 { 0x00001040, 0x002ffc0f }, 756 {0x00000824, 0x00000000},
664 { 0x00001044, 0x002ffc0f }, 757 {0x00001040, 0x002ffc0f},
665 { 0x00001048, 0x002ffc0f }, 758 {0x00001044, 0x002ffc0f},
666 { 0x0000104c, 0x002ffc0f }, 759 {0x00001048, 0x002ffc0f},
667 { 0x00001050, 0x002ffc0f }, 760 {0x0000104c, 0x002ffc0f},
668 { 0x00001054, 0x002ffc0f }, 761 {0x00001050, 0x002ffc0f},
669 { 0x00001058, 0x002ffc0f }, 762 {0x00001054, 0x002ffc0f},
670 { 0x0000105c, 0x002ffc0f }, 763 {0x00001058, 0x002ffc0f},
671 { 0x00001060, 0x002ffc0f }, 764 {0x0000105c, 0x002ffc0f},
672 { 0x00001064, 0x002ffc0f }, 765 {0x00001060, 0x002ffc0f},
673 { 0x00001230, 0x00000000 }, 766 {0x00001064, 0x002ffc0f},
674 { 0x00001270, 0x00000000 }, 767 {0x00001230, 0x00000000},
675 { 0x00001038, 0x00000000 }, 768 {0x00001270, 0x00000000},
676 { 0x00001078, 0x00000000 }, 769 {0x00001038, 0x00000000},
677 { 0x000010b8, 0x00000000 }, 770 {0x00001078, 0x00000000},
678 { 0x000010f8, 0x00000000 }, 771 {0x000010b8, 0x00000000},
679 { 0x00001138, 0x00000000 }, 772 {0x000010f8, 0x00000000},
680 { 0x00001178, 0x00000000 }, 773 {0x00001138, 0x00000000},
681 { 0x000011b8, 0x00000000 }, 774 {0x00001178, 0x00000000},
682 { 0x000011f8, 0x00000000 }, 775 {0x000011b8, 0x00000000},
683 { 0x00001238, 0x00000000 }, 776 {0x000011f8, 0x00000000},
684 { 0x00001278, 0x00000000 }, 777 {0x00001238, 0x00000000},
685 { 0x000012b8, 0x00000000 }, 778 {0x00001278, 0x00000000},
686 { 0x000012f8, 0x00000000 }, 779 {0x000012b8, 0x00000000},
687 { 0x00001338, 0x00000000 }, 780 {0x000012f8, 0x00000000},
688 { 0x00001378, 0x00000000 }, 781 {0x00001338, 0x00000000},
689 { 0x000013b8, 0x00000000 }, 782 {0x00001378, 0x00000000},
690 { 0x000013f8, 0x00000000 }, 783 {0x000013b8, 0x00000000},
691 { 0x00001438, 0x00000000 }, 784 {0x000013f8, 0x00000000},
692 { 0x00001478, 0x00000000 }, 785 {0x00001438, 0x00000000},
693 { 0x000014b8, 0x00000000 }, 786 {0x00001478, 0x00000000},
694 { 0x000014f8, 0x00000000 }, 787 {0x000014b8, 0x00000000},
695 { 0x00001538, 0x00000000 }, 788 {0x000014f8, 0x00000000},
696 { 0x00001578, 0x00000000 }, 789 {0x00001538, 0x00000000},
697 { 0x000015b8, 0x00000000 }, 790 {0x00001578, 0x00000000},
698 { 0x000015f8, 0x00000000 }, 791 {0x000015b8, 0x00000000},
699 { 0x00001638, 0x00000000 }, 792 {0x000015f8, 0x00000000},
700 { 0x00001678, 0x00000000 }, 793 {0x00001638, 0x00000000},
701 { 0x000016b8, 0x00000000 }, 794 {0x00001678, 0x00000000},
702 { 0x000016f8, 0x00000000 }, 795 {0x000016b8, 0x00000000},
703 { 0x00001738, 0x00000000 }, 796 {0x000016f8, 0x00000000},
704 { 0x00001778, 0x00000000 }, 797 {0x00001738, 0x00000000},
705 { 0x000017b8, 0x00000000 }, 798 {0x00001778, 0x00000000},
706 { 0x000017f8, 0x00000000 }, 799 {0x000017b8, 0x00000000},
707 { 0x0000103c, 0x00000000 }, 800 {0x000017f8, 0x00000000},
708 { 0x0000107c, 0x00000000 }, 801 {0x0000103c, 0x00000000},
709 { 0x000010bc, 0x00000000 }, 802 {0x0000107c, 0x00000000},
710 { 0x000010fc, 0x00000000 }, 803 {0x000010bc, 0x00000000},
711 { 0x0000113c, 0x00000000 }, 804 {0x000010fc, 0x00000000},
712 { 0x0000117c, 0x00000000 }, 805 {0x0000113c, 0x00000000},
713 { 0x000011bc, 0x00000000 }, 806 {0x0000117c, 0x00000000},
714 { 0x000011fc, 0x00000000 }, 807 {0x000011bc, 0x00000000},
715 { 0x0000123c, 0x00000000 }, 808 {0x000011fc, 0x00000000},
716 { 0x0000127c, 0x00000000 }, 809 {0x0000123c, 0x00000000},
717 { 0x000012bc, 0x00000000 }, 810 {0x0000127c, 0x00000000},
718 { 0x000012fc, 0x00000000 }, 811 {0x000012bc, 0x00000000},
719 { 0x0000133c, 0x00000000 }, 812 {0x000012fc, 0x00000000},
720 { 0x0000137c, 0x00000000 }, 813 {0x0000133c, 0x00000000},
721 { 0x000013bc, 0x00000000 }, 814 {0x0000137c, 0x00000000},
722 { 0x000013fc, 0x00000000 }, 815 {0x000013bc, 0x00000000},
723 { 0x0000143c, 0x00000000 }, 816 {0x000013fc, 0x00000000},
724 { 0x0000147c, 0x00000000 }, 817 {0x0000143c, 0x00000000},
725 { 0x00004030, 0x00000002 }, 818 {0x0000147c, 0x00000000},
726 { 0x0000403c, 0x00000002 }, 819 {0x00004030, 0x00000002},
727 { 0x00007010, 0x00000020 }, 820 {0x0000403c, 0x00000002},
728 { 0x00007038, 0x000004c2 }, 821 {0x00007010, 0x00000020},
729 { 0x00008004, 0x00000000 }, 822 {0x00007038, 0x000004c2},
730 { 0x00008008, 0x00000000 }, 823 {0x00008004, 0x00000000},
731 { 0x0000800c, 0x00000000 }, 824 {0x00008008, 0x00000000},
732 { 0x00008018, 0x00000700 }, 825 {0x0000800c, 0x00000000},
733 { 0x00008020, 0x00000000 }, 826 {0x00008018, 0x00000700},
734 { 0x00008038, 0x00000000 }, 827 {0x00008020, 0x00000000},
735 { 0x0000803c, 0x00000000 }, 828 {0x00008038, 0x00000000},
736 { 0x00008048, 0x40000000 }, 829 {0x0000803c, 0x00000000},
737 { 0x00008054, 0x00000000 }, 830 {0x00008048, 0x40000000},
738 { 0x00008058, 0x00000000 }, 831 {0x00008054, 0x00000000},
739 { 0x0000805c, 0x000fc78f }, 832 {0x00008058, 0x00000000},
740 { 0x00008060, 0x0000000f }, 833 {0x0000805c, 0x000fc78f},
741 { 0x00008064, 0x00000000 }, 834 {0x00008060, 0x0000000f},
742 { 0x000080c0, 0x2a82301a }, 835 {0x00008064, 0x00000000},
743 { 0x000080c4, 0x05dc01e0 }, 836 {0x000080c0, 0x2a82301a},
744 { 0x000080c8, 0x1f402710 }, 837 {0x000080c4, 0x05dc01e0},
745 { 0x000080cc, 0x01f40000 }, 838 {0x000080c8, 0x1f402710},
746 { 0x000080d0, 0x00001e00 }, 839 {0x000080cc, 0x01f40000},
747 { 0x000080d4, 0x00000000 }, 840 {0x000080d0, 0x00001e00},
748 { 0x000080d8, 0x00400000 }, 841 {0x000080d4, 0x00000000},
749 { 0x000080e0, 0xffffffff }, 842 {0x000080d8, 0x00400000},
750 { 0x000080e4, 0x0000ffff }, 843 {0x000080e0, 0xffffffff},
751 { 0x000080e8, 0x003f3f3f }, 844 {0x000080e4, 0x0000ffff},
752 { 0x000080ec, 0x00000000 }, 845 {0x000080e8, 0x003f3f3f},
753 { 0x000080f0, 0x00000000 }, 846 {0x000080ec, 0x00000000},
754 { 0x000080f4, 0x00000000 }, 847 {0x000080f0, 0x00000000},
755 { 0x000080f8, 0x00000000 }, 848 {0x000080f4, 0x00000000},
756 { 0x000080fc, 0x00020000 }, 849 {0x000080f8, 0x00000000},
757 { 0x00008100, 0x00020000 }, 850 {0x000080fc, 0x00020000},
758 { 0x00008104, 0x00000001 }, 851 {0x00008100, 0x00020000},
759 { 0x00008108, 0x00000052 }, 852 {0x00008104, 0x00000001},
760 { 0x0000810c, 0x00000000 }, 853 {0x00008108, 0x00000052},
761 { 0x00008110, 0x00000168 }, 854 {0x0000810c, 0x00000000},
762 { 0x00008118, 0x000100aa }, 855 {0x00008110, 0x00000168},
763 { 0x0000811c, 0x00003210 }, 856 {0x00008118, 0x000100aa},
764 { 0x00008120, 0x08f04800 }, 857 {0x0000811c, 0x00003210},
765 { 0x00008124, 0x00000000 }, 858 {0x00008124, 0x00000000},
766 { 0x00008128, 0x00000000 }, 859 {0x00008128, 0x00000000},
767 { 0x0000812c, 0x00000000 }, 860 {0x0000812c, 0x00000000},
768 { 0x00008130, 0x00000000 }, 861 {0x00008130, 0x00000000},
769 { 0x00008134, 0x00000000 }, 862 {0x00008134, 0x00000000},
770 { 0x00008138, 0x00000000 }, 863 {0x00008138, 0x00000000},
771 { 0x0000813c, 0x00000000 }, 864 {0x0000813c, 0x00000000},
772 { 0x00008144, 0xffffffff }, 865 {0x00008144, 0xffffffff},
773 { 0x00008168, 0x00000000 }, 866 {0x00008168, 0x00000000},
774 { 0x0000816c, 0x00000000 }, 867 {0x0000816c, 0x00000000},
775 { 0x00008170, 0x32143320 }, 868 {0x00008170, 0x32143320},
776 { 0x00008174, 0xfaa4fa50 }, 869 {0x00008174, 0xfaa4fa50},
777 { 0x00008178, 0x00000100 }, 870 {0x00008178, 0x00000100},
778 { 0x0000817c, 0x00000000 }, 871 {0x0000817c, 0x00000000},
779 { 0x000081c4, 0x00000000 }, 872 {0x000081c4, 0x00000000},
780 { 0x000081d0, 0x00003210 }, 873 {0x000081ec, 0x00000000},
781 { 0x000081ec, 0x00000000 }, 874 {0x000081f0, 0x00000000},
782 { 0x000081f0, 0x00000000 }, 875 {0x000081f4, 0x00000000},
783 { 0x000081f4, 0x00000000 }, 876 {0x000081f8, 0x00000000},
784 { 0x000081f8, 0x00000000 }, 877 {0x000081fc, 0x00000000},
785 { 0x000081fc, 0x00000000 }, 878 {0x00008200, 0x00000000},
786 { 0x00008200, 0x00000000 }, 879 {0x00008204, 0x00000000},
787 { 0x00008204, 0x00000000 }, 880 {0x00008208, 0x00000000},
788 { 0x00008208, 0x00000000 }, 881 {0x0000820c, 0x00000000},
789 { 0x0000820c, 0x00000000 }, 882 {0x00008210, 0x00000000},
790 { 0x00008210, 0x00000000 }, 883 {0x00008214, 0x00000000},
791 { 0x00008214, 0x00000000 }, 884 {0x00008218, 0x00000000},
792 { 0x00008218, 0x00000000 }, 885 {0x0000821c, 0x00000000},
793 { 0x0000821c, 0x00000000 }, 886 {0x00008220, 0x00000000},
794 { 0x00008220, 0x00000000 }, 887 {0x00008224, 0x00000000},
795 { 0x00008224, 0x00000000 }, 888 {0x00008228, 0x00000000},
796 { 0x00008228, 0x00000000 }, 889 {0x0000822c, 0x00000000},
797 { 0x0000822c, 0x00000000 }, 890 {0x00008230, 0x00000000},
798 { 0x00008230, 0x00000000 }, 891 {0x00008234, 0x00000000},
799 { 0x00008234, 0x00000000 }, 892 {0x00008238, 0x00000000},
800 { 0x00008238, 0x00000000 }, 893 {0x0000823c, 0x00000000},
801 { 0x0000823c, 0x00000000 }, 894 {0x00008240, 0x00100000},
802 { 0x00008240, 0x00100000 }, 895 {0x00008244, 0x0010f400},
803 { 0x00008244, 0x0010f400 }, 896 {0x00008248, 0x00000100},
804 { 0x00008248, 0x00000100 }, 897 {0x0000824c, 0x0001e800},
805 { 0x0000824c, 0x0001e800 }, 898 {0x00008250, 0x00000000},
806 { 0x00008250, 0x00000000 }, 899 {0x00008254, 0x00000000},
807 { 0x00008254, 0x00000000 }, 900 {0x00008258, 0x00000000},
808 { 0x00008258, 0x00000000 }, 901 {0x0000825c, 0x400000ff},
809 { 0x0000825c, 0x400000ff }, 902 {0x00008260, 0x00080922},
810 { 0x00008260, 0x00080922 }, 903 {0x00008264, 0x88a00010},
811 { 0x00008270, 0x00000000 }, 904 {0x00008270, 0x00000000},
812 { 0x00008274, 0x40000000 }, 905 {0x00008274, 0x40000000},
813 { 0x00008278, 0x003e4180 }, 906 {0x00008278, 0x003e4180},
814 { 0x0000827c, 0x00000000 }, 907 {0x0000827c, 0x00000000},
815 { 0x00008284, 0x0000002c }, 908 {0x00008284, 0x0000002c},
816 { 0x00008288, 0x0000002c }, 909 {0x00008288, 0x0000002c},
817 { 0x0000828c, 0x00000000 }, 910 {0x0000828c, 0x00000000},
818 { 0x00008294, 0x00000000 }, 911 {0x00008294, 0x00000000},
819 { 0x00008298, 0x00000000 }, 912 {0x00008298, 0x00000000},
820 { 0x00008300, 0x00000000 }, 913 {0x00008300, 0x00000000},
821 { 0x00008304, 0x00000000 }, 914 {0x00008304, 0x00000000},
822 { 0x00008308, 0x00000000 }, 915 {0x00008308, 0x00000000},
823 { 0x0000830c, 0x00000000 }, 916 {0x0000830c, 0x00000000},
824 { 0x00008310, 0x00000000 }, 917 {0x00008310, 0x00000000},
825 { 0x00008314, 0x00000000 }, 918 {0x00008314, 0x00000000},
826 { 0x00008318, 0x00000000 }, 919 {0x00008318, 0x00000000},
827 { 0x00008328, 0x00000000 }, 920 {0x00008328, 0x00000000},
828 { 0x0000832c, 0x00000007 }, 921 {0x0000832c, 0x00000007},
829 { 0x00008330, 0x00000302 }, 922 {0x00008330, 0x00000302},
830 { 0x00008334, 0x00000e00 }, 923 {0x00008334, 0x00000e00},
831 { 0x00008338, 0x00ff0000 }, 924 {0x00008338, 0x00ff0000},
832 { 0x0000833c, 0x00000000 }, 925 {0x0000833c, 0x00000000},
833 { 0x00008340, 0x000107ff }, 926 {0x00008340, 0x000107ff},
834 { 0x00009808, 0x00000000 }, 927 {0x00009808, 0x00000000},
835 { 0x0000980c, 0xad848e19 }, 928 {0x0000980c, 0xad848e19},
836 { 0x00009810, 0x7d14e000 }, 929 {0x00009810, 0x7d14e000},
837 { 0x00009814, 0x9c0a9f6b }, 930 {0x00009814, 0x9c0a9f6b},
838 { 0x0000981c, 0x00000000 }, 931 {0x0000981c, 0x00000000},
839 { 0x0000982c, 0x0000a000 }, 932 {0x0000982c, 0x0000a000},
840 { 0x00009830, 0x00000000 }, 933 {0x00009830, 0x00000000},
841 { 0x0000983c, 0x00200400 }, 934 {0x0000983c, 0x00200400},
842 { 0x00009840, 0x206a01ae }, 935 {0x00009840, 0x206a01ae},
843 { 0x0000984c, 0x1284233c }, 936 {0x0000984c, 0x1284233c},
844 { 0x00009854, 0x00000859 }, 937 {0x00009854, 0x00000859},
845 { 0x00009900, 0x00000000 }, 938 {0x00009900, 0x00000000},
846 { 0x00009904, 0x00000000 }, 939 {0x00009904, 0x00000000},
847 { 0x00009908, 0x00000000 }, 940 {0x00009908, 0x00000000},
848 { 0x0000990c, 0x00000000 }, 941 {0x0000990c, 0x00000000},
849 { 0x0000991c, 0x10000fff }, 942 {0x0000991c, 0x10000fff},
850 { 0x00009920, 0x05100000 }, 943 {0x00009920, 0x05100000},
851 { 0x0000a920, 0x05100000 }, 944 {0x0000a920, 0x05100000},
852 { 0x0000b920, 0x05100000 }, 945 {0x0000b920, 0x05100000},
853 { 0x00009928, 0x00000001 }, 946 {0x00009928, 0x00000001},
854 { 0x0000992c, 0x00000004 }, 947 {0x0000992c, 0x00000004},
855 { 0x00009934, 0x1e1f2022 }, 948 {0x00009934, 0x1e1f2022},
856 { 0x00009938, 0x0a0b0c0d }, 949 {0x00009938, 0x0a0b0c0d},
857 { 0x0000993c, 0x00000000 }, 950 {0x0000993c, 0x00000000},
858 { 0x00009948, 0x9280b212 }, 951 {0x00009948, 0x9280b212},
859 { 0x0000994c, 0x00020028 }, 952 {0x0000994c, 0x00020028},
860 { 0x00009954, 0x5f3ca3de }, 953 {0x00009954, 0x5f3ca3de},
861 { 0x00009958, 0x2108ecff }, 954 {0x00009958, 0x2108ecff},
862 { 0x00009940, 0x00750604 }, 955 {0x00009940, 0x00750604},
863 { 0x0000c95c, 0x004b6a8e }, 956 {0x0000c95c, 0x004b6a8e},
864 { 0x00009970, 0x190fb515 }, 957 {0x00009970, 0x190fb515},
865 { 0x00009974, 0x00000000 }, 958 {0x00009974, 0x00000000},
866 { 0x00009978, 0x00000001 }, 959 {0x00009978, 0x00000001},
867 { 0x0000997c, 0x00000000 }, 960 {0x0000997c, 0x00000000},
868 { 0x00009980, 0x00000000 }, 961 {0x00009980, 0x00000000},
869 { 0x00009984, 0x00000000 }, 962 {0x00009984, 0x00000000},
870 { 0x00009988, 0x00000000 }, 963 {0x00009988, 0x00000000},
871 { 0x0000998c, 0x00000000 }, 964 {0x0000998c, 0x00000000},
872 { 0x00009990, 0x00000000 }, 965 {0x00009990, 0x00000000},
873 { 0x00009994, 0x00000000 }, 966 {0x00009994, 0x00000000},
874 { 0x00009998, 0x00000000 }, 967 {0x00009998, 0x00000000},
875 { 0x0000999c, 0x00000000 }, 968 {0x0000999c, 0x00000000},
876 { 0x000099a0, 0x00000000 }, 969 {0x000099a0, 0x00000000},
877 { 0x000099a4, 0x00000001 }, 970 {0x000099a4, 0x00000001},
878 { 0x000099a8, 0x201fff00 }, 971 {0x000099a8, 0x201fff00},
879 { 0x000099ac, 0x006f0000 }, 972 {0x000099ac, 0x006f0000},
880 { 0x000099b0, 0x03051000 }, 973 {0x000099b0, 0x03051000},
881 { 0x000099dc, 0x00000000 }, 974 {0x000099dc, 0x00000000},
882 { 0x000099e0, 0x00000200 }, 975 {0x000099e0, 0x00000200},
883 { 0x000099e4, 0xaaaaaaaa }, 976 {0x000099e4, 0xaaaaaaaa},
884 { 0x000099e8, 0x3c466478 }, 977 {0x000099e8, 0x3c466478},
885 { 0x000099ec, 0x0cc80caa }, 978 {0x000099ec, 0x0cc80caa},
886 { 0x000099fc, 0x00001042 }, 979 {0x000099fc, 0x00001042},
887 { 0x00009b00, 0x00000000 }, 980 {0x00009b00, 0x00000000},
888 { 0x00009b04, 0x00000001 }, 981 {0x00009b04, 0x00000001},
889 { 0x00009b08, 0x00000002 }, 982 {0x00009b08, 0x00000002},
890 { 0x00009b0c, 0x00000003 }, 983 {0x00009b0c, 0x00000003},
891 { 0x00009b10, 0x00000004 }, 984 {0x00009b10, 0x00000004},
892 { 0x00009b14, 0x00000005 }, 985 {0x00009b14, 0x00000005},
893 { 0x00009b18, 0x00000008 }, 986 {0x00009b18, 0x00000008},
894 { 0x00009b1c, 0x00000009 }, 987 {0x00009b1c, 0x00000009},
895 { 0x00009b20, 0x0000000a }, 988 {0x00009b20, 0x0000000a},
896 { 0x00009b24, 0x0000000b }, 989 {0x00009b24, 0x0000000b},
897 { 0x00009b28, 0x0000000c }, 990 {0x00009b28, 0x0000000c},
898 { 0x00009b2c, 0x0000000d }, 991 {0x00009b2c, 0x0000000d},
899 { 0x00009b30, 0x00000010 }, 992 {0x00009b30, 0x00000010},
900 { 0x00009b34, 0x00000011 }, 993 {0x00009b34, 0x00000011},
901 { 0x00009b38, 0x00000012 }, 994 {0x00009b38, 0x00000012},
902 { 0x00009b3c, 0x00000013 }, 995 {0x00009b3c, 0x00000013},
903 { 0x00009b40, 0x00000014 }, 996 {0x00009b40, 0x00000014},
904 { 0x00009b44, 0x00000015 }, 997 {0x00009b44, 0x00000015},
905 { 0x00009b48, 0x00000018 }, 998 {0x00009b48, 0x00000018},
906 { 0x00009b4c, 0x00000019 }, 999 {0x00009b4c, 0x00000019},
907 { 0x00009b50, 0x0000001a }, 1000 {0x00009b50, 0x0000001a},
908 { 0x00009b54, 0x0000001b }, 1001 {0x00009b54, 0x0000001b},
909 { 0x00009b58, 0x0000001c }, 1002 {0x00009b58, 0x0000001c},
910 { 0x00009b5c, 0x0000001d }, 1003 {0x00009b5c, 0x0000001d},
911 { 0x00009b60, 0x00000020 }, 1004 {0x00009b60, 0x00000020},
912 { 0x00009b64, 0x00000021 }, 1005 {0x00009b64, 0x00000021},
913 { 0x00009b68, 0x00000022 }, 1006 {0x00009b68, 0x00000022},
914 { 0x00009b6c, 0x00000023 }, 1007 {0x00009b6c, 0x00000023},
915 { 0x00009b70, 0x00000024 }, 1008 {0x00009b70, 0x00000024},
916 { 0x00009b74, 0x00000025 }, 1009 {0x00009b74, 0x00000025},
917 { 0x00009b78, 0x00000028 }, 1010 {0x00009b78, 0x00000028},
918 { 0x00009b7c, 0x00000029 }, 1011 {0x00009b7c, 0x00000029},
919 { 0x00009b80, 0x0000002a }, 1012 {0x00009b80, 0x0000002a},
920 { 0x00009b84, 0x0000002b }, 1013 {0x00009b84, 0x0000002b},
921 { 0x00009b88, 0x0000002c }, 1014 {0x00009b88, 0x0000002c},
922 { 0x00009b8c, 0x0000002d }, 1015 {0x00009b8c, 0x0000002d},
923 { 0x00009b90, 0x00000030 }, 1016 {0x00009b90, 0x00000030},
924 { 0x00009b94, 0x00000031 }, 1017 {0x00009b94, 0x00000031},
925 { 0x00009b98, 0x00000032 }, 1018 {0x00009b98, 0x00000032},
926 { 0x00009b9c, 0x00000033 }, 1019 {0x00009b9c, 0x00000033},
927 { 0x00009ba0, 0x00000034 }, 1020 {0x00009ba0, 0x00000034},
928 { 0x00009ba4, 0x00000035 }, 1021 {0x00009ba4, 0x00000035},
929 { 0x00009ba8, 0x00000035 }, 1022 {0x00009ba8, 0x00000035},
930 { 0x00009bac, 0x00000035 }, 1023 {0x00009bac, 0x00000035},
931 { 0x00009bb0, 0x00000035 }, 1024 {0x00009bb0, 0x00000035},
932 { 0x00009bb4, 0x00000035 }, 1025 {0x00009bb4, 0x00000035},
933 { 0x00009bb8, 0x00000035 }, 1026 {0x00009bb8, 0x00000035},
934 { 0x00009bbc, 0x00000035 }, 1027 {0x00009bbc, 0x00000035},
935 { 0x00009bc0, 0x00000035 }, 1028 {0x00009bc0, 0x00000035},
936 { 0x00009bc4, 0x00000035 }, 1029 {0x00009bc4, 0x00000035},
937 { 0x00009bc8, 0x00000035 }, 1030 {0x00009bc8, 0x00000035},
938 { 0x00009bcc, 0x00000035 }, 1031 {0x00009bcc, 0x00000035},
939 { 0x00009bd0, 0x00000035 }, 1032 {0x00009bd0, 0x00000035},
940 { 0x00009bd4, 0x00000035 }, 1033 {0x00009bd4, 0x00000035},
941 { 0x00009bd8, 0x00000035 }, 1034 {0x00009bd8, 0x00000035},
942 { 0x00009bdc, 0x00000035 }, 1035 {0x00009bdc, 0x00000035},
943 { 0x00009be0, 0x00000035 }, 1036 {0x00009be0, 0x00000035},
944 { 0x00009be4, 0x00000035 }, 1037 {0x00009be4, 0x00000035},
945 { 0x00009be8, 0x00000035 }, 1038 {0x00009be8, 0x00000035},
946 { 0x00009bec, 0x00000035 }, 1039 {0x00009bec, 0x00000035},
947 { 0x00009bf0, 0x00000035 }, 1040 {0x00009bf0, 0x00000035},
948 { 0x00009bf4, 0x00000035 }, 1041 {0x00009bf4, 0x00000035},
949 { 0x00009bf8, 0x00000010 }, 1042 {0x00009bf8, 0x00000010},
950 { 0x00009bfc, 0x0000001a }, 1043 {0x00009bfc, 0x0000001a},
951 { 0x0000a210, 0x40806333 }, 1044 {0x0000a210, 0x40806333},
952 { 0x0000a214, 0x00106c10 }, 1045 {0x0000a214, 0x00106c10},
953 { 0x0000a218, 0x009c4060 }, 1046 {0x0000a218, 0x009c4060},
954 { 0x0000a220, 0x018830c6 }, 1047 {0x0000a220, 0x018830c6},
955 { 0x0000a224, 0x00000400 }, 1048 {0x0000a224, 0x00000400},
956 { 0x0000a228, 0x001a0bb5 }, 1049 {0x0000a228, 0x001a0bb5},
957 { 0x0000a22c, 0x00000000 }, 1050 {0x0000a22c, 0x00000000},
958 { 0x0000a234, 0x20202020 }, 1051 {0x0000a234, 0x20202020},
959 { 0x0000a238, 0x20202020 }, 1052 {0x0000a238, 0x20202020},
960 { 0x0000a23c, 0x13c889af }, 1053 {0x0000a23c, 0x13c889af},
961 { 0x0000a240, 0x38490a20 }, 1054 {0x0000a240, 0x38490a20},
962 { 0x0000a244, 0x00007bb6 }, 1055 {0x0000a244, 0x00007bb6},
963 { 0x0000a248, 0x0fff3ffc }, 1056 {0x0000a248, 0x0fff3ffc},
964 { 0x0000a24c, 0x00000001 }, 1057 {0x0000a24c, 0x00000001},
965 { 0x0000a250, 0x0000e000 }, 1058 {0x0000a250, 0x0000e000},
966 { 0x0000a254, 0x00000000 }, 1059 {0x0000a254, 0x00000000},
967 { 0x0000a258, 0x0cc75380 }, 1060 {0x0000a258, 0x0cc75380},
968 { 0x0000a25c, 0x0f0f0f01 }, 1061 {0x0000a25c, 0x0f0f0f01},
969 { 0x0000a260, 0xdfa91f01 }, 1062 {0x0000a260, 0xdfa91f01},
970 { 0x0000a268, 0x00000001 }, 1063 {0x0000a268, 0x00000001},
971 { 0x0000a26c, 0x0ebae9c6 }, 1064 {0x0000a26c, 0x0e79e5c6},
972 { 0x0000b26c, 0x0ebae9c6 }, 1065 {0x0000b26c, 0x0e79e5c6},
973 { 0x0000c26c, 0x0ebae9c6 }, 1066 {0x0000c26c, 0x0e79e5c6},
974 { 0x0000d270, 0x00820820 }, 1067 {0x0000d270, 0x00820820},
975 { 0x0000a278, 0x1ce739ce }, 1068 {0x0000a278, 0x1ce739ce},
976 { 0x0000a27c, 0x050701ce }, 1069 {0x0000a27c, 0x050701ce},
977 { 0x0000a338, 0x00000000 }, 1070 {0x0000a338, 0x00000000},
978 { 0x0000a33c, 0x00000000 }, 1071 {0x0000a33c, 0x00000000},
979 { 0x0000a340, 0x00000000 }, 1072 {0x0000a340, 0x00000000},
980 { 0x0000a344, 0x00000000 }, 1073 {0x0000a344, 0x00000000},
981 { 0x0000a348, 0x3fffffff }, 1074 {0x0000a348, 0x3fffffff},
982 { 0x0000a34c, 0x3fffffff }, 1075 {0x0000a34c, 0x3fffffff},
983 { 0x0000a350, 0x3fffffff }, 1076 {0x0000a350, 0x3fffffff},
984 { 0x0000a354, 0x0003ffff }, 1077 {0x0000a354, 0x0003ffff},
985 { 0x0000a358, 0x79bfaa03 }, 1078 {0x0000a358, 0x79bfaa03},
986 { 0x0000d35c, 0x07ffffef }, 1079 {0x0000d35c, 0x07ffffef},
987 { 0x0000d360, 0x0fffffe7 }, 1080 {0x0000d360, 0x0fffffe7},
988 { 0x0000d364, 0x17ffffe5 }, 1081 {0x0000d364, 0x17ffffe5},
989 { 0x0000d368, 0x1fffffe4 }, 1082 {0x0000d368, 0x1fffffe4},
990 { 0x0000d36c, 0x37ffffe3 }, 1083 {0x0000d36c, 0x37ffffe3},
991 { 0x0000d370, 0x3fffffe3 }, 1084 {0x0000d370, 0x3fffffe3},
992 { 0x0000d374, 0x57ffffe3 }, 1085 {0x0000d374, 0x57ffffe3},
993 { 0x0000d378, 0x5fffffe2 }, 1086 {0x0000d378, 0x5fffffe2},
994 { 0x0000d37c, 0x7fffffe2 }, 1087 {0x0000d37c, 0x7fffffe2},
995 { 0x0000d380, 0x7f3c7bba }, 1088 {0x0000d380, 0x7f3c7bba},
996 { 0x0000d384, 0xf3307ff0 }, 1089 {0x0000d384, 0xf3307ff0},
997 { 0x0000a388, 0x0c000000 }, 1090 {0x0000a388, 0x0c000000},
998 { 0x0000a38c, 0x20202020 }, 1091 {0x0000a38c, 0x20202020},
999 { 0x0000a390, 0x20202020 }, 1092 {0x0000a390, 0x20202020},
1000 { 0x0000a394, 0x1ce739ce }, 1093 {0x0000a394, 0x1ce739ce},
1001 { 0x0000a398, 0x000001ce }, 1094 {0x0000a398, 0x000001ce},
1002 { 0x0000a39c, 0x00000001 }, 1095 {0x0000a39c, 0x00000001},
1003 { 0x0000a3a0, 0x00000000 }, 1096 {0x0000a3a0, 0x00000000},
1004 { 0x0000a3a4, 0x00000000 }, 1097 {0x0000a3a4, 0x00000000},
1005 { 0x0000a3a8, 0x00000000 }, 1098 {0x0000a3a8, 0x00000000},
1006 { 0x0000a3ac, 0x00000000 }, 1099 {0x0000a3ac, 0x00000000},
1007 { 0x0000a3b0, 0x00000000 }, 1100 {0x0000a3b0, 0x00000000},
1008 { 0x0000a3b4, 0x00000000 }, 1101 {0x0000a3b4, 0x00000000},
1009 { 0x0000a3b8, 0x00000000 }, 1102 {0x0000a3b8, 0x00000000},
1010 { 0x0000a3bc, 0x00000000 }, 1103 {0x0000a3bc, 0x00000000},
1011 { 0x0000a3c0, 0x00000000 }, 1104 {0x0000a3c0, 0x00000000},
1012 { 0x0000a3c4, 0x00000000 }, 1105 {0x0000a3c4, 0x00000000},
1013 { 0x0000a3c8, 0x00000246 }, 1106 {0x0000a3c8, 0x00000246},
1014 { 0x0000a3cc, 0x20202020 }, 1107 {0x0000a3cc, 0x20202020},
1015 { 0x0000a3d0, 0x20202020 }, 1108 {0x0000a3d0, 0x20202020},
1016 { 0x0000a3d4, 0x20202020 }, 1109 {0x0000a3d4, 0x20202020},
1017 { 0x0000a3dc, 0x1ce739ce }, 1110 {0x0000a3dc, 0x1ce739ce},
1018 { 0x0000a3e0, 0x000001ce }, 1111 {0x0000a3e0, 0x000001ce},
1019}; 1112};
1020 1113
1021static const u32 ar5416Bank0_9160[][2] = { 1114static const u32 ar5416Bank0_9160[][2] = {
1022 { 0x000098b0, 0x1e5795e5 }, 1115 /* Addr allmodes */
1023 { 0x000098e0, 0x02008020 }, 1116 {0x000098b0, 0x1e5795e5},
1117 {0x000098e0, 0x02008020},
1024}; 1118};
1025 1119
1026static const u32 ar5416BB_RfGain_9160[][3] = { 1120static const u32 ar5416BB_RfGain_9160[][3] = {
1027 { 0x00009a00, 0x00000000, 0x00000000 }, 1121 /* Addr 5G_HT20 5G_HT40 */
1028 { 0x00009a04, 0x00000040, 0x00000040 }, 1122 {0x00009a00, 0x00000000, 0x00000000},
1029 { 0x00009a08, 0x00000080, 0x00000080 }, 1123 {0x00009a04, 0x00000040, 0x00000040},
1030 { 0x00009a0c, 0x000001a1, 0x00000141 }, 1124 {0x00009a08, 0x00000080, 0x00000080},
1031 { 0x00009a10, 0x000001e1, 0x00000181 }, 1125 {0x00009a0c, 0x000001a1, 0x00000141},
1032 { 0x00009a14, 0x00000021, 0x000001c1 }, 1126 {0x00009a10, 0x000001e1, 0x00000181},
1033 { 0x00009a18, 0x00000061, 0x00000001 }, 1127 {0x00009a14, 0x00000021, 0x000001c1},
1034 { 0x00009a1c, 0x00000168, 0x00000041 }, 1128 {0x00009a18, 0x00000061, 0x00000001},
1035 { 0x00009a20, 0x000001a8, 0x000001a8 }, 1129 {0x00009a1c, 0x00000168, 0x00000041},
1036 { 0x00009a24, 0x000001e8, 0x000001e8 }, 1130 {0x00009a20, 0x000001a8, 0x000001a8},
1037 { 0x00009a28, 0x00000028, 0x00000028 }, 1131 {0x00009a24, 0x000001e8, 0x000001e8},
1038 { 0x00009a2c, 0x00000068, 0x00000068 }, 1132 {0x00009a28, 0x00000028, 0x00000028},
1039 { 0x00009a30, 0x00000189, 0x000000a8 }, 1133 {0x00009a2c, 0x00000068, 0x00000068},
1040 { 0x00009a34, 0x000001c9, 0x00000169 }, 1134 {0x00009a30, 0x00000189, 0x000000a8},
1041 { 0x00009a38, 0x00000009, 0x000001a9 }, 1135 {0x00009a34, 0x000001c9, 0x00000169},
1042 { 0x00009a3c, 0x00000049, 0x000001e9 }, 1136 {0x00009a38, 0x00000009, 0x000001a9},
1043 { 0x00009a40, 0x00000089, 0x00000029 }, 1137 {0x00009a3c, 0x00000049, 0x000001e9},
1044 { 0x00009a44, 0x00000170, 0x00000069 }, 1138 {0x00009a40, 0x00000089, 0x00000029},
1045 { 0x00009a48, 0x000001b0, 0x00000190 }, 1139 {0x00009a44, 0x00000170, 0x00000069},
1046 { 0x00009a4c, 0x000001f0, 0x000001d0 }, 1140 {0x00009a48, 0x000001b0, 0x00000190},
1047 { 0x00009a50, 0x00000030, 0x00000010 }, 1141 {0x00009a4c, 0x000001f0, 0x000001d0},
1048 { 0x00009a54, 0x00000070, 0x00000050 }, 1142 {0x00009a50, 0x00000030, 0x00000010},
1049 { 0x00009a58, 0x00000191, 0x00000090 }, 1143 {0x00009a54, 0x00000070, 0x00000050},
1050 { 0x00009a5c, 0x000001d1, 0x00000151 }, 1144 {0x00009a58, 0x00000191, 0x00000090},
1051 { 0x00009a60, 0x00000011, 0x00000191 }, 1145 {0x00009a5c, 0x000001d1, 0x00000151},
1052 { 0x00009a64, 0x00000051, 0x000001d1 }, 1146 {0x00009a60, 0x00000011, 0x00000191},
1053 { 0x00009a68, 0x00000091, 0x00000011 }, 1147 {0x00009a64, 0x00000051, 0x000001d1},
1054 { 0x00009a6c, 0x000001b8, 0x00000051 }, 1148 {0x00009a68, 0x00000091, 0x00000011},
1055 { 0x00009a70, 0x000001f8, 0x00000198 }, 1149 {0x00009a6c, 0x000001b8, 0x00000051},
1056 { 0x00009a74, 0x00000038, 0x000001d8 }, 1150 {0x00009a70, 0x000001f8, 0x00000198},
1057 { 0x00009a78, 0x00000078, 0x00000018 }, 1151 {0x00009a74, 0x00000038, 0x000001d8},
1058 { 0x00009a7c, 0x00000199, 0x00000058 }, 1152 {0x00009a78, 0x00000078, 0x00000018},
1059 { 0x00009a80, 0x000001d9, 0x00000098 }, 1153 {0x00009a7c, 0x00000199, 0x00000058},
1060 { 0x00009a84, 0x00000019, 0x00000159 }, 1154 {0x00009a80, 0x000001d9, 0x00000098},
1061 { 0x00009a88, 0x00000059, 0x00000199 }, 1155 {0x00009a84, 0x00000019, 0x00000159},
1062 { 0x00009a8c, 0x00000099, 0x000001d9 }, 1156 {0x00009a88, 0x00000059, 0x00000199},
1063 { 0x00009a90, 0x000000d9, 0x00000019 }, 1157 {0x00009a8c, 0x00000099, 0x000001d9},
1064 { 0x00009a94, 0x000000f9, 0x00000059 }, 1158 {0x00009a90, 0x000000d9, 0x00000019},
1065 { 0x00009a98, 0x000000f9, 0x00000099 }, 1159 {0x00009a94, 0x000000f9, 0x00000059},
1066 { 0x00009a9c, 0x000000f9, 0x000000d9 }, 1160 {0x00009a98, 0x000000f9, 0x00000099},
1067 { 0x00009aa0, 0x000000f9, 0x000000f9 }, 1161 {0x00009a9c, 0x000000f9, 0x000000d9},
1068 { 0x00009aa4, 0x000000f9, 0x000000f9 }, 1162 {0x00009aa0, 0x000000f9, 0x000000f9},
1069 { 0x00009aa8, 0x000000f9, 0x000000f9 }, 1163 {0x00009aa4, 0x000000f9, 0x000000f9},
1070 { 0x00009aac, 0x000000f9, 0x000000f9 }, 1164 {0x00009aa8, 0x000000f9, 0x000000f9},
1071 { 0x00009ab0, 0x000000f9, 0x000000f9 }, 1165 {0x00009aac, 0x000000f9, 0x000000f9},
1072 { 0x00009ab4, 0x000000f9, 0x000000f9 }, 1166 {0x00009ab0, 0x000000f9, 0x000000f9},
1073 { 0x00009ab8, 0x000000f9, 0x000000f9 }, 1167 {0x00009ab4, 0x000000f9, 0x000000f9},
1074 { 0x00009abc, 0x000000f9, 0x000000f9 }, 1168 {0x00009ab8, 0x000000f9, 0x000000f9},
1075 { 0x00009ac0, 0x000000f9, 0x000000f9 }, 1169 {0x00009abc, 0x000000f9, 0x000000f9},
1076 { 0x00009ac4, 0x000000f9, 0x000000f9 }, 1170 {0x00009ac0, 0x000000f9, 0x000000f9},
1077 { 0x00009ac8, 0x000000f9, 0x000000f9 }, 1171 {0x00009ac4, 0x000000f9, 0x000000f9},
1078 { 0x00009acc, 0x000000f9, 0x000000f9 }, 1172 {0x00009ac8, 0x000000f9, 0x000000f9},
1079 { 0x00009ad0, 0x000000f9, 0x000000f9 }, 1173 {0x00009acc, 0x000000f9, 0x000000f9},
1080 { 0x00009ad4, 0x000000f9, 0x000000f9 }, 1174 {0x00009ad0, 0x000000f9, 0x000000f9},
1081 { 0x00009ad8, 0x000000f9, 0x000000f9 }, 1175 {0x00009ad4, 0x000000f9, 0x000000f9},
1082 { 0x00009adc, 0x000000f9, 0x000000f9 }, 1176 {0x00009ad8, 0x000000f9, 0x000000f9},
1083 { 0x00009ae0, 0x000000f9, 0x000000f9 }, 1177 {0x00009adc, 0x000000f9, 0x000000f9},
1084 { 0x00009ae4, 0x000000f9, 0x000000f9 }, 1178 {0x00009ae0, 0x000000f9, 0x000000f9},
1085 { 0x00009ae8, 0x000000f9, 0x000000f9 }, 1179 {0x00009ae4, 0x000000f9, 0x000000f9},
1086 { 0x00009aec, 0x000000f9, 0x000000f9 }, 1180 {0x00009ae8, 0x000000f9, 0x000000f9},
1087 { 0x00009af0, 0x000000f9, 0x000000f9 }, 1181 {0x00009aec, 0x000000f9, 0x000000f9},
1088 { 0x00009af4, 0x000000f9, 0x000000f9 }, 1182 {0x00009af0, 0x000000f9, 0x000000f9},
1089 { 0x00009af8, 0x000000f9, 0x000000f9 }, 1183 {0x00009af4, 0x000000f9, 0x000000f9},
1090 { 0x00009afc, 0x000000f9, 0x000000f9 }, 1184 {0x00009af8, 0x000000f9, 0x000000f9},
1185 {0x00009afc, 0x000000f9, 0x000000f9},
1091}; 1186};
1092 1187
1093static const u32 ar5416Bank1_9160[][2] = { 1188static const u32 ar5416Bank1_9160[][2] = {
1094 { 0x000098b0, 0x02108421 }, 1189 /* Addr allmodes */
1095 { 0x000098ec, 0x00000008 }, 1190 {0x000098b0, 0x02108421},
1191 {0x000098ec, 0x00000008},
1096}; 1192};
1097 1193
1098static const u32 ar5416Bank2_9160[][2] = { 1194static const u32 ar5416Bank2_9160[][2] = {
1099 { 0x000098b0, 0x0e73ff17 }, 1195 /* Addr allmodes */
1100 { 0x000098e0, 0x00000420 }, 1196 {0x000098b0, 0x0e73ff17},
1197 {0x000098e0, 0x00000420},
1101}; 1198};
1102 1199
1103static const u32 ar5416Bank3_9160[][3] = { 1200static const u32 ar5416Bank3_9160[][3] = {
1104 { 0x000098f0, 0x01400018, 0x01c00018 }, 1201 /* Addr 5G_HT20 5G_HT40 */
1202 {0x000098f0, 0x01400018, 0x01c00018},
1105}; 1203};
1106 1204
1107static const u32 ar5416Bank6_9160[][3] = { 1205static const u32 ar5416Bank6_9160[][3] = {
1108 { 0x0000989c, 0x00000000, 0x00000000 }, 1206 /* Addr 5G_HT20 5G_HT40 */
1109 { 0x0000989c, 0x00000000, 0x00000000 }, 1207 {0x0000989c, 0x00000000, 0x00000000},
1110 { 0x0000989c, 0x00000000, 0x00000000 }, 1208 {0x0000989c, 0x00000000, 0x00000000},
1111 { 0x0000989c, 0x00e00000, 0x00e00000 }, 1209 {0x0000989c, 0x00000000, 0x00000000},
1112 { 0x0000989c, 0x005e0000, 0x005e0000 }, 1210 {0x0000989c, 0x00e00000, 0x00e00000},
1113 { 0x0000989c, 0x00120000, 0x00120000 }, 1211 {0x0000989c, 0x005e0000, 0x005e0000},
1114 { 0x0000989c, 0x00620000, 0x00620000 }, 1212 {0x0000989c, 0x00120000, 0x00120000},
1115 { 0x0000989c, 0x00020000, 0x00020000 }, 1213 {0x0000989c, 0x00620000, 0x00620000},
1116 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1214 {0x0000989c, 0x00020000, 0x00020000},
1117 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1215 {0x0000989c, 0x00ff0000, 0x00ff0000},
1118 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1216 {0x0000989c, 0x00ff0000, 0x00ff0000},
1119 { 0x0000989c, 0x40ff0000, 0x40ff0000 }, 1217 {0x0000989c, 0x00ff0000, 0x00ff0000},
1120 { 0x0000989c, 0x005f0000, 0x005f0000 }, 1218 {0x0000989c, 0x40ff0000, 0x40ff0000},
1121 { 0x0000989c, 0x00870000, 0x00870000 }, 1219 {0x0000989c, 0x005f0000, 0x005f0000},
1122 { 0x0000989c, 0x00f90000, 0x00f90000 }, 1220 {0x0000989c, 0x00870000, 0x00870000},
1123 { 0x0000989c, 0x007b0000, 0x007b0000 }, 1221 {0x0000989c, 0x00f90000, 0x00f90000},
1124 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1222 {0x0000989c, 0x007b0000, 0x007b0000},
1125 { 0x0000989c, 0x00f50000, 0x00f50000 }, 1223 {0x0000989c, 0x00ff0000, 0x00ff0000},
1126 { 0x0000989c, 0x00dc0000, 0x00dc0000 }, 1224 {0x0000989c, 0x00f50000, 0x00f50000},
1127 { 0x0000989c, 0x00110000, 0x00110000 }, 1225 {0x0000989c, 0x00dc0000, 0x00dc0000},
1128 { 0x0000989c, 0x006100a8, 0x006100a8 }, 1226 {0x0000989c, 0x00110000, 0x00110000},
1129 { 0x0000989c, 0x004210a2, 0x004210a2 }, 1227 {0x0000989c, 0x006100a8, 0x006100a8},
1130 { 0x0000989c, 0x0014008f, 0x0014008f }, 1228 {0x0000989c, 0x004210a2, 0x004210a2},
1131 { 0x0000989c, 0x00c40003, 0x00c40003 }, 1229 {0x0000989c, 0x0014008f, 0x0014008f},
1132 { 0x0000989c, 0x003000f2, 0x003000f2 }, 1230 {0x0000989c, 0x00c40003, 0x00c40003},
1133 { 0x0000989c, 0x00440016, 0x00440016 }, 1231 {0x0000989c, 0x003000f2, 0x003000f2},
1134 { 0x0000989c, 0x00410040, 0x00410040 }, 1232 {0x0000989c, 0x00440016, 0x00440016},
1135 { 0x0000989c, 0x0001805e, 0x0001805e }, 1233 {0x0000989c, 0x00410040, 0x00410040},
1136 { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, 1234 {0x0000989c, 0x0001805e, 0x0001805e},
1137 { 0x0000989c, 0x000000f1, 0x000000f1 }, 1235 {0x0000989c, 0x0000c0ab, 0x0000c0ab},
1138 { 0x0000989c, 0x00002081, 0x00002081 }, 1236 {0x0000989c, 0x000000f1, 0x000000f1},
1139 { 0x0000989c, 0x000000d4, 0x000000d4 }, 1237 {0x0000989c, 0x00002081, 0x00002081},
1140 { 0x000098d0, 0x0000000f, 0x0010000f }, 1238 {0x0000989c, 0x000000d4, 0x000000d4},
1239 {0x000098d0, 0x0000000f, 0x0010000f},
1141}; 1240};
1142 1241
1143static const u32 ar5416Bank6TPC_9160[][3] = { 1242static const u32 ar5416Bank6TPC_9160[][3] = {
1144 { 0x0000989c, 0x00000000, 0x00000000 }, 1243 /* Addr 5G_HT20 5G_HT40 */
1145 { 0x0000989c, 0x00000000, 0x00000000 }, 1244 {0x0000989c, 0x00000000, 0x00000000},
1146 { 0x0000989c, 0x00000000, 0x00000000 }, 1245 {0x0000989c, 0x00000000, 0x00000000},
1147 { 0x0000989c, 0x00e00000, 0x00e00000 }, 1246 {0x0000989c, 0x00000000, 0x00000000},
1148 { 0x0000989c, 0x005e0000, 0x005e0000 }, 1247 {0x0000989c, 0x00e00000, 0x00e00000},
1149 { 0x0000989c, 0x00120000, 0x00120000 }, 1248 {0x0000989c, 0x005e0000, 0x005e0000},
1150 { 0x0000989c, 0x00620000, 0x00620000 }, 1249 {0x0000989c, 0x00120000, 0x00120000},
1151 { 0x0000989c, 0x00020000, 0x00020000 }, 1250 {0x0000989c, 0x00620000, 0x00620000},
1152 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1251 {0x0000989c, 0x00020000, 0x00020000},
1153 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1252 {0x0000989c, 0x00ff0000, 0x00ff0000},
1154 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1253 {0x0000989c, 0x00ff0000, 0x00ff0000},
1155 { 0x0000989c, 0x40ff0000, 0x40ff0000 }, 1254 {0x0000989c, 0x00ff0000, 0x00ff0000},
1156 { 0x0000989c, 0x005f0000, 0x005f0000 }, 1255 {0x0000989c, 0x40ff0000, 0x40ff0000},
1157 { 0x0000989c, 0x00870000, 0x00870000 }, 1256 {0x0000989c, 0x005f0000, 0x005f0000},
1158 { 0x0000989c, 0x00f90000, 0x00f90000 }, 1257 {0x0000989c, 0x00870000, 0x00870000},
1159 { 0x0000989c, 0x007b0000, 0x007b0000 }, 1258 {0x0000989c, 0x00f90000, 0x00f90000},
1160 { 0x0000989c, 0x00ff0000, 0x00ff0000 }, 1259 {0x0000989c, 0x007b0000, 0x007b0000},
1161 { 0x0000989c, 0x00f50000, 0x00f50000 }, 1260 {0x0000989c, 0x00ff0000, 0x00ff0000},
1162 { 0x0000989c, 0x00dc0000, 0x00dc0000 }, 1261 {0x0000989c, 0x00f50000, 0x00f50000},
1163 { 0x0000989c, 0x00110000, 0x00110000 }, 1262 {0x0000989c, 0x00dc0000, 0x00dc0000},
1164 { 0x0000989c, 0x006100a8, 0x006100a8 }, 1263 {0x0000989c, 0x00110000, 0x00110000},
1165 { 0x0000989c, 0x00423022, 0x00423022 }, 1264 {0x0000989c, 0x006100a8, 0x006100a8},
1166 { 0x0000989c, 0x2014008f, 0x2014008f }, 1265 {0x0000989c, 0x00423022, 0x00423022},
1167 { 0x0000989c, 0x00c40002, 0x00c40002 }, 1266 {0x0000989c, 0x2014008f, 0x2014008f},
1168 { 0x0000989c, 0x003000f2, 0x003000f2 }, 1267 {0x0000989c, 0x00c40002, 0x00c40002},
1169 { 0x0000989c, 0x00440016, 0x00440016 }, 1268 {0x0000989c, 0x003000f2, 0x003000f2},
1170 { 0x0000989c, 0x00410040, 0x00410040 }, 1269 {0x0000989c, 0x00440016, 0x00440016},
1171 { 0x0000989c, 0x0001805e, 0x0001805e }, 1270 {0x0000989c, 0x00410040, 0x00410040},
1172 { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, 1271 {0x0000989c, 0x0001805e, 0x0001805e},
1173 { 0x0000989c, 0x000000e1, 0x000000e1 }, 1272 {0x0000989c, 0x0000c0ab, 0x0000c0ab},
1174 { 0x0000989c, 0x00007080, 0x00007080 }, 1273 {0x0000989c, 0x000000e1, 0x000000e1},
1175 { 0x0000989c, 0x000000d4, 0x000000d4 }, 1274 {0x0000989c, 0x00007080, 0x00007080},
1176 { 0x000098d0, 0x0000000f, 0x0010000f }, 1275 {0x0000989c, 0x000000d4, 0x000000d4},
1276 {0x000098d0, 0x0000000f, 0x0010000f},
1177}; 1277};
1178 1278
1179static const u32 ar5416Bank7_9160[][2] = { 1279static const u32 ar5416Bank7_9160[][2] = {
1180 { 0x0000989c, 0x00000500 }, 1280 /* Addr allmodes */
1181 { 0x0000989c, 0x00000800 }, 1281 {0x0000989c, 0x00000500},
1182 { 0x000098cc, 0x0000000e }, 1282 {0x0000989c, 0x00000800},
1283 {0x000098cc, 0x0000000e},
1183}; 1284};
1184 1285
1185static const u32 ar5416Addac_9160[][2] = { 1286static const u32 ar5416Addac_9160[][2] = {
1186 {0x0000989c, 0x00000000 }, 1287 /* Addr allmodes */
1187 {0x0000989c, 0x00000000 }, 1288 {0x0000989c, 0x00000000},
1188 {0x0000989c, 0x00000000 }, 1289 {0x0000989c, 0x00000000},
1189 {0x0000989c, 0x00000000 }, 1290 {0x0000989c, 0x00000000},
1190 {0x0000989c, 0x00000000 }, 1291 {0x0000989c, 0x00000000},
1191 {0x0000989c, 0x00000000 }, 1292 {0x0000989c, 0x00000000},
1192 {0x0000989c, 0x000000c0 }, 1293 {0x0000989c, 0x00000000},
1193 {0x0000989c, 0x00000018 }, 1294 {0x0000989c, 0x000000c0},
1194 {0x0000989c, 0x00000004 }, 1295 {0x0000989c, 0x00000018},
1195 {0x0000989c, 0x00000000 }, 1296 {0x0000989c, 0x00000004},
1196 {0x0000989c, 0x00000000 }, 1297 {0x0000989c, 0x00000000},
1197 {0x0000989c, 0x00000000 }, 1298 {0x0000989c, 0x00000000},
1198 {0x0000989c, 0x00000000 }, 1299 {0x0000989c, 0x00000000},
1199 {0x0000989c, 0x00000000 }, 1300 {0x0000989c, 0x00000000},
1200 {0x0000989c, 0x00000000 }, 1301 {0x0000989c, 0x00000000},
1201 {0x0000989c, 0x00000000 }, 1302 {0x0000989c, 0x00000000},
1202 {0x0000989c, 0x00000000 }, 1303 {0x0000989c, 0x00000000},
1203 {0x0000989c, 0x00000000 }, 1304 {0x0000989c, 0x00000000},
1204 {0x0000989c, 0x00000000 }, 1305 {0x0000989c, 0x00000000},
1205 {0x0000989c, 0x00000000 }, 1306 {0x0000989c, 0x00000000},
1206 {0x0000989c, 0x00000000 }, 1307 {0x0000989c, 0x00000000},
1207 {0x0000989c, 0x000000c0 }, 1308 {0x0000989c, 0x00000000},
1208 {0x0000989c, 0x00000019 }, 1309 {0x0000989c, 0x000000c0},
1209 {0x0000989c, 0x00000004 }, 1310 {0x0000989c, 0x00000019},
1210 {0x0000989c, 0x00000000 }, 1311 {0x0000989c, 0x00000004},
1211 {0x0000989c, 0x00000000 }, 1312 {0x0000989c, 0x00000000},
1212 {0x0000989c, 0x00000000 }, 1313 {0x0000989c, 0x00000000},
1213 {0x0000989c, 0x00000004 }, 1314 {0x0000989c, 0x00000000},
1214 {0x0000989c, 0x00000003 }, 1315 {0x0000989c, 0x00000004},
1215 {0x0000989c, 0x00000008 }, 1316 {0x0000989c, 0x00000003},
1216 {0x0000989c, 0x00000000 }, 1317 {0x0000989c, 0x00000008},
1217 {0x000098cc, 0x00000000 }, 1318 {0x0000989c, 0x00000000},
1319 {0x000098cc, 0x00000000},
1218}; 1320};
1219 1321
1220static const u32 ar5416Addac_91601_1[][2] = { 1322static const u32 ar5416Addac_9160_1_1[][2] = {
1221 {0x0000989c, 0x00000000 }, 1323 /* Addr allmodes */
1222 {0x0000989c, 0x00000000 }, 1324 {0x0000989c, 0x00000000},
1223 {0x0000989c, 0x00000000 }, 1325 {0x0000989c, 0x00000000},
1224 {0x0000989c, 0x00000000 }, 1326 {0x0000989c, 0x00000000},
1225 {0x0000989c, 0x00000000 }, 1327 {0x0000989c, 0x00000000},
1226 {0x0000989c, 0x00000000 }, 1328 {0x0000989c, 0x00000000},
1227 {0x0000989c, 0x000000c0 }, 1329 {0x0000989c, 0x00000000},
1228 {0x0000989c, 0x00000018 }, 1330 {0x0000989c, 0x000000c0},
1229 {0x0000989c, 0x00000004 }, 1331 {0x0000989c, 0x00000018},
1230 {0x0000989c, 0x00000000 }, 1332 {0x0000989c, 0x00000004},
1231 {0x0000989c, 0x00000000 }, 1333 {0x0000989c, 0x00000000},
1232 {0x0000989c, 0x00000000 }, 1334 {0x0000989c, 0x00000000},
1233 {0x0000989c, 0x00000000 }, 1335 {0x0000989c, 0x00000000},
1234 {0x0000989c, 0x00000000 }, 1336 {0x0000989c, 0x00000000},
1235 {0x0000989c, 0x00000000 }, 1337 {0x0000989c, 0x00000000},
1236 {0x0000989c, 0x00000000 }, 1338 {0x0000989c, 0x00000000},
1237 {0x0000989c, 0x00000000 }, 1339 {0x0000989c, 0x00000000},
1238 {0x0000989c, 0x00000000 }, 1340 {0x0000989c, 0x00000000},
1239 {0x0000989c, 0x00000000 }, 1341 {0x0000989c, 0x00000000},
1240 {0x0000989c, 0x00000000 }, 1342 {0x0000989c, 0x00000000},
1241 {0x0000989c, 0x00000000 }, 1343 {0x0000989c, 0x00000000},
1242 {0x0000989c, 0x000000c0 }, 1344 {0x0000989c, 0x00000000},
1243 {0x0000989c, 0x00000019 }, 1345 {0x0000989c, 0x000000c0},
1244 {0x0000989c, 0x00000004 }, 1346 {0x0000989c, 0x00000019},
1245 {0x0000989c, 0x00000000 }, 1347 {0x0000989c, 0x00000004},
1246 {0x0000989c, 0x00000000 }, 1348 {0x0000989c, 0x00000000},
1247 {0x0000989c, 0x00000000 }, 1349 {0x0000989c, 0x00000000},
1248 {0x0000989c, 0x00000000 }, 1350 {0x0000989c, 0x00000000},
1249 {0x0000989c, 0x00000000 }, 1351 {0x0000989c, 0x00000000},
1250 {0x0000989c, 0x00000000 }, 1352 {0x0000989c, 0x00000000},
1251 {0x0000989c, 0x00000000 }, 1353 {0x0000989c, 0x00000000},
1252 {0x000098cc, 0x00000000 }, 1354 {0x0000989c, 0x00000000},
1355 {0x000098cc, 0x00000000},
1253}; 1356};
1254 1357
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 5fdbb53b47e0..dabafb874c36 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -239,7 +239,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
239 if (qCoff > 15) 239 if (qCoff > 15)
240 qCoff = 15; 240 qCoff = 15;
241 else if (qCoff <= -16) 241 else if (qCoff <= -16)
242 qCoff = 16; 242 qCoff = -16;
243 243
244 ath_print(common, ATH_DBG_CALIBRATE, 244 ath_print(common, ATH_DBG_CALIBRATE,
245 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", 245 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index a8a8cdc04afa..303c63da5ea3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -18,6 +18,11 @@
18#include "ar5008_initvals.h" 18#include "ar5008_initvals.h"
19#include "ar9001_initvals.h" 19#include "ar9001_initvals.h"
20#include "ar9002_initvals.h" 20#include "ar9002_initvals.h"
21#include "ar9002_phy.h"
22
23int modparam_force_new_ani;
24module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
25MODULE_PARM_DESC(nohwcrypt, "Force new ANI for AR5008, AR9001, AR9002");
21 26
22/* General hardware code for the A5008/AR9001/AR9002 hadware families */ 27/* General hardware code for the A5008/AR9001/AR9002 hadware families */
23 28
@@ -80,21 +85,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
80 ar9287PciePhy_clkreq_always_on_L1_9287_1_1, 85 ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
81 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1), 86 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
82 2); 87 2);
83 } else if (AR_SREV_9287_10_OR_LATER(ah)) {
84 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
85 ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
86 INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
87 ARRAY_SIZE(ar9287Common_9287_1_0), 2);
88
89 if (ah->config.pcie_clock_req)
90 INIT_INI_ARRAY(&ah->iniPcieSerdes,
91 ar9287PciePhy_clkreq_off_L1_9287_1_0,
92 ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
93 else
94 INIT_INI_ARRAY(&ah->iniPcieSerdes,
95 ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
96 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
97 2);
98 } else if (AR_SREV_9285_12_OR_LATER(ah)) { 88 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
99 89
100 90
@@ -113,21 +103,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
113 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2), 103 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
114 2); 104 2);
115 } 105 }
116 } else if (AR_SREV_9285_10_OR_LATER(ah)) {
117 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
118 ARRAY_SIZE(ar9285Modes_9285), 6);
119 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
120 ARRAY_SIZE(ar9285Common_9285), 2);
121
122 if (ah->config.pcie_clock_req) {
123 INIT_INI_ARRAY(&ah->iniPcieSerdes,
124 ar9285PciePhy_clkreq_off_L1_9285,
125 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
126 } else {
127 INIT_INI_ARRAY(&ah->iniPcieSerdes,
128 ar9285PciePhy_clkreq_always_on_L1_9285,
129 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
130 }
131 } else if (AR_SREV_9280_20_OR_LATER(ah)) { 106 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
132 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2, 107 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
133 ARRAY_SIZE(ar9280Modes_9280_2), 6); 108 ARRAY_SIZE(ar9280Modes_9280_2), 6);
@@ -146,11 +121,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
146 INIT_INI_ARRAY(&ah->iniModesAdditional, 121 INIT_INI_ARRAY(&ah->iniModesAdditional,
147 ar9280Modes_fast_clock_9280_2, 122 ar9280Modes_fast_clock_9280_2,
148 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3); 123 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
149 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
150 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
151 ARRAY_SIZE(ar9280Modes_9280), 6);
152 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
153 ARRAY_SIZE(ar9280Common_9280), 2);
154 } else if (AR_SREV_9160_10_OR_LATER(ah)) { 124 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
155 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160, 125 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
156 ARRAY_SIZE(ar5416Modes_9160), 6); 126 ARRAY_SIZE(ar5416Modes_9160), 6);
@@ -174,8 +144,8 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
174 ARRAY_SIZE(ar5416Bank7_9160), 2); 144 ARRAY_SIZE(ar5416Bank7_9160), 2);
175 if (AR_SREV_9160_11(ah)) { 145 if (AR_SREV_9160_11(ah)) {
176 INIT_INI_ARRAY(&ah->iniAddac, 146 INIT_INI_ARRAY(&ah->iniAddac,
177 ar5416Addac_91601_1, 147 ar5416Addac_9160_1_1,
178 ARRAY_SIZE(ar5416Addac_91601_1), 2); 148 ARRAY_SIZE(ar5416Addac_9160_1_1), 2);
179 } else { 149 } else {
180 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160, 150 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
181 ARRAY_SIZE(ar5416Addac_9160), 2); 151 ARRAY_SIZE(ar5416Addac_9160), 2);
@@ -234,12 +204,12 @@ void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
234{ 204{
235 if (AR_SREV_9287_11_OR_LATER(ah)) { 205 if (AR_SREV_9287_11_OR_LATER(ah)) {
236 INIT_INI_ARRAY(&ah->iniCckfirNormal, 206 INIT_INI_ARRAY(&ah->iniCckfirNormal,
237 ar9287Common_normal_cck_fir_coeff_92871_1, 207 ar9287Common_normal_cck_fir_coeff_9287_1_1,
238 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 208 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_9287_1_1),
239 2); 209 2);
240 INIT_INI_ARRAY(&ah->iniCckfirJapan2484, 210 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
241 ar9287Common_japan_2484_cck_fir_coeff_92871_1, 211 ar9287Common_japan_2484_cck_fir_coeff_9287_1_1,
242 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 212 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_9287_1_1),
243 2); 213 2);
244 } 214 }
245} 215}
@@ -300,10 +270,6 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
300 INIT_INI_ARRAY(&ah->iniModesRxGain, 270 INIT_INI_ARRAY(&ah->iniModesRxGain,
301 ar9287Modes_rx_gain_9287_1_1, 271 ar9287Modes_rx_gain_9287_1_1,
302 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6); 272 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
303 else if (AR_SREV_9287_10(ah))
304 INIT_INI_ARRAY(&ah->iniModesRxGain,
305 ar9287Modes_rx_gain_9287_1_0,
306 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
307 else if (AR_SREV_9280_20(ah)) 273 else if (AR_SREV_9280_20(ah))
308 ar9280_20_hw_init_rxgain_ini(ah); 274 ar9280_20_hw_init_rxgain_ini(ah);
309 275
@@ -311,10 +277,6 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
311 INIT_INI_ARRAY(&ah->iniModesTxGain, 277 INIT_INI_ARRAY(&ah->iniModesTxGain,
312 ar9287Modes_tx_gain_9287_1_1, 278 ar9287Modes_tx_gain_9287_1_1,
313 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6); 279 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
314 } else if (AR_SREV_9287_10(ah)) {
315 INIT_INI_ARRAY(&ah->iniModesTxGain,
316 ar9287Modes_tx_gain_9287_1_0,
317 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
318 } else if (AR_SREV_9280_20(ah)) { 280 } else if (AR_SREV_9280_20(ah)) {
319 ar9280_20_hw_init_txgain_ini(ah); 281 ar9280_20_hw_init_txgain_ini(ah);
320 } else if (AR_SREV_9285_12_OR_LATER(ah)) { 282 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
@@ -384,29 +346,6 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
384 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0), 346 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
385 INI_RA(&ah->iniPcieSerdes, i, 1)); 347 INI_RA(&ah->iniPcieSerdes, i, 1));
386 } 348 }
387 } else if (AR_SREV_9280(ah) &&
388 (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
389 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
390 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
391
392 /* RX shut off when elecidle is asserted */
393 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
394 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
395 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
396
397 /* Shut off CLKREQ active in L1 */
398 if (ah->config.pcie_clock_req)
399 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
400 else
401 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
402
403 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
404 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
405 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
406
407 /* Load the new settings */
408 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
409
410 } else { 349 } else {
411 ENABLE_REGWRITE_BUFFER(ah); 350 ENABLE_REGWRITE_BUFFER(ah);
412 351
@@ -436,55 +375,84 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
436 } 375 }
437 376
438 udelay(1000); 377 udelay(1000);
378 }
439 379
440 /* set bit 19 to allow forcing of pcie core into L1 state */ 380 if (power_off) {
441 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); 381 /* clear bit 19 to disable L1 */
382 REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
383
384 val = REG_READ(ah, AR_WA);
442 385
443 /* Several PCIe massages to ensure proper behaviour */ 386 /*
387 * Set PCIe workaround bits
388 * In AR9280 and AR9285, bit 14 in WA register (disable L1)
389 * should only be set when device enters D3 and be
390 * cleared when device comes back to D0.
391 */
392 if (ah->config.pcie_waen) {
393 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
394 val |= AR_WA_D3_L1_DISABLE;
395 } else {
396 if (((AR_SREV_9285(ah) ||
397 AR_SREV_9271(ah) ||
398 AR_SREV_9287(ah)) &&
399 (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
400 (AR_SREV_9280(ah) &&
401 (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
402 val |= AR_WA_D3_L1_DISABLE;
403 }
404 }
405
406 if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
407 /*
408 * Disable bit 6 and 7 before entering D3 to
409 * prevent system hang.
410 */
411 val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
412 }
413
414 if (AR_SREV_9285E_20(ah))
415 val |= AR_WA_BIT23;
416
417 REG_WRITE(ah, AR_WA, val);
418 } else {
444 if (ah->config.pcie_waen) { 419 if (ah->config.pcie_waen) {
445 val = ah->config.pcie_waen; 420 val = ah->config.pcie_waen;
446 if (!power_off) 421 if (!power_off)
447 val &= (~AR_WA_D3_L1_DISABLE); 422 val &= (~AR_WA_D3_L1_DISABLE);
448 } else { 423 } else {
449 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || 424 if (AR_SREV_9285(ah) ||
425 AR_SREV_9271(ah) ||
450 AR_SREV_9287(ah)) { 426 AR_SREV_9287(ah)) {
451 val = AR9285_WA_DEFAULT; 427 val = AR9285_WA_DEFAULT;
452 if (!power_off) 428 if (!power_off)
453 val &= (~AR_WA_D3_L1_DISABLE); 429 val &= (~AR_WA_D3_L1_DISABLE);
454 } else if (AR_SREV_9280(ah)) { 430 }
431 else if (AR_SREV_9280(ah)) {
455 /* 432 /*
456 * On AR9280 chips bit 22 of 0x4004 needs to be 433 * For AR9280 chips, bit 22 of 0x4004
457 * set otherwise card may disappear. 434 * needs to be set.
458 */ 435 */
459 val = AR9280_WA_DEFAULT; 436 val = AR9280_WA_DEFAULT;
460 if (!power_off) 437 if (!power_off)
461 val &= (~AR_WA_D3_L1_DISABLE); 438 val &= (~AR_WA_D3_L1_DISABLE);
462 } else 439 } else {
463 val = AR_WA_DEFAULT; 440 val = AR_WA_DEFAULT;
441 }
442 }
443
444 /* WAR for ASPM system hang */
445 if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
446 val |= (AR_WA_BIT6 | AR_WA_BIT7);
464 } 447 }
465 448
449 if (AR_SREV_9285E_20(ah))
450 val |= AR_WA_BIT23;
451
466 REG_WRITE(ah, AR_WA, val); 452 REG_WRITE(ah, AR_WA, val);
467 }
468 453
469 if (power_off) { 454 /* set bit 19 to allow forcing of pcie core into L1 state */
470 /* 455 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
471 * Set PCIe workaround bits
472 * bit 14 in WA register (disable L1) should only
473 * be set when device enters D3 and be cleared
474 * when device comes back to D0.
475 */
476 if (ah->config.pcie_waen) {
477 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
478 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
479 } else {
480 if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
481 AR_SREV_9287(ah)) &&
482 (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
483 (AR_SREV_9280(ah) &&
484 (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
485 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
486 }
487 }
488 } 456 }
489} 457}
490 458
@@ -536,18 +504,29 @@ int ar9002_hw_rf_claim(struct ath_hw *ah)
536 return 0; 504 return 0;
537} 505}
538 506
507void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
508{
509 if (AR_SREV_9287_13_OR_LATER(ah)) {
510 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
511 AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
512 REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
513 REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
514 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
515 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
516 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
517 }
518}
519
539/* 520/*
540 * Enable ASYNC FIFO
541 *
542 * If Async FIFO is enabled, the following counters change as MAC now runs 521 * If Async FIFO is enabled, the following counters change as MAC now runs
543 * at 117 Mhz instead of 88/44MHz when async FIFO is disabled. 522 * at 117 Mhz instead of 88/44MHz when async FIFO is disabled.
544 * 523 *
545 * The values below tested for ht40 2 chain. 524 * The values below tested for ht40 2 chain.
546 * Overwrite the delay/timeouts initialized in process ini. 525 * Overwrite the delay/timeouts initialized in process ini.
547 */ 526 */
548void ar9002_hw_enable_async_fifo(struct ath_hw *ah) 527void ar9002_hw_update_async_fifo(struct ath_hw *ah)
549{ 528{
550 if (AR_SREV_9287_12_OR_LATER(ah)) { 529 if (AR_SREV_9287_13_OR_LATER(ah)) {
551 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 530 REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
552 AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR); 531 AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
553 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, 532 REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
@@ -571,9 +550,9 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
571 */ 550 */
572void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah) 551void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
573{ 552{
574 if (AR_SREV_9287_12_OR_LATER(ah)) { 553 if (AR_SREV_9287_13_OR_LATER(ah)) {
575 REG_SET_BIT(ah, AR_PCU_MISC_MODE2, 554 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
576 AR_PCU_MISC_MODE2_ENABLE_AGGWEP); 555 AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
577 } 556 }
578} 557}
579 558
@@ -595,4 +574,9 @@ void ar9002_hw_attach_ops(struct ath_hw *ah)
595 574
596 ar9002_hw_attach_calib_ops(ah); 575 ar9002_hw_attach_calib_ops(ah);
597 ar9002_hw_attach_mac_ops(ah); 576 ar9002_hw_attach_mac_ops(ah);
577
578 if (modparam_force_new_ani)
579 ath9k_hw_attach_ani_ops_new(ah);
580 else
581 ath9k_hw_attach_ani_ops_old(ah);
598} 582}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index dae7f3304eb8..6203eed860dd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -14,5217 +14,3252 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#ifndef INITVALS_9002_10_H
18#define INITVALS_9002_10_H
19
20static const u32 ar9280Modes_9280[][6] = {
21 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
22 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
23 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
24 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
25 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 },
26 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
27 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
28 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
29 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
30 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
31 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
32 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
33 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
34 { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
35 { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
36 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
37 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
38 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
39 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 },
40 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
41 { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 },
42 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
43 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
44 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
45 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
46 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
47 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
48 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
49 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
50 { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a },
51 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
52 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
53 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
54 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
55 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
56 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
57 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
58 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
59 { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 },
60 { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 },
61 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 },
62 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 },
63 { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c },
64 { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 },
65 { 0x00009a18, 0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 },
66 { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 },
67 { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac },
68 { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 },
69 { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 },
70 { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 },
71 { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 },
72 { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 },
73 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 },
74 { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 },
75 { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 },
76 { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac },
77 { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 },
78 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 },
79 { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 },
80 { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 },
81 { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 },
82 { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad },
83 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
84 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
85 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
86 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
87 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
88 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
89 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
90 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
91 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
92 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
93 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
94 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
95 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
96 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
97 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
98 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
99 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
100 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
101 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
102 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
103 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
104 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
105 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
106 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
107 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
108 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
109 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
110 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
111 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
112 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
113 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
114 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
115 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
116 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
117 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
118 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
119 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
120 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
121 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
122 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
123 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
124 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
125 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
126 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
127 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
128 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
129 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
130 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
131 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
132 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
133 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
134 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
135 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
136 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
137 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
138 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
139 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
140 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
141 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
142 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
143 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
144 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
145 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
146 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
147 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
148 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
149 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
150 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
151 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
152 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
153 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
154 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
155 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
156 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
157 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
158 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
159 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
160 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
161 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
162 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
163 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
164 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
165 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
166 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
167 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
168 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
169 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
170 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
171 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
172 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
173 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
174 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
175 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
176 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
177 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
178 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
179 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
180 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
181 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
182 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
183 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
184 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
185 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
186 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
187 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
188 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
189 { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
190 { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
191 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
192 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
193 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
194 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
195 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
196 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
197 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
198 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
199 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
200 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
201 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
202 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
203 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
204 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
205 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
206 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
207 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
208 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
209 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
210 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
211 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
212 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
213 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
214 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
215 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
216 { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c },
217 { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 },
218 { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 },
219 { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 },
220};
221
222static const u32 ar9280Common_9280[][2] = {
223 { 0x0000000c, 0x00000000 },
224 { 0x00000030, 0x00020015 },
225 { 0x00000034, 0x00000005 },
226 { 0x00000040, 0x00000000 },
227 { 0x00000044, 0x00000008 },
228 { 0x00000048, 0x00000008 },
229 { 0x0000004c, 0x00000010 },
230 { 0x00000050, 0x00000000 },
231 { 0x00000054, 0x0000001f },
232 { 0x00000800, 0x00000000 },
233 { 0x00000804, 0x00000000 },
234 { 0x00000808, 0x00000000 },
235 { 0x0000080c, 0x00000000 },
236 { 0x00000810, 0x00000000 },
237 { 0x00000814, 0x00000000 },
238 { 0x00000818, 0x00000000 },
239 { 0x0000081c, 0x00000000 },
240 { 0x00000820, 0x00000000 },
241 { 0x00000824, 0x00000000 },
242 { 0x00001040, 0x002ffc0f },
243 { 0x00001044, 0x002ffc0f },
244 { 0x00001048, 0x002ffc0f },
245 { 0x0000104c, 0x002ffc0f },
246 { 0x00001050, 0x002ffc0f },
247 { 0x00001054, 0x002ffc0f },
248 { 0x00001058, 0x002ffc0f },
249 { 0x0000105c, 0x002ffc0f },
250 { 0x00001060, 0x002ffc0f },
251 { 0x00001064, 0x002ffc0f },
252 { 0x00001230, 0x00000000 },
253 { 0x00001270, 0x00000000 },
254 { 0x00001038, 0x00000000 },
255 { 0x00001078, 0x00000000 },
256 { 0x000010b8, 0x00000000 },
257 { 0x000010f8, 0x00000000 },
258 { 0x00001138, 0x00000000 },
259 { 0x00001178, 0x00000000 },
260 { 0x000011b8, 0x00000000 },
261 { 0x000011f8, 0x00000000 },
262 { 0x00001238, 0x00000000 },
263 { 0x00001278, 0x00000000 },
264 { 0x000012b8, 0x00000000 },
265 { 0x000012f8, 0x00000000 },
266 { 0x00001338, 0x00000000 },
267 { 0x00001378, 0x00000000 },
268 { 0x000013b8, 0x00000000 },
269 { 0x000013f8, 0x00000000 },
270 { 0x00001438, 0x00000000 },
271 { 0x00001478, 0x00000000 },
272 { 0x000014b8, 0x00000000 },
273 { 0x000014f8, 0x00000000 },
274 { 0x00001538, 0x00000000 },
275 { 0x00001578, 0x00000000 },
276 { 0x000015b8, 0x00000000 },
277 { 0x000015f8, 0x00000000 },
278 { 0x00001638, 0x00000000 },
279 { 0x00001678, 0x00000000 },
280 { 0x000016b8, 0x00000000 },
281 { 0x000016f8, 0x00000000 },
282 { 0x00001738, 0x00000000 },
283 { 0x00001778, 0x00000000 },
284 { 0x000017b8, 0x00000000 },
285 { 0x000017f8, 0x00000000 },
286 { 0x0000103c, 0x00000000 },
287 { 0x0000107c, 0x00000000 },
288 { 0x000010bc, 0x00000000 },
289 { 0x000010fc, 0x00000000 },
290 { 0x0000113c, 0x00000000 },
291 { 0x0000117c, 0x00000000 },
292 { 0x000011bc, 0x00000000 },
293 { 0x000011fc, 0x00000000 },
294 { 0x0000123c, 0x00000000 },
295 { 0x0000127c, 0x00000000 },
296 { 0x000012bc, 0x00000000 },
297 { 0x000012fc, 0x00000000 },
298 { 0x0000133c, 0x00000000 },
299 { 0x0000137c, 0x00000000 },
300 { 0x000013bc, 0x00000000 },
301 { 0x000013fc, 0x00000000 },
302 { 0x0000143c, 0x00000000 },
303 { 0x0000147c, 0x00000000 },
304 { 0x00004030, 0x00000002 },
305 { 0x0000403c, 0x00000002 },
306 { 0x00004024, 0x0000001f },
307 { 0x00007010, 0x00000033 },
308 { 0x00007038, 0x000004c2 },
309 { 0x00008004, 0x00000000 },
310 { 0x00008008, 0x00000000 },
311 { 0x0000800c, 0x00000000 },
312 { 0x00008018, 0x00000700 },
313 { 0x00008020, 0x00000000 },
314 { 0x00008038, 0x00000000 },
315 { 0x0000803c, 0x00000000 },
316 { 0x00008048, 0x40000000 },
317 { 0x00008054, 0x00000000 },
318 { 0x00008058, 0x00000000 },
319 { 0x0000805c, 0x000fc78f },
320 { 0x00008060, 0x0000000f },
321 { 0x00008064, 0x00000000 },
322 { 0x00008070, 0x00000000 },
323 { 0x000080c0, 0x2a82301a },
324 { 0x000080c4, 0x05dc01e0 },
325 { 0x000080c8, 0x1f402710 },
326 { 0x000080cc, 0x01f40000 },
327 { 0x000080d0, 0x00001e00 },
328 { 0x000080d4, 0x00000000 },
329 { 0x000080d8, 0x00400000 },
330 { 0x000080e0, 0xffffffff },
331 { 0x000080e4, 0x0000ffff },
332 { 0x000080e8, 0x003f3f3f },
333 { 0x000080ec, 0x00000000 },
334 { 0x000080f0, 0x00000000 },
335 { 0x000080f4, 0x00000000 },
336 { 0x000080f8, 0x00000000 },
337 { 0x000080fc, 0x00020000 },
338 { 0x00008100, 0x00020000 },
339 { 0x00008104, 0x00000001 },
340 { 0x00008108, 0x00000052 },
341 { 0x0000810c, 0x00000000 },
342 { 0x00008110, 0x00000168 },
343 { 0x00008118, 0x000100aa },
344 { 0x0000811c, 0x00003210 },
345 { 0x00008120, 0x08f04800 },
346 { 0x00008124, 0x00000000 },
347 { 0x00008128, 0x00000000 },
348 { 0x0000812c, 0x00000000 },
349 { 0x00008130, 0x00000000 },
350 { 0x00008134, 0x00000000 },
351 { 0x00008138, 0x00000000 },
352 { 0x0000813c, 0x00000000 },
353 { 0x00008144, 0x00000000 },
354 { 0x00008168, 0x00000000 },
355 { 0x0000816c, 0x00000000 },
356 { 0x00008170, 0x32143320 },
357 { 0x00008174, 0xfaa4fa50 },
358 { 0x00008178, 0x00000100 },
359 { 0x0000817c, 0x00000000 },
360 { 0x000081c4, 0x00000000 },
361 { 0x000081d0, 0x00003210 },
362 { 0x000081ec, 0x00000000 },
363 { 0x000081f0, 0x00000000 },
364 { 0x000081f4, 0x00000000 },
365 { 0x000081f8, 0x00000000 },
366 { 0x000081fc, 0x00000000 },
367 { 0x00008200, 0x00000000 },
368 { 0x00008204, 0x00000000 },
369 { 0x00008208, 0x00000000 },
370 { 0x0000820c, 0x00000000 },
371 { 0x00008210, 0x00000000 },
372 { 0x00008214, 0x00000000 },
373 { 0x00008218, 0x00000000 },
374 { 0x0000821c, 0x00000000 },
375 { 0x00008220, 0x00000000 },
376 { 0x00008224, 0x00000000 },
377 { 0x00008228, 0x00000000 },
378 { 0x0000822c, 0x00000000 },
379 { 0x00008230, 0x00000000 },
380 { 0x00008234, 0x00000000 },
381 { 0x00008238, 0x00000000 },
382 { 0x0000823c, 0x00000000 },
383 { 0x00008240, 0x00100000 },
384 { 0x00008244, 0x0010f400 },
385 { 0x00008248, 0x00000100 },
386 { 0x0000824c, 0x0001e800 },
387 { 0x00008250, 0x00000000 },
388 { 0x00008254, 0x00000000 },
389 { 0x00008258, 0x00000000 },
390 { 0x0000825c, 0x400000ff },
391 { 0x00008260, 0x00080922 },
392 { 0x00008270, 0x00000000 },
393 { 0x00008274, 0x40000000 },
394 { 0x00008278, 0x003e4180 },
395 { 0x0000827c, 0x00000000 },
396 { 0x00008284, 0x0000002c },
397 { 0x00008288, 0x0000002c },
398 { 0x0000828c, 0x00000000 },
399 { 0x00008294, 0x00000000 },
400 { 0x00008298, 0x00000000 },
401 { 0x00008300, 0x00000000 },
402 { 0x00008304, 0x00000000 },
403 { 0x00008308, 0x00000000 },
404 { 0x0000830c, 0x00000000 },
405 { 0x00008310, 0x00000000 },
406 { 0x00008314, 0x00000000 },
407 { 0x00008318, 0x00000000 },
408 { 0x00008328, 0x00000000 },
409 { 0x0000832c, 0x00000007 },
410 { 0x00008330, 0x00000302 },
411 { 0x00008334, 0x00000e00 },
412 { 0x00008338, 0x00000000 },
413 { 0x0000833c, 0x00000000 },
414 { 0x00008340, 0x000107ff },
415 { 0x00008344, 0x00000000 },
416 { 0x00009808, 0x00000000 },
417 { 0x0000980c, 0xaf268e30 },
418 { 0x00009810, 0xfd14e000 },
419 { 0x00009814, 0x9c0a9f6b },
420 { 0x0000981c, 0x00000000 },
421 { 0x0000982c, 0x0000a000 },
422 { 0x00009830, 0x00000000 },
423 { 0x0000983c, 0x00200400 },
424 { 0x00009840, 0x206a01ae },
425 { 0x0000984c, 0x0040233c },
426 { 0x0000a84c, 0x0040233c },
427 { 0x00009854, 0x00000044 },
428 { 0x00009900, 0x00000000 },
429 { 0x00009904, 0x00000000 },
430 { 0x00009908, 0x00000000 },
431 { 0x0000990c, 0x00000000 },
432 { 0x0000991c, 0x10000fff },
433 { 0x00009920, 0x04900000 },
434 { 0x0000a920, 0x04900000 },
435 { 0x00009928, 0x00000001 },
436 { 0x0000992c, 0x00000004 },
437 { 0x00009934, 0x1e1f2022 },
438 { 0x00009938, 0x0a0b0c0d },
439 { 0x0000993c, 0x00000000 },
440 { 0x00009948, 0x9280c00a },
441 { 0x0000994c, 0x00020028 },
442 { 0x00009954, 0xe250a51e },
443 { 0x00009958, 0x3388ffff },
444 { 0x00009940, 0x00781204 },
445 { 0x0000c95c, 0x004b6a8e },
446 { 0x0000c968, 0x000003ce },
447 { 0x00009970, 0x190fb514 },
448 { 0x00009974, 0x00000000 },
449 { 0x00009978, 0x00000001 },
450 { 0x0000997c, 0x00000000 },
451 { 0x00009980, 0x00000000 },
452 { 0x00009984, 0x00000000 },
453 { 0x00009988, 0x00000000 },
454 { 0x0000998c, 0x00000000 },
455 { 0x00009990, 0x00000000 },
456 { 0x00009994, 0x00000000 },
457 { 0x00009998, 0x00000000 },
458 { 0x0000999c, 0x00000000 },
459 { 0x000099a0, 0x00000000 },
460 { 0x000099a4, 0x00000001 },
461 { 0x000099a8, 0x201fff00 },
462 { 0x000099ac, 0x006f00c4 },
463 { 0x000099b0, 0x03051000 },
464 { 0x000099b4, 0x00000820 },
465 { 0x000099dc, 0x00000000 },
466 { 0x000099e0, 0x00000000 },
467 { 0x000099e4, 0xaaaaaaaa },
468 { 0x000099e8, 0x3c466478 },
469 { 0x000099ec, 0x0cc80caa },
470 { 0x000099fc, 0x00001042 },
471 { 0x0000a210, 0x4080a333 },
472 { 0x0000a214, 0x40206c10 },
473 { 0x0000a218, 0x009c4060 },
474 { 0x0000a220, 0x01834061 },
475 { 0x0000a224, 0x00000400 },
476 { 0x0000a228, 0x000003b5 },
477 { 0x0000a22c, 0x23277200 },
478 { 0x0000a234, 0x20202020 },
479 { 0x0000a238, 0x20202020 },
480 { 0x0000a23c, 0x13c889af },
481 { 0x0000a240, 0x38490a20 },
482 { 0x0000a244, 0x00007bb6 },
483 { 0x0000a248, 0x0fff3ffc },
484 { 0x0000a24c, 0x00000001 },
485 { 0x0000a250, 0x001da000 },
486 { 0x0000a254, 0x00000000 },
487 { 0x0000a258, 0x0cdbd380 },
488 { 0x0000a25c, 0x0f0f0f01 },
489 { 0x0000a260, 0xdfa91f01 },
490 { 0x0000a268, 0x00000000 },
491 { 0x0000a26c, 0x0ebae9c6 },
492 { 0x0000b26c, 0x0ebae9c6 },
493 { 0x0000d270, 0x00820820 },
494 { 0x0000a278, 0x1ce739ce },
495 { 0x0000a27c, 0x050701ce },
496 { 0x0000a358, 0x7999aa0f },
497 { 0x0000d35c, 0x07ffffef },
498 { 0x0000d360, 0x0fffffe7 },
499 { 0x0000d364, 0x17ffffe5 },
500 { 0x0000d368, 0x1fffffe4 },
501 { 0x0000d36c, 0x37ffffe3 },
502 { 0x0000d370, 0x3fffffe3 },
503 { 0x0000d374, 0x57ffffe3 },
504 { 0x0000d378, 0x5fffffe2 },
505 { 0x0000d37c, 0x7fffffe2 },
506 { 0x0000d380, 0x7f3c7bba },
507 { 0x0000d384, 0xf3307ff0 },
508 { 0x0000a388, 0x0c000000 },
509 { 0x0000a38c, 0x20202020 },
510 { 0x0000a390, 0x20202020 },
511 { 0x0000a394, 0x1ce739ce },
512 { 0x0000a398, 0x000001ce },
513 { 0x0000a39c, 0x00000001 },
514 { 0x0000a3a0, 0x00000000 },
515 { 0x0000a3a4, 0x00000000 },
516 { 0x0000a3a8, 0x00000000 },
517 { 0x0000a3ac, 0x00000000 },
518 { 0x0000a3b0, 0x00000000 },
519 { 0x0000a3b4, 0x00000000 },
520 { 0x0000a3b8, 0x00000000 },
521 { 0x0000a3bc, 0x00000000 },
522 { 0x0000a3c0, 0x00000000 },
523 { 0x0000a3c4, 0x00000000 },
524 { 0x0000a3c8, 0x00000246 },
525 { 0x0000a3cc, 0x20202020 },
526 { 0x0000a3d0, 0x20202020 },
527 { 0x0000a3d4, 0x20202020 },
528 { 0x0000a3dc, 0x1ce739ce },
529 { 0x0000a3e0, 0x000001ce },
530 { 0x0000a3e4, 0x00000000 },
531 { 0x0000a3e8, 0x18c43433 },
532 { 0x0000a3ec, 0x00f38081 },
533 { 0x00007800, 0x00040000 },
534 { 0x00007804, 0xdb005012 },
535 { 0x00007808, 0x04924914 },
536 { 0x0000780c, 0x21084210 },
537 { 0x00007810, 0x6d801300 },
538 { 0x00007814, 0x0019beff },
539 { 0x00007818, 0x07e40000 },
540 { 0x0000781c, 0x00492000 },
541 { 0x00007820, 0x92492480 },
542 { 0x00007824, 0x00040000 },
543 { 0x00007828, 0xdb005012 },
544 { 0x0000782c, 0x04924914 },
545 { 0x00007830, 0x21084210 },
546 { 0x00007834, 0x6d801300 },
547 { 0x00007838, 0x0019beff },
548 { 0x0000783c, 0x07e40000 },
549 { 0x00007840, 0x00492000 },
550 { 0x00007844, 0x92492480 },
551 { 0x00007848, 0x00120000 },
552 { 0x00007850, 0x54214514 },
553 { 0x00007858, 0x92592692 },
554 { 0x00007860, 0x52802000 },
555 { 0x00007864, 0x0a8e370e },
556 { 0x00007868, 0xc0102850 },
557 { 0x0000786c, 0x812d4000 },
558 { 0x00007874, 0x001b6db0 },
559 { 0x00007878, 0x00376b63 },
560 { 0x0000787c, 0x06db6db6 },
561 { 0x00007880, 0x006d8000 },
562 { 0x00007884, 0xffeffffe },
563 { 0x00007888, 0xffeffffe },
564 { 0x00007890, 0x00060aeb },
565 { 0x00007894, 0x5a108000 },
566 { 0x00007898, 0x2a850160 },
567};
568
569/* XXX 9280 2 */
570static const u32 ar9280Modes_9280_2[][6] = { 17static const u32 ar9280Modes_9280_2[][6] = {
571 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 18 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
572 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 19 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
573 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 20 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
574 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, 21 {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
575 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 22 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
576 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, 23 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f},
577 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, 24 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
578 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a }, 25 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
579 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 26 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880},
580 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 27 {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
581 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 28 {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
582 { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, 29 {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
583 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 30 {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
584 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 31 {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
585 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 32 {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
586 { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e }, 33 {0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e},
587 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, 34 {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0},
588 { 0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, 35 {0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2},
589 { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 36 {0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
590 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e }, 37 {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
591 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, 38 {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18},
592 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 39 {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
593 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 40 {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
594 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 41 {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881},
595 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 42 {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
596 { 0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016 }, 43 {0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016},
597 { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, 44 {0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
598 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 }, 45 {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010},
599 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 46 {0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
600 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 47 {0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
601 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, 48 {0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210},
602 { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce }, 49 {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce},
603 { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c }, 50 {0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c},
604 { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 }, 51 {0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00},
605 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 52 {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
606 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 53 {0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444},
607 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, 54 {0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019},
608 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 55 {0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019},
609 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 56 {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
610 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 57 {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
611 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 58 {0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000},
612 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, 59 {0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000},
613 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, 60 {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
614 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, 61 {0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000},
615 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 62 {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
616 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 63 {0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000},
617 { 0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000 },
618 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 },
619 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
620 { 0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000 },
621 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
622 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
623}; 64};
624 65
625static const u32 ar9280Common_9280_2[][2] = { 66static const u32 ar9280Common_9280_2[][2] = {
626 { 0x0000000c, 0x00000000 }, 67 /* Addr allmodes */
627 { 0x00000030, 0x00020015 }, 68 {0x0000000c, 0x00000000},
628 { 0x00000034, 0x00000005 }, 69 {0x00000030, 0x00020015},
629 { 0x00000040, 0x00000000 }, 70 {0x00000034, 0x00000005},
630 { 0x00000044, 0x00000008 }, 71 {0x00000040, 0x00000000},
631 { 0x00000048, 0x00000008 }, 72 {0x00000044, 0x00000008},
632 { 0x0000004c, 0x00000010 }, 73 {0x00000048, 0x00000008},
633 { 0x00000050, 0x00000000 }, 74 {0x0000004c, 0x00000010},
634 { 0x00000054, 0x0000001f }, 75 {0x00000050, 0x00000000},
635 { 0x00000800, 0x00000000 }, 76 {0x00000054, 0x0000001f},
636 { 0x00000804, 0x00000000 }, 77 {0x00000800, 0x00000000},
637 { 0x00000808, 0x00000000 }, 78 {0x00000804, 0x00000000},
638 { 0x0000080c, 0x00000000 }, 79 {0x00000808, 0x00000000},
639 { 0x00000810, 0x00000000 }, 80 {0x0000080c, 0x00000000},
640 { 0x00000814, 0x00000000 }, 81 {0x00000810, 0x00000000},
641 { 0x00000818, 0x00000000 }, 82 {0x00000814, 0x00000000},
642 { 0x0000081c, 0x00000000 }, 83 {0x00000818, 0x00000000},
643 { 0x00000820, 0x00000000 }, 84 {0x0000081c, 0x00000000},
644 { 0x00000824, 0x00000000 }, 85 {0x00000820, 0x00000000},
645 { 0x00001040, 0x002ffc0f }, 86 {0x00000824, 0x00000000},
646 { 0x00001044, 0x002ffc0f }, 87 {0x00001040, 0x002ffc0f},
647 { 0x00001048, 0x002ffc0f }, 88 {0x00001044, 0x002ffc0f},
648 { 0x0000104c, 0x002ffc0f }, 89 {0x00001048, 0x002ffc0f},
649 { 0x00001050, 0x002ffc0f }, 90 {0x0000104c, 0x002ffc0f},
650 { 0x00001054, 0x002ffc0f }, 91 {0x00001050, 0x002ffc0f},
651 { 0x00001058, 0x002ffc0f }, 92 {0x00001054, 0x002ffc0f},
652 { 0x0000105c, 0x002ffc0f }, 93 {0x00001058, 0x002ffc0f},
653 { 0x00001060, 0x002ffc0f }, 94 {0x0000105c, 0x002ffc0f},
654 { 0x00001064, 0x002ffc0f }, 95 {0x00001060, 0x002ffc0f},
655 { 0x00001230, 0x00000000 }, 96 {0x00001064, 0x002ffc0f},
656 { 0x00001270, 0x00000000 }, 97 {0x00001230, 0x00000000},
657 { 0x00001038, 0x00000000 }, 98 {0x00001270, 0x00000000},
658 { 0x00001078, 0x00000000 }, 99 {0x00001038, 0x00000000},
659 { 0x000010b8, 0x00000000 }, 100 {0x00001078, 0x00000000},
660 { 0x000010f8, 0x00000000 }, 101 {0x000010b8, 0x00000000},
661 { 0x00001138, 0x00000000 }, 102 {0x000010f8, 0x00000000},
662 { 0x00001178, 0x00000000 }, 103 {0x00001138, 0x00000000},
663 { 0x000011b8, 0x00000000 }, 104 {0x00001178, 0x00000000},
664 { 0x000011f8, 0x00000000 }, 105 {0x000011b8, 0x00000000},
665 { 0x00001238, 0x00000000 }, 106 {0x000011f8, 0x00000000},
666 { 0x00001278, 0x00000000 }, 107 {0x00001238, 0x00000000},
667 { 0x000012b8, 0x00000000 }, 108 {0x00001278, 0x00000000},
668 { 0x000012f8, 0x00000000 }, 109 {0x000012b8, 0x00000000},
669 { 0x00001338, 0x00000000 }, 110 {0x000012f8, 0x00000000},
670 { 0x00001378, 0x00000000 }, 111 {0x00001338, 0x00000000},
671 { 0x000013b8, 0x00000000 }, 112 {0x00001378, 0x00000000},
672 { 0x000013f8, 0x00000000 }, 113 {0x000013b8, 0x00000000},
673 { 0x00001438, 0x00000000 }, 114 {0x000013f8, 0x00000000},
674 { 0x00001478, 0x00000000 }, 115 {0x00001438, 0x00000000},
675 { 0x000014b8, 0x00000000 }, 116 {0x00001478, 0x00000000},
676 { 0x000014f8, 0x00000000 }, 117 {0x000014b8, 0x00000000},
677 { 0x00001538, 0x00000000 }, 118 {0x000014f8, 0x00000000},
678 { 0x00001578, 0x00000000 }, 119 {0x00001538, 0x00000000},
679 { 0x000015b8, 0x00000000 }, 120 {0x00001578, 0x00000000},
680 { 0x000015f8, 0x00000000 }, 121 {0x000015b8, 0x00000000},
681 { 0x00001638, 0x00000000 }, 122 {0x000015f8, 0x00000000},
682 { 0x00001678, 0x00000000 }, 123 {0x00001638, 0x00000000},
683 { 0x000016b8, 0x00000000 }, 124 {0x00001678, 0x00000000},
684 { 0x000016f8, 0x00000000 }, 125 {0x000016b8, 0x00000000},
685 { 0x00001738, 0x00000000 }, 126 {0x000016f8, 0x00000000},
686 { 0x00001778, 0x00000000 }, 127 {0x00001738, 0x00000000},
687 { 0x000017b8, 0x00000000 }, 128 {0x00001778, 0x00000000},
688 { 0x000017f8, 0x00000000 }, 129 {0x000017b8, 0x00000000},
689 { 0x0000103c, 0x00000000 }, 130 {0x000017f8, 0x00000000},
690 { 0x0000107c, 0x00000000 }, 131 {0x0000103c, 0x00000000},
691 { 0x000010bc, 0x00000000 }, 132 {0x0000107c, 0x00000000},
692 { 0x000010fc, 0x00000000 }, 133 {0x000010bc, 0x00000000},
693 { 0x0000113c, 0x00000000 }, 134 {0x000010fc, 0x00000000},
694 { 0x0000117c, 0x00000000 }, 135 {0x0000113c, 0x00000000},
695 { 0x000011bc, 0x00000000 }, 136 {0x0000117c, 0x00000000},
696 { 0x000011fc, 0x00000000 }, 137 {0x000011bc, 0x00000000},
697 { 0x0000123c, 0x00000000 }, 138 {0x000011fc, 0x00000000},
698 { 0x0000127c, 0x00000000 }, 139 {0x0000123c, 0x00000000},
699 { 0x000012bc, 0x00000000 }, 140 {0x0000127c, 0x00000000},
700 { 0x000012fc, 0x00000000 }, 141 {0x000012bc, 0x00000000},
701 { 0x0000133c, 0x00000000 }, 142 {0x000012fc, 0x00000000},
702 { 0x0000137c, 0x00000000 }, 143 {0x0000133c, 0x00000000},
703 { 0x000013bc, 0x00000000 }, 144 {0x0000137c, 0x00000000},
704 { 0x000013fc, 0x00000000 }, 145 {0x000013bc, 0x00000000},
705 { 0x0000143c, 0x00000000 }, 146 {0x000013fc, 0x00000000},
706 { 0x0000147c, 0x00000000 }, 147 {0x0000143c, 0x00000000},
707 { 0x00004030, 0x00000002 }, 148 {0x0000147c, 0x00000000},
708 { 0x0000403c, 0x00000002 }, 149 {0x00004030, 0x00000002},
709 { 0x00004024, 0x0000001f }, 150 {0x0000403c, 0x00000002},
710 { 0x00004060, 0x00000000 }, 151 {0x00004024, 0x0000001f},
711 { 0x00004064, 0x00000000 }, 152 {0x00004060, 0x00000000},
712 { 0x00007010, 0x00000033 }, 153 {0x00004064, 0x00000000},
713 { 0x00007034, 0x00000002 }, 154 {0x00007010, 0x00000033},
714 { 0x00007038, 0x000004c2 }, 155 {0x00007034, 0x00000002},
715 { 0x00008004, 0x00000000 }, 156 {0x00007038, 0x000004c2},
716 { 0x00008008, 0x00000000 }, 157 {0x00008004, 0x00000000},
717 { 0x0000800c, 0x00000000 }, 158 {0x00008008, 0x00000000},
718 { 0x00008018, 0x00000700 }, 159 {0x0000800c, 0x00000000},
719 { 0x00008020, 0x00000000 }, 160 {0x00008018, 0x00000700},
720 { 0x00008038, 0x00000000 }, 161 {0x00008020, 0x00000000},
721 { 0x0000803c, 0x00000000 }, 162 {0x00008038, 0x00000000},
722 { 0x00008048, 0x40000000 }, 163 {0x0000803c, 0x00000000},
723 { 0x00008054, 0x00000000 }, 164 {0x00008048, 0x40000000},
724 { 0x00008058, 0x00000000 }, 165 {0x00008054, 0x00000000},
725 { 0x0000805c, 0x000fc78f }, 166 {0x00008058, 0x00000000},
726 { 0x00008060, 0x0000000f }, 167 {0x0000805c, 0x000fc78f},
727 { 0x00008064, 0x00000000 }, 168 {0x00008060, 0x0000000f},
728 { 0x00008070, 0x00000000 }, 169 {0x00008064, 0x00000000},
729 { 0x000080c0, 0x2a80001a }, 170 {0x00008070, 0x00000000},
730 { 0x000080c4, 0x05dc01e0 }, 171 {0x000080c0, 0x2a80001a},
731 { 0x000080c8, 0x1f402710 }, 172 {0x000080c4, 0x05dc01e0},
732 { 0x000080cc, 0x01f40000 }, 173 {0x000080c8, 0x1f402710},
733 { 0x000080d0, 0x00001e00 }, 174 {0x000080cc, 0x01f40000},
734 { 0x000080d4, 0x00000000 }, 175 {0x000080d0, 0x00001e00},
735 { 0x000080d8, 0x00400000 }, 176 {0x000080d4, 0x00000000},
736 { 0x000080e0, 0xffffffff }, 177 {0x000080d8, 0x00400000},
737 { 0x000080e4, 0x0000ffff }, 178 {0x000080e0, 0xffffffff},
738 { 0x000080e8, 0x003f3f3f }, 179 {0x000080e4, 0x0000ffff},
739 { 0x000080ec, 0x00000000 }, 180 {0x000080e8, 0x003f3f3f},
740 { 0x000080f0, 0x00000000 }, 181 {0x000080ec, 0x00000000},
741 { 0x000080f4, 0x00000000 }, 182 {0x000080f0, 0x00000000},
742 { 0x000080f8, 0x00000000 }, 183 {0x000080f4, 0x00000000},
743 { 0x000080fc, 0x00020000 }, 184 {0x000080f8, 0x00000000},
744 { 0x00008100, 0x00020000 }, 185 {0x000080fc, 0x00020000},
745 { 0x00008104, 0x00000001 }, 186 {0x00008100, 0x00020000},
746 { 0x00008108, 0x00000052 }, 187 {0x00008104, 0x00000001},
747 { 0x0000810c, 0x00000000 }, 188 {0x00008108, 0x00000052},
748 { 0x00008110, 0x00000168 }, 189 {0x0000810c, 0x00000000},
749 { 0x00008118, 0x000100aa }, 190 {0x00008110, 0x00000168},
750 { 0x0000811c, 0x00003210 }, 191 {0x00008118, 0x000100aa},
751 { 0x00008124, 0x00000000 }, 192 {0x0000811c, 0x00003210},
752 { 0x00008128, 0x00000000 }, 193 {0x00008124, 0x00000000},
753 { 0x0000812c, 0x00000000 }, 194 {0x00008128, 0x00000000},
754 { 0x00008130, 0x00000000 }, 195 {0x0000812c, 0x00000000},
755 { 0x00008134, 0x00000000 }, 196 {0x00008130, 0x00000000},
756 { 0x00008138, 0x00000000 }, 197 {0x00008134, 0x00000000},
757 { 0x0000813c, 0x00000000 }, 198 {0x00008138, 0x00000000},
758 { 0x00008144, 0xffffffff }, 199 {0x0000813c, 0x00000000},
759 { 0x00008168, 0x00000000 }, 200 {0x00008144, 0xffffffff},
760 { 0x0000816c, 0x00000000 }, 201 {0x00008168, 0x00000000},
761 { 0x00008170, 0x32143320 }, 202 {0x0000816c, 0x00000000},
762 { 0x00008174, 0xfaa4fa50 }, 203 {0x00008170, 0x32143320},
763 { 0x00008178, 0x00000100 }, 204 {0x00008174, 0xfaa4fa50},
764 { 0x0000817c, 0x00000000 }, 205 {0x00008178, 0x00000100},
765 { 0x000081c0, 0x00000000 }, 206 {0x0000817c, 0x00000000},
766 { 0x000081ec, 0x00000000 }, 207 {0x000081c0, 0x00000000},
767 { 0x000081f0, 0x00000000 }, 208 {0x000081ec, 0x00000000},
768 { 0x000081f4, 0x00000000 }, 209 {0x000081f0, 0x00000000},
769 { 0x000081f8, 0x00000000 }, 210 {0x000081f4, 0x00000000},
770 { 0x000081fc, 0x00000000 }, 211 {0x000081f8, 0x00000000},
771 { 0x00008200, 0x00000000 }, 212 {0x000081fc, 0x00000000},
772 { 0x00008204, 0x00000000 }, 213 {0x00008200, 0x00000000},
773 { 0x00008208, 0x00000000 }, 214 {0x00008204, 0x00000000},
774 { 0x0000820c, 0x00000000 }, 215 {0x00008208, 0x00000000},
775 { 0x00008210, 0x00000000 }, 216 {0x0000820c, 0x00000000},
776 { 0x00008214, 0x00000000 }, 217 {0x00008210, 0x00000000},
777 { 0x00008218, 0x00000000 }, 218 {0x00008214, 0x00000000},
778 { 0x0000821c, 0x00000000 }, 219 {0x00008218, 0x00000000},
779 { 0x00008220, 0x00000000 }, 220 {0x0000821c, 0x00000000},
780 { 0x00008224, 0x00000000 }, 221 {0x00008220, 0x00000000},
781 { 0x00008228, 0x00000000 }, 222 {0x00008224, 0x00000000},
782 { 0x0000822c, 0x00000000 }, 223 {0x00008228, 0x00000000},
783 { 0x00008230, 0x00000000 }, 224 {0x0000822c, 0x00000000},
784 { 0x00008234, 0x00000000 }, 225 {0x00008230, 0x00000000},
785 { 0x00008238, 0x00000000 }, 226 {0x00008234, 0x00000000},
786 { 0x0000823c, 0x00000000 }, 227 {0x00008238, 0x00000000},
787 { 0x00008240, 0x00100000 }, 228 {0x0000823c, 0x00000000},
788 { 0x00008244, 0x0010f400 }, 229 {0x00008240, 0x00100000},
789 { 0x00008248, 0x00000100 }, 230 {0x00008244, 0x0010f400},
790 { 0x0000824c, 0x0001e800 }, 231 {0x00008248, 0x00000100},
791 { 0x00008250, 0x00000000 }, 232 {0x0000824c, 0x0001e800},
792 { 0x00008254, 0x00000000 }, 233 {0x00008250, 0x00000000},
793 { 0x00008258, 0x00000000 }, 234 {0x00008254, 0x00000000},
794 { 0x0000825c, 0x400000ff }, 235 {0x00008258, 0x00000000},
795 { 0x00008260, 0x00080922 }, 236 {0x0000825c, 0x400000ff},
796 { 0x00008264, 0x88a00010 }, 237 {0x00008260, 0x00080922},
797 { 0x00008270, 0x00000000 }, 238 {0x00008264, 0x88a00010},
798 { 0x00008274, 0x40000000 }, 239 {0x00008270, 0x00000000},
799 { 0x00008278, 0x003e4180 }, 240 {0x00008274, 0x40000000},
800 { 0x0000827c, 0x00000000 }, 241 {0x00008278, 0x003e4180},
801 { 0x00008284, 0x0000002c }, 242 {0x0000827c, 0x00000000},
802 { 0x00008288, 0x0000002c }, 243 {0x00008284, 0x0000002c},
803 { 0x0000828c, 0x00000000 }, 244 {0x00008288, 0x0000002c},
804 { 0x00008294, 0x00000000 }, 245 {0x0000828c, 0x00000000},
805 { 0x00008298, 0x00000000 }, 246 {0x00008294, 0x00000000},
806 { 0x0000829c, 0x00000000 }, 247 {0x00008298, 0x00000000},
807 { 0x00008300, 0x00000040 }, 248 {0x0000829c, 0x00000000},
808 { 0x00008314, 0x00000000 }, 249 {0x00008300, 0x00000040},
809 { 0x00008328, 0x00000000 }, 250 {0x00008314, 0x00000000},
810 { 0x0000832c, 0x00000007 }, 251 {0x00008328, 0x00000000},
811 { 0x00008330, 0x00000302 }, 252 {0x0000832c, 0x00000007},
812 { 0x00008334, 0x00000e00 }, 253 {0x00008330, 0x00000302},
813 { 0x00008338, 0x00ff0000 }, 254 {0x00008334, 0x00000e00},
814 { 0x0000833c, 0x00000000 }, 255 {0x00008338, 0x00ff0000},
815 { 0x00008340, 0x000107ff }, 256 {0x0000833c, 0x00000000},
816 { 0x00008344, 0x00481043 }, 257 {0x00008340, 0x000107ff},
817 { 0x00009808, 0x00000000 }, 258 {0x00008344, 0x00481043},
818 { 0x0000980c, 0xafa68e30 }, 259 {0x00009808, 0x00000000},
819 { 0x00009810, 0xfd14e000 }, 260 {0x0000980c, 0xafa68e30},
820 { 0x00009814, 0x9c0a9f6b }, 261 {0x00009810, 0xfd14e000},
821 { 0x0000981c, 0x00000000 }, 262 {0x00009814, 0x9c0a9f6b},
822 { 0x0000982c, 0x0000a000 }, 263 {0x0000981c, 0x00000000},
823 { 0x00009830, 0x00000000 }, 264 {0x0000982c, 0x0000a000},
824 { 0x0000983c, 0x00200400 }, 265 {0x00009830, 0x00000000},
825 { 0x0000984c, 0x0040233c }, 266 {0x0000983c, 0x00200400},
826 { 0x0000a84c, 0x0040233c }, 267 {0x0000984c, 0x0040233c},
827 { 0x00009854, 0x00000044 }, 268 {0x0000a84c, 0x0040233c},
828 { 0x00009900, 0x00000000 }, 269 {0x00009854, 0x00000044},
829 { 0x00009904, 0x00000000 }, 270 {0x00009900, 0x00000000},
830 { 0x00009908, 0x00000000 }, 271 {0x00009904, 0x00000000},
831 { 0x0000990c, 0x00000000 }, 272 {0x00009908, 0x00000000},
832 { 0x00009910, 0x01002310 }, 273 {0x0000990c, 0x00000000},
833 { 0x0000991c, 0x10000fff }, 274 {0x00009910, 0x01002310},
834 { 0x00009920, 0x04900000 }, 275 {0x0000991c, 0x10000fff},
835 { 0x0000a920, 0x04900000 }, 276 {0x00009920, 0x04900000},
836 { 0x00009928, 0x00000001 }, 277 {0x0000a920, 0x04900000},
837 { 0x0000992c, 0x00000004 }, 278 {0x00009928, 0x00000001},
838 { 0x00009934, 0x1e1f2022 }, 279 {0x0000992c, 0x00000004},
839 { 0x00009938, 0x0a0b0c0d }, 280 {0x00009934, 0x1e1f2022},
840 { 0x0000993c, 0x00000000 }, 281 {0x00009938, 0x0a0b0c0d},
841 { 0x00009948, 0x9280c00a }, 282 {0x0000993c, 0x00000000},
842 { 0x0000994c, 0x00020028 }, 283 {0x00009948, 0x9280c00a},
843 { 0x00009954, 0x5f3ca3de }, 284 {0x0000994c, 0x00020028},
844 { 0x00009958, 0x2108ecff }, 285 {0x00009954, 0x5f3ca3de},
845 { 0x00009940, 0x14750604 }, 286 {0x00009958, 0x2108ecff},
846 { 0x0000c95c, 0x004b6a8e }, 287 {0x00009940, 0x14750604},
847 { 0x00009970, 0x190fb515 }, 288 {0x0000c95c, 0x004b6a8e},
848 { 0x00009974, 0x00000000 }, 289 {0x00009970, 0x190fb514},
849 { 0x00009978, 0x00000001 }, 290 {0x00009974, 0x00000000},
850 { 0x0000997c, 0x00000000 }, 291 {0x00009978, 0x00000001},
851 { 0x00009980, 0x00000000 }, 292 {0x0000997c, 0x00000000},
852 { 0x00009984, 0x00000000 }, 293 {0x00009980, 0x00000000},
853 { 0x00009988, 0x00000000 }, 294 {0x00009984, 0x00000000},
854 { 0x0000998c, 0x00000000 }, 295 {0x00009988, 0x00000000},
855 { 0x00009990, 0x00000000 }, 296 {0x0000998c, 0x00000000},
856 { 0x00009994, 0x00000000 }, 297 {0x00009990, 0x00000000},
857 { 0x00009998, 0x00000000 }, 298 {0x00009994, 0x00000000},
858 { 0x0000999c, 0x00000000 }, 299 {0x00009998, 0x00000000},
859 { 0x000099a0, 0x00000000 }, 300 {0x0000999c, 0x00000000},
860 { 0x000099a4, 0x00000001 }, 301 {0x000099a0, 0x00000000},
861 { 0x000099a8, 0x201fff00 }, 302 {0x000099a4, 0x00000001},
862 { 0x000099ac, 0x006f0000 }, 303 {0x000099a8, 0x201fff00},
863 { 0x000099b0, 0x03051000 }, 304 {0x000099ac, 0x006f0000},
864 { 0x000099b4, 0x00000820 }, 305 {0x000099b0, 0x03051000},
865 { 0x000099dc, 0x00000000 }, 306 {0x000099b4, 0x00000820},
866 { 0x000099e0, 0x00000000 }, 307 {0x000099c4, 0x06336f77},
867 { 0x000099e4, 0xaaaaaaaa }, 308 {0x000099c8, 0x6af6532f},
868 { 0x000099e8, 0x3c466478 }, 309 {0x000099cc, 0x08f186c8},
869 { 0x000099ec, 0x0cc80caa }, 310 {0x000099d0, 0x00046384},
870 { 0x000099f0, 0x00000000 }, 311 {0x000099d4, 0x00000000},
871 { 0x000099fc, 0x00001042 }, 312 {0x000099d8, 0x00000000},
872 { 0x0000a208, 0x803e4788 }, 313 {0x000099dc, 0x00000000},
873 { 0x0000a210, 0x4080a333 }, 314 {0x000099e0, 0x00000000},
874 { 0x0000a214, 0x40206c10 }, 315 {0x000099e4, 0xaaaaaaaa},
875 { 0x0000a218, 0x009c4060 }, 316 {0x000099e8, 0x3c466478},
876 { 0x0000a220, 0x01834061 }, 317 {0x000099ec, 0x0cc80caa},
877 { 0x0000a224, 0x00000400 }, 318 {0x000099f0, 0x00000000},
878 { 0x0000a228, 0x000003b5 }, 319 {0x000099fc, 0x00001042},
879 { 0x0000a22c, 0x233f7180 }, 320 {0x0000a208, 0x803e4788},
880 { 0x0000a234, 0x20202020 }, 321 {0x0000a210, 0x4080a333},
881 { 0x0000a238, 0x20202020 }, 322 {0x0000a214, 0x40206c10},
882 { 0x0000a240, 0x38490a20 }, 323 {0x0000a218, 0x009c4060},
883 { 0x0000a244, 0x00007bb6 }, 324 {0x0000a220, 0x01834061},
884 { 0x0000a248, 0x0fff3ffc }, 325 {0x0000a224, 0x00000400},
885 { 0x0000a24c, 0x00000000 }, 326 {0x0000a228, 0x000003b5},
886 { 0x0000a254, 0x00000000 }, 327 {0x0000a22c, 0x233f7180},
887 { 0x0000a258, 0x0cdbd380 }, 328 {0x0000a234, 0x20202020},
888 { 0x0000a25c, 0x0f0f0f01 }, 329 {0x0000a238, 0x20202020},
889 { 0x0000a260, 0xdfa91f01 }, 330 {0x0000a240, 0x38490a20},
890 { 0x0000a268, 0x00000000 }, 331 {0x0000a244, 0x00007bb6},
891 { 0x0000a26c, 0x0e79e5c6 }, 332 {0x0000a248, 0x0fff3ffc},
892 { 0x0000b26c, 0x0e79e5c6 }, 333 {0x0000a24c, 0x00000000},
893 { 0x0000d270, 0x00820820 }, 334 {0x0000a254, 0x00000000},
894 { 0x0000a278, 0x1ce739ce }, 335 {0x0000a258, 0x0cdbd380},
895 { 0x0000d35c, 0x07ffffef }, 336 {0x0000a25c, 0x0f0f0f01},
896 { 0x0000d360, 0x0fffffe7 }, 337 {0x0000a260, 0xdfa91f01},
897 { 0x0000d364, 0x17ffffe5 }, 338 {0x0000a268, 0x00000000},
898 { 0x0000d368, 0x1fffffe4 }, 339 {0x0000a26c, 0x0e79e5c6},
899 { 0x0000d36c, 0x37ffffe3 }, 340 {0x0000b26c, 0x0e79e5c6},
900 { 0x0000d370, 0x3fffffe3 }, 341 {0x0000d270, 0x00820820},
901 { 0x0000d374, 0x57ffffe3 }, 342 {0x0000a278, 0x1ce739ce},
902 { 0x0000d378, 0x5fffffe2 }, 343 {0x0000d35c, 0x07ffffef},
903 { 0x0000d37c, 0x7fffffe2 }, 344 {0x0000d360, 0x0fffffe7},
904 { 0x0000d380, 0x7f3c7bba }, 345 {0x0000d364, 0x17ffffe5},
905 { 0x0000d384, 0xf3307ff0 }, 346 {0x0000d368, 0x1fffffe4},
906 { 0x0000a38c, 0x20202020 }, 347 {0x0000d36c, 0x37ffffe3},
907 { 0x0000a390, 0x20202020 }, 348 {0x0000d370, 0x3fffffe3},
908 { 0x0000a394, 0x1ce739ce }, 349 {0x0000d374, 0x57ffffe3},
909 { 0x0000a398, 0x000001ce }, 350 {0x0000d378, 0x5fffffe2},
910 { 0x0000a39c, 0x00000001 }, 351 {0x0000d37c, 0x7fffffe2},
911 { 0x0000a3a0, 0x00000000 }, 352 {0x0000d380, 0x7f3c7bba},
912 { 0x0000a3a4, 0x00000000 }, 353 {0x0000d384, 0xf3307ff0},
913 { 0x0000a3a8, 0x00000000 }, 354 {0x0000a38c, 0x20202020},
914 { 0x0000a3ac, 0x00000000 }, 355 {0x0000a390, 0x20202020},
915 { 0x0000a3b0, 0x00000000 }, 356 {0x0000a394, 0x1ce739ce},
916 { 0x0000a3b4, 0x00000000 }, 357 {0x0000a398, 0x000001ce},
917 { 0x0000a3b8, 0x00000000 }, 358 {0x0000a39c, 0x00000001},
918 { 0x0000a3bc, 0x00000000 }, 359 {0x0000a3a0, 0x00000000},
919 { 0x0000a3c0, 0x00000000 }, 360 {0x0000a3a4, 0x00000000},
920 { 0x0000a3c4, 0x00000000 }, 361 {0x0000a3a8, 0x00000000},
921 { 0x0000a3c8, 0x00000246 }, 362 {0x0000a3ac, 0x00000000},
922 { 0x0000a3cc, 0x20202020 }, 363 {0x0000a3b0, 0x00000000},
923 { 0x0000a3d0, 0x20202020 }, 364 {0x0000a3b4, 0x00000000},
924 { 0x0000a3d4, 0x20202020 }, 365 {0x0000a3b8, 0x00000000},
925 { 0x0000a3dc, 0x1ce739ce }, 366 {0x0000a3bc, 0x00000000},
926 { 0x0000a3e0, 0x000001ce }, 367 {0x0000a3c0, 0x00000000},
927 { 0x0000a3e4, 0x00000000 }, 368 {0x0000a3c4, 0x00000000},
928 { 0x0000a3e8, 0x18c43433 }, 369 {0x0000a3c8, 0x00000246},
929 { 0x0000a3ec, 0x00f70081 }, 370 {0x0000a3cc, 0x20202020},
930 { 0x00007800, 0x00040000 }, 371 {0x0000a3d0, 0x20202020},
931 { 0x00007804, 0xdb005012 }, 372 {0x0000a3d4, 0x20202020},
932 { 0x00007808, 0x04924914 }, 373 {0x0000a3dc, 0x1ce739ce},
933 { 0x0000780c, 0x21084210 }, 374 {0x0000a3e0, 0x000001ce},
934 { 0x00007810, 0x6d801300 }, 375 {0x0000a3e4, 0x00000000},
935 { 0x00007818, 0x07e41000 }, 376 {0x0000a3e8, 0x18c43433},
936 { 0x00007824, 0x00040000 }, 377 {0x00007800, 0x00040000},
937 { 0x00007828, 0xdb005012 }, 378 {0x00007804, 0xdb005012},
938 { 0x0000782c, 0x04924914 }, 379 {0x00007808, 0x04924914},
939 { 0x00007830, 0x21084210 }, 380 {0x0000780c, 0x21084210},
940 { 0x00007834, 0x6d801300 }, 381 {0x00007810, 0x6d801300},
941 { 0x0000783c, 0x07e40000 }, 382 {0x00007818, 0x07e41000},
942 { 0x00007848, 0x00100000 }, 383 {0x00007824, 0x00040000},
943 { 0x0000784c, 0x773f0567 }, 384 {0x00007828, 0xdb005012},
944 { 0x00007850, 0x54214514 }, 385 {0x0000782c, 0x04924914},
945 { 0x00007854, 0x12035828 }, 386 {0x00007830, 0x21084210},
946 { 0x00007858, 0x9259269a }, 387 {0x00007834, 0x6d801300},
947 { 0x00007860, 0x52802000 }, 388 {0x0000783c, 0x07e40000},
948 { 0x00007864, 0x0a8e370e }, 389 {0x00007848, 0x00100000},
949 { 0x00007868, 0xc0102850 }, 390 {0x0000784c, 0x773f0567},
950 { 0x0000786c, 0x812d4000 }, 391 {0x00007850, 0x54214514},
951 { 0x00007870, 0x807ec400 }, 392 {0x00007854, 0x12035828},
952 { 0x00007874, 0x001b6db0 }, 393 {0x00007858, 0x9259269a},
953 { 0x00007878, 0x00376b63 }, 394 {0x00007860, 0x52802000},
954 { 0x0000787c, 0x06db6db6 }, 395 {0x00007864, 0x0a8e370e},
955 { 0x00007880, 0x006d8000 }, 396 {0x00007868, 0xc0102850},
956 { 0x00007884, 0xffeffffe }, 397 {0x0000786c, 0x812d4000},
957 { 0x00007888, 0xffeffffe }, 398 {0x00007870, 0x807ec400},
958 { 0x0000788c, 0x00010000 }, 399 {0x00007874, 0x001b6db0},
959 { 0x00007890, 0x02060aeb }, 400 {0x00007878, 0x00376b63},
960 { 0x00007898, 0x2a850160 }, 401 {0x0000787c, 0x06db6db6},
402 {0x00007880, 0x006d8000},
403 {0x00007884, 0xffeffffe},
404 {0x00007888, 0xffeffffe},
405 {0x0000788c, 0x00010000},
406 {0x00007890, 0x02060aeb},
407 {0x00007898, 0x2a850160},
961}; 408};
962 409
963static const u32 ar9280Modes_fast_clock_9280_2[][3] = { 410static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
964 { 0x00001030, 0x00000268, 0x000004d0 }, 411 /* Addr 5G_HT20 5G_HT40 */
965 { 0x00001070, 0x0000018c, 0x00000318 }, 412 {0x00001030, 0x00000268, 0x000004d0},
966 { 0x000010b0, 0x00000fd0, 0x00001fa0 }, 413 {0x00001070, 0x0000018c, 0x00000318},
967 { 0x00008014, 0x044c044c, 0x08980898 }, 414 {0x000010b0, 0x00000fd0, 0x00001fa0},
968 { 0x0000801c, 0x148ec02b, 0x148ec057 }, 415 {0x00008014, 0x044c044c, 0x08980898},
969 { 0x00008318, 0x000044c0, 0x00008980 }, 416 {0x0000801c, 0x148ec02b, 0x148ec057},
970 { 0x00009820, 0x02020200, 0x02020200 }, 417 {0x00008318, 0x000044c0, 0x00008980},
971 { 0x00009824, 0x01000f0f, 0x01000f0f }, 418 {0x00009820, 0x02020200, 0x02020200},
972 { 0x00009828, 0x0b020001, 0x0b020001 }, 419 {0x00009824, 0x01000f0f, 0x01000f0f},
973 { 0x00009834, 0x00000f0f, 0x00000f0f }, 420 {0x00009828, 0x0b020001, 0x0b020001},
974 { 0x00009844, 0x03721821, 0x03721821 }, 421 {0x00009834, 0x00000f0f, 0x00000f0f},
975 { 0x00009914, 0x00000898, 0x00001130 }, 422 {0x00009844, 0x03721821, 0x03721821},
976 { 0x00009918, 0x0000000b, 0x00000016 }, 423 {0x00009914, 0x00000898, 0x00001130},
424 {0x00009918, 0x0000000b, 0x00000016},
977}; 425};
978 426
979static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = { 427static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = {
980 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, 428 {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290},
981 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, 429 {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300},
982 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, 430 {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304},
983 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, 431 {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308},
984 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, 432 {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c},
985 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, 433 {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000},
986 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, 434 {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004},
987 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, 435 {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008},
988 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, 436 {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c},
989 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, 437 {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080},
990 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, 438 {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084},
991 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, 439 {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088},
992 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, 440 {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c},
993 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, 441 {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100},
994 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, 442 {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104},
995 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, 443 {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108},
996 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, 444 {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c},
997 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, 445 {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110},
998 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, 446 {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114},
999 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, 447 {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180},
1000 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, 448 {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184},
1001 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, 449 {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188},
1002 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, 450 {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c},
1003 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, 451 {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190},
1004 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, 452 {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194},
1005 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, 453 {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0},
1006 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, 454 {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c},
1007 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, 455 {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8},
1008 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, 456 {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284},
1009 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, 457 {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288},
1010 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, 458 {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224},
1011 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, 459 {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290},
1012 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, 460 {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300},
1013 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, 461 {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304},
1014 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, 462 {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308},
1015 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, 463 {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c},
1016 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, 464 {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380},
1017 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, 465 {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384},
1018 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, 466 {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700},
1019 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, 467 {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704},
1020 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, 468 {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708},
1021 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, 469 {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c},
1022 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, 470 {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780},
1023 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, 471 {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784},
1024 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, 472 {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00},
1025 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, 473 {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04},
1026 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, 474 {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08},
1027 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, 475 {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c},
1028 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10, 0x00008b10 }, 476 {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10, 0x00008b10},
1029 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b14, 0x00008b14, 0x00008b14 }, 477 {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b80, 0x00008b80, 0x00008b80},
1030 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b01, 0x00008b01, 0x00008b01 }, 478 {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b84, 0x00008b84, 0x00008b84},
1031 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b05, 0x00008b05, 0x00008b05 }, 479 {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b88, 0x00008b88, 0x00008b88},
1032 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b09, 0x00008b09, 0x00008b09 }, 480 {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b8c, 0x00008b8c, 0x00008b8c},
1033 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b0d, 0x00008b0d, 0x00008b0d }, 481 {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b90, 0x00008b90, 0x00008b90},
1034 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b11, 0x00008b11, 0x00008b11 }, 482 {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b94, 0x00008b94, 0x00008b94},
1035 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008b15, 0x00008b15, 0x00008b15 }, 483 {0x00009adc, 0x0000b390, 0x0000b390, 0x00008b98, 0x00008b98, 0x00008b98},
1036 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008b02, 0x00008b02, 0x00008b02 }, 484 {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008ba4, 0x00008ba4, 0x00008ba4},
1037 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008b06, 0x00008b06, 0x00008b06 }, 485 {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008ba8, 0x00008ba8, 0x00008ba8},
1038 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00008b0a, 0x00008b0a, 0x00008b0a }, 486 {0x00009ae8, 0x0000b780, 0x0000b780, 0x00008bac, 0x00008bac, 0x00008bac},
1039 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00008b0e, 0x00008b0e, 0x00008b0e }, 487 {0x00009aec, 0x0000b784, 0x0000b784, 0x00008bb0, 0x00008bb0, 0x00008bb0},
1040 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00008b12, 0x00008b12, 0x00008b12 }, 488 {0x00009af0, 0x0000b788, 0x0000b788, 0x00008bb4, 0x00008bb4, 0x00008bb4},
1041 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008b16, 0x00008b16, 0x00008b16 }, 489 {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008ba1, 0x00008ba1, 0x00008ba1},
1042 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00008b03, 0x00008b03, 0x00008b03 }, 490 {0x00009af8, 0x0000b790, 0x0000b790, 0x00008ba5, 0x00008ba5, 0x00008ba5},
1043 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00008b07, 0x00008b07, 0x00008b07 }, 491 {0x00009afc, 0x0000b794, 0x0000b794, 0x00008ba9, 0x00008ba9, 0x00008ba9},
1044 { 0x00009b00, 0x0000b798, 0x0000b798, 0x00008b0b, 0x00008b0b, 0x00008b0b }, 492 {0x00009b00, 0x0000b798, 0x0000b798, 0x00008bad, 0x00008bad, 0x00008bad},
1045 { 0x00009b04, 0x0000d784, 0x0000d784, 0x00008b0f, 0x00008b0f, 0x00008b0f }, 493 {0x00009b04, 0x0000d784, 0x0000d784, 0x00008bb1, 0x00008bb1, 0x00008bb1},
1046 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00008b13, 0x00008b13, 0x00008b13 }, 494 {0x00009b08, 0x0000d788, 0x0000d788, 0x00008bb5, 0x00008bb5, 0x00008bb5},
1047 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008b17, 0x00008b17, 0x00008b17 }, 495 {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008ba2, 0x00008ba2, 0x00008ba2},
1048 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00008b23, 0x00008b23, 0x00008b23 }, 496 {0x00009b10, 0x0000d790, 0x0000d790, 0x00008ba6, 0x00008ba6, 0x00008ba6},
1049 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00008b27, 0x00008b27, 0x00008b27 }, 497 {0x00009b14, 0x0000f780, 0x0000f780, 0x00008baa, 0x00008baa, 0x00008baa},
1050 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00008b2b, 0x00008b2b, 0x00008b2b }, 498 {0x00009b18, 0x0000f784, 0x0000f784, 0x00008bae, 0x00008bae, 0x00008bae},
1051 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00008b2f, 0x00008b2f, 0x00008b2f }, 499 {0x00009b1c, 0x0000f788, 0x0000f788, 0x00008bb2, 0x00008bb2, 0x00008bb2},
1052 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008b33, 0x00008b33, 0x00008b33 }, 500 {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008bb6, 0x00008bb6, 0x00008bb6},
1053 { 0x00009b24, 0x0000f790, 0x0000f790, 0x00008b37, 0x00008b37, 0x00008b37 }, 501 {0x00009b24, 0x0000f790, 0x0000f790, 0x00008ba3, 0x00008ba3, 0x00008ba3},
1054 { 0x00009b28, 0x0000f794, 0x0000f794, 0x00008b43, 0x00008b43, 0x00008b43 }, 502 {0x00009b28, 0x0000f794, 0x0000f794, 0x00008ba7, 0x00008ba7, 0x00008ba7},
1055 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008b47, 0x00008b47, 0x00008b47 }, 503 {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008bab, 0x00008bab, 0x00008bab},
1056 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008b4b, 0x00008b4b, 0x00008b4b }, 504 {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008baf, 0x00008baf, 0x00008baf},
1057 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008b4f, 0x00008b4f, 0x00008b4f }, 505 {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008bb3, 0x00008bb3, 0x00008bb3},
1058 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008b53, 0x00008b53, 0x00008b53 }, 506 {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008bb7, 0x00008bb7, 0x00008bb7},
1059 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008b57, 0x00008b57, 0x00008b57 }, 507 {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008bc3, 0x00008bc3, 0x00008bc3},
1060 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 508 {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008bc7, 0x00008bc7, 0x00008bc7},
1061 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 509 {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008bcb, 0x00008bcb, 0x00008bcb},
1062 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 510 {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008bcf, 0x00008bcf, 0x00008bcf},
1063 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 511 {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008bd3, 0x00008bd3, 0x00008bd3},
1064 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 512 {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008bd7, 0x00008bd7, 0x00008bd7},
1065 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 513 {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1066 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 514 {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1067 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 515 {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1068 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 516 {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1069 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 517 {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1070 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 518 {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1071 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 519 {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1072 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 520 {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1073 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 521 {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1074 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 522 {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1075 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 523 {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1076 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 524 {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1077 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 525 {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1078 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 526 {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1079 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 527 {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1080 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 528 {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1081 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 529 {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1082 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 530 {0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1083 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 531 {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1084 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 532 {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1085 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 533 {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1086 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 534 {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1087 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 535 {0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1088 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 536 {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1089 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 537 {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1090 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 538 {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1091 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 539 {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1092 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 540 {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1093 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 541 {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1094 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 542 {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1095 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 543 {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1096 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 544 {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1097 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 545 {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1098 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 546 {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1099 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 547 {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1100 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 548 {0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1101 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 549 {0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1102 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 550 {0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1103 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 551 {0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1104 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 552 {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1105 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 553 {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1106 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 554 {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1107 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, 555 {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
1108 { 0x00009848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 }, 556 {0x00009848, 0x00001066, 0x00001066, 0x00001055, 0x00001055, 0x00001055},
1109 { 0x0000a848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 }, 557 {0x0000a848, 0x00001066, 0x00001066, 0x00001055, 0x00001055, 0x00001055},
1110}; 558};
1111 559
1112static const u32 ar9280Modes_original_rxgain_9280_2[][6] = { 560static const u32 ar9280Modes_original_rxgain_9280_2[][6] = {
1113 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, 561 {0x00009a00, 0x00008184, 0x00008184, 0x00008000, 0x00008000, 0x00008000},
1114 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, 562 {0x00009a04, 0x00008188, 0x00008188, 0x00008000, 0x00008000, 0x00008000},
1115 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, 563 {0x00009a08, 0x0000818c, 0x0000818c, 0x00008000, 0x00008000, 0x00008000},
1116 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, 564 {0x00009a0c, 0x00008190, 0x00008190, 0x00008000, 0x00008000, 0x00008000},
1117 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, 565 {0x00009a10, 0x00008194, 0x00008194, 0x00008000, 0x00008000, 0x00008000},
1118 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, 566 {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000},
1119 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, 567 {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004},
1120 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, 568 {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008},
1121 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, 569 {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c},
1122 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, 570 {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080},
1123 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, 571 {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084},
1124 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, 572 {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088},
1125 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, 573 {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c},
1126 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, 574 {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100},
1127 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, 575 {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104},
1128 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, 576 {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108},
1129 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, 577 {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c},
1130 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, 578 {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110},
1131 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, 579 {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114},
1132 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, 580 {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180},
1133 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, 581 {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184},
1134 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, 582 {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188},
1135 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, 583 {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c},
1136 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, 584 {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190},
1137 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, 585 {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194},
1138 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, 586 {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0},
1139 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, 587 {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c},
1140 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, 588 {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8},
1141 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, 589 {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284},
1142 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, 590 {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288},
1143 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, 591 {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224},
1144 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, 592 {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290},
1145 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, 593 {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300},
1146 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, 594 {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304},
1147 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, 595 {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308},
1148 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, 596 {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c},
1149 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, 597 {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380},
1150 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, 598 {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384},
1151 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, 599 {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700},
1152 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, 600 {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704},
1153 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, 601 {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708},
1154 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, 602 {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c},
1155 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, 603 {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780},
1156 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, 604 {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784},
1157 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, 605 {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00},
1158 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, 606 {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04},
1159 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, 607 {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08},
1160 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, 608 {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c},
1161 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, 609 {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80},
1162 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, 610 {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84},
1163 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, 611 {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88},
1164 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, 612 {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c},
1165 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, 613 {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90},
1166 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, 614 {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80},
1167 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, 615 {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84},
1168 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, 616 {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88},
1169 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, 617 {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c},
1170 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, 618 {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90},
1171 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c }, 619 {0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c},
1172 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 }, 620 {0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310},
1173 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 }, 621 {0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384},
1174 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 }, 622 {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388},
1175 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 }, 623 {0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324},
1176 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 }, 624 {0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704},
1177 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 }, 625 {0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4},
1178 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 }, 626 {0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8},
1179 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 }, 627 {0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710},
1180 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 }, 628 {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714},
1181 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 }, 629 {0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720},
1182 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 }, 630 {0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724},
1183 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 }, 631 {0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728},
1184 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c }, 632 {0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c},
1185 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 }, 633 {0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0},
1186 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 }, 634 {0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4},
1187 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 }, 635 {0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8},
1188 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 }, 636 {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0},
1189 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 }, 637 {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4},
1190 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 }, 638 {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8},
1191 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 }, 639 {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5},
1192 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 }, 640 {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9},
1193 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad }, 641 {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad},
1194 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 }, 642 {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1},
1195 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 }, 643 {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5},
1196 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 }, 644 {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9},
1197 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 }, 645 {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5},
1198 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 }, 646 {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9},
1199 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 }, 647 {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1},
1200 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 }, 648 {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5},
1201 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 }, 649 {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9},
1202 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 }, 650 {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6},
1203 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca }, 651 {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca},
1204 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce }, 652 {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce},
1205 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 }, 653 {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2},
1206 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 }, 654 {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6},
1207 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 }, 655 {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3},
1208 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 }, 656 {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7},
1209 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb }, 657 {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb},
1210 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf }, 658 {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf},
1211 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 }, 659 {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7},
1212 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db }, 660 {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db},
1213 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db }, 661 {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db},
1214 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db }, 662 {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db},
1215 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 663 {0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1216 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 664 {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1217 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 665 {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1218 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 666 {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1219 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 667 {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1220 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 668 {0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1221 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 669 {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1222 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 670 {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1223 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 671 {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1224 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 672 {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1225 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 673 {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1226 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 674 {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1227 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 675 {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1228 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 676 {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1229 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 677 {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1230 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 678 {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1231 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 679 {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1232 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 680 {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1233 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 681 {0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1234 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 682 {0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1235 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 683 {0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1236 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 684 {0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1237 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 685 {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1238 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 686 {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1239 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 687 {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1240 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 688 {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
1241 { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, 689 {0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063},
1242 { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, 690 {0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063},
1243}; 691};
1244 692
1245static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = { 693static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = {
1246 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, 694 {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290},
1247 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, 695 {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300},
1248 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, 696 {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304},
1249 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, 697 {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308},
1250 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, 698 {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c},
1251 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, 699 {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000},
1252 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, 700 {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004},
1253 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, 701 {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008},
1254 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, 702 {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c},
1255 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, 703 {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080},
1256 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, 704 {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084},
1257 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, 705 {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088},
1258 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, 706 {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c},
1259 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, 707 {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100},
1260 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, 708 {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104},
1261 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, 709 {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108},
1262 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, 710 {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c},
1263 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, 711 {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110},
1264 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, 712 {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114},
1265 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, 713 {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180},
1266 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, 714 {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184},
1267 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, 715 {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188},
1268 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, 716 {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c},
1269 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, 717 {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190},
1270 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, 718 {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194},
1271 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, 719 {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0},
1272 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, 720 {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c},
1273 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, 721 {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8},
1274 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, 722 {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284},
1275 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, 723 {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288},
1276 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, 724 {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224},
1277 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, 725 {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290},
1278 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, 726 {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300},
1279 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, 727 {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304},
1280 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, 728 {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308},
1281 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, 729 {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c},
1282 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, 730 {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380},
1283 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, 731 {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384},
1284 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, 732 {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700},
1285 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, 733 {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704},
1286 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, 734 {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708},
1287 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, 735 {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c},
1288 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, 736 {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780},
1289 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, 737 {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784},
1290 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, 738 {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00},
1291 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, 739 {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04},
1292 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, 740 {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08},
1293 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, 741 {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c},
1294 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, 742 {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80},
1295 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, 743 {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84},
1296 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, 744 {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88},
1297 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, 745 {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c},
1298 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, 746 {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90},
1299 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, 747 {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80},
1300 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, 748 {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84},
1301 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, 749 {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88},
1302 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, 750 {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c},
1303 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, 751 {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90},
1304 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310, 0x00009310 }, 752 {0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310, 0x00009310},
1305 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314, 0x00009314 }, 753 {0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314, 0x00009314},
1306 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320, 0x00009320 }, 754 {0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320, 0x00009320},
1307 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324, 0x00009324 }, 755 {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324, 0x00009324},
1308 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328, 0x00009328 }, 756 {0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328, 0x00009328},
1309 { 0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c, 0x0000932c }, 757 {0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c, 0x0000932c},
1310 { 0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330, 0x00009330 }, 758 {0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330, 0x00009330},
1311 { 0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334, 0x00009334 }, 759 {0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334, 0x00009334},
1312 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321, 0x00009321 }, 760 {0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321, 0x00009321},
1313 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325, 0x00009325 }, 761 {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325, 0x00009325},
1314 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329, 0x00009329 }, 762 {0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329, 0x00009329},
1315 { 0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d, 0x0000932d }, 763 {0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d, 0x0000932d},
1316 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331, 0x00009331 }, 764 {0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331, 0x00009331},
1317 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335, 0x00009335 }, 765 {0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335, 0x00009335},
1318 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322, 0x00009322 }, 766 {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322, 0x00009322},
1319 { 0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326, 0x00009326 }, 767 {0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326, 0x00009326},
1320 { 0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a, 0x0000932a }, 768 {0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a, 0x0000932a},
1321 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e, 0x0000932e }, 769 {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e, 0x0000932e},
1322 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332, 0x00009332 }, 770 {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332, 0x00009332},
1323 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336, 0x00009336 }, 771 {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336, 0x00009336},
1324 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323, 0x00009323 }, 772 {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323, 0x00009323},
1325 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327, 0x00009327 }, 773 {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327, 0x00009327},
1326 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b, 0x0000932b }, 774 {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b, 0x0000932b},
1327 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f, 0x0000932f }, 775 {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f, 0x0000932f},
1328 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333, 0x00009333 }, 776 {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333, 0x00009333},
1329 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337, 0x00009337 }, 777 {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337, 0x00009337},
1330 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343, 0x00009343 }, 778 {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343, 0x00009343},
1331 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347, 0x00009347 }, 779 {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347, 0x00009347},
1332 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b, 0x0000934b }, 780 {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b, 0x0000934b},
1333 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f, 0x0000934f }, 781 {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f, 0x0000934f},
1334 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353, 0x00009353 }, 782 {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353, 0x00009353},
1335 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357, 0x00009357 }, 783 {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357, 0x00009357},
1336 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b, 0x0000935b }, 784 {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b, 0x0000935b},
1337 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b, 0x0000935b }, 785 {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b, 0x0000935b},
1338 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b, 0x0000935b }, 786 {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b, 0x0000935b},
1339 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b, 0x0000935b }, 787 {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b, 0x0000935b},
1340 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b, 0x0000935b }, 788 {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b, 0x0000935b},
1341 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b, 0x0000935b }, 789 {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b, 0x0000935b},
1342 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b, 0x0000935b }, 790 {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b, 0x0000935b},
1343 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b, 0x0000935b }, 791 {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b, 0x0000935b},
1344 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b, 0x0000935b }, 792 {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b, 0x0000935b},
1345 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b, 0x0000935b }, 793 {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b, 0x0000935b},
1346 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b, 0x0000935b }, 794 {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b, 0x0000935b},
1347 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b, 0x0000935b }, 795 {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b, 0x0000935b},
1348 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 796 {0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1349 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 797 {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1350 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 798 {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1351 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 799 {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1352 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 800 {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1353 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 801 {0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1354 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 802 {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1355 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 803 {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1356 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 804 {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1357 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 805 {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1358 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 806 {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1359 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 807 {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1360 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 808 {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1361 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 809 {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1362 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 810 {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1363 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 811 {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1364 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 812 {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1365 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 813 {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1366 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 814 {0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1367 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 815 {0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1368 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 816 {0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1369 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 817 {0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1370 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 818 {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1371 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 819 {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1372 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 820 {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1373 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, 821 {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
1374 { 0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a }, 822 {0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a},
1375 { 0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a }, 823 {0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a},
1376}; 824};
1377 825
1378static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = { 826static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
1379 { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, 827 {0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652},
1380 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce }, 828 {0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce},
1381 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 829 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1382 { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 }, 830 {0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002},
1383 { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 }, 831 {0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008},
1384 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010, 0x0000c010 }, 832 {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010, 0x0000c010},
1385 { 0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012, 0x00010012 }, 833 {0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012, 0x00010012},
1386 { 0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014, 0x00013014 }, 834 {0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014, 0x00013014},
1387 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a }, 835 {0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a},
1388 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211 }, 836 {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211},
1389 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, 837 {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213},
1390 { 0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411, 0x00022411 }, 838 {0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411, 0x00022411},
1391 { 0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413, 0x00025413 }, 839 {0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413, 0x00025413},
1392 { 0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811, 0x00029811 }, 840 {0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811, 0x00029811},
1393 { 0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813, 0x0002c813 }, 841 {0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813, 0x0002c813},
1394 { 0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14, 0x00030a14 }, 842 {0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14, 0x00030a14},
1395 { 0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50, 0x00035a50 }, 843 {0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50, 0x00035a50},
1396 { 0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c, 0x00039c4c }, 844 {0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c, 0x00039c4c},
1397 { 0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a, 0x0003de8a }, 845 {0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a, 0x0003de8a},
1398 { 0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92, 0x00042e92 }, 846 {0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92, 0x00042e92},
1399 { 0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2, 0x00046ed2 }, 847 {0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2, 0x00046ed2},
1400 { 0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5, 0x0004bed5 }, 848 {0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5, 0x0004bed5},
1401 { 0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54, 0x0004ff54 }, 849 {0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54, 0x0004ff54},
1402 { 0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5, 0x00055fd5 }, 850 {0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5, 0x00055fd5},
1403 { 0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, 851 {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
1404 { 0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, 852 {0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
1405 { 0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, 853 {0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
1406 { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, 854 {0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
1407 { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, 855 {0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
1408 { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, 856 {0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
857 {0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
1409}; 858};
1410 859
1411static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = { 860static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
1412 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, 861 {0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652},
1413 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce }, 862 {0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce},
1414 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 863 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1415 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, 864 {0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002},
1416 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, 865 {0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009},
1417 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b }, 866 {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b},
1418 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 }, 867 {0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012},
1419 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 }, 868 {0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048},
1420 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a }, 869 {0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a},
1421 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 }, 870 {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211},
1422 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, 871 {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213},
1423 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b }, 872 {0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b},
1424 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 }, 873 {0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412},
1425 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 }, 874 {0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414},
1426 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a }, 875 {0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a},
1427 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 }, 876 {0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649},
1428 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b }, 877 {0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b},
1429 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 }, 878 {0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49},
1430 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 }, 879 {0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48},
1431 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a }, 880 {0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a},
1432 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 }, 881 {0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88},
1433 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, 882 {0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a},
1434 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, 883 {0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9},
1435 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, 884 {0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42},
1436 { 0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, 885 {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
1437 { 0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, 886 {0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
1438 { 0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, 887 {0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
1439 { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, 888 {0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
1440 { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, 889 {0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
1441 { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, 890 {0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
891 {0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
1442}; 892};
1443 893
1444static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = { 894static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
1445 {0x00004040, 0x9248fd00 }, 895 /* Addr allmodes */
1446 {0x00004040, 0x24924924 }, 896 {0x00004040, 0x9248fd00},
1447 {0x00004040, 0xa8000019 }, 897 {0x00004040, 0x24924924},
1448 {0x00004040, 0x13160820 }, 898 {0x00004040, 0xa8000019},
1449 {0x00004040, 0xe5980560 }, 899 {0x00004040, 0x13160820},
1450 {0x00004040, 0xc01dcffc }, 900 {0x00004040, 0xe5980560},
1451 {0x00004040, 0x1aaabe41 }, 901 {0x00004040, 0xc01dcffc},
1452 {0x00004040, 0xbe105554 }, 902 {0x00004040, 0x1aaabe41},
1453 {0x00004040, 0x00043007 }, 903 {0x00004040, 0xbe105554},
1454 {0x00004044, 0x00000000 }, 904 {0x00004040, 0x00043007},
905 {0x00004044, 0x00000000},
1455}; 906};
1456 907
1457static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = { 908static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
1458 {0x00004040, 0x9248fd00 }, 909 /* Addr allmodes */
1459 {0x00004040, 0x24924924 }, 910 {0x00004040, 0x9248fd00},
1460 {0x00004040, 0xa8000019 }, 911 {0x00004040, 0x24924924},
1461 {0x00004040, 0x13160820 }, 912 {0x00004040, 0xa8000019},
1462 {0x00004040, 0xe5980560 }, 913 {0x00004040, 0x13160820},
1463 {0x00004040, 0xc01dcffd }, 914 {0x00004040, 0xe5980560},
1464 {0x00004040, 0x1aaabe41 }, 915 {0x00004040, 0xc01dcffd},
1465 {0x00004040, 0xbe105554 }, 916 {0x00004040, 0x1aaabe41},
1466 {0x00004040, 0x00043007 }, 917 {0x00004040, 0xbe105554},
1467 {0x00004044, 0x00000000 }, 918 {0x00004040, 0x00043007},
1468}; 919 {0x00004044, 0x00000000},
1469
1470/* AR9285 Revsion 10*/
1471static const u32 ar9285Modes_9285[][6] = {
1472 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1473 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
1474 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
1475 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
1476 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
1477 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
1478 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
1479 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
1480 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
1481 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1482 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
1483 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1484 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
1485 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
1486 { 0x00009844, 0x0372161e, 0x0372161e, 0x03720020, 0x03720020, 0x037216a0 },
1487 { 0x00009848, 0x00001066, 0x00001066, 0x0000004e, 0x0000004e, 0x00001059 },
1488 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
1489 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
1490 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3136605e, 0x3136605e, 0x3139605e },
1491 { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 },
1492 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
1493 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
1494 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
1495 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
1496 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
1497 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
1498 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1020, 0xdfbc1020, 0xdfbc1010 },
1499 { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1500 { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1501 { 0x000099b8, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c },
1502 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
1503 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
1504 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
1505 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
1506 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
1507 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
1508 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1509 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1510 { 0x00009a00, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 },
1511 { 0x00009a04, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 },
1512 { 0x00009a08, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 },
1513 { 0x00009a0c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 },
1514 { 0x00009a10, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 },
1515 { 0x00009a14, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 },
1516 { 0x00009a18, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 },
1517 { 0x00009a1c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
1518 { 0x00009a20, 0x00000000, 0x00000000, 0x00068114, 0x00068114, 0x00000000 },
1519 { 0x00009a24, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 },
1520 { 0x00009a28, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 },
1521 { 0x00009a2c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 },
1522 { 0x00009a30, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 },
1523 { 0x00009a34, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 },
1524 { 0x00009a38, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 },
1525 { 0x00009a3c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 },
1526 { 0x00009a40, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 },
1527 { 0x00009a44, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 },
1528 { 0x00009a48, 0x00000000, 0x00000000, 0x00068284, 0x00068284, 0x00000000 },
1529 { 0x00009a4c, 0x00000000, 0x00000000, 0x00068288, 0x00068288, 0x00000000 },
1530 { 0x00009a50, 0x00000000, 0x00000000, 0x00068220, 0x00068220, 0x00000000 },
1531 { 0x00009a54, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 },
1532 { 0x00009a58, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 },
1533 { 0x00009a5c, 0x00000000, 0x00000000, 0x00068304, 0x00068304, 0x00000000 },
1534 { 0x00009a60, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 },
1535 { 0x00009a64, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 },
1536 { 0x00009a68, 0x00000000, 0x00000000, 0x00068380, 0x00068380, 0x00000000 },
1537 { 0x00009a6c, 0x00000000, 0x00000000, 0x00068384, 0x00068384, 0x00000000 },
1538 { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 },
1539 { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 },
1540 { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 },
1541 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
1542 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
1543 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
1544 { 0x00009a88, 0x00000000, 0x00000000, 0x00068b04, 0x00068b04, 0x00000000 },
1545 { 0x00009a8c, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 },
1546 { 0x00009a90, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 },
1547 { 0x00009a94, 0x00000000, 0x00000000, 0x00068b0c, 0x00068b0c, 0x00000000 },
1548 { 0x00009a98, 0x00000000, 0x00000000, 0x00068b80, 0x00068b80, 0x00000000 },
1549 { 0x00009a9c, 0x00000000, 0x00000000, 0x00068b84, 0x00068b84, 0x00000000 },
1550 { 0x00009aa0, 0x00000000, 0x00000000, 0x00068b88, 0x00068b88, 0x00000000 },
1551 { 0x00009aa4, 0x00000000, 0x00000000, 0x00068b8c, 0x00068b8c, 0x00000000 },
1552 { 0x00009aa8, 0x00000000, 0x00000000, 0x000b8b90, 0x000b8b90, 0x00000000 },
1553 { 0x00009aac, 0x00000000, 0x00000000, 0x000b8f80, 0x000b8f80, 0x00000000 },
1554 { 0x00009ab0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
1555 { 0x00009ab4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 },
1556 { 0x00009ab8, 0x00000000, 0x00000000, 0x000b8f8c, 0x000b8f8c, 0x00000000 },
1557 { 0x00009abc, 0x00000000, 0x00000000, 0x000b8f90, 0x000b8f90, 0x00000000 },
1558 { 0x00009ac0, 0x00000000, 0x00000000, 0x000bb30c, 0x000bb30c, 0x00000000 },
1559 { 0x00009ac4, 0x00000000, 0x00000000, 0x000bb310, 0x000bb310, 0x00000000 },
1560 { 0x00009ac8, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 },
1561 { 0x00009acc, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 },
1562 { 0x00009ad0, 0x00000000, 0x00000000, 0x000bb324, 0x000bb324, 0x00000000 },
1563 { 0x00009ad4, 0x00000000, 0x00000000, 0x000bb704, 0x000bb704, 0x00000000 },
1564 { 0x00009ad8, 0x00000000, 0x00000000, 0x000f96a4, 0x000f96a4, 0x00000000 },
1565 { 0x00009adc, 0x00000000, 0x00000000, 0x000f96a8, 0x000f96a8, 0x00000000 },
1566 { 0x00009ae0, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 },
1567 { 0x00009ae4, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 },
1568 { 0x00009ae8, 0x00000000, 0x00000000, 0x000f9720, 0x000f9720, 0x00000000 },
1569 { 0x00009aec, 0x00000000, 0x00000000, 0x000f9724, 0x000f9724, 0x00000000 },
1570 { 0x00009af0, 0x00000000, 0x00000000, 0x000f9728, 0x000f9728, 0x00000000 },
1571 { 0x00009af4, 0x00000000, 0x00000000, 0x000f972c, 0x000f972c, 0x00000000 },
1572 { 0x00009af8, 0x00000000, 0x00000000, 0x000f97a0, 0x000f97a0, 0x00000000 },
1573 { 0x00009afc, 0x00000000, 0x00000000, 0x000f97a4, 0x000f97a4, 0x00000000 },
1574 { 0x00009b00, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 },
1575 { 0x00009b04, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 },
1576 { 0x00009b08, 0x00000000, 0x00000000, 0x000fb7b4, 0x000fb7b4, 0x00000000 },
1577 { 0x00009b0c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 },
1578 { 0x00009b10, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 },
1579 { 0x00009b14, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 },
1580 { 0x00009b18, 0x00000000, 0x00000000, 0x000fb7ad, 0x000fb7ad, 0x00000000 },
1581 { 0x00009b1c, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 },
1582 { 0x00009b20, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 },
1583 { 0x00009b24, 0x00000000, 0x00000000, 0x000fb7b9, 0x000fb7b9, 0x00000000 },
1584 { 0x00009b28, 0x00000000, 0x00000000, 0x000fb7c5, 0x000fb7c5, 0x00000000 },
1585 { 0x00009b2c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 },
1586 { 0x00009b30, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 },
1587 { 0x00009b34, 0x00000000, 0x00000000, 0x000fb7d5, 0x000fb7d5, 0x00000000 },
1588 { 0x00009b38, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 },
1589 { 0x00009b3c, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 },
1590 { 0x00009b40, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 },
1591 { 0x00009b44, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 },
1592 { 0x00009b48, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 },
1593 { 0x00009b4c, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 },
1594 { 0x00009b50, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 },
1595 { 0x00009b54, 0x00000000, 0x00000000, 0x000fb7c7, 0x000fb7c7, 0x00000000 },
1596 { 0x00009b58, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 },
1597 { 0x00009b5c, 0x00000000, 0x00000000, 0x000fb7cf, 0x000fb7cf, 0x00000000 },
1598 { 0x00009b60, 0x00000000, 0x00000000, 0x000fb7d7, 0x000fb7d7, 0x00000000 },
1599 { 0x00009b64, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1600 { 0x00009b68, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1601 { 0x00009b6c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1602 { 0x00009b70, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1603 { 0x00009b74, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1604 { 0x00009b78, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1605 { 0x00009b7c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1606 { 0x00009b80, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1607 { 0x00009b84, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1608 { 0x00009b88, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1609 { 0x00009b8c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1610 { 0x00009b90, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1611 { 0x00009b94, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1612 { 0x00009b98, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1613 { 0x00009b9c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1614 { 0x00009ba0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1615 { 0x00009ba4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1616 { 0x00009ba8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1617 { 0x00009bac, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1618 { 0x00009bb0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1619 { 0x00009bb4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1620 { 0x00009bb8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1621 { 0x00009bbc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1622 { 0x00009bc0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1623 { 0x00009bc4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1624 { 0x00009bc8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1625 { 0x00009bcc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1626 { 0x00009bd0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1627 { 0x00009bd4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1628 { 0x00009bd8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1629 { 0x00009bdc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1630 { 0x00009be0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1631 { 0x00009be4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1632 { 0x00009be8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1633 { 0x00009bec, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1634 { 0x00009bf0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1635 { 0x00009bf4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1636 { 0x00009bf8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1637 { 0x00009bfc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
1638 { 0x0000aa00, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 },
1639 { 0x0000aa04, 0x00000000, 0x00000000, 0x00068080, 0x00068080, 0x00000000 },
1640 { 0x0000aa08, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 },
1641 { 0x0000aa0c, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 },
1642 { 0x0000aa10, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 },
1643 { 0x0000aa14, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 },
1644 { 0x0000aa18, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 },
1645 { 0x0000aa1c, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 },
1646 { 0x0000aa20, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 },
1647 { 0x0000aa24, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
1648 { 0x0000aa28, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
1649 { 0x0000aa2c, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 },
1650 { 0x0000aa30, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 },
1651 { 0x0000aa34, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 },
1652 { 0x0000aa38, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 },
1653 { 0x0000aa3c, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 },
1654 { 0x0000aa40, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 },
1655 { 0x0000aa44, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 },
1656 { 0x0000aa48, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 },
1657 { 0x0000aa4c, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 },
1658 { 0x0000aa50, 0x00000000, 0x00000000, 0x000681ac, 0x000681ac, 0x00000000 },
1659 { 0x0000aa54, 0x00000000, 0x00000000, 0x0006821c, 0x0006821c, 0x00000000 },
1660 { 0x0000aa58, 0x00000000, 0x00000000, 0x00068224, 0x00068224, 0x00000000 },
1661 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 },
1662 { 0x0000aa60, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 },
1663 { 0x0000aa64, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 },
1664 { 0x0000aa68, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 },
1665 { 0x0000aa6c, 0x00000000, 0x00000000, 0x00068310, 0x00068310, 0x00000000 },
1666 { 0x0000aa70, 0x00000000, 0x00000000, 0x00068788, 0x00068788, 0x00000000 },
1667 { 0x0000aa74, 0x00000000, 0x00000000, 0x0006878c, 0x0006878c, 0x00000000 },
1668 { 0x0000aa78, 0x00000000, 0x00000000, 0x00068790, 0x00068790, 0x00000000 },
1669 { 0x0000aa7c, 0x00000000, 0x00000000, 0x00068794, 0x00068794, 0x00000000 },
1670 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068798, 0x00068798, 0x00000000 },
1671 { 0x0000aa84, 0x00000000, 0x00000000, 0x0006879c, 0x0006879c, 0x00000000 },
1672 { 0x0000aa88, 0x00000000, 0x00000000, 0x00068b89, 0x00068b89, 0x00000000 },
1673 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00068b8d, 0x00068b8d, 0x00000000 },
1674 { 0x0000aa90, 0x00000000, 0x00000000, 0x00068b91, 0x00068b91, 0x00000000 },
1675 { 0x0000aa94, 0x00000000, 0x00000000, 0x00068b95, 0x00068b95, 0x00000000 },
1676 { 0x0000aa98, 0x00000000, 0x00000000, 0x00068b99, 0x00068b99, 0x00000000 },
1677 { 0x0000aa9c, 0x00000000, 0x00000000, 0x00068ba5, 0x00068ba5, 0x00000000 },
1678 { 0x0000aaa0, 0x00000000, 0x00000000, 0x00068ba9, 0x00068ba9, 0x00000000 },
1679 { 0x0000aaa4, 0x00000000, 0x00000000, 0x00068bad, 0x00068bad, 0x00000000 },
1680 { 0x0000aaa8, 0x00000000, 0x00000000, 0x000b8b0c, 0x000b8b0c, 0x00000000 },
1681 { 0x0000aaac, 0x00000000, 0x00000000, 0x000b8f10, 0x000b8f10, 0x00000000 },
1682 { 0x0000aab0, 0x00000000, 0x00000000, 0x000b8f14, 0x000b8f14, 0x00000000 },
1683 { 0x0000aab4, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
1684 { 0x0000aab8, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
1685 { 0x0000aabc, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 },
1686 { 0x0000aac0, 0x00000000, 0x00000000, 0x000bb380, 0x000bb380, 0x00000000 },
1687 { 0x0000aac4, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 },
1688 { 0x0000aac8, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 },
1689 { 0x0000aacc, 0x00000000, 0x00000000, 0x000bb38c, 0x000bb38c, 0x00000000 },
1690 { 0x0000aad0, 0x00000000, 0x00000000, 0x000bb394, 0x000bb394, 0x00000000 },
1691 { 0x0000aad4, 0x00000000, 0x00000000, 0x000bb798, 0x000bb798, 0x00000000 },
1692 { 0x0000aad8, 0x00000000, 0x00000000, 0x000f970c, 0x000f970c, 0x00000000 },
1693 { 0x0000aadc, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 },
1694 { 0x0000aae0, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 },
1695 { 0x0000aae4, 0x00000000, 0x00000000, 0x000f9718, 0x000f9718, 0x00000000 },
1696 { 0x0000aae8, 0x00000000, 0x00000000, 0x000f9705, 0x000f9705, 0x00000000 },
1697 { 0x0000aaec, 0x00000000, 0x00000000, 0x000f9709, 0x000f9709, 0x00000000 },
1698 { 0x0000aaf0, 0x00000000, 0x00000000, 0x000f970d, 0x000f970d, 0x00000000 },
1699 { 0x0000aaf4, 0x00000000, 0x00000000, 0x000f9711, 0x000f9711, 0x00000000 },
1700 { 0x0000aaf8, 0x00000000, 0x00000000, 0x000f9715, 0x000f9715, 0x00000000 },
1701 { 0x0000aafc, 0x00000000, 0x00000000, 0x000f9719, 0x000f9719, 0x00000000 },
1702 { 0x0000ab00, 0x00000000, 0x00000000, 0x000fb7a4, 0x000fb7a4, 0x00000000 },
1703 { 0x0000ab04, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 },
1704 { 0x0000ab08, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 },
1705 { 0x0000ab0c, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 },
1706 { 0x0000ab10, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 },
1707 { 0x0000ab14, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 },
1708 { 0x0000ab18, 0x00000000, 0x00000000, 0x000fb7bc, 0x000fb7bc, 0x00000000 },
1709 { 0x0000ab1c, 0x00000000, 0x00000000, 0x000fb7a1, 0x000fb7a1, 0x00000000 },
1710 { 0x0000ab20, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 },
1711 { 0x0000ab24, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 },
1712 { 0x0000ab28, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 },
1713 { 0x0000ab2c, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 },
1714 { 0x0000ab30, 0x00000000, 0x00000000, 0x000fb7bd, 0x000fb7bd, 0x00000000 },
1715 { 0x0000ab34, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 },
1716 { 0x0000ab38, 0x00000000, 0x00000000, 0x000fb7cd, 0x000fb7cd, 0x00000000 },
1717 { 0x0000ab3c, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 },
1718 { 0x0000ab40, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 },
1719 { 0x0000ab44, 0x00000000, 0x00000000, 0x000fb7c2, 0x000fb7c2, 0x00000000 },
1720 { 0x0000ab48, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 },
1721 { 0x0000ab4c, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 },
1722 { 0x0000ab50, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 },
1723 { 0x0000ab54, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 },
1724 { 0x0000ab58, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 },
1725 { 0x0000ab5c, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 },
1726 { 0x0000ab60, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 },
1727 { 0x0000ab64, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1728 { 0x0000ab68, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1729 { 0x0000ab6c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1730 { 0x0000ab70, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1731 { 0x0000ab74, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1732 { 0x0000ab78, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1733 { 0x0000ab7c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1734 { 0x0000ab80, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1735 { 0x0000ab84, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1736 { 0x0000ab88, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1737 { 0x0000ab8c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1738 { 0x0000ab90, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1739 { 0x0000ab94, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1740 { 0x0000ab98, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1741 { 0x0000ab9c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1742 { 0x0000aba0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1743 { 0x0000aba4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1744 { 0x0000aba8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1745 { 0x0000abac, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1746 { 0x0000abb0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1747 { 0x0000abb4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1748 { 0x0000abb8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1749 { 0x0000abbc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1750 { 0x0000abc0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1751 { 0x0000abc4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1752 { 0x0000abc8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1753 { 0x0000abcc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1754 { 0x0000abd0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1755 { 0x0000abd4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1756 { 0x0000abd8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1757 { 0x0000abdc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1758 { 0x0000abe0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1759 { 0x0000abe4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1760 { 0x0000abe8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1761 { 0x0000abec, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1762 { 0x0000abf0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1763 { 0x0000abf4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1764 { 0x0000abf8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1765 { 0x0000abfc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
1766 { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 },
1767 { 0x0000a20c, 0x00000014, 0x00000014, 0x00000000, 0x00000000, 0x0001f000 },
1768 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
1769 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
1770 { 0x0000a250, 0x001ff000, 0x001ff000, 0x001ca000, 0x001ca000, 0x001da000 },
1771 { 0x0000a274, 0x0a81c652, 0x0a81c652, 0x0a820652, 0x0a820652, 0x0a82a652 },
1772 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1773 { 0x0000a304, 0x00000000, 0x00000000, 0x00007201, 0x00007201, 0x00000000 },
1774 { 0x0000a308, 0x00000000, 0x00000000, 0x00010408, 0x00010408, 0x00000000 },
1775 { 0x0000a30c, 0x00000000, 0x00000000, 0x0001860a, 0x0001860a, 0x00000000 },
1776 { 0x0000a310, 0x00000000, 0x00000000, 0x00020818, 0x00020818, 0x00000000 },
1777 { 0x0000a314, 0x00000000, 0x00000000, 0x00024858, 0x00024858, 0x00000000 },
1778 { 0x0000a318, 0x00000000, 0x00000000, 0x00026859, 0x00026859, 0x00000000 },
1779 { 0x0000a31c, 0x00000000, 0x00000000, 0x0002985b, 0x0002985b, 0x00000000 },
1780 { 0x0000a320, 0x00000000, 0x00000000, 0x0002c89a, 0x0002c89a, 0x00000000 },
1781 { 0x0000a324, 0x00000000, 0x00000000, 0x0002e89b, 0x0002e89b, 0x00000000 },
1782 { 0x0000a328, 0x00000000, 0x00000000, 0x0003089c, 0x0003089c, 0x00000000 },
1783 { 0x0000a32c, 0x00000000, 0x00000000, 0x0003289d, 0x0003289d, 0x00000000 },
1784 { 0x0000a330, 0x00000000, 0x00000000, 0x0003489e, 0x0003489e, 0x00000000 },
1785 { 0x0000a334, 0x00000000, 0x00000000, 0x000388de, 0x000388de, 0x00000000 },
1786 { 0x0000a338, 0x00000000, 0x00000000, 0x0003b91e, 0x0003b91e, 0x00000000 },
1787 { 0x0000a33c, 0x00000000, 0x00000000, 0x0003d95e, 0x0003d95e, 0x00000000 },
1788 { 0x0000a340, 0x00000000, 0x00000000, 0x000419df, 0x000419df, 0x00000000 },
1789 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
1790 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
1791};
1792
1793static const u32 ar9285Common_9285[][2] = {
1794 { 0x0000000c, 0x00000000 },
1795 { 0x00000030, 0x00020045 },
1796 { 0x00000034, 0x00000005 },
1797 { 0x00000040, 0x00000000 },
1798 { 0x00000044, 0x00000008 },
1799 { 0x00000048, 0x00000008 },
1800 { 0x0000004c, 0x00000010 },
1801 { 0x00000050, 0x00000000 },
1802 { 0x00000054, 0x0000001f },
1803 { 0x00000800, 0x00000000 },
1804 { 0x00000804, 0x00000000 },
1805 { 0x00000808, 0x00000000 },
1806 { 0x0000080c, 0x00000000 },
1807 { 0x00000810, 0x00000000 },
1808 { 0x00000814, 0x00000000 },
1809 { 0x00000818, 0x00000000 },
1810 { 0x0000081c, 0x00000000 },
1811 { 0x00000820, 0x00000000 },
1812 { 0x00000824, 0x00000000 },
1813 { 0x00001040, 0x002ffc0f },
1814 { 0x00001044, 0x002ffc0f },
1815 { 0x00001048, 0x002ffc0f },
1816 { 0x0000104c, 0x002ffc0f },
1817 { 0x00001050, 0x002ffc0f },
1818 { 0x00001054, 0x002ffc0f },
1819 { 0x00001058, 0x002ffc0f },
1820 { 0x0000105c, 0x002ffc0f },
1821 { 0x00001060, 0x002ffc0f },
1822 { 0x00001064, 0x002ffc0f },
1823 { 0x00001230, 0x00000000 },
1824 { 0x00001270, 0x00000000 },
1825 { 0x00001038, 0x00000000 },
1826 { 0x00001078, 0x00000000 },
1827 { 0x000010b8, 0x00000000 },
1828 { 0x000010f8, 0x00000000 },
1829 { 0x00001138, 0x00000000 },
1830 { 0x00001178, 0x00000000 },
1831 { 0x000011b8, 0x00000000 },
1832 { 0x000011f8, 0x00000000 },
1833 { 0x00001238, 0x00000000 },
1834 { 0x00001278, 0x00000000 },
1835 { 0x000012b8, 0x00000000 },
1836 { 0x000012f8, 0x00000000 },
1837 { 0x00001338, 0x00000000 },
1838 { 0x00001378, 0x00000000 },
1839 { 0x000013b8, 0x00000000 },
1840 { 0x000013f8, 0x00000000 },
1841 { 0x00001438, 0x00000000 },
1842 { 0x00001478, 0x00000000 },
1843 { 0x000014b8, 0x00000000 },
1844 { 0x000014f8, 0x00000000 },
1845 { 0x00001538, 0x00000000 },
1846 { 0x00001578, 0x00000000 },
1847 { 0x000015b8, 0x00000000 },
1848 { 0x000015f8, 0x00000000 },
1849 { 0x00001638, 0x00000000 },
1850 { 0x00001678, 0x00000000 },
1851 { 0x000016b8, 0x00000000 },
1852 { 0x000016f8, 0x00000000 },
1853 { 0x00001738, 0x00000000 },
1854 { 0x00001778, 0x00000000 },
1855 { 0x000017b8, 0x00000000 },
1856 { 0x000017f8, 0x00000000 },
1857 { 0x0000103c, 0x00000000 },
1858 { 0x0000107c, 0x00000000 },
1859 { 0x000010bc, 0x00000000 },
1860 { 0x000010fc, 0x00000000 },
1861 { 0x0000113c, 0x00000000 },
1862 { 0x0000117c, 0x00000000 },
1863 { 0x000011bc, 0x00000000 },
1864 { 0x000011fc, 0x00000000 },
1865 { 0x0000123c, 0x00000000 },
1866 { 0x0000127c, 0x00000000 },
1867 { 0x000012bc, 0x00000000 },
1868 { 0x000012fc, 0x00000000 },
1869 { 0x0000133c, 0x00000000 },
1870 { 0x0000137c, 0x00000000 },
1871 { 0x000013bc, 0x00000000 },
1872 { 0x000013fc, 0x00000000 },
1873 { 0x0000143c, 0x00000000 },
1874 { 0x0000147c, 0x00000000 },
1875 { 0x00004030, 0x00000002 },
1876 { 0x0000403c, 0x00000002 },
1877 { 0x00004024, 0x0000001f },
1878 { 0x00004060, 0x00000000 },
1879 { 0x00004064, 0x00000000 },
1880 { 0x00007010, 0x00000031 },
1881 { 0x00007034, 0x00000002 },
1882 { 0x00007038, 0x000004c2 },
1883 { 0x00008004, 0x00000000 },
1884 { 0x00008008, 0x00000000 },
1885 { 0x0000800c, 0x00000000 },
1886 { 0x00008018, 0x00000700 },
1887 { 0x00008020, 0x00000000 },
1888 { 0x00008038, 0x00000000 },
1889 { 0x0000803c, 0x00000000 },
1890 { 0x00008048, 0x00000000 },
1891 { 0x00008054, 0x00000000 },
1892 { 0x00008058, 0x00000000 },
1893 { 0x0000805c, 0x000fc78f },
1894 { 0x00008060, 0x0000000f },
1895 { 0x00008064, 0x00000000 },
1896 { 0x00008070, 0x00000000 },
1897 { 0x000080c0, 0x2a80001a },
1898 { 0x000080c4, 0x05dc01e0 },
1899 { 0x000080c8, 0x1f402710 },
1900 { 0x000080cc, 0x01f40000 },
1901 { 0x000080d0, 0x00001e00 },
1902 { 0x000080d4, 0x00000000 },
1903 { 0x000080d8, 0x00400000 },
1904 { 0x000080e0, 0xffffffff },
1905 { 0x000080e4, 0x0000ffff },
1906 { 0x000080e8, 0x003f3f3f },
1907 { 0x000080ec, 0x00000000 },
1908 { 0x000080f0, 0x00000000 },
1909 { 0x000080f4, 0x00000000 },
1910 { 0x000080f8, 0x00000000 },
1911 { 0x000080fc, 0x00020000 },
1912 { 0x00008100, 0x00020000 },
1913 { 0x00008104, 0x00000001 },
1914 { 0x00008108, 0x00000052 },
1915 { 0x0000810c, 0x00000000 },
1916 { 0x00008110, 0x00000168 },
1917 { 0x00008118, 0x000100aa },
1918 { 0x0000811c, 0x00003210 },
1919 { 0x00008120, 0x08f04800 },
1920 { 0x00008124, 0x00000000 },
1921 { 0x00008128, 0x00000000 },
1922 { 0x0000812c, 0x00000000 },
1923 { 0x00008130, 0x00000000 },
1924 { 0x00008134, 0x00000000 },
1925 { 0x00008138, 0x00000000 },
1926 { 0x0000813c, 0x00000000 },
1927 { 0x00008144, 0x00000000 },
1928 { 0x00008168, 0x00000000 },
1929 { 0x0000816c, 0x00000000 },
1930 { 0x00008170, 0x32143320 },
1931 { 0x00008174, 0xfaa4fa50 },
1932 { 0x00008178, 0x00000100 },
1933 { 0x0000817c, 0x00000000 },
1934 { 0x000081c0, 0x00000000 },
1935 { 0x000081d0, 0x00003210 },
1936 { 0x000081ec, 0x00000000 },
1937 { 0x000081f0, 0x00000000 },
1938 { 0x000081f4, 0x00000000 },
1939 { 0x000081f8, 0x00000000 },
1940 { 0x000081fc, 0x00000000 },
1941 { 0x00008200, 0x00000000 },
1942 { 0x00008204, 0x00000000 },
1943 { 0x00008208, 0x00000000 },
1944 { 0x0000820c, 0x00000000 },
1945 { 0x00008210, 0x00000000 },
1946 { 0x00008214, 0x00000000 },
1947 { 0x00008218, 0x00000000 },
1948 { 0x0000821c, 0x00000000 },
1949 { 0x00008220, 0x00000000 },
1950 { 0x00008224, 0x00000000 },
1951 { 0x00008228, 0x00000000 },
1952 { 0x0000822c, 0x00000000 },
1953 { 0x00008230, 0x00000000 },
1954 { 0x00008234, 0x00000000 },
1955 { 0x00008238, 0x00000000 },
1956 { 0x0000823c, 0x00000000 },
1957 { 0x00008240, 0x00100000 },
1958 { 0x00008244, 0x0010f400 },
1959 { 0x00008248, 0x00000100 },
1960 { 0x0000824c, 0x0001e800 },
1961 { 0x00008250, 0x00000000 },
1962 { 0x00008254, 0x00000000 },
1963 { 0x00008258, 0x00000000 },
1964 { 0x0000825c, 0x400000ff },
1965 { 0x00008260, 0x00080922 },
1966 { 0x00008264, 0x88a00010 },
1967 { 0x00008270, 0x00000000 },
1968 { 0x00008274, 0x40000000 },
1969 { 0x00008278, 0x003e4180 },
1970 { 0x0000827c, 0x00000000 },
1971 { 0x00008284, 0x0000002c },
1972 { 0x00008288, 0x0000002c },
1973 { 0x0000828c, 0x00000000 },
1974 { 0x00008294, 0x00000000 },
1975 { 0x00008298, 0x00000000 },
1976 { 0x0000829c, 0x00000000 },
1977 { 0x00008300, 0x00000040 },
1978 { 0x00008314, 0x00000000 },
1979 { 0x00008328, 0x00000000 },
1980 { 0x0000832c, 0x00000001 },
1981 { 0x00008330, 0x00000302 },
1982 { 0x00008334, 0x00000e00 },
1983 { 0x00008338, 0x00000000 },
1984 { 0x0000833c, 0x00000000 },
1985 { 0x00008340, 0x00010380 },
1986 { 0x00008344, 0x00481043 },
1987 { 0x00009808, 0x00000000 },
1988 { 0x0000980c, 0xafe68e30 },
1989 { 0x00009810, 0xfd14e000 },
1990 { 0x00009814, 0x9c0a9f6b },
1991 { 0x0000981c, 0x00000000 },
1992 { 0x0000982c, 0x0000a000 },
1993 { 0x00009830, 0x00000000 },
1994 { 0x0000983c, 0x00200400 },
1995 { 0x0000984c, 0x0040233c },
1996 { 0x00009854, 0x00000044 },
1997 { 0x00009900, 0x00000000 },
1998 { 0x00009904, 0x00000000 },
1999 { 0x00009908, 0x00000000 },
2000 { 0x0000990c, 0x00000000 },
2001 { 0x00009910, 0x01002310 },
2002 { 0x0000991c, 0x10000fff },
2003 { 0x00009920, 0x04900000 },
2004 { 0x00009928, 0x00000001 },
2005 { 0x0000992c, 0x00000004 },
2006 { 0x00009934, 0x1e1f2022 },
2007 { 0x00009938, 0x0a0b0c0d },
2008 { 0x0000993c, 0x00000000 },
2009 { 0x00009940, 0x14750604 },
2010 { 0x00009948, 0x9280c00a },
2011 { 0x0000994c, 0x00020028 },
2012 { 0x00009954, 0x5f3ca3de },
2013 { 0x00009958, 0x2108ecff },
2014 { 0x00009968, 0x000003ce },
2015 { 0x00009970, 0x1927b515 },
2016 { 0x00009974, 0x00000000 },
2017 { 0x00009978, 0x00000001 },
2018 { 0x0000997c, 0x00000000 },
2019 { 0x00009980, 0x00000000 },
2020 { 0x00009984, 0x00000000 },
2021 { 0x00009988, 0x00000000 },
2022 { 0x0000998c, 0x00000000 },
2023 { 0x00009990, 0x00000000 },
2024 { 0x00009994, 0x00000000 },
2025 { 0x00009998, 0x00000000 },
2026 { 0x0000999c, 0x00000000 },
2027 { 0x000099a0, 0x00000000 },
2028 { 0x000099a4, 0x00000001 },
2029 { 0x000099a8, 0x201fff00 },
2030 { 0x000099ac, 0x2def0a00 },
2031 { 0x000099b0, 0x03051000 },
2032 { 0x000099b4, 0x00000820 },
2033 { 0x000099dc, 0x00000000 },
2034 { 0x000099e0, 0x00000000 },
2035 { 0x000099e4, 0xaaaaaaaa },
2036 { 0x000099e8, 0x3c466478 },
2037 { 0x000099ec, 0x0cc80caa },
2038 { 0x000099f0, 0x00000000 },
2039 { 0x0000a208, 0x803e6788 },
2040 { 0x0000a210, 0x4080a333 },
2041 { 0x0000a214, 0x00206c10 },
2042 { 0x0000a218, 0x009c4060 },
2043 { 0x0000a220, 0x01834061 },
2044 { 0x0000a224, 0x00000400 },
2045 { 0x0000a228, 0x000003b5 },
2046 { 0x0000a22c, 0x00000000 },
2047 { 0x0000a234, 0x20202020 },
2048 { 0x0000a238, 0x20202020 },
2049 { 0x0000a244, 0x00000000 },
2050 { 0x0000a248, 0xfffffffc },
2051 { 0x0000a24c, 0x00000000 },
2052 { 0x0000a254, 0x00000000 },
2053 { 0x0000a258, 0x0ccb5380 },
2054 { 0x0000a25c, 0x15151501 },
2055 { 0x0000a260, 0xdfa90f01 },
2056 { 0x0000a268, 0x00000000 },
2057 { 0x0000a26c, 0x0ebae9e6 },
2058 { 0x0000d270, 0x0d820820 },
2059 { 0x0000a278, 0x39ce739c },
2060 { 0x0000a27c, 0x050e039c },
2061 { 0x0000d35c, 0x07ffffef },
2062 { 0x0000d360, 0x0fffffe7 },
2063 { 0x0000d364, 0x17ffffe5 },
2064 { 0x0000d368, 0x1fffffe4 },
2065 { 0x0000d36c, 0x37ffffe3 },
2066 { 0x0000d370, 0x3fffffe3 },
2067 { 0x0000d374, 0x57ffffe3 },
2068 { 0x0000d378, 0x5fffffe2 },
2069 { 0x0000d37c, 0x7fffffe2 },
2070 { 0x0000d380, 0x7f3c7bba },
2071 { 0x0000d384, 0xf3307ff0 },
2072 { 0x0000a388, 0x0c000000 },
2073 { 0x0000a38c, 0x20202020 },
2074 { 0x0000a390, 0x20202020 },
2075 { 0x0000a394, 0x39ce739c },
2076 { 0x0000a398, 0x0000039c },
2077 { 0x0000a39c, 0x00000001 },
2078 { 0x0000a3a0, 0x00000000 },
2079 { 0x0000a3a4, 0x00000000 },
2080 { 0x0000a3a8, 0x00000000 },
2081 { 0x0000a3ac, 0x00000000 },
2082 { 0x0000a3b0, 0x00000000 },
2083 { 0x0000a3b4, 0x00000000 },
2084 { 0x0000a3b8, 0x00000000 },
2085 { 0x0000a3bc, 0x00000000 },
2086 { 0x0000a3c0, 0x00000000 },
2087 { 0x0000a3c4, 0x00000000 },
2088 { 0x0000a3cc, 0x20202020 },
2089 { 0x0000a3d0, 0x20202020 },
2090 { 0x0000a3d4, 0x20202020 },
2091 { 0x0000a3dc, 0x39ce739c },
2092 { 0x0000a3e0, 0x0000039c },
2093 { 0x0000a3e4, 0x00000000 },
2094 { 0x0000a3e8, 0x18c43433 },
2095 { 0x0000a3ec, 0x00f70081 },
2096 { 0x00007800, 0x00140000 },
2097 { 0x00007804, 0x0e4548d8 },
2098 { 0x00007808, 0x54214514 },
2099 { 0x0000780c, 0x02025820 },
2100 { 0x00007810, 0x71c0d388 },
2101 { 0x00007814, 0x924934a8 },
2102 { 0x0000781c, 0x00000000 },
2103 { 0x00007820, 0x00000c04 },
2104 { 0x00007824, 0x00d86fff },
2105 { 0x00007828, 0x26d2491b },
2106 { 0x0000782c, 0x6e36d97b },
2107 { 0x00007830, 0xedb6d96c },
2108 { 0x00007834, 0x71400086 },
2109 { 0x00007838, 0xfac68800 },
2110 { 0x0000783c, 0x0001fffe },
2111 { 0x00007840, 0xffeb1a20 },
2112 { 0x00007844, 0x000c0db6 },
2113 { 0x00007848, 0x6db61b6f },
2114 { 0x0000784c, 0x6d9b66db },
2115 { 0x00007850, 0x6d8c6dba },
2116 { 0x00007854, 0x00040000 },
2117 { 0x00007858, 0xdb003012 },
2118 { 0x0000785c, 0x04924914 },
2119 { 0x00007860, 0x21084210 },
2120 { 0x00007864, 0xf7d7ffde },
2121 { 0x00007868, 0xc2034080 },
2122 { 0x0000786c, 0x48609eb4 },
2123 { 0x00007870, 0x10142c00 },
2124}; 920};
2125 921
2126static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = { 922static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
2127 {0x00004040, 0x9248fd00 }, 923 /* Addr allmodes */
2128 {0x00004040, 0x24924924 }, 924 {0x00004040, 0x9248fd00},
2129 {0x00004040, 0xa8000019 }, 925 {0x00004040, 0x24924924},
2130 {0x00004040, 0x13160820 }, 926 {0x00004040, 0xa8000019},
2131 {0x00004040, 0xe5980560 }, 927 {0x00004040, 0x13160820},
2132 {0x00004040, 0xc01dcffd }, 928 {0x00004040, 0xe5980560},
2133 {0x00004040, 0x1aaabe41 }, 929 {0x00004040, 0xc01dcffd},
2134 {0x00004040, 0xbe105554 }, 930 {0x00004040, 0x1aaabe41},
2135 {0x00004040, 0x00043007 }, 931 {0x00004040, 0xbe105554},
2136 {0x00004044, 0x00000000 }, 932 {0x00004040, 0x00043007},
933 {0x00004044, 0x00000000},
2137}; 934};
2138 935
2139static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = { 936static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
2140 {0x00004040, 0x9248fd00 }, 937 /* Addr allmodes */
2141 {0x00004040, 0x24924924 }, 938 {0x00004040, 0x9248fd00},
2142 {0x00004040, 0xa8000019 }, 939 {0x00004040, 0x24924924},
2143 {0x00004040, 0x13160820 }, 940 {0x00004040, 0xa8000019},
2144 {0x00004040, 0xe5980560 }, 941 {0x00004040, 0x13160820},
2145 {0x00004040, 0xc01dcffc }, 942 {0x00004040, 0xe5980560},
2146 {0x00004040, 0x1aaabe41 }, 943 {0x00004040, 0xc01dcffc},
2147 {0x00004040, 0xbe105554 }, 944 {0x00004040, 0x1aaabe41},
2148 {0x00004040, 0x00043007 }, 945 {0x00004040, 0xbe105554},
2149 {0x00004044, 0x00000000 }, 946 {0x00004040, 0x00043007},
947 {0x00004044, 0x00000000},
2150}; 948};
2151 949
2152/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */
2153static const u32 ar9285Modes_9285_1_2[][6] = { 950static const u32 ar9285Modes_9285_1_2[][6] = {
2154 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 951 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
2155 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 952 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
2156 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 953 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
2157 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 954 {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
2158 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, 955 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
2159 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 956 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f},
2160 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, 957 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880},
2161 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 958 {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
2162 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 959 {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
2163 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 960 {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
2164 { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, 961 {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
2165 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 962 {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
2166 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 963 {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
2167 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 964 {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
2168 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, 965 {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0},
2169 { 0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0 }, 966 {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
2170 { 0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, 967 {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
2171 { 0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, 968 {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
2172 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, 969 {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
2173 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 970 {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e},
2174 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, 971 {0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18},
2175 { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 }, 972 {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
2176 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 973 {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
2177 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 974 {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881},
2178 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 975 {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
2179 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 976 {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
2180 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 977 {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d},
2181 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, 978 {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010},
2182 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010 }, 979 {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2183 { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 980 {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2184 { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 981 {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
2185 { 0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c }, 982 {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00},
2186 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, 983 {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
2187 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 984 {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
2188 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 985 {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
2189 { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f }, 986 {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
2190 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 987 {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
2191 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 988 {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2192 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 989 {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2193 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 990 {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
2194 { 0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, 991 {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
2195 { 0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, 992 {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
2196 { 0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, 993 {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
2197 { 0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, 994 {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
2198 { 0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, 995 {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
2199 { 0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, 996 {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
2200 { 0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, 997 {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
2201 { 0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, 998 {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
2202 { 0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, 999 {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
2203 { 0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, 1000 {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
2204 { 0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, 1001 {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
2205 { 0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, 1002 {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
2206 { 0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, 1003 {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
2207 { 0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, 1004 {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
2208 { 0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, 1005 {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
2209 { 0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, 1006 {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
2210 { 0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, 1007 {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
2211 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 1008 {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
2212 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 1009 {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
2213 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 1010 {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
2214 { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, 1011 {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
2215 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 1012 {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
2216 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 1013 {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
2217 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 1014 {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
2218 { 0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, 1015 {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
2219 { 0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, 1016 {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
2220 { 0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, 1017 {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
2221 { 0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, 1018 {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
2222 { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, 1019 {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
2223 { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, 1020 {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
2224 { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, 1021 {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
2225 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 1022 {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
2226 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 1023 {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
2227 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 1024 {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
2228 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, 1025 {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
2229 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 1026 {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
2230 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 1027 {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
2231 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 1028 {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
2232 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 1029 {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
2233 { 0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, 1030 {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
2234 { 0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, 1031 {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
2235 { 0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, 1032 {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
2236 { 0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, 1033 {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
2237 { 0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, 1034 {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
2238 { 0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, 1035 {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
2239 { 0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, 1036 {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
2240 { 0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, 1037 {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
2241 { 0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, 1038 {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
2242 { 0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, 1039 {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
2243 { 0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, 1040 {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
2244 { 0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, 1041 {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
2245 { 0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, 1042 {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
2246 { 0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, 1043 {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
2247 { 0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, 1044 {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
2248 { 0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, 1045 {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
2249 { 0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, 1046 {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
2250 { 0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, 1047 {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
2251 { 0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, 1048 {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
2252 { 0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, 1049 {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
2253 { 0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, 1050 {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
2254 { 0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, 1051 {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
2255 { 0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, 1052 {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
2256 { 0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, 1053 {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
2257 { 0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, 1054 {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
2258 { 0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, 1055 {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
2259 { 0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, 1056 {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
2260 { 0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, 1057 {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
2261 { 0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, 1058 {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
2262 { 0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, 1059 {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
2263 { 0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, 1060 {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
2264 { 0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, 1061 {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
2265 { 0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, 1062 {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
2266 { 0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, 1063 {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
2267 { 0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, 1064 {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
2268 { 0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, 1065 {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
2269 { 0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, 1066 {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
2270 { 0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, 1067 {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
2271 { 0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, 1068 {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
2272 { 0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, 1069 {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
2273 { 0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, 1070 {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
2274 { 0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, 1071 {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
2275 { 0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, 1072 {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
2276 { 0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, 1073 {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
2277 { 0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, 1074 {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
2278 { 0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, 1075 {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
2279 { 0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, 1076 {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
2280 { 0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, 1077 {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
2281 { 0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, 1078 {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
2282 { 0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, 1079 {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2283 { 0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1080 {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2284 { 0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1081 {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2285 { 0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1082 {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2286 { 0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1083 {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2287 { 0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1084 {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2288 { 0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1085 {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2289 { 0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1086 {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2290 { 0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1087 {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2291 { 0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1088 {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2292 { 0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1089 {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2293 { 0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1090 {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2294 { 0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1091 {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2295 { 0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1092 {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2296 { 0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1093 {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2297 { 0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1094 {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2298 { 0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1095 {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2299 { 0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1096 {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2300 { 0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1097 {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2301 { 0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1098 {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2302 { 0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1099 {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2303 { 0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1100 {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2304 { 0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1101 {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2305 { 0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1102 {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2306 { 0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1103 {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2307 { 0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1104 {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2308 { 0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1105 {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2309 { 0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1106 {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2310 { 0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1107 {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2311 { 0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1108 {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2312 { 0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1109 {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2313 { 0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1110 {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2314 { 0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1111 {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2315 { 0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1112 {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2316 { 0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1113 {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2317 { 0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1114 {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2318 { 0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1115 {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2319 { 0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1116 {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2320 { 0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1117 {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2321 { 0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1118 {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
2322 { 0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, 1119 {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
2323 { 0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, 1120 {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
2324 { 0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, 1121 {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
2325 { 0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, 1122 {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
2326 { 0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, 1123 {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
2327 { 0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, 1124 {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
2328 { 0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, 1125 {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
2329 { 0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, 1126 {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
2330 { 0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, 1127 {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
2331 { 0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, 1128 {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
2332 { 0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, 1129 {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
2333 { 0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, 1130 {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
2334 { 0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, 1131 {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
2335 { 0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, 1132 {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
2336 { 0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, 1133 {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
2337 { 0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, 1134 {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
2338 { 0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, 1135 {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
2339 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 1136 {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
2340 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 1137 {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
2341 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 1138 {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
2342 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, 1139 {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
2343 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 1140 {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
2344 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 1141 {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
2345 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 1142 {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
2346 { 0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, 1143 {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
2347 { 0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, 1144 {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
2348 { 0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, 1145 {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
2349 { 0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, 1146 {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
2350 { 0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, 1147 {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
2351 { 0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, 1148 {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
2352 { 0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, 1149 {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
2353 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 1150 {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
2354 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 1151 {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
2355 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 1152 {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
2356 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, 1153 {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
2357 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 1154 {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
2358 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 1155 {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
2359 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 1156 {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
2360 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 1157 {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
2361 { 0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, 1158 {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
2362 { 0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, 1159 {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
2363 { 0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, 1160 {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
2364 { 0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, 1161 {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
2365 { 0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, 1162 {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
2366 { 0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, 1163 {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
2367 { 0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, 1164 {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
2368 { 0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, 1165 {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
2369 { 0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, 1166 {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
2370 { 0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, 1167 {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
2371 { 0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, 1168 {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
2372 { 0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, 1169 {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
2373 { 0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, 1170 {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
2374 { 0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, 1171 {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
2375 { 0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, 1172 {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
2376 { 0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, 1173 {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
2377 { 0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, 1174 {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
2378 { 0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, 1175 {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
2379 { 0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, 1176 {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
2380 { 0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, 1177 {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
2381 { 0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, 1178 {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
2382 { 0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, 1179 {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
2383 { 0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, 1180 {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
2384 { 0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, 1181 {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
2385 { 0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, 1182 {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
2386 { 0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, 1183 {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
2387 { 0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, 1184 {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
2388 { 0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, 1185 {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
2389 { 0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, 1186 {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
2390 { 0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, 1187 {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
2391 { 0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, 1188 {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
2392 { 0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, 1189 {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
2393 { 0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, 1190 {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
2394 { 0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, 1191 {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
2395 { 0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, 1192 {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
2396 { 0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, 1193 {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
2397 { 0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, 1194 {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
2398 { 0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, 1195 {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
2399 { 0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, 1196 {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
2400 { 0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, 1197 {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
2401 { 0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, 1198 {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
2402 { 0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, 1199 {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
2403 { 0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, 1200 {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
2404 { 0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, 1201 {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
2405 { 0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, 1202 {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
2406 { 0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, 1203 {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
2407 { 0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, 1204 {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
2408 { 0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, 1205 {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
2409 { 0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, 1206 {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
2410 { 0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, 1207 {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2411 { 0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1208 {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2412 { 0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1209 {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2413 { 0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1210 {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2414 { 0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1211 {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2415 { 0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1212 {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2416 { 0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1213 {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2417 { 0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1214 {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2418 { 0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1215 {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2419 { 0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1216 {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2420 { 0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1217 {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2421 { 0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1218 {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2422 { 0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1219 {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2423 { 0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1220 {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2424 { 0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1221 {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2425 { 0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1222 {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2426 { 0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1223 {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2427 { 0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1224 {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2428 { 0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1225 {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2429 { 0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1226 {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2430 { 0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1227 {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2431 { 0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1228 {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2432 { 0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1229 {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2433 { 0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1230 {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2434 { 0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1231 {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2435 { 0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1232 {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2436 { 0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1233 {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2437 { 0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1234 {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2438 { 0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1235 {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2439 { 0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1236 {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2440 { 0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1237 {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2441 { 0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1238 {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2442 { 0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1239 {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2443 { 0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1240 {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2444 { 0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1241 {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2445 { 0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1242 {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2446 { 0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1243 {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2447 { 0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1244 {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2448 { 0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1245 {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
2449 { 0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 1246 {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
2450 { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, 1247 {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
2451 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, 1248 {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
2452 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, 1249 {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
2453 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 1250 {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
2454 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 1251 {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000},
2455 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, 1252 {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
2456 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2457}; 1253};
2458 1254
2459static const u32 ar9285Common_9285_1_2[][2] = { 1255static const u32 ar9285Common_9285_1_2[][2] = {
2460 { 0x0000000c, 0x00000000 }, 1256 /* Addr allmodes */
2461 { 0x00000030, 0x00020045 }, 1257 {0x0000000c, 0x00000000},
2462 { 0x00000034, 0x00000005 }, 1258 {0x00000030, 0x00020045},
2463 { 0x00000040, 0x00000000 }, 1259 {0x00000034, 0x00000005},
2464 { 0x00000044, 0x00000008 }, 1260 {0x00000040, 0x00000000},
2465 { 0x00000048, 0x00000008 }, 1261 {0x00000044, 0x00000008},
2466 { 0x0000004c, 0x00000010 }, 1262 {0x00000048, 0x00000008},
2467 { 0x00000050, 0x00000000 }, 1263 {0x0000004c, 0x00000010},
2468 { 0x00000054, 0x0000001f }, 1264 {0x00000050, 0x00000000},
2469 { 0x00000800, 0x00000000 }, 1265 {0x00000054, 0x0000001f},
2470 { 0x00000804, 0x00000000 }, 1266 {0x00000800, 0x00000000},
2471 { 0x00000808, 0x00000000 }, 1267 {0x00000804, 0x00000000},
2472 { 0x0000080c, 0x00000000 }, 1268 {0x00000808, 0x00000000},
2473 { 0x00000810, 0x00000000 }, 1269 {0x0000080c, 0x00000000},
2474 { 0x00000814, 0x00000000 }, 1270 {0x00000810, 0x00000000},
2475 { 0x00000818, 0x00000000 }, 1271 {0x00000814, 0x00000000},
2476 { 0x0000081c, 0x00000000 }, 1272 {0x00000818, 0x00000000},
2477 { 0x00000820, 0x00000000 }, 1273 {0x0000081c, 0x00000000},
2478 { 0x00000824, 0x00000000 }, 1274 {0x00000820, 0x00000000},
2479 { 0x00001040, 0x002ffc0f }, 1275 {0x00000824, 0x00000000},
2480 { 0x00001044, 0x002ffc0f }, 1276 {0x00001040, 0x002ffc0f},
2481 { 0x00001048, 0x002ffc0f }, 1277 {0x00001044, 0x002ffc0f},
2482 { 0x0000104c, 0x002ffc0f }, 1278 {0x00001048, 0x002ffc0f},
2483 { 0x00001050, 0x002ffc0f }, 1279 {0x0000104c, 0x002ffc0f},
2484 { 0x00001054, 0x002ffc0f }, 1280 {0x00001050, 0x002ffc0f},
2485 { 0x00001058, 0x002ffc0f }, 1281 {0x00001054, 0x002ffc0f},
2486 { 0x0000105c, 0x002ffc0f }, 1282 {0x00001058, 0x002ffc0f},
2487 { 0x00001060, 0x002ffc0f }, 1283 {0x0000105c, 0x002ffc0f},
2488 { 0x00001064, 0x002ffc0f }, 1284 {0x00001060, 0x002ffc0f},
2489 { 0x00001230, 0x00000000 }, 1285 {0x00001064, 0x002ffc0f},
2490 { 0x00001270, 0x00000000 }, 1286 {0x00001230, 0x00000000},
2491 { 0x00001038, 0x00000000 }, 1287 {0x00001270, 0x00000000},
2492 { 0x00001078, 0x00000000 }, 1288 {0x00001038, 0x00000000},
2493 { 0x000010b8, 0x00000000 }, 1289 {0x00001078, 0x00000000},
2494 { 0x000010f8, 0x00000000 }, 1290 {0x000010b8, 0x00000000},
2495 { 0x00001138, 0x00000000 }, 1291 {0x000010f8, 0x00000000},
2496 { 0x00001178, 0x00000000 }, 1292 {0x00001138, 0x00000000},
2497 { 0x000011b8, 0x00000000 }, 1293 {0x00001178, 0x00000000},
2498 { 0x000011f8, 0x00000000 }, 1294 {0x000011b8, 0x00000000},
2499 { 0x00001238, 0x00000000 }, 1295 {0x000011f8, 0x00000000},
2500 { 0x00001278, 0x00000000 }, 1296 {0x00001238, 0x00000000},
2501 { 0x000012b8, 0x00000000 }, 1297 {0x00001278, 0x00000000},
2502 { 0x000012f8, 0x00000000 }, 1298 {0x000012b8, 0x00000000},
2503 { 0x00001338, 0x00000000 }, 1299 {0x000012f8, 0x00000000},
2504 { 0x00001378, 0x00000000 }, 1300 {0x00001338, 0x00000000},
2505 { 0x000013b8, 0x00000000 }, 1301 {0x00001378, 0x00000000},
2506 { 0x000013f8, 0x00000000 }, 1302 {0x000013b8, 0x00000000},
2507 { 0x00001438, 0x00000000 }, 1303 {0x000013f8, 0x00000000},
2508 { 0x00001478, 0x00000000 }, 1304 {0x00001438, 0x00000000},
2509 { 0x000014b8, 0x00000000 }, 1305 {0x00001478, 0x00000000},
2510 { 0x000014f8, 0x00000000 }, 1306 {0x000014b8, 0x00000000},
2511 { 0x00001538, 0x00000000 }, 1307 {0x000014f8, 0x00000000},
2512 { 0x00001578, 0x00000000 }, 1308 {0x00001538, 0x00000000},
2513 { 0x000015b8, 0x00000000 }, 1309 {0x00001578, 0x00000000},
2514 { 0x000015f8, 0x00000000 }, 1310 {0x000015b8, 0x00000000},
2515 { 0x00001638, 0x00000000 }, 1311 {0x000015f8, 0x00000000},
2516 { 0x00001678, 0x00000000 }, 1312 {0x00001638, 0x00000000},
2517 { 0x000016b8, 0x00000000 }, 1313 {0x00001678, 0x00000000},
2518 { 0x000016f8, 0x00000000 }, 1314 {0x000016b8, 0x00000000},
2519 { 0x00001738, 0x00000000 }, 1315 {0x000016f8, 0x00000000},
2520 { 0x00001778, 0x00000000 }, 1316 {0x00001738, 0x00000000},
2521 { 0x000017b8, 0x00000000 }, 1317 {0x00001778, 0x00000000},
2522 { 0x000017f8, 0x00000000 }, 1318 {0x000017b8, 0x00000000},
2523 { 0x0000103c, 0x00000000 }, 1319 {0x000017f8, 0x00000000},
2524 { 0x0000107c, 0x00000000 }, 1320 {0x0000103c, 0x00000000},
2525 { 0x000010bc, 0x00000000 }, 1321 {0x0000107c, 0x00000000},
2526 { 0x000010fc, 0x00000000 }, 1322 {0x000010bc, 0x00000000},
2527 { 0x0000113c, 0x00000000 }, 1323 {0x000010fc, 0x00000000},
2528 { 0x0000117c, 0x00000000 }, 1324 {0x0000113c, 0x00000000},
2529 { 0x000011bc, 0x00000000 }, 1325 {0x0000117c, 0x00000000},
2530 { 0x000011fc, 0x00000000 }, 1326 {0x000011bc, 0x00000000},
2531 { 0x0000123c, 0x00000000 }, 1327 {0x000011fc, 0x00000000},
2532 { 0x0000127c, 0x00000000 }, 1328 {0x0000123c, 0x00000000},
2533 { 0x000012bc, 0x00000000 }, 1329 {0x0000127c, 0x00000000},
2534 { 0x000012fc, 0x00000000 }, 1330 {0x000012bc, 0x00000000},
2535 { 0x0000133c, 0x00000000 }, 1331 {0x000012fc, 0x00000000},
2536 { 0x0000137c, 0x00000000 }, 1332 {0x0000133c, 0x00000000},
2537 { 0x000013bc, 0x00000000 }, 1333 {0x0000137c, 0x00000000},
2538 { 0x000013fc, 0x00000000 }, 1334 {0x000013bc, 0x00000000},
2539 { 0x0000143c, 0x00000000 }, 1335 {0x000013fc, 0x00000000},
2540 { 0x0000147c, 0x00000000 }, 1336 {0x0000143c, 0x00000000},
2541 { 0x00004030, 0x00000002 }, 1337 {0x0000147c, 0x00000000},
2542 { 0x0000403c, 0x00000002 }, 1338 {0x00004030, 0x00000002},
2543 { 0x00004024, 0x0000001f }, 1339 {0x0000403c, 0x00000002},
2544 { 0x00004060, 0x00000000 }, 1340 {0x00004024, 0x0000001f},
2545 { 0x00004064, 0x00000000 }, 1341 {0x00004060, 0x00000000},
2546 { 0x00007010, 0x00000031 }, 1342 {0x00004064, 0x00000000},
2547 { 0x00007034, 0x00000002 }, 1343 {0x00007010, 0x00000031},
2548 { 0x00007038, 0x000004c2 }, 1344 {0x00007034, 0x00000002},
2549 { 0x00008004, 0x00000000 }, 1345 {0x00007038, 0x000004c2},
2550 { 0x00008008, 0x00000000 }, 1346 {0x00008004, 0x00000000},
2551 { 0x0000800c, 0x00000000 }, 1347 {0x00008008, 0x00000000},
2552 { 0x00008018, 0x00000700 }, 1348 {0x0000800c, 0x00000000},
2553 { 0x00008020, 0x00000000 }, 1349 {0x00008018, 0x00000700},
2554 { 0x00008038, 0x00000000 }, 1350 {0x00008020, 0x00000000},
2555 { 0x0000803c, 0x00000000 }, 1351 {0x00008038, 0x00000000},
2556 { 0x00008048, 0x00000000 }, 1352 {0x0000803c, 0x00000000},
2557 { 0x00008054, 0x00000000 }, 1353 {0x00008048, 0x00000000},
2558 { 0x00008058, 0x00000000 }, 1354 {0x00008054, 0x00000000},
2559 { 0x0000805c, 0x000fc78f }, 1355 {0x00008058, 0x00000000},
2560 { 0x00008060, 0x0000000f }, 1356 {0x0000805c, 0x000fc78f},
2561 { 0x00008064, 0x00000000 }, 1357 {0x00008060, 0x0000000f},
2562 { 0x00008070, 0x00000000 }, 1358 {0x00008064, 0x00000000},
2563 { 0x000080c0, 0x2a80001a }, 1359 {0x00008070, 0x00000000},
2564 { 0x000080c4, 0x05dc01e0 }, 1360 {0x000080c0, 0x2a80001a},
2565 { 0x000080c8, 0x1f402710 }, 1361 {0x000080c4, 0x05dc01e0},
2566 { 0x000080cc, 0x01f40000 }, 1362 {0x000080c8, 0x1f402710},
2567 { 0x000080d0, 0x00001e00 }, 1363 {0x000080cc, 0x01f40000},
2568 { 0x000080d4, 0x00000000 }, 1364 {0x000080d0, 0x00001e00},
2569 { 0x000080d8, 0x00400000 }, 1365 {0x000080d4, 0x00000000},
2570 { 0x000080e0, 0xffffffff }, 1366 {0x000080d8, 0x00400000},
2571 { 0x000080e4, 0x0000ffff }, 1367 {0x000080e0, 0xffffffff},
2572 { 0x000080e8, 0x003f3f3f }, 1368 {0x000080e4, 0x0000ffff},
2573 { 0x000080ec, 0x00000000 }, 1369 {0x000080e8, 0x003f3f3f},
2574 { 0x000080f0, 0x00000000 }, 1370 {0x000080ec, 0x00000000},
2575 { 0x000080f4, 0x00000000 }, 1371 {0x000080f0, 0x00000000},
2576 { 0x000080f8, 0x00000000 }, 1372 {0x000080f4, 0x00000000},
2577 { 0x000080fc, 0x00020000 }, 1373 {0x000080f8, 0x00000000},
2578 { 0x00008100, 0x00020000 }, 1374 {0x000080fc, 0x00020000},
2579 { 0x00008104, 0x00000001 }, 1375 {0x00008100, 0x00020000},
2580 { 0x00008108, 0x00000052 }, 1376 {0x00008104, 0x00000001},
2581 { 0x0000810c, 0x00000000 }, 1377 {0x00008108, 0x00000052},
2582 { 0x00008110, 0x00000168 }, 1378 {0x0000810c, 0x00000000},
2583 { 0x00008118, 0x000100aa }, 1379 {0x00008110, 0x00000168},
2584 { 0x0000811c, 0x00003210 }, 1380 {0x00008118, 0x000100aa},
2585 { 0x00008120, 0x08f04810 }, 1381 {0x0000811c, 0x00003210},
2586 { 0x00008124, 0x00000000 }, 1382 {0x00008120, 0x08f04810},
2587 { 0x00008128, 0x00000000 }, 1383 {0x00008124, 0x00000000},
2588 { 0x0000812c, 0x00000000 }, 1384 {0x00008128, 0x00000000},
2589 { 0x00008130, 0x00000000 }, 1385 {0x0000812c, 0x00000000},
2590 { 0x00008134, 0x00000000 }, 1386 {0x00008130, 0x00000000},
2591 { 0x00008138, 0x00000000 }, 1387 {0x00008134, 0x00000000},
2592 { 0x0000813c, 0x00000000 }, 1388 {0x00008138, 0x00000000},
2593 { 0x00008144, 0xffffffff }, 1389 {0x0000813c, 0x00000000},
2594 { 0x00008168, 0x00000000 }, 1390 {0x00008144, 0xffffffff},
2595 { 0x0000816c, 0x00000000 }, 1391 {0x00008168, 0x00000000},
2596 { 0x00008170, 0x32143320 }, 1392 {0x0000816c, 0x00000000},
2597 { 0x00008174, 0xfaa4fa50 }, 1393 {0x00008170, 0x32143320},
2598 { 0x00008178, 0x00000100 }, 1394 {0x00008174, 0xfaa4fa50},
2599 { 0x0000817c, 0x00000000 }, 1395 {0x00008178, 0x00000100},
2600 { 0x000081c0, 0x00000000 }, 1396 {0x0000817c, 0x00000000},
2601 { 0x000081d0, 0x0000320a }, 1397 {0x000081c0, 0x00000000},
2602 { 0x000081ec, 0x00000000 }, 1398 {0x000081d0, 0x0000320a},
2603 { 0x000081f0, 0x00000000 }, 1399 {0x000081ec, 0x00000000},
2604 { 0x000081f4, 0x00000000 }, 1400 {0x000081f0, 0x00000000},
2605 { 0x000081f8, 0x00000000 }, 1401 {0x000081f4, 0x00000000},
2606 { 0x000081fc, 0x00000000 }, 1402 {0x000081f8, 0x00000000},
2607 { 0x00008200, 0x00000000 }, 1403 {0x000081fc, 0x00000000},
2608 { 0x00008204, 0x00000000 }, 1404 {0x00008200, 0x00000000},
2609 { 0x00008208, 0x00000000 }, 1405 {0x00008204, 0x00000000},
2610 { 0x0000820c, 0x00000000 }, 1406 {0x00008208, 0x00000000},
2611 { 0x00008210, 0x00000000 }, 1407 {0x0000820c, 0x00000000},
2612 { 0x00008214, 0x00000000 }, 1408 {0x00008210, 0x00000000},
2613 { 0x00008218, 0x00000000 }, 1409 {0x00008214, 0x00000000},
2614 { 0x0000821c, 0x00000000 }, 1410 {0x00008218, 0x00000000},
2615 { 0x00008220, 0x00000000 }, 1411 {0x0000821c, 0x00000000},
2616 { 0x00008224, 0x00000000 }, 1412 {0x00008220, 0x00000000},
2617 { 0x00008228, 0x00000000 }, 1413 {0x00008224, 0x00000000},
2618 { 0x0000822c, 0x00000000 }, 1414 {0x00008228, 0x00000000},
2619 { 0x00008230, 0x00000000 }, 1415 {0x0000822c, 0x00000000},
2620 { 0x00008234, 0x00000000 }, 1416 {0x00008230, 0x00000000},
2621 { 0x00008238, 0x00000000 }, 1417 {0x00008234, 0x00000000},
2622 { 0x0000823c, 0x00000000 }, 1418 {0x00008238, 0x00000000},
2623 { 0x00008240, 0x00100000 }, 1419 {0x0000823c, 0x00000000},
2624 { 0x00008244, 0x0010f400 }, 1420 {0x00008240, 0x00100000},
2625 { 0x00008248, 0x00000100 }, 1421 {0x00008244, 0x0010f400},
2626 { 0x0000824c, 0x0001e800 }, 1422 {0x00008248, 0x00000100},
2627 { 0x00008250, 0x00000000 }, 1423 {0x0000824c, 0x0001e800},
2628 { 0x00008254, 0x00000000 }, 1424 {0x00008250, 0x00000000},
2629 { 0x00008258, 0x00000000 }, 1425 {0x00008254, 0x00000000},
2630 { 0x0000825c, 0x400000ff }, 1426 {0x00008258, 0x00000000},
2631 { 0x00008260, 0x00080922 }, 1427 {0x0000825c, 0x400000ff},
2632 { 0x00008264, 0x88a00010 }, 1428 {0x00008260, 0x00080922},
2633 { 0x00008270, 0x00000000 }, 1429 {0x00008264, 0x88a00010},
2634 { 0x00008274, 0x40000000 }, 1430 {0x00008270, 0x00000000},
2635 { 0x00008278, 0x003e4180 }, 1431 {0x00008274, 0x40000000},
2636 { 0x0000827c, 0x00000000 }, 1432 {0x00008278, 0x003e4180},
2637 { 0x00008284, 0x0000002c }, 1433 {0x0000827c, 0x00000000},
2638 { 0x00008288, 0x0000002c }, 1434 {0x00008284, 0x0000002c},
2639 { 0x0000828c, 0x00000000 }, 1435 {0x00008288, 0x0000002c},
2640 { 0x00008294, 0x00000000 }, 1436 {0x0000828c, 0x00000000},
2641 { 0x00008298, 0x00000000 }, 1437 {0x00008294, 0x00000000},
2642 { 0x0000829c, 0x00000000 }, 1438 {0x00008298, 0x00000000},
2643 { 0x00008300, 0x00000040 }, 1439 {0x0000829c, 0x00000000},
2644 { 0x00008314, 0x00000000 }, 1440 {0x00008300, 0x00000040},
2645 { 0x00008328, 0x00000000 }, 1441 {0x00008314, 0x00000000},
2646 { 0x0000832c, 0x00000001 }, 1442 {0x00008328, 0x00000000},
2647 { 0x00008330, 0x00000302 }, 1443 {0x0000832c, 0x00000001},
2648 { 0x00008334, 0x00000e00 }, 1444 {0x00008330, 0x00000302},
2649 { 0x00008338, 0x00ff0000 }, 1445 {0x00008334, 0x00000e00},
2650 { 0x0000833c, 0x00000000 }, 1446 {0x00008338, 0x00ff0000},
2651 { 0x00008340, 0x00010380 }, 1447 {0x0000833c, 0x00000000},
2652 { 0x00008344, 0x00481043 }, 1448 {0x00008340, 0x00010380},
2653 { 0x00009808, 0x00000000 }, 1449 {0x00008344, 0x00481043},
2654 { 0x0000980c, 0xafe68e30 }, 1450 {0x00009808, 0x00000000},
2655 { 0x00009810, 0xfd14e000 }, 1451 {0x0000980c, 0xafe68e30},
2656 { 0x00009814, 0x9c0a9f6b }, 1452 {0x00009810, 0xfd14e000},
2657 { 0x0000981c, 0x00000000 }, 1453 {0x00009814, 0x9c0a9f6b},
2658 { 0x0000982c, 0x0000a000 }, 1454 {0x0000981c, 0x00000000},
2659 { 0x00009830, 0x00000000 }, 1455 {0x0000982c, 0x0000a000},
2660 { 0x0000983c, 0x00200400 }, 1456 {0x00009830, 0x00000000},
2661 { 0x0000984c, 0x0040233c }, 1457 {0x0000983c, 0x00200400},
2662 { 0x00009854, 0x00000044 }, 1458 {0x0000984c, 0x0040233c},
2663 { 0x00009900, 0x00000000 }, 1459 {0x00009854, 0x00000044},
2664 { 0x00009904, 0x00000000 }, 1460 {0x00009900, 0x00000000},
2665 { 0x00009908, 0x00000000 }, 1461 {0x00009904, 0x00000000},
2666 { 0x0000990c, 0x00000000 }, 1462 {0x00009908, 0x00000000},
2667 { 0x00009910, 0x01002310 }, 1463 {0x0000990c, 0x00000000},
2668 { 0x0000991c, 0x10000fff }, 1464 {0x00009910, 0x01002310},
2669 { 0x00009920, 0x04900000 }, 1465 {0x0000991c, 0x10000fff},
2670 { 0x00009928, 0x00000001 }, 1466 {0x00009920, 0x04900000},
2671 { 0x0000992c, 0x00000004 }, 1467 {0x00009928, 0x00000001},
2672 { 0x00009934, 0x1e1f2022 }, 1468 {0x0000992c, 0x00000004},
2673 { 0x00009938, 0x0a0b0c0d }, 1469 {0x00009934, 0x1e1f2022},
2674 { 0x0000993c, 0x00000000 }, 1470 {0x00009938, 0x0a0b0c0d},
2675 { 0x00009940, 0x14750604 }, 1471 {0x0000993c, 0x00000000},
2676 { 0x00009948, 0x9280c00a }, 1472 {0x00009940, 0x14750604},
2677 { 0x0000994c, 0x00020028 }, 1473 {0x00009948, 0x9280c00a},
2678 { 0x00009954, 0x5f3ca3de }, 1474 {0x0000994c, 0x00020028},
2679 { 0x00009958, 0x2108ecff }, 1475 {0x00009954, 0x5f3ca3de},
2680 { 0x00009968, 0x000003ce }, 1476 {0x00009958, 0x2108ecff},
2681 { 0x00009970, 0x192bb514 }, 1477 {0x00009968, 0x000003ce},
2682 { 0x00009974, 0x00000000 }, 1478 {0x00009970, 0x192bb514},
2683 { 0x00009978, 0x00000001 }, 1479 {0x00009974, 0x00000000},
2684 { 0x0000997c, 0x00000000 }, 1480 {0x00009978, 0x00000001},
2685 { 0x00009980, 0x00000000 }, 1481 {0x0000997c, 0x00000000},
2686 { 0x00009984, 0x00000000 }, 1482 {0x00009980, 0x00000000},
2687 { 0x00009988, 0x00000000 }, 1483 {0x00009984, 0x00000000},
2688 { 0x0000998c, 0x00000000 }, 1484 {0x00009988, 0x00000000},
2689 { 0x00009990, 0x00000000 }, 1485 {0x0000998c, 0x00000000},
2690 { 0x00009994, 0x00000000 }, 1486 {0x00009990, 0x00000000},
2691 { 0x00009998, 0x00000000 }, 1487 {0x00009994, 0x00000000},
2692 { 0x0000999c, 0x00000000 }, 1488 {0x00009998, 0x00000000},
2693 { 0x000099a0, 0x00000000 }, 1489 {0x0000999c, 0x00000000},
2694 { 0x000099a4, 0x00000001 }, 1490 {0x000099a0, 0x00000000},
2695 { 0x000099a8, 0x201fff00 }, 1491 {0x000099a4, 0x00000001},
2696 { 0x000099ac, 0x2def0400 }, 1492 {0x000099a8, 0x201fff00},
2697 { 0x000099b0, 0x03051000 }, 1493 {0x000099ac, 0x2def0400},
2698 { 0x000099b4, 0x00000820 }, 1494 {0x000099b0, 0x03051000},
2699 { 0x000099dc, 0x00000000 }, 1495 {0x000099b4, 0x00000820},
2700 { 0x000099e0, 0x00000000 }, 1496 {0x000099dc, 0x00000000},
2701 { 0x000099e4, 0xaaaaaaaa }, 1497 {0x000099e0, 0x00000000},
2702 { 0x000099e8, 0x3c466478 }, 1498 {0x000099e4, 0xaaaaaaaa},
2703 { 0x000099ec, 0x0cc80caa }, 1499 {0x000099e8, 0x3c466478},
2704 { 0x000099f0, 0x00000000 }, 1500 {0x000099ec, 0x0cc80caa},
2705 { 0x0000a208, 0x803e68c8 }, 1501 {0x000099f0, 0x00000000},
2706 { 0x0000a210, 0x4080a333 }, 1502 {0x0000a208, 0x803e68c8},
2707 { 0x0000a214, 0x00206c10 }, 1503 {0x0000a210, 0x4080a333},
2708 { 0x0000a218, 0x009c4060 }, 1504 {0x0000a214, 0x00206c10},
2709 { 0x0000a220, 0x01834061 }, 1505 {0x0000a218, 0x009c4060},
2710 { 0x0000a224, 0x00000400 }, 1506 {0x0000a220, 0x01834061},
2711 { 0x0000a228, 0x000003b5 }, 1507 {0x0000a224, 0x00000400},
2712 { 0x0000a22c, 0x00000000 }, 1508 {0x0000a228, 0x000003b5},
2713 { 0x0000a234, 0x20202020 }, 1509 {0x0000a22c, 0x00000000},
2714 { 0x0000a238, 0x20202020 }, 1510 {0x0000a234, 0x20202020},
2715 { 0x0000a244, 0x00000000 }, 1511 {0x0000a238, 0x20202020},
2716 { 0x0000a248, 0xfffffffc }, 1512 {0x0000a244, 0x00000000},
2717 { 0x0000a24c, 0x00000000 }, 1513 {0x0000a248, 0xfffffffc},
2718 { 0x0000a254, 0x00000000 }, 1514 {0x0000a24c, 0x00000000},
2719 { 0x0000a258, 0x0ccb5380 }, 1515 {0x0000a254, 0x00000000},
2720 { 0x0000a25c, 0x15151501 }, 1516 {0x0000a258, 0x0ccb5380},
2721 { 0x0000a260, 0xdfa90f01 }, 1517 {0x0000a25c, 0x15151501},
2722 { 0x0000a268, 0x00000000 }, 1518 {0x0000a260, 0xdfa90f01},
2723 { 0x0000a26c, 0x0ebae9e6 }, 1519 {0x0000a268, 0x00000000},
2724 { 0x0000d270, 0x0d820820 }, 1520 {0x0000a26c, 0x0ebae9e6},
2725 { 0x0000d35c, 0x07ffffef }, 1521 {0x0000d270, 0x0d820820},
2726 { 0x0000d360, 0x0fffffe7 }, 1522 {0x0000d35c, 0x07ffffef},
2727 { 0x0000d364, 0x17ffffe5 }, 1523 {0x0000d360, 0x0fffffe7},
2728 { 0x0000d368, 0x1fffffe4 }, 1524 {0x0000d364, 0x17ffffe5},
2729 { 0x0000d36c, 0x37ffffe3 }, 1525 {0x0000d368, 0x1fffffe4},
2730 { 0x0000d370, 0x3fffffe3 }, 1526 {0x0000d36c, 0x37ffffe3},
2731 { 0x0000d374, 0x57ffffe3 }, 1527 {0x0000d370, 0x3fffffe3},
2732 { 0x0000d378, 0x5fffffe2 }, 1528 {0x0000d374, 0x57ffffe3},
2733 { 0x0000d37c, 0x7fffffe2 }, 1529 {0x0000d378, 0x5fffffe2},
2734 { 0x0000d380, 0x7f3c7bba }, 1530 {0x0000d37c, 0x7fffffe2},
2735 { 0x0000d384, 0xf3307ff0 }, 1531 {0x0000d380, 0x7f3c7bba},
2736 { 0x0000a388, 0x0c000000 }, 1532 {0x0000d384, 0xf3307ff0},
2737 { 0x0000a38c, 0x20202020 }, 1533 {0x0000a388, 0x0c000000},
2738 { 0x0000a390, 0x20202020 }, 1534 {0x0000a38c, 0x20202020},
2739 { 0x0000a39c, 0x00000001 }, 1535 {0x0000a390, 0x20202020},
2740 { 0x0000a3a0, 0x00000000 }, 1536 {0x0000a39c, 0x00000001},
2741 { 0x0000a3a4, 0x00000000 }, 1537 {0x0000a3a0, 0x00000000},
2742 { 0x0000a3a8, 0x00000000 }, 1538 {0x0000a3a4, 0x00000000},
2743 { 0x0000a3ac, 0x00000000 }, 1539 {0x0000a3a8, 0x00000000},
2744 { 0x0000a3b0, 0x00000000 }, 1540 {0x0000a3ac, 0x00000000},
2745 { 0x0000a3b4, 0x00000000 }, 1541 {0x0000a3b0, 0x00000000},
2746 { 0x0000a3b8, 0x00000000 }, 1542 {0x0000a3b4, 0x00000000},
2747 { 0x0000a3bc, 0x00000000 }, 1543 {0x0000a3b8, 0x00000000},
2748 { 0x0000a3c0, 0x00000000 }, 1544 {0x0000a3bc, 0x00000000},
2749 { 0x0000a3c4, 0x00000000 }, 1545 {0x0000a3c0, 0x00000000},
2750 { 0x0000a3cc, 0x20202020 }, 1546 {0x0000a3c4, 0x00000000},
2751 { 0x0000a3d0, 0x20202020 }, 1547 {0x0000a3cc, 0x20202020},
2752 { 0x0000a3d4, 0x20202020 }, 1548 {0x0000a3d0, 0x20202020},
2753 { 0x0000a3e4, 0x00000000 }, 1549 {0x0000a3d4, 0x20202020},
2754 { 0x0000a3e8, 0x18c43433 }, 1550 {0x0000a3e4, 0x00000000},
2755 { 0x0000a3ec, 0x00f70081 }, 1551 {0x0000a3e8, 0x18c43433},
2756 { 0x00007800, 0x00140000 }, 1552 {0x0000a3ec, 0x00f70081},
2757 { 0x00007804, 0x0e4548d8 }, 1553 {0x00007800, 0x00140000},
2758 { 0x00007808, 0x54214514 }, 1554 {0x00007804, 0x0e4548d8},
2759 { 0x0000780c, 0x02025830 }, 1555 {0x00007808, 0x54214514},
2760 { 0x00007810, 0x71c0d388 }, 1556 {0x0000780c, 0x02025830},
2761 { 0x0000781c, 0x00000000 }, 1557 {0x00007810, 0x71c0d388},
2762 { 0x00007824, 0x00d86fff }, 1558 {0x0000781c, 0x00000000},
2763 { 0x0000782c, 0x6e36d97b }, 1559 {0x00007824, 0x00d86fff},
2764 { 0x00007834, 0x71400087 }, 1560 {0x0000782c, 0x6e36d97b},
2765 { 0x00007844, 0x000c0db6 }, 1561 {0x00007834, 0x71400087},
2766 { 0x00007848, 0x6db6246f }, 1562 {0x00007844, 0x000c0db6},
2767 { 0x0000784c, 0x6d9b66db }, 1563 {0x00007848, 0x6db6246f},
2768 { 0x00007850, 0x6d8c6dba }, 1564 {0x0000784c, 0x6d9b66db},
2769 { 0x00007854, 0x00040000 }, 1565 {0x00007850, 0x6d8c6dba},
2770 { 0x00007858, 0xdb003012 }, 1566 {0x00007854, 0x00040000},
2771 { 0x0000785c, 0x04924914 }, 1567 {0x00007858, 0xdb003012},
2772 { 0x00007860, 0x21084210 }, 1568 {0x0000785c, 0x04924914},
2773 { 0x00007864, 0xf7d7ffde }, 1569 {0x00007860, 0x21084210},
2774 { 0x00007868, 0xc2034080 }, 1570 {0x00007864, 0xf7d7ffde},
2775 { 0x00007870, 0x10142c00 }, 1571 {0x00007868, 0xc2034080},
1572 {0x00007870, 0x10142c00},
2776}; 1573};
2777 1574
2778static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = { 1575static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
2779 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 1576 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2780 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 1577 {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000},
2781 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, 1578 {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000},
2782 { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 }, 1579 {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000},
2783 { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 }, 1580 {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000},
2784 { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 }, 1581 {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000},
2785 { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 }, 1582 {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000},
2786 { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 }, 1583 {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000},
2787 { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 }, 1584 {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000},
2788 { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 }, 1585 {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000},
2789 { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 }, 1586 {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000},
2790 { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 }, 1587 {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000},
2791 { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 }, 1588 {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000},
2792 { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 }, 1589 {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000},
2793 { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 }, 1590 {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
2794 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, 1591 {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
2795 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, 1592 {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2796 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1593 {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2797 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1594 {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2798 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1595 {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2799 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1596 {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2800 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1597 {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2801 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1598 {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
2802 { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 }, 1599 {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
2803 { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b }, 1600 {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
2804 { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e }, 1601 {0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803},
2805 { 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 }, 1602 {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
2806 { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe }, 1603 {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
2807 { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 }, 1604 {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
2808 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe }, 1605 {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
2809 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, 1606 {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652},
2810 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 }, 1607 {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
2811 { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, 1608 {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
2812 { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 }, 1609 {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
2813 { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, 1610 {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
2814 { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, 1611 {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
2815 { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, 1612 {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
2816 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
2817}; 1613};
2818 1614
2819static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = { 1615static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
2820 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 1616 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2821 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 1617 {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000},
2822 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, 1618 {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000},
2823 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, 1619 {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000},
2824 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, 1620 {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000},
2825 { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 }, 1621 {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000},
2826 { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 }, 1622 {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000},
2827 { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 }, 1623 {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000},
2828 { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 }, 1624 {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000},
2829 { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 }, 1625 {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000},
2830 { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 }, 1626 {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000},
2831 { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 }, 1627 {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000},
2832 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 }, 1628 {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000},
2833 { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 }, 1629 {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000},
2834 { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 }, 1630 {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
2835 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, 1631 {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
2836 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, 1632 {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2837 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1633 {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2838 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1634 {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2839 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1635 {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2840 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1636 {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2841 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1637 {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2842 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1638 {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
2843 { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 }, 1639 {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
2844 { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b }, 1640 {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
2845 { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e }, 1641 {0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801},
2846 { 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 }, 1642 {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
2847 { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe }, 1643 {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
2848 { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 }, 1644 {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
2849 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, 1645 {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
2850 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, 1646 {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652},
2851 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 }, 1647 {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
2852 { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, 1648 {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
2853 { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c }, 1649 {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
2854 { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, 1650 {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
2855 { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, 1651 {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
2856 { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, 1652 {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
2857 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
2858}; 1653};
2859 1654
2860static const u32 ar9285Modes_XE2_0_normal_power[][6] = { 1655static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
2861 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 1656 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2862 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, 1657 {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000},
2863 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, 1658 {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000},
2864 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, 1659 {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000},
2865 { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 }, 1660 {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000},
2866 { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 }, 1661 {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000},
2867 { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 }, 1662 {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000},
2868 { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 }, 1663 {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000},
2869 { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 }, 1664 {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000},
2870 { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 }, 1665 {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000},
2871 { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 }, 1666 {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000},
2872 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 }, 1667 {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000},
2873 { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 }, 1668 {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000},
2874 { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 }, 1669 {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000},
2875 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, 1670 {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
2876 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, 1671 {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
2877 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1672 {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2878 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1673 {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2879 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1674 {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2880 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1675 {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2881 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1676 {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2882 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1677 {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2883 { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 }, 1678 {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
2884 { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b }, 1679 {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b},
2885 { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae }, 1680 {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae},
2886 { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 }, 1681 {0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441},
2887 { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe }, 1682 {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
2888 { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c }, 1683 {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
2889 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, 1684 {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
2890 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, 1685 {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
2891 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 }, 1686 {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652},
2892 { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, 1687 {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
2893 { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c }, 1688 {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
2894 { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, 1689 {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
2895 { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, 1690 {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
2896 { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, 1691 {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
2897 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, 1692 {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
2898}; 1693};
2899 1694
2900static const u32 ar9285Modes_XE2_0_high_power[][6] = { 1695static const u32 ar9285Modes_XE2_0_high_power[][6] = {
2901 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 1696 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2902 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, 1697 {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000},
2903 { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 }, 1698 {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000},
2904 { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 }, 1699 {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000},
2905 { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 }, 1700 {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000},
2906 { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 }, 1701 {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000},
2907 { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 }, 1702 {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000},
2908 { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 }, 1703 {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000},
2909 { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 }, 1704 {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000},
2910 { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 }, 1705 {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000},
2911 { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 }, 1706 {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000},
2912 { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 }, 1707 {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000},
2913 { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 }, 1708 {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000},
2914 { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 }, 1709 {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000},
2915 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, 1710 {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
2916 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, 1711 {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
2917 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1712 {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2918 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1713 {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2919 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1714 {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2920 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1715 {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2921 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1716 {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2922 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 1717 {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
2923 { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 }, 1718 {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
2924 { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b }, 1719 {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b},
2925 { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e }, 1720 {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e},
2926 { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 }, 1721 {0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443},
2927 { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe }, 1722 {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
2928 { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c }, 1723 {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
2929 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe }, 1724 {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
2930 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, 1725 {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
2931 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 }, 1726 {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652},
2932 { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, 1727 {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
2933 { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 }, 1728 {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
2934 { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, 1729 {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
2935 { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, 1730 {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
2936 { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, 1731 {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
2937 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, 1732 {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
2938}; 1733};
2939 1734
2940static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = { 1735static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
2941 {0x00004040, 0x9248fd00 }, 1736 /* Addr allmodes */
2942 {0x00004040, 0x24924924 }, 1737 {0x00004040, 0x9248fd00},
2943 {0x00004040, 0xa8000019 }, 1738 {0x00004040, 0x24924924},
2944 {0x00004040, 0x13160820 }, 1739 {0x00004040, 0xa8000019},
2945 {0x00004040, 0xe5980560 }, 1740 {0x00004040, 0x13160820},
2946 {0x00004040, 0xc01dcffd }, 1741 {0x00004040, 0xe5980560},
2947 {0x00004040, 0x1aaabe41 }, 1742 {0x00004040, 0xc01dcffd},
2948 {0x00004040, 0xbe105554 }, 1743 {0x00004040, 0x1aaabe41},
2949 {0x00004040, 0x00043007 }, 1744 {0x00004040, 0xbe105554},
2950 {0x00004044, 0x00000000 }, 1745 {0x00004040, 0x00043007},
1746 {0x00004044, 0x00000000},
2951}; 1747};
2952 1748
2953static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = { 1749static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
2954 {0x00004040, 0x9248fd00 }, 1750 /* Addr allmodes */
2955 {0x00004040, 0x24924924 }, 1751 {0x00004040, 0x9248fd00},
2956 {0x00004040, 0xa8000019 }, 1752 {0x00004040, 0x24924924},
2957 {0x00004040, 0x13160820 }, 1753 {0x00004040, 0xa8000019},
2958 {0x00004040, 0xe5980560 }, 1754 {0x00004040, 0x13160820},
2959 {0x00004040, 0xc01dcffc }, 1755 {0x00004040, 0xe5980560},
2960 {0x00004040, 0x1aaabe41 }, 1756 {0x00004040, 0xc01dcffc},
2961 {0x00004040, 0xbe105554 }, 1757 {0x00004040, 0x1aaabe41},
2962 {0x00004040, 0x00043007 }, 1758 {0x00004040, 0xbe105554},
2963 {0x00004044, 0x00000000 }, 1759 {0x00004040, 0x00043007},
2964}; 1760 {0x00004044, 0x00000000},
2965
2966/* AR9287 Revision 10 */
2967static const u32 ar9287Modes_9287_1_0[][6] = {
2968 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
2969 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
2970 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
2971 { 0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180 },
2972 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
2973 { 0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0 },
2974 { 0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f },
2975 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
2976 { 0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a },
2977 { 0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880 },
2978 { 0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303 },
2979 { 0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200 },
2980 { 0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e },
2981 { 0x00009828, 0x00000000, 0x00000000, 0x0a020001, 0x0a020001, 0x0a020001 },
2982 { 0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2983 { 0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007 },
2984 { 0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e },
2985 { 0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0 },
2986 { 0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 },
2987 { 0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
2988 { 0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e },
2989 { 0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18 },
2990 { 0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2991 { 0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
2992 { 0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881 },
2993 { 0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0 },
2994 { 0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016 },
2995 { 0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2996 { 0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010 },
2997 { 0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 },
2998 { 0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 },
2999 { 0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210 },
3000 { 0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce },
3001 { 0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c },
3002 { 0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00 },
3003 { 0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
3004 { 0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444 },
3005 { 0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3006 { 0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3007 { 0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a },
3008 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
3009 { 0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000 },
3010 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
3011 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3012};
3013
3014static const u32 ar9287Common_9287_1_0[][2] = {
3015 { 0x0000000c, 0x00000000 },
3016 { 0x00000030, 0x00020015 },
3017 { 0x00000034, 0x00000005 },
3018 { 0x00000040, 0x00000000 },
3019 { 0x00000044, 0x00000008 },
3020 { 0x00000048, 0x00000008 },
3021 { 0x0000004c, 0x00000010 },
3022 { 0x00000050, 0x00000000 },
3023 { 0x00000054, 0x0000001f },
3024 { 0x00000800, 0x00000000 },
3025 { 0x00000804, 0x00000000 },
3026 { 0x00000808, 0x00000000 },
3027 { 0x0000080c, 0x00000000 },
3028 { 0x00000810, 0x00000000 },
3029 { 0x00000814, 0x00000000 },
3030 { 0x00000818, 0x00000000 },
3031 { 0x0000081c, 0x00000000 },
3032 { 0x00000820, 0x00000000 },
3033 { 0x00000824, 0x00000000 },
3034 { 0x00001040, 0x002ffc0f },
3035 { 0x00001044, 0x002ffc0f },
3036 { 0x00001048, 0x002ffc0f },
3037 { 0x0000104c, 0x002ffc0f },
3038 { 0x00001050, 0x002ffc0f },
3039 { 0x00001054, 0x002ffc0f },
3040 { 0x00001058, 0x002ffc0f },
3041 { 0x0000105c, 0x002ffc0f },
3042 { 0x00001060, 0x002ffc0f },
3043 { 0x00001064, 0x002ffc0f },
3044 { 0x00001230, 0x00000000 },
3045 { 0x00001270, 0x00000000 },
3046 { 0x00001038, 0x00000000 },
3047 { 0x00001078, 0x00000000 },
3048 { 0x000010b8, 0x00000000 },
3049 { 0x000010f8, 0x00000000 },
3050 { 0x00001138, 0x00000000 },
3051 { 0x00001178, 0x00000000 },
3052 { 0x000011b8, 0x00000000 },
3053 { 0x000011f8, 0x00000000 },
3054 { 0x00001238, 0x00000000 },
3055 { 0x00001278, 0x00000000 },
3056 { 0x000012b8, 0x00000000 },
3057 { 0x000012f8, 0x00000000 },
3058 { 0x00001338, 0x00000000 },
3059 { 0x00001378, 0x00000000 },
3060 { 0x000013b8, 0x00000000 },
3061 { 0x000013f8, 0x00000000 },
3062 { 0x00001438, 0x00000000 },
3063 { 0x00001478, 0x00000000 },
3064 { 0x000014b8, 0x00000000 },
3065 { 0x000014f8, 0x00000000 },
3066 { 0x00001538, 0x00000000 },
3067 { 0x00001578, 0x00000000 },
3068 { 0x000015b8, 0x00000000 },
3069 { 0x000015f8, 0x00000000 },
3070 { 0x00001638, 0x00000000 },
3071 { 0x00001678, 0x00000000 },
3072 { 0x000016b8, 0x00000000 },
3073 { 0x000016f8, 0x00000000 },
3074 { 0x00001738, 0x00000000 },
3075 { 0x00001778, 0x00000000 },
3076 { 0x000017b8, 0x00000000 },
3077 { 0x000017f8, 0x00000000 },
3078 { 0x0000103c, 0x00000000 },
3079 { 0x0000107c, 0x00000000 },
3080 { 0x000010bc, 0x00000000 },
3081 { 0x000010fc, 0x00000000 },
3082 { 0x0000113c, 0x00000000 },
3083 { 0x0000117c, 0x00000000 },
3084 { 0x000011bc, 0x00000000 },
3085 { 0x000011fc, 0x00000000 },
3086 { 0x0000123c, 0x00000000 },
3087 { 0x0000127c, 0x00000000 },
3088 { 0x000012bc, 0x00000000 },
3089 { 0x000012fc, 0x00000000 },
3090 { 0x0000133c, 0x00000000 },
3091 { 0x0000137c, 0x00000000 },
3092 { 0x000013bc, 0x00000000 },
3093 { 0x000013fc, 0x00000000 },
3094 { 0x0000143c, 0x00000000 },
3095 { 0x0000147c, 0x00000000 },
3096 { 0x00004030, 0x00000002 },
3097 { 0x0000403c, 0x00000002 },
3098 { 0x00004024, 0x0000001f },
3099 { 0x00004060, 0x00000000 },
3100 { 0x00004064, 0x00000000 },
3101 { 0x00007010, 0x00000033 },
3102 { 0x00007020, 0x00000000 },
3103 { 0x00007034, 0x00000002 },
3104 { 0x00007038, 0x000004c2 },
3105 { 0x00008004, 0x00000000 },
3106 { 0x00008008, 0x00000000 },
3107 { 0x0000800c, 0x00000000 },
3108 { 0x00008018, 0x00000700 },
3109 { 0x00008020, 0x00000000 },
3110 { 0x00008038, 0x00000000 },
3111 { 0x0000803c, 0x00000000 },
3112 { 0x00008048, 0x40000000 },
3113 { 0x00008054, 0x00000000 },
3114 { 0x00008058, 0x00000000 },
3115 { 0x0000805c, 0x000fc78f },
3116 { 0x00008060, 0x0000000f },
3117 { 0x00008064, 0x00000000 },
3118 { 0x00008070, 0x00000000 },
3119 { 0x000080c0, 0x2a80001a },
3120 { 0x000080c4, 0x05dc01e0 },
3121 { 0x000080c8, 0x1f402710 },
3122 { 0x000080cc, 0x01f40000 },
3123 { 0x000080d0, 0x00001e00 },
3124 { 0x000080d4, 0x00000000 },
3125 { 0x000080d8, 0x00400000 },
3126 { 0x000080e0, 0xffffffff },
3127 { 0x000080e4, 0x0000ffff },
3128 { 0x000080e8, 0x003f3f3f },
3129 { 0x000080ec, 0x00000000 },
3130 { 0x000080f0, 0x00000000 },
3131 { 0x000080f4, 0x00000000 },
3132 { 0x000080f8, 0x00000000 },
3133 { 0x000080fc, 0x00020000 },
3134 { 0x00008100, 0x00020000 },
3135 { 0x00008104, 0x00000001 },
3136 { 0x00008108, 0x00000052 },
3137 { 0x0000810c, 0x00000000 },
3138 { 0x00008110, 0x00000168 },
3139 { 0x00008118, 0x000100aa },
3140 { 0x0000811c, 0x00003210 },
3141 { 0x00008124, 0x00000000 },
3142 { 0x00008128, 0x00000000 },
3143 { 0x0000812c, 0x00000000 },
3144 { 0x00008130, 0x00000000 },
3145 { 0x00008134, 0x00000000 },
3146 { 0x00008138, 0x00000000 },
3147 { 0x0000813c, 0x00000000 },
3148 { 0x00008144, 0xffffffff },
3149 { 0x00008168, 0x00000000 },
3150 { 0x0000816c, 0x00000000 },
3151 { 0x00008170, 0x18487320 },
3152 { 0x00008174, 0xfaa4fa50 },
3153 { 0x00008178, 0x00000100 },
3154 { 0x0000817c, 0x00000000 },
3155 { 0x000081c0, 0x00000000 },
3156 { 0x000081c4, 0x00000000 },
3157 { 0x000081d4, 0x00000000 },
3158 { 0x000081ec, 0x00000000 },
3159 { 0x000081f0, 0x00000000 },
3160 { 0x000081f4, 0x00000000 },
3161 { 0x000081f8, 0x00000000 },
3162 { 0x000081fc, 0x00000000 },
3163 { 0x00008200, 0x00000000 },
3164 { 0x00008204, 0x00000000 },
3165 { 0x00008208, 0x00000000 },
3166 { 0x0000820c, 0x00000000 },
3167 { 0x00008210, 0x00000000 },
3168 { 0x00008214, 0x00000000 },
3169 { 0x00008218, 0x00000000 },
3170 { 0x0000821c, 0x00000000 },
3171 { 0x00008220, 0x00000000 },
3172 { 0x00008224, 0x00000000 },
3173 { 0x00008228, 0x00000000 },
3174 { 0x0000822c, 0x00000000 },
3175 { 0x00008230, 0x00000000 },
3176 { 0x00008234, 0x00000000 },
3177 { 0x00008238, 0x00000000 },
3178 { 0x0000823c, 0x00000000 },
3179 { 0x00008240, 0x00100000 },
3180 { 0x00008244, 0x0010f400 },
3181 { 0x00008248, 0x00000100 },
3182 { 0x0000824c, 0x0001e800 },
3183 { 0x00008250, 0x00000000 },
3184 { 0x00008254, 0x00000000 },
3185 { 0x00008258, 0x00000000 },
3186 { 0x0000825c, 0x400000ff },
3187 { 0x00008260, 0x00080922 },
3188 { 0x00008264, 0x88a00010 },
3189 { 0x00008270, 0x00000000 },
3190 { 0x00008274, 0x40000000 },
3191 { 0x00008278, 0x003e4180 },
3192 { 0x0000827c, 0x00000000 },
3193 { 0x00008284, 0x0000002c },
3194 { 0x00008288, 0x0000002c },
3195 { 0x0000828c, 0x000000ff },
3196 { 0x00008294, 0x00000000 },
3197 { 0x00008298, 0x00000000 },
3198 { 0x0000829c, 0x00000000 },
3199 { 0x00008300, 0x00000040 },
3200 { 0x00008314, 0x00000000 },
3201 { 0x00008328, 0x00000000 },
3202 { 0x0000832c, 0x00000007 },
3203 { 0x00008330, 0x00000302 },
3204 { 0x00008334, 0x00000e00 },
3205 { 0x00008338, 0x00ff0000 },
3206 { 0x0000833c, 0x00000000 },
3207 { 0x00008340, 0x000107ff },
3208 { 0x00008344, 0x01c81043 },
3209 { 0x00008360, 0xffffffff },
3210 { 0x00008364, 0xffffffff },
3211 { 0x00008368, 0x00000000 },
3212 { 0x00008370, 0x00000000 },
3213 { 0x00008374, 0x000000ff },
3214 { 0x00008378, 0x00000000 },
3215 { 0x0000837c, 0x00000000 },
3216 { 0x00008380, 0xffffffff },
3217 { 0x00008384, 0xffffffff },
3218 { 0x00008390, 0x0fffffff },
3219 { 0x00008394, 0x0fffffff },
3220 { 0x00008398, 0x00000000 },
3221 { 0x0000839c, 0x00000000 },
3222 { 0x000083a0, 0x00000000 },
3223 { 0x00009808, 0x00000000 },
3224 { 0x0000980c, 0xafe68e30 },
3225 { 0x00009810, 0xfd14e000 },
3226 { 0x00009814, 0x9c0a9f6b },
3227 { 0x0000981c, 0x00000000 },
3228 { 0x0000982c, 0x0000a000 },
3229 { 0x00009830, 0x00000000 },
3230 { 0x0000983c, 0x00200400 },
3231 { 0x0000984c, 0x0040233c },
3232 { 0x0000a84c, 0x0040233c },
3233 { 0x00009854, 0x00000044 },
3234 { 0x00009900, 0x00000000 },
3235 { 0x00009904, 0x00000000 },
3236 { 0x00009908, 0x00000000 },
3237 { 0x0000990c, 0x00000000 },
3238 { 0x00009910, 0x10002310 },
3239 { 0x0000991c, 0x10000fff },
3240 { 0x00009920, 0x04900000 },
3241 { 0x0000a920, 0x04900000 },
3242 { 0x00009928, 0x00000001 },
3243 { 0x0000992c, 0x00000004 },
3244 { 0x00009930, 0x00000000 },
3245 { 0x0000a930, 0x00000000 },
3246 { 0x00009934, 0x1e1f2022 },
3247 { 0x00009938, 0x0a0b0c0d },
3248 { 0x0000993c, 0x00000000 },
3249 { 0x00009948, 0x9280c00a },
3250 { 0x0000994c, 0x00020028 },
3251 { 0x00009954, 0x5f3ca3de },
3252 { 0x00009958, 0x0108ecff },
3253 { 0x00009940, 0x14750604 },
3254 { 0x0000c95c, 0x004b6a8e },
3255 { 0x00009970, 0x990bb515 },
3256 { 0x00009974, 0x00000000 },
3257 { 0x00009978, 0x00000001 },
3258 { 0x0000997c, 0x00000000 },
3259 { 0x000099a0, 0x00000000 },
3260 { 0x000099a4, 0x00000001 },
3261 { 0x000099a8, 0x201fff00 },
3262 { 0x000099ac, 0x0c6f0000 },
3263 { 0x000099b0, 0x03051000 },
3264 { 0x000099b4, 0x00000820 },
3265 { 0x000099c4, 0x06336f77 },
3266 { 0x000099c8, 0x6af65329 },
3267 { 0x000099cc, 0x08f186c8 },
3268 { 0x000099d0, 0x00046384 },
3269 { 0x000099dc, 0x00000000 },
3270 { 0x000099e0, 0x00000000 },
3271 { 0x000099e4, 0xaaaaaaaa },
3272 { 0x000099e8, 0x3c466478 },
3273 { 0x000099ec, 0x0cc80caa },
3274 { 0x000099f0, 0x00000000 },
3275 { 0x000099fc, 0x00001042 },
3276 { 0x0000a1f4, 0x00fffeff },
3277 { 0x0000a1f8, 0x00f5f9ff },
3278 { 0x0000a1fc, 0xb79f6427 },
3279 { 0x0000a208, 0x803e4788 },
3280 { 0x0000a210, 0x4080a333 },
3281 { 0x0000a214, 0x40206c10 },
3282 { 0x0000a218, 0x009c4060 },
3283 { 0x0000a220, 0x01834061 },
3284 { 0x0000a224, 0x00000400 },
3285 { 0x0000a228, 0x000003b5 },
3286 { 0x0000a22c, 0x233f7180 },
3287 { 0x0000a234, 0x20202020 },
3288 { 0x0000a238, 0x20202020 },
3289 { 0x0000a23c, 0x13c889af },
3290 { 0x0000a240, 0x38490a20 },
3291 { 0x0000a244, 0x00000000 },
3292 { 0x0000a248, 0xfffffffc },
3293 { 0x0000a24c, 0x00000000 },
3294 { 0x0000a254, 0x00000000 },
3295 { 0x0000a258, 0x0cdbd380 },
3296 { 0x0000a25c, 0x0f0f0f01 },
3297 { 0x0000a260, 0xdfa91f01 },
3298 { 0x0000a264, 0x00418a11 },
3299 { 0x0000b264, 0x00418a11 },
3300 { 0x0000a268, 0x00000000 },
3301 { 0x0000a26c, 0x0e79e5c6 },
3302 { 0x0000b26c, 0x0e79e5c6 },
3303 { 0x0000d270, 0x00820820 },
3304 { 0x0000a278, 0x1ce739ce },
3305 { 0x0000a27c, 0x050701ce },
3306 { 0x0000d35c, 0x07ffffef },
3307 { 0x0000d360, 0x0fffffe7 },
3308 { 0x0000d364, 0x17ffffe5 },
3309 { 0x0000d368, 0x1fffffe4 },
3310 { 0x0000d36c, 0x37ffffe3 },
3311 { 0x0000d370, 0x3fffffe3 },
3312 { 0x0000d374, 0x57ffffe3 },
3313 { 0x0000d378, 0x5fffffe2 },
3314 { 0x0000d37c, 0x7fffffe2 },
3315 { 0x0000d380, 0x7f3c7bba },
3316 { 0x0000d384, 0xf3307ff0 },
3317 { 0x0000a388, 0x0c000000 },
3318 { 0x0000a38c, 0x20202020 },
3319 { 0x0000a390, 0x20202020 },
3320 { 0x0000a394, 0x1ce739ce },
3321 { 0x0000a398, 0x000001ce },
3322 { 0x0000b398, 0x000001ce },
3323 { 0x0000a39c, 0x00000001 },
3324 { 0x0000a3c8, 0x00000246 },
3325 { 0x0000a3cc, 0x20202020 },
3326 { 0x0000a3d0, 0x20202020 },
3327 { 0x0000a3d4, 0x20202020 },
3328 { 0x0000a3dc, 0x1ce739ce },
3329 { 0x0000a3e0, 0x000001ce },
3330 { 0x0000a3e4, 0x00000000 },
3331 { 0x0000a3e8, 0x18c43433 },
3332 { 0x0000a3ec, 0x00f70081 },
3333 { 0x0000a3f0, 0x01036a1e },
3334 { 0x0000a3f4, 0x00000000 },
3335 { 0x0000b3f4, 0x00000000 },
3336 { 0x0000a7d8, 0x00000001 },
3337 { 0x00007800, 0x00000800 },
3338 { 0x00007804, 0x6c35ffb0 },
3339 { 0x00007808, 0x6db6c000 },
3340 { 0x0000780c, 0x6db6cb30 },
3341 { 0x00007810, 0x6db6cb6c },
3342 { 0x00007814, 0x0501e200 },
3343 { 0x00007818, 0x0094128d },
3344 { 0x0000781c, 0x976ee392 },
3345 { 0x00007820, 0xf75ff6fc },
3346 { 0x00007824, 0x00040000 },
3347 { 0x00007828, 0xdb003012 },
3348 { 0x0000782c, 0x04924914 },
3349 { 0x00007830, 0x21084210 },
3350 { 0x00007834, 0x00140000 },
3351 { 0x00007838, 0x0e4548d8 },
3352 { 0x0000783c, 0x54214514 },
3353 { 0x00007840, 0x02025820 },
3354 { 0x00007844, 0x71c0d388 },
3355 { 0x00007848, 0x934934a8 },
3356 { 0x00007850, 0x00000000 },
3357 { 0x00007854, 0x00000800 },
3358 { 0x00007858, 0x6c35ffb0 },
3359 { 0x0000785c, 0x6db6c000 },
3360 { 0x00007860, 0x6db6cb2c },
3361 { 0x00007864, 0x6db6cb6c },
3362 { 0x00007868, 0x0501e200 },
3363 { 0x0000786c, 0x0094128d },
3364 { 0x00007870, 0x976ee392 },
3365 { 0x00007874, 0xf75ff6fc },
3366 { 0x00007878, 0x00040000 },
3367 { 0x0000787c, 0xdb003012 },
3368 { 0x00007880, 0x04924914 },
3369 { 0x00007884, 0x21084210 },
3370 { 0x00007888, 0x001b6db0 },
3371 { 0x0000788c, 0x00376b63 },
3372 { 0x00007890, 0x06db6db6 },
3373 { 0x00007894, 0x006d8000 },
3374 { 0x00007898, 0x48100000 },
3375 { 0x0000789c, 0x00000000 },
3376 { 0x000078a0, 0x08000000 },
3377 { 0x000078a4, 0x0007ffd8 },
3378 { 0x000078a8, 0x0007ffd8 },
3379 { 0x000078ac, 0x001c0020 },
3380 { 0x000078b0, 0x000611eb },
3381 { 0x000078b4, 0x40008080 },
3382 { 0x000078b8, 0x2a850160 },
3383}; 1761};
3384 1762
3385static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = {
3386 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
3387 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3388 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
3389 { 0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004 },
3390 { 0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a },
3391 { 0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c },
3392 { 0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b },
3393 { 0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a },
3394 { 0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a },
3395 { 0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a },
3396 { 0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a },
3397 { 0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a },
3398 { 0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a },
3399 { 0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a },
3400 { 0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a },
3401 { 0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c },
3402 { 0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc },
3403 { 0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4 },
3404 { 0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc },
3405 { 0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede },
3406 { 0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e },
3407 { 0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e },
3408 { 0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e },
3409 { 0x0000a780, 0x00000000, 0x00000000, 0x00000060, 0x00000060, 0x00000060 },
3410 { 0x0000a784, 0x00000000, 0x00000000, 0x00004062, 0x00004062, 0x00004062 },
3411 { 0x0000a788, 0x00000000, 0x00000000, 0x00008064, 0x00008064, 0x00008064 },
3412 { 0x0000a78c, 0x00000000, 0x00000000, 0x0000c0a4, 0x0000c0a4, 0x0000c0a4 },
3413 { 0x0000a790, 0x00000000, 0x00000000, 0x000100b0, 0x000100b0, 0x000100b0 },
3414 { 0x0000a794, 0x00000000, 0x00000000, 0x000140b2, 0x000140b2, 0x000140b2 },
3415 { 0x0000a798, 0x00000000, 0x00000000, 0x000180b4, 0x000180b4, 0x000180b4 },
3416 { 0x0000a79c, 0x00000000, 0x00000000, 0x0001c0f4, 0x0001c0f4, 0x0001c0f4 },
3417 { 0x0000a7a0, 0x00000000, 0x00000000, 0x00020134, 0x00020134, 0x00020134 },
3418 { 0x0000a7a4, 0x00000000, 0x00000000, 0x000240fe, 0x000240fe, 0x000240fe },
3419 { 0x0000a7a8, 0x00000000, 0x00000000, 0x0002813e, 0x0002813e, 0x0002813e },
3420 { 0x0000a7ac, 0x00000000, 0x00000000, 0x0002c17e, 0x0002c17e, 0x0002c17e },
3421 { 0x0000a7b0, 0x00000000, 0x00000000, 0x000301be, 0x000301be, 0x000301be },
3422 { 0x0000a7b4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3423 { 0x0000a7b8, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3424 { 0x0000a7bc, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3425 { 0x0000a7c0, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3426 { 0x0000a7c4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3427 { 0x0000a7c8, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3428 { 0x0000a7cc, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3429 { 0x0000a7d0, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3430 { 0x0000a7d4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe },
3431 { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
3432};
3433
3434
3435static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = {
3436 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
3437 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
3438 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
3439 { 0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 },
3440 { 0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c },
3441 { 0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 },
3442 { 0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 },
3443 { 0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 },
3444 { 0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c },
3445 { 0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 },
3446 { 0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 },
3447 { 0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 },
3448 { 0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c },
3449 { 0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 },
3450 { 0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 },
3451 { 0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 },
3452 { 0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 },
3453 { 0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 },
3454 { 0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac },
3455 { 0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 },
3456 { 0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 },
3457 { 0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 },
3458 { 0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 },
3459 { 0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 },
3460 { 0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c },
3461 { 0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 },
3462 { 0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 },
3463 { 0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 },
3464 { 0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c },
3465 { 0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 },
3466 { 0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 },
3467 { 0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 },
3468 { 0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c },
3469 { 0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 },
3470 { 0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 },
3471 { 0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 },
3472 { 0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 },
3473 { 0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 },
3474 { 0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 },
3475 { 0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 },
3476 { 0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c },
3477 { 0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 },
3478 { 0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 },
3479 { 0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 },
3480 { 0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c },
3481 { 0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 },
3482 { 0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 },
3483 { 0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 },
3484 { 0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 },
3485 { 0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 },
3486 { 0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 },
3487 { 0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c },
3488 { 0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 },
3489 { 0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 },
3490 { 0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 },
3491 { 0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 },
3492 { 0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 },
3493 { 0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac },
3494 { 0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 },
3495 { 0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 },
3496 { 0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 },
3497 { 0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 },
3498 { 0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 },
3499 { 0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 },
3500 { 0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 },
3501 { 0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 },
3502 { 0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 },
3503 { 0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 },
3504 { 0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c },
3505 { 0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 },
3506 { 0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 },
3507 { 0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c },
3508 { 0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 },
3509 { 0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 },
3510 { 0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 },
3511 { 0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 },
3512 { 0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 },
3513 { 0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac },
3514 { 0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 },
3515 { 0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 },
3516 { 0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 },
3517 { 0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 },
3518 { 0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 },
3519 { 0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad },
3520 { 0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 },
3521 { 0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 },
3522 { 0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 },
3523 { 0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 },
3524 { 0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 },
3525 { 0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd },
3526 { 0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 },
3527 { 0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 },
3528 { 0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 },
3529 { 0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 },
3530 { 0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca },
3531 { 0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce },
3532 { 0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 },
3533 { 0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 },
3534 { 0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda },
3535 { 0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 },
3536 { 0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb },
3537 { 0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf },
3538 { 0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 },
3539 { 0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 },
3540 { 0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3541 { 0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3542 { 0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3543 { 0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3544 { 0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3545 { 0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3546 { 0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3547 { 0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3548 { 0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3549 { 0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3550 { 0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3551 { 0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3552 { 0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3553 { 0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3554 { 0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3555 { 0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3556 { 0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3557 { 0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3558 { 0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3559 { 0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3560 { 0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3561 { 0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3562 { 0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3563 { 0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3564 { 0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3565 { 0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
3566 { 0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
3567 { 0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 },
3568 { 0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c },
3569 { 0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 },
3570 { 0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 },
3571 { 0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 },
3572 { 0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c },
3573 { 0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 },
3574 { 0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 },
3575 { 0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 },
3576 { 0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c },
3577 { 0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 },
3578 { 0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 },
3579 { 0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 },
3580 { 0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 },
3581 { 0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 },
3582 { 0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac },
3583 { 0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 },
3584 { 0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 },
3585 { 0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 },
3586 { 0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 },
3587 { 0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 },
3588 { 0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c },
3589 { 0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 },
3590 { 0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 },
3591 { 0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 },
3592 { 0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c },
3593 { 0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 },
3594 { 0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 },
3595 { 0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 },
3596 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c },
3597 { 0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 },
3598 { 0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 },
3599 { 0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 },
3600 { 0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 },
3601 { 0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 },
3602 { 0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 },
3603 { 0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 },
3604 { 0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c },
3605 { 0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 },
3606 { 0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 },
3607 { 0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 },
3608 { 0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c },
3609 { 0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 },
3610 { 0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 },
3611 { 0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 },
3612 { 0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 },
3613 { 0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 },
3614 { 0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 },
3615 { 0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c },
3616 { 0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 },
3617 { 0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 },
3618 { 0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 },
3619 { 0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 },
3620 { 0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 },
3621 { 0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac },
3622 { 0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 },
3623 { 0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 },
3624 { 0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 },
3625 { 0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 },
3626 { 0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 },
3627 { 0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 },
3628 { 0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 },
3629 { 0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 },
3630 { 0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 },
3631 { 0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 },
3632 { 0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c },
3633 { 0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 },
3634 { 0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 },
3635 { 0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c },
3636 { 0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 },
3637 { 0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 },
3638 { 0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 },
3639 { 0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 },
3640 { 0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 },
3641 { 0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac },
3642 { 0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 },
3643 { 0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 },
3644 { 0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 },
3645 { 0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 },
3646 { 0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 },
3647 { 0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad },
3648 { 0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 },
3649 { 0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 },
3650 { 0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 },
3651 { 0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 },
3652 { 0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 },
3653 { 0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd },
3654 { 0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 },
3655 { 0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 },
3656 { 0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 },
3657 { 0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 },
3658 { 0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca },
3659 { 0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce },
3660 { 0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 },
3661 { 0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 },
3662 { 0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda },
3663 { 0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 },
3664 { 0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb },
3665 { 0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf },
3666 { 0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 },
3667 { 0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 },
3668 { 0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3669 { 0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3670 { 0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3671 { 0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3672 { 0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3673 { 0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3674 { 0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3675 { 0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3676 { 0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3677 { 0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3678 { 0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3679 { 0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3680 { 0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3681 { 0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3682 { 0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3683 { 0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3684 { 0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3685 { 0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3686 { 0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3687 { 0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3688 { 0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3689 { 0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3690 { 0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3691 { 0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3692 { 0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
3693 { 0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
3694 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
3695};
3696
3697static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
3698 {0x00004040, 0x9248fd00 },
3699 {0x00004040, 0x24924924 },
3700 {0x00004040, 0xa8000019 },
3701 {0x00004040, 0x13160820 },
3702 {0x00004040, 0xe5980560 },
3703 {0x00004040, 0xc01dcffd },
3704 {0x00004040, 0x1aaabe41 },
3705 {0x00004040, 0xbe105554 },
3706 {0x00004040, 0x00043007 },
3707 {0x00004044, 0x00000000 },
3708};
3709
3710static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
3711 {0x00004040, 0x9248fd00 },
3712 {0x00004040, 0x24924924 },
3713 {0x00004040, 0xa8000019 },
3714 {0x00004040, 0x13160820 },
3715 {0x00004040, 0xe5980560 },
3716 {0x00004040, 0xc01dcffc },
3717 {0x00004040, 0x1aaabe41 },
3718 {0x00004040, 0xbe105554 },
3719 {0x00004040, 0x00043007 },
3720 {0x00004044, 0x00000000 },
3721};
3722
/* AR9287 Revision 11 */
3724
3725static const u32 ar9287Modes_9287_1_1[][6] = { 1763static const u32 ar9287Modes_9287_1_1[][6] = {
3726 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 1764 {0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0},
3727 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, 1765 {0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0},
3728 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, 1766 {0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180},
3729 { 0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180 }, 1767 {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
3730 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, 1768 {0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0},
3731 { 0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0 }, 1769 {0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f},
3732 { 0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f }, 1770 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
3733 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, 1771 {0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a},
3734 { 0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a }, 1772 {0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880},
3735 { 0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880 }, 1773 {0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303},
3736 { 0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303 }, 1774 {0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200},
3737 { 0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200 }, 1775 {0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e},
3738 { 0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e }, 1776 {0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001, 0x3a020001},
3739 { 0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001, 0x3a020001 }, 1777 {0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e},
3740 { 0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 1778 {0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007},
3741 { 0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007 }, 1779 {0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e},
3742 { 0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e }, 1780 {0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0},
3743 { 0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0 }, 1781 {0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2},
3744 { 0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, 1782 {0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
3745 { 0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 1783 {0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e},
3746 { 0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e }, 1784 {0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18},
3747 { 0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18 }, 1785 {0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
3748 { 0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 1786 {0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
3749 { 0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 1787 {0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881},
3750 { 0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881 }, 1788 {0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0},
3751 { 0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0 }, 1789 {0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016},
3752 { 0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016 }, 1790 {0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
3753 { 0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, 1791 {0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010},
3754 { 0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010 }, 1792 {0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010},
3755 { 0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, 1793 {0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010},
3756 { 0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, 1794 {0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210},
3757 { 0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210 }, 1795 {0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce},
3758 { 0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce }, 1796 {0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c},
3759 { 0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c }, 1797 {0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00},
3760 { 0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00 }, 1798 {0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
3761 { 0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 1799 {0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444},
3762 { 0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444 }, 1800 {0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3763 { 0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 1801 {0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3764 { 0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 1802 {0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a},
3765 { 0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a }, 1803 {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
3766 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 1804 {0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000},
3767 { 0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000 }, 1805 {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
3768 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 1806 {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3769 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3770}; 1807};
3771 1808
3772static const u32 ar9287Common_9287_1_1[][2] = { 1809static const u32 ar9287Common_9287_1_1[][2] = {
3773 { 0x0000000c, 0x00000000 }, 1810 /* Addr allmodes */
3774 { 0x00000030, 0x00020015 }, 1811 {0x0000000c, 0x00000000},
3775 { 0x00000034, 0x00000005 }, 1812 {0x00000030, 0x00020015},
3776 { 0x00000040, 0x00000000 }, 1813 {0x00000034, 0x00000005},
3777 { 0x00000044, 0x00000008 }, 1814 {0x00000040, 0x00000000},
3778 { 0x00000048, 0x00000008 }, 1815 {0x00000044, 0x00000008},
3779 { 0x0000004c, 0x00000010 }, 1816 {0x00000048, 0x00000008},
3780 { 0x00000050, 0x00000000 }, 1817 {0x0000004c, 0x00000010},
3781 { 0x00000054, 0x0000001f }, 1818 {0x00000050, 0x00000000},
3782 { 0x00000800, 0x00000000 }, 1819 {0x00000054, 0x0000001f},
3783 { 0x00000804, 0x00000000 }, 1820 {0x00000800, 0x00000000},
3784 { 0x00000808, 0x00000000 }, 1821 {0x00000804, 0x00000000},
3785 { 0x0000080c, 0x00000000 }, 1822 {0x00000808, 0x00000000},
3786 { 0x00000810, 0x00000000 }, 1823 {0x0000080c, 0x00000000},
3787 { 0x00000814, 0x00000000 }, 1824 {0x00000810, 0x00000000},
3788 { 0x00000818, 0x00000000 }, 1825 {0x00000814, 0x00000000},
3789 { 0x0000081c, 0x00000000 }, 1826 {0x00000818, 0x00000000},
3790 { 0x00000820, 0x00000000 }, 1827 {0x0000081c, 0x00000000},
3791 { 0x00000824, 0x00000000 }, 1828 {0x00000820, 0x00000000},
3792 { 0x00001040, 0x002ffc0f }, 1829 {0x00000824, 0x00000000},
3793 { 0x00001044, 0x002ffc0f }, 1830 {0x00001040, 0x002ffc0f},
3794 { 0x00001048, 0x002ffc0f }, 1831 {0x00001044, 0x002ffc0f},
3795 { 0x0000104c, 0x002ffc0f }, 1832 {0x00001048, 0x002ffc0f},
3796 { 0x00001050, 0x002ffc0f }, 1833 {0x0000104c, 0x002ffc0f},
3797 { 0x00001054, 0x002ffc0f }, 1834 {0x00001050, 0x002ffc0f},
3798 { 0x00001058, 0x002ffc0f }, 1835 {0x00001054, 0x002ffc0f},
3799 { 0x0000105c, 0x002ffc0f }, 1836 {0x00001058, 0x002ffc0f},
3800 { 0x00001060, 0x002ffc0f }, 1837 {0x0000105c, 0x002ffc0f},
3801 { 0x00001064, 0x002ffc0f }, 1838 {0x00001060, 0x002ffc0f},
3802 { 0x00001230, 0x00000000 }, 1839 {0x00001064, 0x002ffc0f},
3803 { 0x00001270, 0x00000000 }, 1840 {0x00001230, 0x00000000},
3804 { 0x00001038, 0x00000000 }, 1841 {0x00001270, 0x00000000},
3805 { 0x00001078, 0x00000000 }, 1842 {0x00001038, 0x00000000},
3806 { 0x000010b8, 0x00000000 }, 1843 {0x00001078, 0x00000000},
3807 { 0x000010f8, 0x00000000 }, 1844 {0x000010b8, 0x00000000},
3808 { 0x00001138, 0x00000000 }, 1845 {0x000010f8, 0x00000000},
3809 { 0x00001178, 0x00000000 }, 1846 {0x00001138, 0x00000000},
3810 { 0x000011b8, 0x00000000 }, 1847 {0x00001178, 0x00000000},
3811 { 0x000011f8, 0x00000000 }, 1848 {0x000011b8, 0x00000000},
3812 { 0x00001238, 0x00000000 }, 1849 {0x000011f8, 0x00000000},
3813 { 0x00001278, 0x00000000 }, 1850 {0x00001238, 0x00000000},
3814 { 0x000012b8, 0x00000000 }, 1851 {0x00001278, 0x00000000},
3815 { 0x000012f8, 0x00000000 }, 1852 {0x000012b8, 0x00000000},
3816 { 0x00001338, 0x00000000 }, 1853 {0x000012f8, 0x00000000},
3817 { 0x00001378, 0x00000000 }, 1854 {0x00001338, 0x00000000},
3818 { 0x000013b8, 0x00000000 }, 1855 {0x00001378, 0x00000000},
3819 { 0x000013f8, 0x00000000 }, 1856 {0x000013b8, 0x00000000},
3820 { 0x00001438, 0x00000000 }, 1857 {0x000013f8, 0x00000000},
3821 { 0x00001478, 0x00000000 }, 1858 {0x00001438, 0x00000000},
3822 { 0x000014b8, 0x00000000 }, 1859 {0x00001478, 0x00000000},
3823 { 0x000014f8, 0x00000000 }, 1860 {0x000014b8, 0x00000000},
3824 { 0x00001538, 0x00000000 }, 1861 {0x000014f8, 0x00000000},
3825 { 0x00001578, 0x00000000 }, 1862 {0x00001538, 0x00000000},
3826 { 0x000015b8, 0x00000000 }, 1863 {0x00001578, 0x00000000},
3827 { 0x000015f8, 0x00000000 }, 1864 {0x000015b8, 0x00000000},
3828 { 0x00001638, 0x00000000 }, 1865 {0x000015f8, 0x00000000},
3829 { 0x00001678, 0x00000000 }, 1866 {0x00001638, 0x00000000},
3830 { 0x000016b8, 0x00000000 }, 1867 {0x00001678, 0x00000000},
3831 { 0x000016f8, 0x00000000 }, 1868 {0x000016b8, 0x00000000},
3832 { 0x00001738, 0x00000000 }, 1869 {0x000016f8, 0x00000000},
3833 { 0x00001778, 0x00000000 }, 1870 {0x00001738, 0x00000000},
3834 { 0x000017b8, 0x00000000 }, 1871 {0x00001778, 0x00000000},
3835 { 0x000017f8, 0x00000000 }, 1872 {0x000017b8, 0x00000000},
3836 { 0x0000103c, 0x00000000 }, 1873 {0x000017f8, 0x00000000},
3837 { 0x0000107c, 0x00000000 }, 1874 {0x0000103c, 0x00000000},
3838 { 0x000010bc, 0x00000000 }, 1875 {0x0000107c, 0x00000000},
3839 { 0x000010fc, 0x00000000 }, 1876 {0x000010bc, 0x00000000},
3840 { 0x0000113c, 0x00000000 }, 1877 {0x000010fc, 0x00000000},
3841 { 0x0000117c, 0x00000000 }, 1878 {0x0000113c, 0x00000000},
3842 { 0x000011bc, 0x00000000 }, 1879 {0x0000117c, 0x00000000},
3843 { 0x000011fc, 0x00000000 }, 1880 {0x000011bc, 0x00000000},
3844 { 0x0000123c, 0x00000000 }, 1881 {0x000011fc, 0x00000000},
3845 { 0x0000127c, 0x00000000 }, 1882 {0x0000123c, 0x00000000},
3846 { 0x000012bc, 0x00000000 }, 1883 {0x0000127c, 0x00000000},
3847 { 0x000012fc, 0x00000000 }, 1884 {0x000012bc, 0x00000000},
3848 { 0x0000133c, 0x00000000 }, 1885 {0x000012fc, 0x00000000},
3849 { 0x0000137c, 0x00000000 }, 1886 {0x0000133c, 0x00000000},
3850 { 0x000013bc, 0x00000000 }, 1887 {0x0000137c, 0x00000000},
3851 { 0x000013fc, 0x00000000 }, 1888 {0x000013bc, 0x00000000},
3852 { 0x0000143c, 0x00000000 }, 1889 {0x000013fc, 0x00000000},
3853 { 0x0000147c, 0x00000000 }, 1890 {0x0000143c, 0x00000000},
3854 { 0x00004030, 0x00000002 }, 1891 {0x0000147c, 0x00000000},
3855 { 0x0000403c, 0x00000002 }, 1892 {0x00004030, 0x00000002},
3856 { 0x00004024, 0x0000001f }, 1893 {0x0000403c, 0x00000002},
3857 { 0x00004060, 0x00000000 }, 1894 {0x00004024, 0x0000001f},
3858 { 0x00004064, 0x00000000 }, 1895 {0x00004060, 0x00000000},
3859 { 0x00007010, 0x00000033 }, 1896 {0x00004064, 0x00000000},
3860 { 0x00007020, 0x00000000 }, 1897 {0x00007010, 0x00000033},
3861 { 0x00007034, 0x00000002 }, 1898 {0x00007020, 0x00000000},
3862 { 0x00007038, 0x000004c2 }, 1899 {0x00007034, 0x00000002},
3863 { 0x00008004, 0x00000000 }, 1900 {0x00007038, 0x000004c2},
3864 { 0x00008008, 0x00000000 }, 1901 {0x00008004, 0x00000000},
3865 { 0x0000800c, 0x00000000 }, 1902 {0x00008008, 0x00000000},
3866 { 0x00008018, 0x00000700 }, 1903 {0x0000800c, 0x00000000},
3867 { 0x00008020, 0x00000000 }, 1904 {0x00008018, 0x00000700},
3868 { 0x00008038, 0x00000000 }, 1905 {0x00008020, 0x00000000},
3869 { 0x0000803c, 0x00000000 }, 1906 {0x00008038, 0x00000000},
3870 { 0x00008048, 0x40000000 }, 1907 {0x0000803c, 0x00000000},
3871 { 0x00008054, 0x00000000 }, 1908 {0x00008048, 0x40000000},
3872 { 0x00008058, 0x00000000 }, 1909 {0x00008054, 0x00000000},
3873 { 0x0000805c, 0x000fc78f }, 1910 {0x00008058, 0x00000000},
3874 { 0x00008060, 0x0000000f }, 1911 {0x0000805c, 0x000fc78f},
3875 { 0x00008064, 0x00000000 }, 1912 {0x00008060, 0x0000000f},
3876 { 0x00008070, 0x00000000 }, 1913 {0x00008064, 0x00000000},
3877 { 0x000080c0, 0x2a80001a }, 1914 {0x00008070, 0x00000000},
3878 { 0x000080c4, 0x05dc01e0 }, 1915 {0x000080c0, 0x2a80001a},
3879 { 0x000080c8, 0x1f402710 }, 1916 {0x000080c4, 0x05dc01e0},
3880 { 0x000080cc, 0x01f40000 }, 1917 {0x000080c8, 0x1f402710},
3881 { 0x000080d0, 0x00001e00 }, 1918 {0x000080cc, 0x01f40000},
3882 { 0x000080d4, 0x00000000 }, 1919 {0x000080d0, 0x00001e00},
3883 { 0x000080d8, 0x00400000 }, 1920 {0x000080d4, 0x00000000},
3884 { 0x000080e0, 0xffffffff }, 1921 {0x000080d8, 0x00400000},
3885 { 0x000080e4, 0x0000ffff }, 1922 {0x000080e0, 0xffffffff},
3886 { 0x000080e8, 0x003f3f3f }, 1923 {0x000080e4, 0x0000ffff},
3887 { 0x000080ec, 0x00000000 }, 1924 {0x000080e8, 0x003f3f3f},
3888 { 0x000080f0, 0x00000000 }, 1925 {0x000080ec, 0x00000000},
3889 { 0x000080f4, 0x00000000 }, 1926 {0x000080f0, 0x00000000},
3890 { 0x000080f8, 0x00000000 }, 1927 {0x000080f4, 0x00000000},
3891 { 0x000080fc, 0x00020000 }, 1928 {0x000080f8, 0x00000000},
3892 { 0x00008100, 0x00020000 }, 1929 {0x000080fc, 0x00020000},
3893 { 0x00008104, 0x00000001 }, 1930 {0x00008100, 0x00020000},
3894 { 0x00008108, 0x00000052 }, 1931 {0x00008104, 0x00000001},
3895 { 0x0000810c, 0x00000000 }, 1932 {0x00008108, 0x00000052},
3896 { 0x00008110, 0x00000168 }, 1933 {0x0000810c, 0x00000000},
3897 { 0x00008118, 0x000100aa }, 1934 {0x00008110, 0x00000168},
3898 { 0x0000811c, 0x00003210 }, 1935 {0x00008118, 0x000100aa},
3899 { 0x00008124, 0x00000000 }, 1936 {0x0000811c, 0x00003210},
3900 { 0x00008128, 0x00000000 }, 1937 {0x00008124, 0x00000000},
3901 { 0x0000812c, 0x00000000 }, 1938 {0x00008128, 0x00000000},
3902 { 0x00008130, 0x00000000 }, 1939 {0x0000812c, 0x00000000},
3903 { 0x00008134, 0x00000000 }, 1940 {0x00008130, 0x00000000},
3904 { 0x00008138, 0x00000000 }, 1941 {0x00008134, 0x00000000},
3905 { 0x0000813c, 0x00000000 }, 1942 {0x00008138, 0x00000000},
3906 { 0x00008144, 0xffffffff }, 1943 {0x0000813c, 0x00000000},
3907 { 0x00008168, 0x00000000 }, 1944 {0x00008144, 0xffffffff},
3908 { 0x0000816c, 0x00000000 }, 1945 {0x00008168, 0x00000000},
3909 { 0x00008170, 0x18487320 }, 1946 {0x0000816c, 0x00000000},
3910 { 0x00008174, 0xfaa4fa50 }, 1947 {0x00008170, 0x18487320},
3911 { 0x00008178, 0x00000100 }, 1948 {0x00008174, 0xfaa4fa50},
3912 { 0x0000817c, 0x00000000 }, 1949 {0x00008178, 0x00000100},
3913 { 0x000081c0, 0x00000000 }, 1950 {0x0000817c, 0x00000000},
3914 { 0x000081c4, 0x00000000 }, 1951 {0x000081c0, 0x00000000},
3915 { 0x000081d4, 0x00000000 }, 1952 {0x000081c4, 0x00000000},
3916 { 0x000081ec, 0x00000000 }, 1953 {0x000081d4, 0x00000000},
3917 { 0x000081f0, 0x00000000 }, 1954 {0x000081ec, 0x00000000},
3918 { 0x000081f4, 0x00000000 }, 1955 {0x000081f0, 0x00000000},
3919 { 0x000081f8, 0x00000000 }, 1956 {0x000081f4, 0x00000000},
3920 { 0x000081fc, 0x00000000 }, 1957 {0x000081f8, 0x00000000},
3921 { 0x00008200, 0x00000000 }, 1958 {0x000081fc, 0x00000000},
3922 { 0x00008204, 0x00000000 }, 1959 {0x00008200, 0x00000000},
3923 { 0x00008208, 0x00000000 }, 1960 {0x00008204, 0x00000000},
3924 { 0x0000820c, 0x00000000 }, 1961 {0x00008208, 0x00000000},
3925 { 0x00008210, 0x00000000 }, 1962 {0x0000820c, 0x00000000},
3926 { 0x00008214, 0x00000000 }, 1963 {0x00008210, 0x00000000},
3927 { 0x00008218, 0x00000000 }, 1964 {0x00008214, 0x00000000},
3928 { 0x0000821c, 0x00000000 }, 1965 {0x00008218, 0x00000000},
3929 { 0x00008220, 0x00000000 }, 1966 {0x0000821c, 0x00000000},
3930 { 0x00008224, 0x00000000 }, 1967 {0x00008220, 0x00000000},
3931 { 0x00008228, 0x00000000 }, 1968 {0x00008224, 0x00000000},
3932 { 0x0000822c, 0x00000000 }, 1969 {0x00008228, 0x00000000},
3933 { 0x00008230, 0x00000000 }, 1970 {0x0000822c, 0x00000000},
3934 { 0x00008234, 0x00000000 }, 1971 {0x00008230, 0x00000000},
3935 { 0x00008238, 0x00000000 }, 1972 {0x00008234, 0x00000000},
3936 { 0x0000823c, 0x00000000 }, 1973 {0x00008238, 0x00000000},
3937 { 0x00008240, 0x00100000 }, 1974 {0x0000823c, 0x00000000},
3938 { 0x00008244, 0x0010f400 }, 1975 {0x00008240, 0x00100000},
3939 { 0x00008248, 0x00000100 }, 1976 {0x00008244, 0x0010f400},
3940 { 0x0000824c, 0x0001e800 }, 1977 {0x00008248, 0x00000100},
3941 { 0x00008250, 0x00000000 }, 1978 {0x0000824c, 0x0001e800},
3942 { 0x00008254, 0x00000000 }, 1979 {0x00008250, 0x00000000},
3943 { 0x00008258, 0x00000000 }, 1980 {0x00008254, 0x00000000},
3944 { 0x0000825c, 0x400000ff }, 1981 {0x00008258, 0x00000000},
3945 { 0x00008260, 0x00080922 }, 1982 {0x0000825c, 0x400000ff},
3946 { 0x00008264, 0x88a00010 }, 1983 {0x00008260, 0x00080922},
3947 { 0x00008270, 0x00000000 }, 1984 {0x00008264, 0x88a00010},
3948 { 0x00008274, 0x40000000 }, 1985 {0x00008270, 0x00000000},
3949 { 0x00008278, 0x003e4180 }, 1986 {0x00008274, 0x40000000},
3950 { 0x0000827c, 0x00000000 }, 1987 {0x00008278, 0x003e4180},
3951 { 0x00008284, 0x0000002c }, 1988 {0x0000827c, 0x00000000},
3952 { 0x00008288, 0x0000002c }, 1989 {0x00008284, 0x0000002c},
3953 { 0x0000828c, 0x000000ff }, 1990 {0x00008288, 0x0000002c},
3954 { 0x00008294, 0x00000000 }, 1991 {0x0000828c, 0x000000ff},
3955 { 0x00008298, 0x00000000 }, 1992 {0x00008294, 0x00000000},
3956 { 0x0000829c, 0x00000000 }, 1993 {0x00008298, 0x00000000},
3957 { 0x00008300, 0x00000040 }, 1994 {0x0000829c, 0x00000000},
3958 { 0x00008314, 0x00000000 }, 1995 {0x00008300, 0x00000040},
3959 { 0x00008328, 0x00000000 }, 1996 {0x00008314, 0x00000000},
3960 { 0x0000832c, 0x00000007 }, 1997 {0x00008328, 0x00000000},
3961 { 0x00008330, 0x00000302 }, 1998 {0x0000832c, 0x00000007},
3962 { 0x00008334, 0x00000e00 }, 1999 {0x00008330, 0x00000302},
3963 { 0x00008338, 0x00ff0000 }, 2000 {0x00008334, 0x00000e00},
3964 { 0x0000833c, 0x00000000 }, 2001 {0x00008338, 0x00ff0000},
3965 { 0x00008340, 0x000107ff }, 2002 {0x0000833c, 0x00000000},
3966 { 0x00008344, 0x01c81043 }, 2003 {0x00008340, 0x000107ff},
3967 { 0x00008360, 0xffffffff }, 2004 {0x00008344, 0x01c81043},
3968 { 0x00008364, 0xffffffff }, 2005 {0x00008360, 0xffffffff},
3969 { 0x00008368, 0x00000000 }, 2006 {0x00008364, 0xffffffff},
3970 { 0x00008370, 0x00000000 }, 2007 {0x00008368, 0x00000000},
3971 { 0x00008374, 0x000000ff }, 2008 {0x00008370, 0x00000000},
3972 { 0x00008378, 0x00000000 }, 2009 {0x00008374, 0x000000ff},
3973 { 0x0000837c, 0x00000000 }, 2010 {0x00008378, 0x00000000},
3974 { 0x00008380, 0xffffffff }, 2011 {0x0000837c, 0x00000000},
3975 { 0x00008384, 0xffffffff }, 2012 {0x00008380, 0xffffffff},
3976 { 0x00008390, 0x0fffffff }, 2013 {0x00008384, 0xffffffff},
3977 { 0x00008394, 0x0fffffff }, 2014 {0x00008390, 0x0fffffff},
3978 { 0x00008398, 0x00000000 }, 2015 {0x00008394, 0x0fffffff},
3979 { 0x0000839c, 0x00000000 }, 2016 {0x00008398, 0x00000000},
3980 { 0x000083a0, 0x00000000 }, 2017 {0x0000839c, 0x00000000},
3981 { 0x00009808, 0x00000000 }, 2018 {0x000083a0, 0x00000000},
3982 { 0x0000980c, 0xafe68e30 }, 2019 {0x00009808, 0x00000000},
3983 { 0x00009810, 0xfd14e000 }, 2020 {0x0000980c, 0xafe68e30},
3984 { 0x00009814, 0x9c0a9f6b }, 2021 {0x00009810, 0xfd14e000},
3985 { 0x0000981c, 0x00000000 }, 2022 {0x00009814, 0x9c0a9f6b},
3986 { 0x0000982c, 0x0000a000 }, 2023 {0x0000981c, 0x00000000},
3987 { 0x00009830, 0x00000000 }, 2024 {0x0000982c, 0x0000a000},
3988 { 0x0000983c, 0x00200400 }, 2025 {0x00009830, 0x00000000},
3989 { 0x0000984c, 0x0040233c }, 2026 {0x0000983c, 0x00200400},
3990 { 0x0000a84c, 0x0040233c }, 2027 {0x0000984c, 0x0040233c},
3991 { 0x00009854, 0x00000044 }, 2028 {0x0000a84c, 0x0040233c},
3992 { 0x00009900, 0x00000000 }, 2029 {0x00009854, 0x00000044},
3993 { 0x00009904, 0x00000000 }, 2030 {0x00009900, 0x00000000},
3994 { 0x00009908, 0x00000000 }, 2031 {0x00009904, 0x00000000},
3995 { 0x0000990c, 0x00000000 }, 2032 {0x00009908, 0x00000000},
3996 { 0x00009910, 0x10002310 }, 2033 {0x0000990c, 0x00000000},
3997 { 0x0000991c, 0x10000fff }, 2034 {0x00009910, 0x10002310},
3998 { 0x00009920, 0x04900000 }, 2035 {0x0000991c, 0x10000fff},
3999 { 0x0000a920, 0x04900000 }, 2036 {0x00009920, 0x04900000},
4000 { 0x00009928, 0x00000001 }, 2037 {0x0000a920, 0x04900000},
4001 { 0x0000992c, 0x00000004 }, 2038 {0x00009928, 0x00000001},
4002 { 0x00009930, 0x00000000 }, 2039 {0x0000992c, 0x00000004},
4003 { 0x0000a930, 0x00000000 }, 2040 {0x00009930, 0x00000000},
4004 { 0x00009934, 0x1e1f2022 }, 2041 {0x0000a930, 0x00000000},
4005 { 0x00009938, 0x0a0b0c0d }, 2042 {0x00009934, 0x1e1f2022},
4006 { 0x0000993c, 0x00000000 }, 2043 {0x00009938, 0x0a0b0c0d},
4007 { 0x00009948, 0x9280c00a }, 2044 {0x0000993c, 0x00000000},
4008 { 0x0000994c, 0x00020028 }, 2045 {0x00009948, 0x9280c00a},
4009 { 0x00009954, 0x5f3ca3de }, 2046 {0x0000994c, 0x00020028},
4010 { 0x00009958, 0x0108ecff }, 2047 {0x00009954, 0x5f3ca3de},
4011 { 0x00009940, 0x14750604 }, 2048 {0x00009958, 0x0108ecff},
4012 { 0x0000c95c, 0x004b6a8e }, 2049 {0x00009940, 0x14750604},
4013 { 0x00009970, 0x990bb514 }, 2050 {0x0000c95c, 0x004b6a8e},
4014 { 0x00009974, 0x00000000 }, 2051 {0x00009970, 0x990bb514},
4015 { 0x00009978, 0x00000001 }, 2052 {0x00009974, 0x00000000},
4016 { 0x0000997c, 0x00000000 }, 2053 {0x00009978, 0x00000001},
4017 { 0x000099a0, 0x00000000 }, 2054 {0x0000997c, 0x00000000},
4018 { 0x000099a4, 0x00000001 }, 2055 {0x000099a0, 0x00000000},
4019 { 0x000099a8, 0x201fff00 }, 2056 {0x000099a4, 0x00000001},
4020 { 0x000099ac, 0x0c6f0000 }, 2057 {0x000099a8, 0x201fff00},
4021 { 0x000099b0, 0x03051000 }, 2058 {0x000099ac, 0x0c6f0000},
4022 { 0x000099b4, 0x00000820 }, 2059 {0x000099b0, 0x03051000},
4023 { 0x000099c4, 0x06336f77 }, 2060 {0x000099b4, 0x00000820},
4024 { 0x000099c8, 0x6af6532f }, 2061 {0x000099c4, 0x06336f77},
4025 { 0x000099cc, 0x08f186c8 }, 2062 {0x000099c8, 0x6af6532f},
4026 { 0x000099d0, 0x00046384 }, 2063 {0x000099cc, 0x08f186c8},
4027 { 0x000099dc, 0x00000000 }, 2064 {0x000099d0, 0x00046384},
4028 { 0x000099e0, 0x00000000 }, 2065 {0x000099dc, 0x00000000},
4029 { 0x000099e4, 0xaaaaaaaa }, 2066 {0x000099e0, 0x00000000},
4030 { 0x000099e8, 0x3c466478 }, 2067 {0x000099e4, 0xaaaaaaaa},
4031 { 0x000099ec, 0x0cc80caa }, 2068 {0x000099e8, 0x3c466478},
4032 { 0x000099f0, 0x00000000 }, 2069 {0x000099ec, 0x0cc80caa},
4033 { 0x000099fc, 0x00001042 }, 2070 {0x000099f0, 0x00000000},
4034 { 0x0000a208, 0x803e4788 }, 2071 {0x000099fc, 0x00001042},
4035 { 0x0000a210, 0x4080a333 }, 2072 {0x0000a208, 0x803e4788},
4036 { 0x0000a214, 0x40206c10 }, 2073 {0x0000a210, 0x4080a333},
4037 { 0x0000a218, 0x009c4060 }, 2074 {0x0000a214, 0x40206c10},
4038 { 0x0000a220, 0x01834061 }, 2075 {0x0000a218, 0x009c4060},
4039 { 0x0000a224, 0x00000400 }, 2076 {0x0000a220, 0x01834061},
4040 { 0x0000a228, 0x000003b5 }, 2077 {0x0000a224, 0x00000400},
4041 { 0x0000a22c, 0x233f7180 }, 2078 {0x0000a228, 0x000003b5},
4042 { 0x0000a234, 0x20202020 }, 2079 {0x0000a22c, 0x233f7180},
4043 { 0x0000a238, 0x20202020 }, 2080 {0x0000a234, 0x20202020},
4044 { 0x0000a23c, 0x13c889af }, 2081 {0x0000a238, 0x20202020},
4045 { 0x0000a240, 0x38490a20 }, 2082 {0x0000a23c, 0x13c889af},
4046 { 0x0000a244, 0x00000000 }, 2083 {0x0000a240, 0x38490a20},
4047 { 0x0000a248, 0xfffffffc }, 2084 {0x0000a244, 0x00000000},
4048 { 0x0000a24c, 0x00000000 }, 2085 {0x0000a248, 0xfffffffc},
4049 { 0x0000a254, 0x00000000 }, 2086 {0x0000a24c, 0x00000000},
4050 { 0x0000a258, 0x0cdbd380 }, 2087 {0x0000a254, 0x00000000},
4051 { 0x0000a25c, 0x0f0f0f01 }, 2088 {0x0000a258, 0x0cdbd380},
4052 { 0x0000a260, 0xdfa91f01 }, 2089 {0x0000a25c, 0x0f0f0f01},
4053 { 0x0000a264, 0x00418a11 }, 2090 {0x0000a260, 0xdfa91f01},
4054 { 0x0000b264, 0x00418a11 }, 2091 {0x0000a264, 0x00418a11},
4055 { 0x0000a268, 0x00000000 }, 2092 {0x0000b264, 0x00418a11},
4056 { 0x0000a26c, 0x0e79e5c6 }, 2093 {0x0000a268, 0x00000000},
4057 { 0x0000b26c, 0x0e79e5c6 }, 2094 {0x0000a26c, 0x0e79e5c6},
4058 { 0x0000d270, 0x00820820 }, 2095 {0x0000b26c, 0x0e79e5c6},
4059 { 0x0000a278, 0x1ce739ce }, 2096 {0x0000d270, 0x00820820},
4060 { 0x0000a27c, 0x050701ce }, 2097 {0x0000a278, 0x1ce739ce},
4061 { 0x0000d35c, 0x07ffffef }, 2098 {0x0000a27c, 0x050701ce},
4062 { 0x0000d360, 0x0fffffe7 }, 2099 {0x0000d35c, 0x07ffffef},
4063 { 0x0000d364, 0x17ffffe5 }, 2100 {0x0000d360, 0x0fffffe7},
4064 { 0x0000d368, 0x1fffffe4 }, 2101 {0x0000d364, 0x17ffffe5},
4065 { 0x0000d36c, 0x37ffffe3 }, 2102 {0x0000d368, 0x1fffffe4},
4066 { 0x0000d370, 0x3fffffe3 }, 2103 {0x0000d36c, 0x37ffffe3},
4067 { 0x0000d374, 0x57ffffe3 }, 2104 {0x0000d370, 0x3fffffe3},
4068 { 0x0000d378, 0x5fffffe2 }, 2105 {0x0000d374, 0x57ffffe3},
4069 { 0x0000d37c, 0x7fffffe2 }, 2106 {0x0000d378, 0x5fffffe2},
4070 { 0x0000d380, 0x7f3c7bba }, 2107 {0x0000d37c, 0x7fffffe2},
4071 { 0x0000d384, 0xf3307ff0 }, 2108 {0x0000d380, 0x7f3c7bba},
4072 { 0x0000a388, 0x0c000000 }, 2109 {0x0000d384, 0xf3307ff0},
4073 { 0x0000a38c, 0x20202020 }, 2110 {0x0000a388, 0x0c000000},
4074 { 0x0000a390, 0x20202020 }, 2111 {0x0000a38c, 0x20202020},
4075 { 0x0000a394, 0x1ce739ce }, 2112 {0x0000a390, 0x20202020},
4076 { 0x0000a398, 0x000001ce }, 2113 {0x0000a394, 0x1ce739ce},
4077 { 0x0000b398, 0x000001ce }, 2114 {0x0000a398, 0x000001ce},
4078 { 0x0000a39c, 0x00000001 }, 2115 {0x0000b398, 0x000001ce},
4079 { 0x0000a3c8, 0x00000246 }, 2116 {0x0000a39c, 0x00000001},
4080 { 0x0000a3cc, 0x20202020 }, 2117 {0x0000a3c8, 0x00000246},
4081 { 0x0000a3d0, 0x20202020 }, 2118 {0x0000a3cc, 0x20202020},
4082 { 0x0000a3d4, 0x20202020 }, 2119 {0x0000a3d0, 0x20202020},
4083 { 0x0000a3dc, 0x1ce739ce }, 2120 {0x0000a3d4, 0x20202020},
4084 { 0x0000a3e0, 0x000001ce }, 2121 {0x0000a3dc, 0x1ce739ce},
4085 { 0x0000a3e4, 0x00000000 }, 2122 {0x0000a3e0, 0x000001ce},
4086 { 0x0000a3e8, 0x18c43433 }, 2123 {0x0000a3e4, 0x00000000},
4087 { 0x0000a3ec, 0x00f70081 }, 2124 {0x0000a3e8, 0x18c43433},
4088 { 0x0000a3f0, 0x01036a1e }, 2125 {0x0000a3ec, 0x00f70081},
4089 { 0x0000a3f4, 0x00000000 }, 2126 {0x0000a3f0, 0x01036a1e},
4090 { 0x0000b3f4, 0x00000000 }, 2127 {0x0000a3f4, 0x00000000},
4091 { 0x0000a7d8, 0x000003f1 }, 2128 {0x0000b3f4, 0x00000000},
4092 { 0x00007800, 0x00000800 }, 2129 {0x0000a7d8, 0x000003f1},
4093 { 0x00007804, 0x6c35ffd2 }, 2130 {0x00007800, 0x00000800},
4094 { 0x00007808, 0x6db6c000 }, 2131 {0x00007804, 0x6c35ffd2},
4095 { 0x0000780c, 0x6db6cb30 }, 2132 {0x00007808, 0x6db6c000},
4096 { 0x00007810, 0x6db6cb6c }, 2133 {0x0000780c, 0x6db6cb30},
4097 { 0x00007814, 0x0501e200 }, 2134 {0x00007810, 0x6db6cb6c},
4098 { 0x00007818, 0x0094128d }, 2135 {0x00007814, 0x0501e200},
4099 { 0x0000781c, 0x976ee392 }, 2136 {0x00007818, 0x0094128d},
4100 { 0x00007820, 0xf75ff6fc }, 2137 {0x0000781c, 0x976ee392},
4101 { 0x00007824, 0x00040000 }, 2138 {0x00007820, 0xf75ff6fc},
4102 { 0x00007828, 0xdb003012 }, 2139 {0x00007824, 0x00040000},
4103 { 0x0000782c, 0x04924914 }, 2140 {0x00007828, 0xdb003012},
4104 { 0x00007830, 0x21084210 }, 2141 {0x0000782c, 0x04924914},
4105 { 0x00007834, 0x00140000 }, 2142 {0x00007830, 0x21084210},
4106 { 0x00007838, 0x0e4548d8 }, 2143 {0x00007834, 0x00140000},
4107 { 0x0000783c, 0x54214514 }, 2144 {0x00007838, 0x0e4548d8},
4108 { 0x00007840, 0x02025830 }, 2145 {0x0000783c, 0x54214514},
4109 { 0x00007844, 0x71c0d388 }, 2146 {0x00007840, 0x02025830},
4110 { 0x00007848, 0x934934a8 }, 2147 {0x00007844, 0x71c0d388},
4111 { 0x00007850, 0x00000000 }, 2148 {0x00007848, 0x934934a8},
4112 { 0x00007854, 0x00000800 }, 2149 {0x00007850, 0x00000000},
4113 { 0x00007858, 0x6c35ffd2 }, 2150 {0x00007854, 0x00000800},
4114 { 0x0000785c, 0x6db6c000 }, 2151 {0x00007858, 0x6c35ffd2},
4115 { 0x00007860, 0x6db6cb30 }, 2152 {0x0000785c, 0x6db6c000},
4116 { 0x00007864, 0x6db6cb6c }, 2153 {0x00007860, 0x6db6cb30},
4117 { 0x00007868, 0x0501e200 }, 2154 {0x00007864, 0x6db6cb6c},
4118 { 0x0000786c, 0x0094128d }, 2155 {0x00007868, 0x0501e200},
4119 { 0x00007870, 0x976ee392 }, 2156 {0x0000786c, 0x0094128d},
4120 { 0x00007874, 0xf75ff6fc }, 2157 {0x00007870, 0x976ee392},
4121 { 0x00007878, 0x00040000 }, 2158 {0x00007874, 0xf75ff6fc},
4122 { 0x0000787c, 0xdb003012 }, 2159 {0x00007878, 0x00040000},
4123 { 0x00007880, 0x04924914 }, 2160 {0x0000787c, 0xdb003012},
4124 { 0x00007884, 0x21084210 }, 2161 {0x00007880, 0x04924914},
4125 { 0x00007888, 0x001b6db0 }, 2162 {0x00007884, 0x21084210},
4126 { 0x0000788c, 0x00376b63 }, 2163 {0x00007888, 0x001b6db0},
4127 { 0x00007890, 0x06db6db6 }, 2164 {0x0000788c, 0x00376b63},
4128 { 0x00007894, 0x006d8000 }, 2165 {0x00007890, 0x06db6db6},
4129 { 0x00007898, 0x48100000 }, 2166 {0x00007894, 0x006d8000},
4130 { 0x0000789c, 0x00000000 }, 2167 {0x00007898, 0x48100000},
4131 { 0x000078a0, 0x08000000 }, 2168 {0x0000789c, 0x00000000},
4132 { 0x000078a4, 0x0007ffd8 }, 2169 {0x000078a0, 0x08000000},
4133 { 0x000078a8, 0x0007ffd8 }, 2170 {0x000078a4, 0x0007ffd8},
4134 { 0x000078ac, 0x001c0020 }, 2171 {0x000078a8, 0x0007ffd8},
4135 { 0x000078b0, 0x00060aeb }, 2172 {0x000078ac, 0x001c0020},
4136 { 0x000078b4, 0x40008080 }, 2173 {0x000078b0, 0x00060aeb},
4137 { 0x000078b8, 0x2a850160 }, 2174 {0x000078b4, 0x40008080},
2175 {0x000078b8, 0x2a850160},
4138}; 2176};
4139 2177
4140/* 2178static const u32 ar9287Common_normal_cck_fir_coeff_9287_1_1[][2] = {
4141 * For Japanese regulatory requirements, 2484 MHz requires the following three 2179 /* Addr allmodes */
4142 * registers be programmed differently from the channel between 2412 and 2180 {0x0000a1f4, 0x00fffeff},
4143 * 2472 MHz. 2181 {0x0000a1f8, 0x00f5f9ff},
4144 */ 2182 {0x0000a1fc, 0xb79f6427},
4145static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
4146 { 0x0000a1f4, 0x00fffeff },
4147 { 0x0000a1f8, 0x00f5f9ff },
4148 { 0x0000a1fc, 0xb79f6427 },
4149}; 2183};
4150 2184
4151static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = { 2185static const u32 ar9287Common_japan_2484_cck_fir_coeff_9287_1_1[][2] = {
4152 { 0x0000a1f4, 0x00000000 }, 2186 /* Addr allmodes */
4153 { 0x0000a1f8, 0xefff0301 }, 2187 {0x0000a1f4, 0x00000000},
4154 { 0x0000a1fc, 0xca9228ee }, 2188 {0x0000a1f8, 0xefff0301},
2189 {0x0000a1fc, 0xca9228ee},
4155}; 2190};
4156 2191
4157static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = { 2192static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
4158 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2193 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
4159 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2194 {0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002},
4160 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, 2195 {0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004},
4161 { 0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004 }, 2196 {0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a},
4162 { 0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a }, 2197 {0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c},
4163 { 0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c }, 2198 {0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b},
4164 { 0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b }, 2199 {0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a},
4165 { 0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a }, 2200 {0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a},
4166 { 0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a }, 2201 {0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a},
4167 { 0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a }, 2202 {0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a},
4168 { 0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a }, 2203 {0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a},
4169 { 0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a }, 2204 {0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a},
4170 { 0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a }, 2205 {0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a},
4171 { 0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a }, 2206 {0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a},
4172 { 0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a }, 2207 {0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c},
4173 { 0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c }, 2208 {0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc},
4174 { 0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc }, 2209 {0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4},
4175 { 0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4 }, 2210 {0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc},
4176 { 0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc }, 2211 {0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede},
4177 { 0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede }, 2212 {0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e},
4178 { 0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e }, 2213 {0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e},
4179 { 0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e }, 2214 {0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e},
4180 { 0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e }, 2215 {0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062, 0x00000062},
4181 { 0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062, 0x00000062 }, 2216 {0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064, 0x00004064},
4182 { 0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064, 0x00004064 }, 2217 {0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4, 0x000080a4},
4183 { 0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4, 0x000080a4 }, 2218 {0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa, 0x0000c0aa},
4184 { 0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa, 0x0000c0aa }, 2219 {0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac, 0x000100ac},
4185 { 0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac, 0x000100ac }, 2220 {0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4, 0x000140b4},
4186 { 0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4, 0x000140b4 }, 2221 {0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4, 0x000180f4},
4187 { 0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4, 0x000180f4 }, 2222 {0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134, 0x0001c134},
4188 { 0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134, 0x0001c134 }, 2223 {0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174, 0x00020174},
4189 { 0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174, 0x00020174 }, 2224 {0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c, 0x0002417c},
4190 { 0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c, 0x0002417c }, 2225 {0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e, 0x0002817e},
4191 { 0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e, 0x0002817e }, 2226 {0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be, 0x0002c1be},
4192 { 0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be, 0x0002c1be }, 2227 {0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4193 { 0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2228 {0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4194 { 0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2229 {0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4195 { 0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2230 {0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4196 { 0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2231 {0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4197 { 0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2232 {0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4198 { 0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2233 {0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4199 { 0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2234 {0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4200 { 0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2235 {0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4201 { 0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2236 {0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
4202 { 0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, 2237 {0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000},
4203 { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
4204}; 2238};
4205 2239
4206static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = { 2240static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
4207 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2241 {0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120},
4208 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, 2242 {0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124},
4209 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, 2243 {0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128},
4210 { 0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, 2244 {0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c},
4211 { 0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, 2245 {0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130},
4212 { 0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, 2246 {0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194},
4213 { 0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, 2247 {0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198},
4214 { 0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, 2248 {0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c},
4215 { 0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, 2249 {0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210},
4216 { 0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, 2250 {0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284},
4217 { 0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, 2251 {0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288},
4218 { 0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, 2252 {0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c},
4219 { 0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, 2253 {0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290},
4220 { 0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, 2254 {0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294},
4221 { 0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, 2255 {0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0},
4222 { 0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, 2256 {0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4},
4223 { 0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, 2257 {0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8},
4224 { 0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, 2258 {0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac},
4225 { 0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, 2259 {0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0},
4226 { 0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, 2260 {0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4},
4227 { 0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, 2261 {0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8},
4228 { 0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, 2262 {0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4},
4229 { 0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, 2263 {0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708},
4230 { 0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, 2264 {0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c},
4231 { 0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, 2265 {0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710},
4232 { 0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, 2266 {0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04},
4233 { 0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, 2267 {0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08},
4234 { 0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, 2268 {0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c},
4235 { 0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, 2269 {0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10},
4236 { 0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, 2270 {0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14},
4237 { 0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, 2271 {0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18},
4238 { 0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, 2272 {0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c},
4239 { 0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, 2273 {0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90},
4240 { 0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, 2274 {0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94},
4241 { 0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, 2275 {0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98},
4242 { 0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, 2276 {0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4},
4243 { 0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, 2277 {0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8},
4244 { 0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, 2278 {0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04},
4245 { 0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, 2279 {0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08},
4246 { 0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, 2280 {0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c},
4247 { 0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, 2281 {0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10},
4248 { 0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, 2282 {0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14},
4249 { 0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, 2283 {0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18},
4250 { 0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, 2284 {0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c},
4251 { 0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, 2285 {0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90},
4252 { 0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, 2286 {0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18},
4253 { 0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, 2287 {0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24},
4254 { 0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, 2288 {0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28},
4255 { 0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, 2289 {0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314},
4256 { 0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, 2290 {0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318},
4257 { 0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, 2291 {0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c},
4258 { 0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, 2292 {0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390},
4259 { 0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, 2293 {0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394},
4260 { 0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, 2294 {0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398},
4261 { 0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, 2295 {0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4},
4262 { 0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, 2296 {0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8},
4263 { 0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, 2297 {0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac},
4264 { 0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, 2298 {0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0},
4265 { 0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, 2299 {0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380},
4266 { 0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, 2300 {0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384},
4267 { 0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, 2301 {0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388},
4268 { 0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, 2302 {0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710},
4269 { 0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, 2303 {0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714},
4270 { 0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, 2304 {0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718},
4271 { 0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, 2305 {0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10},
4272 { 0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, 2306 {0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14},
4273 { 0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, 2307 {0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18},
4274 { 0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, 2308 {0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c},
4275 { 0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, 2309 {0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90},
4276 { 0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, 2310 {0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94},
4277 { 0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, 2311 {0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c},
4278 { 0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, 2312 {0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90},
4279 { 0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, 2313 {0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94},
4280 { 0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, 2314 {0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0},
4281 { 0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, 2315 {0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4},
4282 { 0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, 2316 {0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8},
4283 { 0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, 2317 {0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac},
4284 { 0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, 2318 {0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0},
4285 { 0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, 2319 {0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4},
4286 { 0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, 2320 {0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1},
4287 { 0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, 2321 {0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5},
4288 { 0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, 2322 {0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9},
4289 { 0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, 2323 {0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad},
4290 { 0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, 2324 {0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1},
4291 { 0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, 2325 {0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5},
4292 { 0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, 2326 {0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9},
4293 { 0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, 2327 {0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5},
4294 { 0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, 2328 {0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9},
4295 { 0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, 2329 {0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd},
4296 { 0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, 2330 {0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1},
4297 { 0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, 2331 {0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5},
4298 { 0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, 2332 {0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2},
4299 { 0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, 2333 {0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6},
4300 { 0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, 2334 {0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca},
4301 { 0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, 2335 {0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce},
4302 { 0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, 2336 {0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2},
4303 { 0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, 2337 {0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6},
4304 { 0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, 2338 {0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda},
4305 { 0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, 2339 {0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7},
4306 { 0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, 2340 {0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb},
4307 { 0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, 2341 {0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf},
4308 { 0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, 2342 {0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3},
4309 { 0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, 2343 {0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7},
4310 { 0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, 2344 {0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4311 { 0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2345 {0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4312 { 0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2346 {0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4313 { 0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2347 {0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4314 { 0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2348 {0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4315 { 0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2349 {0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4316 { 0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2350 {0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4317 { 0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2351 {0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4318 { 0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2352 {0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4319 { 0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2353 {0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4320 { 0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2354 {0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4321 { 0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2355 {0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4322 { 0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2356 {0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4323 { 0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2357 {0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4324 { 0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2358 {0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4325 { 0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2359 {0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4326 { 0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2360 {0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4327 { 0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2361 {0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4328 { 0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2362 {0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4329 { 0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2363 {0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4330 { 0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2364 {0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4331 { 0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2365 {0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4332 { 0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2366 {0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4333 { 0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2367 {0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4334 { 0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2368 {0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4335 { 0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2369 {0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120},
4336 { 0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, 2370 {0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124},
4337 { 0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, 2371 {0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128},
4338 { 0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, 2372 {0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c},
4339 { 0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, 2373 {0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130},
4340 { 0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, 2374 {0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194},
4341 { 0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, 2375 {0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198},
4342 { 0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, 2376 {0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c},
4343 { 0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, 2377 {0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210},
4344 { 0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, 2378 {0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284},
4345 { 0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, 2379 {0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288},
4346 { 0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, 2380 {0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c},
4347 { 0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, 2381 {0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290},
4348 { 0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, 2382 {0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294},
4349 { 0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, 2383 {0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0},
4350 { 0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, 2384 {0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4},
4351 { 0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, 2385 {0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8},
4352 { 0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, 2386 {0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac},
4353 { 0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, 2387 {0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0},
4354 { 0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, 2388 {0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4},
4355 { 0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, 2389 {0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8},
4356 { 0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, 2390 {0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4},
4357 { 0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, 2391 {0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708},
4358 { 0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, 2392 {0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c},
4359 { 0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, 2393 {0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710},
4360 { 0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, 2394 {0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04},
4361 { 0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, 2395 {0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08},
4362 { 0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, 2396 {0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c},
4363 { 0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, 2397 {0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10},
4364 { 0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, 2398 {0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14},
4365 { 0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, 2399 {0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18},
4366 { 0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, 2400 {0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c},
4367 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, 2401 {0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90},
4368 { 0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, 2402 {0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94},
4369 { 0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, 2403 {0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98},
4370 { 0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, 2404 {0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4},
4371 { 0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, 2405 {0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8},
4372 { 0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, 2406 {0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04},
4373 { 0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, 2407 {0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08},
4374 { 0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, 2408 {0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c},
4375 { 0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, 2409 {0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10},
4376 { 0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, 2410 {0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14},
4377 { 0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, 2411 {0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18},
4378 { 0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, 2412 {0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c},
4379 { 0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, 2413 {0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90},
4380 { 0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, 2414 {0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18},
4381 { 0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, 2415 {0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24},
4382 { 0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, 2416 {0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28},
4383 { 0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, 2417 {0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314},
4384 { 0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, 2418 {0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318},
4385 { 0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, 2419 {0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c},
4386 { 0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, 2420 {0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390},
4387 { 0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, 2421 {0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394},
4388 { 0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, 2422 {0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398},
4389 { 0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, 2423 {0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4},
4390 { 0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, 2424 {0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8},
4391 { 0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, 2425 {0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac},
4392 { 0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, 2426 {0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0},
4393 { 0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, 2427 {0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380},
4394 { 0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, 2428 {0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384},
4395 { 0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, 2429 {0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388},
4396 { 0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, 2430 {0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710},
4397 { 0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, 2431 {0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714},
4398 { 0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, 2432 {0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718},
4399 { 0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, 2433 {0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10},
4400 { 0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, 2434 {0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14},
4401 { 0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, 2435 {0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18},
4402 { 0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, 2436 {0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c},
4403 { 0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, 2437 {0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90},
4404 { 0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, 2438 {0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94},
4405 { 0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, 2439 {0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c},
4406 { 0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, 2440 {0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90},
4407 { 0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, 2441 {0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94},
4408 { 0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, 2442 {0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0},
4409 { 0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, 2443 {0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4},
4410 { 0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, 2444 {0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8},
4411 { 0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, 2445 {0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac},
4412 { 0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, 2446 {0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0},
4413 { 0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, 2447 {0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4},
4414 { 0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, 2448 {0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1},
4415 { 0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, 2449 {0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5},
4416 { 0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, 2450 {0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9},
4417 { 0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, 2451 {0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad},
4418 { 0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, 2452 {0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1},
4419 { 0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, 2453 {0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5},
4420 { 0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, 2454 {0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9},
4421 { 0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, 2455 {0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5},
4422 { 0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, 2456 {0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9},
4423 { 0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, 2457 {0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd},
4424 { 0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, 2458 {0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1},
4425 { 0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, 2459 {0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5},
4426 { 0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, 2460 {0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2},
4427 { 0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, 2461 {0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6},
4428 { 0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, 2462 {0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca},
4429 { 0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, 2463 {0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce},
4430 { 0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, 2464 {0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2},
4431 { 0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, 2465 {0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6},
4432 { 0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, 2466 {0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda},
4433 { 0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, 2467 {0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7},
4434 { 0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, 2468 {0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb},
4435 { 0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, 2469 {0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf},
4436 { 0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, 2470 {0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3},
4437 { 0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, 2471 {0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7},
4438 { 0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, 2472 {0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4439 { 0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2473 {0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4440 { 0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2474 {0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4441 { 0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2475 {0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4442 { 0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2476 {0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4443 { 0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2477 {0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4444 { 0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2478 {0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4445 { 0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2479 {0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4446 { 0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2480 {0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4447 { 0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2481 {0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4448 { 0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2482 {0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4449 { 0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2483 {0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4450 { 0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2484 {0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4451 { 0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2485 {0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4452 { 0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2486 {0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4453 { 0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2487 {0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4454 { 0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2488 {0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4455 { 0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2489 {0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4456 { 0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2490 {0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4457 { 0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2491 {0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4458 { 0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2492 {0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4459 { 0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2493 {0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4460 { 0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2494 {0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4461 { 0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2495 {0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4462 { 0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2496 {0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
4463 { 0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, 2497 {0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067},
4464 { 0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, 2498 {0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067},
4465 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
4466}; 2499};
4467 2500
4468static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = { 2501static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
4469 {0x00004040, 0x9248fd00 }, 2502 /* Addr allmodes */
4470 {0x00004040, 0x24924924 }, 2503 {0x00004040, 0x9248fd00},
4471 {0x00004040, 0xa8000019 }, 2504 {0x00004040, 0x24924924},
4472 {0x00004040, 0x13160820 }, 2505 {0x00004040, 0xa8000019},
4473 {0x00004040, 0xe5980560 }, 2506 {0x00004040, 0x13160820},
4474 {0x00004040, 0xc01dcffd }, 2507 {0x00004040, 0xe5980560},
4475 {0x00004040, 0x1aaabe41 }, 2508 {0x00004040, 0xc01dcffd},
4476 {0x00004040, 0xbe105554 }, 2509 {0x00004040, 0x1aaabe41},
4477 {0x00004040, 0x00043007 }, 2510 {0x00004040, 0xbe105554},
4478 {0x00004044, 0x00000000 }, 2511 {0x00004040, 0x00043007},
2512 {0x00004044, 0x00000000},
4479}; 2513};
4480 2514
4481static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = { 2515static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
4482 {0x00004040, 0x9248fd00 }, 2516 /* Addr allmodes */
4483 {0x00004040, 0x24924924 }, 2517 {0x00004040, 0x9248fd00},
4484 {0x00004040, 0xa8000019 }, 2518 {0x00004040, 0x24924924},
4485 {0x00004040, 0x13160820 }, 2519 {0x00004040, 0xa8000019},
4486 {0x00004040, 0xe5980560 }, 2520 {0x00004040, 0x13160820},
4487 {0x00004040, 0xc01dcffc }, 2521 {0x00004040, 0xe5980560},
4488 {0x00004040, 0x1aaabe41 }, 2522 {0x00004040, 0xc01dcffc},
4489 {0x00004040, 0xbe105554 }, 2523 {0x00004040, 0x1aaabe41},
4490 {0x00004040, 0x00043007 }, 2524 {0x00004040, 0xbe105554},
4491 {0x00004044, 0x00000000 }, 2525 {0x00004040, 0x00043007},
2526 {0x00004044, 0x00000000},
4492}; 2527};
4493 2528
4494
4495/* AR9271 initialization values automaticaly created: 06/04/09 */
4496static const u32 ar9271Modes_9271[][6] = { 2529static const u32 ar9271Modes_9271[][6] = {
4497 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 2530 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
4498 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 2531 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
4499 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 2532 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
4500 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, 2533 {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
4501 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 2534 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
4502 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, 2535 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f},
4503 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 2536 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880},
4504 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 2537 {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
4505 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 2538 {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
4506 { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, 2539 {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
4507 { 0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001 }, 2540 {0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001},
4508 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 2541 {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
4509 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 2542 {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
4510 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, 2543 {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
4511 { 0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0 }, 2544 {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0},
4512 { 0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, 2545 {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
4513 { 0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, 2546 {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
4514 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, 2547 {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
4515 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 2548 {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
4516 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, 2549 {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e},
4517 { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18 }, 2550 {0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
4518 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 2551 {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
4519 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 2552 {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
4520 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 2553 {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881},
4521 { 0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310 }, 2554 {0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310},
4522 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 2555 {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
4523 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 2556 {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
4524 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, 2557 {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d},
4525 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010 }, 2558 {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010},
4526 { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2559 {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
4527 { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2560 {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
4528 { 0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c }, 2561 {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
4529 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, 2562 {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00},
4530 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 2563 {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
4531 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 2564 {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
4532 { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f }, 2565 {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
4533 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 2566 {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
4534 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 2567 {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
4535 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2568 {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
4536 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2569 {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
4537 { 0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, 2570 {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
4538 { 0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, 2571 {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
4539 { 0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, 2572 {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
4540 { 0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, 2573 {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
4541 { 0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, 2574 {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
4542 { 0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, 2575 {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
4543 { 0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, 2576 {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
4544 { 0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, 2577 {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
4545 { 0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, 2578 {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
4546 { 0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, 2579 {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
4547 { 0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, 2580 {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
4548 { 0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, 2581 {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
4549 { 0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, 2582 {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
4550 { 0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, 2583 {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
4551 { 0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, 2584 {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
4552 { 0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, 2585 {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
4553 { 0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, 2586 {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
4554 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 2587 {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
4555 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 2588 {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
4556 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 2589 {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
4557 { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, 2590 {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
4558 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 2591 {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
4559 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 2592 {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
4560 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 2593 {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
4561 { 0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, 2594 {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
4562 { 0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, 2595 {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
4563 { 0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, 2596 {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
4564 { 0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, 2597 {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
4565 { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, 2598 {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
4566 { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, 2599 {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
4567 { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, 2600 {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
4568 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 2601 {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
4569 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 2602 {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
4570 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 2603 {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
4571 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, 2604 {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
4572 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 2605 {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
4573 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 2606 {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
4574 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 2607 {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
4575 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 2608 {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
4576 { 0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, 2609 {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
4577 { 0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, 2610 {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
4578 { 0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, 2611 {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
4579 { 0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, 2612 {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
4580 { 0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, 2613 {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
4581 { 0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, 2614 {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
4582 { 0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, 2615 {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
4583 { 0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, 2616 {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
4584 { 0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, 2617 {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
4585 { 0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, 2618 {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
4586 { 0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, 2619 {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
4587 { 0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, 2620 {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
4588 { 0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, 2621 {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
4589 { 0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, 2622 {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
4590 { 0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, 2623 {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
4591 { 0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, 2624 {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
4592 { 0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, 2625 {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
4593 { 0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, 2626 {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
4594 { 0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, 2627 {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
4595 { 0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, 2628 {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
4596 { 0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, 2629 {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
4597 { 0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, 2630 {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
4598 { 0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, 2631 {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
4599 { 0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, 2632 {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
4600 { 0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, 2633 {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
4601 { 0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, 2634 {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
4602 { 0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, 2635 {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
4603 { 0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, 2636 {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
4604 { 0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, 2637 {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
4605 { 0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, 2638 {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
4606 { 0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, 2639 {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
4607 { 0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, 2640 {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
4608 { 0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, 2641 {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
4609 { 0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, 2642 {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
4610 { 0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, 2643 {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
4611 { 0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, 2644 {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
4612 { 0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, 2645 {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
4613 { 0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, 2646 {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
4614 { 0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, 2647 {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
4615 { 0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, 2648 {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
4616 { 0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, 2649 {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
4617 { 0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, 2650 {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
4618 { 0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, 2651 {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
4619 { 0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, 2652 {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
4620 { 0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, 2653 {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
4621 { 0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, 2654 {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
4622 { 0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, 2655 {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
4623 { 0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, 2656 {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
4624 { 0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, 2657 {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
4625 { 0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, 2658 {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
4626 { 0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2659 {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4627 { 0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2660 {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4628 { 0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2661 {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4629 { 0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2662 {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4630 { 0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2663 {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4631 { 0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2664 {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4632 { 0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2665 {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4633 { 0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2666 {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4634 { 0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2667 {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4635 { 0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2668 {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4636 { 0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2669 {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4637 { 0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2670 {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4638 { 0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2671 {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4639 { 0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2672 {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4640 { 0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2673 {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4641 { 0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2674 {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4642 { 0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2675 {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4643 { 0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2676 {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4644 { 0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2677 {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4645 { 0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2678 {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4646 { 0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2679 {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4647 { 0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2680 {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4648 { 0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2681 {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4649 { 0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2682 {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4650 { 0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2683 {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4651 { 0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2684 {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4652 { 0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2685 {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4653 { 0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2686 {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4654 { 0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2687 {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4655 { 0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2688 {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4656 { 0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2689 {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4657 { 0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2690 {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4658 { 0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2691 {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4659 { 0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2692 {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4660 { 0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2693 {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4661 { 0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2694 {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4662 { 0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2695 {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4663 { 0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2696 {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4664 { 0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2697 {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4665 { 0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, 2698 {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
4666 { 0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, 2699 {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
4667 { 0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, 2700 {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
4668 { 0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, 2701 {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
4669 { 0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, 2702 {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
4670 { 0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, 2703 {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
4671 { 0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, 2704 {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
4672 { 0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, 2705 {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
4673 { 0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, 2706 {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
4674 { 0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, 2707 {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
4675 { 0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, 2708 {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
4676 { 0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, 2709 {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
4677 { 0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, 2710 {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
4678 { 0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, 2711 {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
4679 { 0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, 2712 {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
4680 { 0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, 2713 {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
4681 { 0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, 2714 {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
4682 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 2715 {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
4683 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 2716 {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
4684 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 2717 {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
4685 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, 2718 {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
4686 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 2719 {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
4687 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 2720 {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
4688 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 2721 {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
4689 { 0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, 2722 {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
4690 { 0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, 2723 {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
4691 { 0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, 2724 {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
4692 { 0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, 2725 {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
4693 { 0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, 2726 {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
4694 { 0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, 2727 {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
4695 { 0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, 2728 {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
4696 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 2729 {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
4697 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 2730 {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
4698 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 2731 {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
4699 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, 2732 {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
4700 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 2733 {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
4701 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 2734 {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
4702 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 2735 {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
4703 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 2736 {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
4704 { 0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, 2737 {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
4705 { 0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, 2738 {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
4706 { 0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, 2739 {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
4707 { 0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, 2740 {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
4708 { 0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, 2741 {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
4709 { 0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, 2742 {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
4710 { 0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, 2743 {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
4711 { 0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, 2744 {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
4712 { 0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, 2745 {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
4713 { 0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, 2746 {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
4714 { 0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, 2747 {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
4715 { 0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, 2748 {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
4716 { 0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, 2749 {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
4717 { 0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, 2750 {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
4718 { 0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, 2751 {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
4719 { 0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, 2752 {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
4720 { 0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, 2753 {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
4721 { 0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, 2754 {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
4722 { 0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, 2755 {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
4723 { 0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, 2756 {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
4724 { 0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, 2757 {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
4725 { 0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, 2758 {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
4726 { 0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, 2759 {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
4727 { 0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, 2760 {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
4728 { 0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, 2761 {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
4729 { 0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, 2762 {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
4730 { 0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, 2763 {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
4731 { 0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, 2764 {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
4732 { 0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, 2765 {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
4733 { 0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, 2766 {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
4734 { 0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, 2767 {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
4735 { 0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, 2768 {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
4736 { 0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, 2769 {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
4737 { 0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, 2770 {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
4738 { 0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, 2771 {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
4739 { 0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, 2772 {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
4740 { 0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, 2773 {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
4741 { 0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, 2774 {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
4742 { 0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, 2775 {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
4743 { 0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, 2776 {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
4744 { 0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, 2777 {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
4745 { 0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, 2778 {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
4746 { 0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, 2779 {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
4747 { 0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, 2780 {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
4748 { 0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, 2781 {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
4749 { 0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, 2782 {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
4750 { 0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, 2783 {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
4751 { 0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, 2784 {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
4752 { 0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, 2785 {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
4753 { 0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, 2786 {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
4754 { 0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2787 {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4755 { 0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2788 {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4756 { 0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2789 {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4757 { 0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2790 {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4758 { 0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2791 {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4759 { 0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2792 {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4760 { 0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2793 {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4761 { 0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2794 {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4762 { 0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2795 {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4763 { 0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2796 {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4764 { 0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2797 {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4765 { 0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2798 {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4766 { 0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2799 {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4767 { 0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2800 {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4768 { 0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2801 {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4769 { 0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2802 {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4770 { 0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2803 {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4771 { 0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2804 {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4772 { 0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2805 {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4773 { 0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2806 {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4774 { 0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2807 {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4775 { 0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2808 {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4776 { 0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2809 {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4777 { 0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2810 {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4778 { 0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2811 {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4779 { 0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2812 {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4780 { 0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2813 {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4781 { 0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2814 {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4782 { 0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2815 {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4783 { 0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2816 {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4784 { 0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2817 {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4785 { 0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2818 {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4786 { 0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2819 {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4787 { 0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2820 {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4788 { 0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2821 {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4789 { 0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2822 {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4790 { 0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2823 {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4791 { 0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2824 {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4792 { 0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, 2825 {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
4793 { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, 2826 {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
4794 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, 2827 {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
4795 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, 2828 {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
4796 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 2829 {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
4797 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 2830 {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
4798 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, 2831 {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000},
4799 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 2832 {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
4800}; 2833};
4801 2834
4802static const u32 ar9271Common_9271[][2] = { 2835static const u32 ar9271Common_9271[][2] = {
4803 { 0x0000000c, 0x00000000 }, 2836 /* Addr allmodes */
4804 { 0x00000030, 0x00020045 }, 2837 {0x0000000c, 0x00000000},
4805 { 0x00000034, 0x00000005 }, 2838 {0x00000030, 0x00020045},
4806 { 0x00000040, 0x00000000 }, 2839 {0x00000034, 0x00000005},
4807 { 0x00000044, 0x00000008 }, 2840 {0x00000040, 0x00000000},
4808 { 0x00000048, 0x00000008 }, 2841 {0x00000044, 0x00000008},
4809 { 0x0000004c, 0x00000010 }, 2842 {0x00000048, 0x00000008},
4810 { 0x00000050, 0x00000000 }, 2843 {0x0000004c, 0x00000010},
4811 { 0x00000054, 0x0000001f }, 2844 {0x00000050, 0x00000000},
4812 { 0x00000800, 0x00000000 }, 2845 {0x00000054, 0x0000001f},
4813 { 0x00000804, 0x00000000 }, 2846 {0x00000800, 0x00000000},
4814 { 0x00000808, 0x00000000 }, 2847 {0x00000804, 0x00000000},
4815 { 0x0000080c, 0x00000000 }, 2848 {0x00000808, 0x00000000},
4816 { 0x00000810, 0x00000000 }, 2849 {0x0000080c, 0x00000000},
4817 { 0x00000814, 0x00000000 }, 2850 {0x00000810, 0x00000000},
4818 { 0x00000818, 0x00000000 }, 2851 {0x00000814, 0x00000000},
4819 { 0x0000081c, 0x00000000 }, 2852 {0x00000818, 0x00000000},
4820 { 0x00000820, 0x00000000 }, 2853 {0x0000081c, 0x00000000},
4821 { 0x00000824, 0x00000000 }, 2854 {0x00000820, 0x00000000},
4822 { 0x00001040, 0x002ffc0f }, 2855 {0x00000824, 0x00000000},
4823 { 0x00001044, 0x002ffc0f }, 2856 {0x00001040, 0x002ffc0f},
4824 { 0x00001048, 0x002ffc0f }, 2857 {0x00001044, 0x002ffc0f},
4825 { 0x0000104c, 0x002ffc0f }, 2858 {0x00001048, 0x002ffc0f},
4826 { 0x00001050, 0x002ffc0f }, 2859 {0x0000104c, 0x002ffc0f},
4827 { 0x00001054, 0x002ffc0f }, 2860 {0x00001050, 0x002ffc0f},
4828 { 0x00001058, 0x002ffc0f }, 2861 {0x00001054, 0x002ffc0f},
4829 { 0x0000105c, 0x002ffc0f }, 2862 {0x00001058, 0x002ffc0f},
4830 { 0x00001060, 0x002ffc0f }, 2863 {0x0000105c, 0x002ffc0f},
4831 { 0x00001064, 0x002ffc0f }, 2864 {0x00001060, 0x002ffc0f},
4832 { 0x00001230, 0x00000000 }, 2865 {0x00001064, 0x002ffc0f},
4833 { 0x00001270, 0x00000000 }, 2866 {0x00001230, 0x00000000},
4834 { 0x00001038, 0x00000000 }, 2867 {0x00001270, 0x00000000},
4835 { 0x00001078, 0x00000000 }, 2868 {0x00001038, 0x00000000},
4836 { 0x000010b8, 0x00000000 }, 2869 {0x00001078, 0x00000000},
4837 { 0x000010f8, 0x00000000 }, 2870 {0x000010b8, 0x00000000},
4838 { 0x00001138, 0x00000000 }, 2871 {0x000010f8, 0x00000000},
4839 { 0x00001178, 0x00000000 }, 2872 {0x00001138, 0x00000000},
4840 { 0x000011b8, 0x00000000 }, 2873 {0x00001178, 0x00000000},
4841 { 0x000011f8, 0x00000000 }, 2874 {0x000011b8, 0x00000000},
4842 { 0x00001238, 0x00000000 }, 2875 {0x000011f8, 0x00000000},
4843 { 0x00001278, 0x00000000 }, 2876 {0x00001238, 0x00000000},
4844 { 0x000012b8, 0x00000000 }, 2877 {0x00001278, 0x00000000},
4845 { 0x000012f8, 0x00000000 }, 2878 {0x000012b8, 0x00000000},
4846 { 0x00001338, 0x00000000 }, 2879 {0x000012f8, 0x00000000},
4847 { 0x00001378, 0x00000000 }, 2880 {0x00001338, 0x00000000},
4848 { 0x000013b8, 0x00000000 }, 2881 {0x00001378, 0x00000000},
4849 { 0x000013f8, 0x00000000 }, 2882 {0x000013b8, 0x00000000},
4850 { 0x00001438, 0x00000000 }, 2883 {0x000013f8, 0x00000000},
4851 { 0x00001478, 0x00000000 }, 2884 {0x00001438, 0x00000000},
4852 { 0x000014b8, 0x00000000 }, 2885 {0x00001478, 0x00000000},
4853 { 0x000014f8, 0x00000000 }, 2886 {0x000014b8, 0x00000000},
4854 { 0x00001538, 0x00000000 }, 2887 {0x000014f8, 0x00000000},
4855 { 0x00001578, 0x00000000 }, 2888 {0x00001538, 0x00000000},
4856 { 0x000015b8, 0x00000000 }, 2889 {0x00001578, 0x00000000},
4857 { 0x000015f8, 0x00000000 }, 2890 {0x000015b8, 0x00000000},
4858 { 0x00001638, 0x00000000 }, 2891 {0x000015f8, 0x00000000},
4859 { 0x00001678, 0x00000000 }, 2892 {0x00001638, 0x00000000},
4860 { 0x000016b8, 0x00000000 }, 2893 {0x00001678, 0x00000000},
4861 { 0x000016f8, 0x00000000 }, 2894 {0x000016b8, 0x00000000},
4862 { 0x00001738, 0x00000000 }, 2895 {0x000016f8, 0x00000000},
4863 { 0x00001778, 0x00000000 }, 2896 {0x00001738, 0x00000000},
4864 { 0x000017b8, 0x00000000 }, 2897 {0x00001778, 0x00000000},
4865 { 0x000017f8, 0x00000000 }, 2898 {0x000017b8, 0x00000000},
4866 { 0x0000103c, 0x00000000 }, 2899 {0x000017f8, 0x00000000},
4867 { 0x0000107c, 0x00000000 }, 2900 {0x0000103c, 0x00000000},
4868 { 0x000010bc, 0x00000000 }, 2901 {0x0000107c, 0x00000000},
4869 { 0x000010fc, 0x00000000 }, 2902 {0x000010bc, 0x00000000},
4870 { 0x0000113c, 0x00000000 }, 2903 {0x000010fc, 0x00000000},
4871 { 0x0000117c, 0x00000000 }, 2904 {0x0000113c, 0x00000000},
4872 { 0x000011bc, 0x00000000 }, 2905 {0x0000117c, 0x00000000},
4873 { 0x000011fc, 0x00000000 }, 2906 {0x000011bc, 0x00000000},
4874 { 0x0000123c, 0x00000000 }, 2907 {0x000011fc, 0x00000000},
4875 { 0x0000127c, 0x00000000 }, 2908 {0x0000123c, 0x00000000},
4876 { 0x000012bc, 0x00000000 }, 2909 {0x0000127c, 0x00000000},
4877 { 0x000012fc, 0x00000000 }, 2910 {0x000012bc, 0x00000000},
4878 { 0x0000133c, 0x00000000 }, 2911 {0x000012fc, 0x00000000},
4879 { 0x0000137c, 0x00000000 }, 2912 {0x0000133c, 0x00000000},
4880 { 0x000013bc, 0x00000000 }, 2913 {0x0000137c, 0x00000000},
4881 { 0x000013fc, 0x00000000 }, 2914 {0x000013bc, 0x00000000},
4882 { 0x0000143c, 0x00000000 }, 2915 {0x000013fc, 0x00000000},
4883 { 0x0000147c, 0x00000000 }, 2916 {0x0000143c, 0x00000000},
4884 { 0x00004030, 0x00000002 }, 2917 {0x0000147c, 0x00000000},
4885 { 0x0000403c, 0x00000002 }, 2918 {0x00004030, 0x00000002},
4886 { 0x00004024, 0x0000001f }, 2919 {0x0000403c, 0x00000002},
4887 { 0x00004060, 0x00000000 }, 2920 {0x00004024, 0x0000001f},
4888 { 0x00004064, 0x00000000 }, 2921 {0x00004060, 0x00000000},
4889 { 0x00008004, 0x00000000 }, 2922 {0x00004064, 0x00000000},
4890 { 0x00008008, 0x00000000 }, 2923 {0x00008004, 0x00000000},
4891 { 0x0000800c, 0x00000000 }, 2924 {0x00008008, 0x00000000},
4892 { 0x00008018, 0x00000700 }, 2925 {0x0000800c, 0x00000000},
4893 { 0x00008020, 0x00000000 }, 2926 {0x00008018, 0x00000700},
4894 { 0x00008038, 0x00000000 }, 2927 {0x00008020, 0x00000000},
4895 { 0x0000803c, 0x00000000 }, 2928 {0x00008038, 0x00000000},
4896 { 0x00008048, 0x00000000 }, 2929 {0x0000803c, 0x00000000},
4897 { 0x00008054, 0x00000000 }, 2930 {0x00008048, 0x00000000},
4898 { 0x00008058, 0x00000000 }, 2931 {0x00008054, 0x00000000},
4899 { 0x0000805c, 0x000fc78f }, 2932 {0x00008058, 0x00000000},
4900 { 0x00008060, 0x0000000f }, 2933 {0x0000805c, 0x000fc78f},
4901 { 0x00008064, 0x00000000 }, 2934 {0x00008060, 0x0000000f},
4902 { 0x00008070, 0x00000000 }, 2935 {0x00008064, 0x00000000},
4903 { 0x000080b0, 0x00000000 }, 2936 {0x00008070, 0x00000000},
4904 { 0x000080b4, 0x00000000 }, 2937 {0x000080b0, 0x00000000},
4905 { 0x000080b8, 0x00000000 }, 2938 {0x000080b4, 0x00000000},
4906 { 0x000080bc, 0x00000000 }, 2939 {0x000080b8, 0x00000000},
4907 { 0x000080c0, 0x2a80001a }, 2940 {0x000080bc, 0x00000000},
4908 { 0x000080c4, 0x05dc01e0 }, 2941 {0x000080c0, 0x2a80001a},
4909 { 0x000080c8, 0x1f402710 }, 2942 {0x000080c4, 0x05dc01e0},
4910 { 0x000080cc, 0x01f40000 }, 2943 {0x000080c8, 0x1f402710},
4911 { 0x000080d0, 0x00001e00 }, 2944 {0x000080cc, 0x01f40000},
4912 { 0x000080d4, 0x00000000 }, 2945 {0x000080d0, 0x00001e00},
4913 { 0x000080d8, 0x00400000 }, 2946 {0x000080d4, 0x00000000},
4914 { 0x000080e0, 0xffffffff }, 2947 {0x000080d8, 0x00400000},
4915 { 0x000080e4, 0x0000ffff }, 2948 {0x000080e0, 0xffffffff},
4916 { 0x000080e8, 0x003f3f3f }, 2949 {0x000080e4, 0x0000ffff},
4917 { 0x000080ec, 0x00000000 }, 2950 {0x000080e8, 0x003f3f3f},
4918 { 0x000080f0, 0x00000000 }, 2951 {0x000080ec, 0x00000000},
4919 { 0x000080f4, 0x00000000 }, 2952 {0x000080f0, 0x00000000},
4920 { 0x000080f8, 0x00000000 }, 2953 {0x000080f4, 0x00000000},
4921 { 0x000080fc, 0x00020000 }, 2954 {0x000080f8, 0x00000000},
4922 { 0x00008100, 0x00020000 }, 2955 {0x000080fc, 0x00020000},
4923 { 0x00008104, 0x00000001 }, 2956 {0x00008100, 0x00020000},
4924 { 0x00008108, 0x00000052 }, 2957 {0x00008104, 0x00000001},
4925 { 0x0000810c, 0x00000000 }, 2958 {0x00008108, 0x00000052},
4926 { 0x00008110, 0x00000168 }, 2959 {0x0000810c, 0x00000000},
4927 { 0x00008118, 0x000100aa }, 2960 {0x00008110, 0x00000168},
4928 { 0x0000811c, 0x00003210 }, 2961 {0x00008118, 0x000100aa},
4929 { 0x00008120, 0x08f04810 }, 2962 {0x0000811c, 0x00003210},
4930 { 0x00008124, 0x00000000 }, 2963 {0x00008120, 0x08f04810},
4931 { 0x00008128, 0x00000000 }, 2964 {0x00008124, 0x00000000},
4932 { 0x0000812c, 0x00000000 }, 2965 {0x00008128, 0x00000000},
4933 { 0x00008130, 0x00000000 }, 2966 {0x0000812c, 0x00000000},
4934 { 0x00008134, 0x00000000 }, 2967 {0x00008130, 0x00000000},
4935 { 0x00008138, 0x00000000 }, 2968 {0x00008134, 0x00000000},
4936 { 0x0000813c, 0x00000000 }, 2969 {0x00008138, 0x00000000},
4937 { 0x00008144, 0xffffffff }, 2970 {0x0000813c, 0x00000000},
4938 { 0x00008168, 0x00000000 }, 2971 {0x00008144, 0xffffffff},
4939 { 0x0000816c, 0x00000000 }, 2972 {0x00008168, 0x00000000},
4940 { 0x00008170, 0x32143320 }, 2973 {0x0000816c, 0x00000000},
4941 { 0x00008174, 0xfaa4fa50 }, 2974 {0x00008170, 0x32143320},
4942 { 0x00008178, 0x00000100 }, 2975 {0x00008174, 0xfaa4fa50},
4943 { 0x0000817c, 0x00000000 }, 2976 {0x00008178, 0x00000100},
4944 { 0x000081c0, 0x00000000 }, 2977 {0x0000817c, 0x00000000},
4945 { 0x000081d0, 0x0000320a }, 2978 {0x000081c0, 0x00000000},
4946 { 0x000081ec, 0x00000000 }, 2979 {0x000081d0, 0x0000320a},
4947 { 0x000081f0, 0x00000000 }, 2980 {0x000081ec, 0x00000000},
4948 { 0x000081f4, 0x00000000 }, 2981 {0x000081f0, 0x00000000},
4949 { 0x000081f8, 0x00000000 }, 2982 {0x000081f4, 0x00000000},
4950 { 0x000081fc, 0x00000000 }, 2983 {0x000081f8, 0x00000000},
4951 { 0x00008200, 0x00000000 }, 2984 {0x000081fc, 0x00000000},
4952 { 0x00008204, 0x00000000 }, 2985 {0x00008200, 0x00000000},
4953 { 0x00008208, 0x00000000 }, 2986 {0x00008204, 0x00000000},
4954 { 0x0000820c, 0x00000000 }, 2987 {0x00008208, 0x00000000},
4955 { 0x00008210, 0x00000000 }, 2988 {0x0000820c, 0x00000000},
4956 { 0x00008214, 0x00000000 }, 2989 {0x00008210, 0x00000000},
4957 { 0x00008218, 0x00000000 }, 2990 {0x00008214, 0x00000000},
4958 { 0x0000821c, 0x00000000 }, 2991 {0x00008218, 0x00000000},
4959 { 0x00008220, 0x00000000 }, 2992 {0x0000821c, 0x00000000},
4960 { 0x00008224, 0x00000000 }, 2993 {0x00008220, 0x00000000},
4961 { 0x00008228, 0x00000000 }, 2994 {0x00008224, 0x00000000},
4962 { 0x0000822c, 0x00000000 }, 2995 {0x00008228, 0x00000000},
4963 { 0x00008230, 0x00000000 }, 2996 {0x0000822c, 0x00000000},
4964 { 0x00008234, 0x00000000 }, 2997 {0x00008230, 0x00000000},
4965 { 0x00008238, 0x00000000 }, 2998 {0x00008234, 0x00000000},
4966 { 0x0000823c, 0x00000000 }, 2999 {0x00008238, 0x00000000},
4967 { 0x00008240, 0x00100000 }, 3000 {0x0000823c, 0x00000000},
4968 { 0x00008244, 0x0010f400 }, 3001 {0x00008240, 0x00100000},
4969 { 0x00008248, 0x00000100 }, 3002 {0x00008244, 0x0010f400},
4970 { 0x0000824c, 0x0001e800 }, 3003 {0x00008248, 0x00000100},
4971 { 0x00008250, 0x00000000 }, 3004 {0x0000824c, 0x0001e800},
4972 { 0x00008254, 0x00000000 }, 3005 {0x00008250, 0x00000000},
4973 { 0x00008258, 0x00000000 }, 3006 {0x00008254, 0x00000000},
4974 { 0x0000825c, 0x400000ff }, 3007 {0x00008258, 0x00000000},
4975 { 0x00008260, 0x00080922 }, 3008 {0x0000825c, 0x400000ff},
4976 { 0x00008264, 0x88a00010 }, 3009 {0x00008260, 0x00080922},
4977 { 0x00008270, 0x00000000 }, 3010 {0x00008264, 0x88a00010},
4978 { 0x00008274, 0x40000000 }, 3011 {0x00008270, 0x00000000},
4979 { 0x00008278, 0x003e4180 }, 3012 {0x00008274, 0x40000000},
4980 { 0x0000827c, 0x00000000 }, 3013 {0x00008278, 0x003e4180},
4981 { 0x00008284, 0x0000002c }, 3014 {0x0000827c, 0x00000000},
4982 { 0x00008288, 0x0000002c }, 3015 {0x00008284, 0x0000002c},
4983 { 0x0000828c, 0x00000000 }, 3016 {0x00008288, 0x0000002c},
4984 { 0x00008294, 0x00000000 }, 3017 {0x0000828c, 0x00000000},
4985 { 0x00008298, 0x00000000 }, 3018 {0x00008294, 0x00000000},
4986 { 0x0000829c, 0x00000000 }, 3019 {0x00008298, 0x00000000},
4987 { 0x00008300, 0x00000040 }, 3020 {0x0000829c, 0x00000000},
4988 { 0x00008314, 0x00000000 }, 3021 {0x00008300, 0x00000040},
4989 { 0x00008328, 0x00000000 }, 3022 {0x00008314, 0x00000000},
4990 { 0x0000832c, 0x00000001 }, 3023 {0x00008328, 0x00000000},
4991 { 0x00008330, 0x00000302 }, 3024 {0x0000832c, 0x00000001},
4992 { 0x00008334, 0x00000e00 }, 3025 {0x00008330, 0x00000302},
4993 { 0x00008338, 0x00ff0000 }, 3026 {0x00008334, 0x00000e00},
4994 { 0x0000833c, 0x00000000 }, 3027 {0x00008338, 0x00ff0000},
4995 { 0x00008340, 0x00010380 }, 3028 {0x0000833c, 0x00000000},
4996 { 0x00008344, 0x00581043 }, 3029 {0x00008340, 0x00010380},
4997 { 0x00007010, 0x00000030 }, 3030 {0x00008344, 0x00581043},
4998 { 0x00007034, 0x00000002 }, 3031 {0x00007010, 0x00000030},
4999 { 0x00007038, 0x000004c2 }, 3032 {0x00007034, 0x00000002},
5000 { 0x00007800, 0x00140000 }, 3033 {0x00007038, 0x000004c2},
5001 { 0x00007804, 0x0e4548d8 }, 3034 {0x00007800, 0x00140000},
5002 { 0x00007808, 0x54214514 }, 3035 {0x00007804, 0x0e4548d8},
5003 { 0x0000780c, 0x02025820 }, 3036 {0x00007808, 0x54214514},
5004 { 0x00007810, 0x71c0d388 }, 3037 {0x0000780c, 0x02025820},
5005 { 0x00007814, 0x924934a8 }, 3038 {0x00007810, 0x71c0d388},
5006 { 0x0000781c, 0x00000000 }, 3039 {0x00007814, 0x924934a8},
5007 { 0x00007828, 0x66964300 }, 3040 {0x0000781c, 0x00000000},
5008 { 0x0000782c, 0x8db6d961 }, 3041 {0x00007828, 0x66964300},
5009 { 0x00007830, 0x8db6d96c }, 3042 {0x0000782c, 0x8db6d961},
5010 { 0x00007834, 0x6140008b }, 3043 {0x00007830, 0x8db6d96c},
5011 { 0x0000783c, 0x72ee0a72 }, 3044 {0x00007834, 0x6140008b},
5012 { 0x00007840, 0xbbfffffc }, 3045 {0x0000783c, 0x72ee0a72},
5013 { 0x00007844, 0x000c0db6 }, 3046 {0x00007840, 0xbbfffffc},
5014 { 0x00007848, 0x6db61b6f }, 3047 {0x00007844, 0x000c0db6},
5015 { 0x0000784c, 0x6d9b66db }, 3048 {0x00007848, 0x6db6246f},
5016 { 0x00007850, 0x6d8c6dba }, 3049 {0x0000784c, 0x6d9b66db},
5017 { 0x00007854, 0x00040000 }, 3050 {0x00007850, 0x6d8c6dba},
5018 { 0x00007858, 0xdb003012 }, 3051 {0x00007854, 0x00040000},
5019 { 0x0000785c, 0x04924914 }, 3052 {0x00007858, 0xdb003012},
5020 { 0x00007860, 0x21084210 }, 3053 {0x0000785c, 0x04924914},
5021 { 0x00007864, 0xf7d7ffde }, 3054 {0x00007860, 0x21084210},
5022 { 0x00007868, 0xc2034080 }, 3055 {0x00007864, 0xf7d7ffde},
5023 { 0x00007870, 0x10142c00 }, 3056 {0x00007868, 0xc2034080},
5024 { 0x00009808, 0x00000000 }, 3057 {0x00007870, 0x10142c00},
5025 { 0x0000980c, 0xafe68e30 }, 3058 {0x00009808, 0x00000000},
5026 { 0x00009810, 0xfd14e000 }, 3059 {0x0000980c, 0xafe68e30},
5027 { 0x00009814, 0x9c0a9f6b }, 3060 {0x00009810, 0xfd14e000},
5028 { 0x0000981c, 0x00000000 }, 3061 {0x00009814, 0x9c0a9f6b},
5029 { 0x0000982c, 0x0000a000 }, 3062 {0x0000981c, 0x00000000},
5030 { 0x00009830, 0x00000000 }, 3063 {0x0000982c, 0x0000a000},
5031 { 0x0000983c, 0x00200400 }, 3064 {0x00009830, 0x00000000},
5032 { 0x0000984c, 0x0040233c }, 3065 {0x0000983c, 0x00200400},
5033 { 0x00009854, 0x00000044 }, 3066 {0x0000984c, 0x0040233c},
5034 { 0x00009900, 0x00000000 }, 3067 {0x00009854, 0x00000044},
5035 { 0x00009904, 0x00000000 }, 3068 {0x00009900, 0x00000000},
5036 { 0x00009908, 0x00000000 }, 3069 {0x00009904, 0x00000000},
5037 { 0x0000990c, 0x00000000 }, 3070 {0x00009908, 0x00000000},
5038 { 0x0000991c, 0x10000fff }, 3071 {0x0000990c, 0x00000000},
5039 { 0x00009920, 0x04900000 }, 3072 {0x0000991c, 0x10000fff},
5040 { 0x00009928, 0x00000001 }, 3073 {0x00009920, 0x04900000},
5041 { 0x0000992c, 0x00000004 }, 3074 {0x00009928, 0x00000001},
5042 { 0x00009934, 0x1e1f2022 }, 3075 {0x0000992c, 0x00000004},
5043 { 0x00009938, 0x0a0b0c0d }, 3076 {0x00009934, 0x1e1f2022},
5044 { 0x0000993c, 0x00000000 }, 3077 {0x00009938, 0x0a0b0c0d},
5045 { 0x00009940, 0x14750604 }, 3078 {0x0000993c, 0x00000000},
5046 { 0x00009948, 0x9280c00a }, 3079 {0x00009940, 0x14750604},
5047 { 0x0000994c, 0x00020028 }, 3080 {0x00009948, 0x9280c00a},
5048 { 0x00009954, 0x5f3ca3de }, 3081 {0x0000994c, 0x00020028},
5049 { 0x00009958, 0x0108ecff }, 3082 {0x00009954, 0x5f3ca3de},
5050 { 0x00009968, 0x000003ce }, 3083 {0x00009958, 0x0108ecff},
5051 { 0x00009970, 0x192bb514 }, 3084 {0x00009968, 0x000003ce},
5052 { 0x00009974, 0x00000000 }, 3085 {0x00009970, 0x192bb514},
5053 { 0x00009978, 0x00000001 }, 3086 {0x00009974, 0x00000000},
5054 { 0x0000997c, 0x00000000 }, 3087 {0x00009978, 0x00000001},
5055 { 0x00009980, 0x00000000 }, 3088 {0x0000997c, 0x00000000},
5056 { 0x00009984, 0x00000000 }, 3089 {0x00009980, 0x00000000},
5057 { 0x00009988, 0x00000000 }, 3090 {0x00009984, 0x00000000},
5058 { 0x0000998c, 0x00000000 }, 3091 {0x00009988, 0x00000000},
5059 { 0x00009990, 0x00000000 }, 3092 {0x0000998c, 0x00000000},
5060 { 0x00009994, 0x00000000 }, 3093 {0x00009990, 0x00000000},
5061 { 0x00009998, 0x00000000 }, 3094 {0x00009994, 0x00000000},
5062 { 0x0000999c, 0x00000000 }, 3095 {0x00009998, 0x00000000},
5063 { 0x000099a0, 0x00000000 }, 3096 {0x0000999c, 0x00000000},
5064 { 0x000099a4, 0x00000001 }, 3097 {0x000099a0, 0x00000000},
5065 { 0x000099a8, 0x201fff00 }, 3098 {0x000099a4, 0x00000001},
5066 { 0x000099ac, 0x2def0400 }, 3099 {0x000099a8, 0x201fff00},
5067 { 0x000099b0, 0x03051000 }, 3100 {0x000099ac, 0x2def0400},
5068 { 0x000099b4, 0x00000820 }, 3101 {0x000099b0, 0x03051000},
5069 { 0x000099dc, 0x00000000 }, 3102 {0x000099b4, 0x00000820},
5070 { 0x000099e0, 0x00000000 }, 3103 {0x000099dc, 0x00000000},
5071 { 0x000099e4, 0xaaaaaaaa }, 3104 {0x000099e0, 0x00000000},
5072 { 0x000099e8, 0x3c466478 }, 3105 {0x000099e4, 0xaaaaaaaa},
5073 { 0x000099ec, 0x0cc80caa }, 3106 {0x000099e8, 0x3c466478},
5074 { 0x000099f0, 0x00000000 }, 3107 {0x000099ec, 0x0cc80caa},
5075 { 0x0000a208, 0x803e68c8 }, 3108 {0x000099f0, 0x00000000},
5076 { 0x0000a210, 0x4080a333 }, 3109 {0x0000a208, 0x803e68c8},
5077 { 0x0000a214, 0x00206c10 }, 3110 {0x0000a210, 0x4080a333},
5078 { 0x0000a218, 0x009c4060 }, 3111 {0x0000a214, 0x00206c10},
5079 { 0x0000a220, 0x01834061 }, 3112 {0x0000a218, 0x009c4060},
5080 { 0x0000a224, 0x00000400 }, 3113 {0x0000a220, 0x01834061},
5081 { 0x0000a228, 0x000003b5 }, 3114 {0x0000a224, 0x00000400},
5082 { 0x0000a22c, 0x00000000 }, 3115 {0x0000a228, 0x000003b5},
5083 { 0x0000a234, 0x20202020 }, 3116 {0x0000a22c, 0x00000000},
5084 { 0x0000a238, 0x20202020 }, 3117 {0x0000a234, 0x20202020},
5085 { 0x0000a244, 0x00000000 }, 3118 {0x0000a238, 0x20202020},
5086 { 0x0000a248, 0xfffffffc }, 3119 {0x0000a244, 0x00000000},
5087 { 0x0000a24c, 0x00000000 }, 3120 {0x0000a248, 0xfffffffc},
5088 { 0x0000a254, 0x00000000 }, 3121 {0x0000a24c, 0x00000000},
5089 { 0x0000a258, 0x0ccb5380 }, 3122 {0x0000a254, 0x00000000},
5090 { 0x0000a25c, 0x15151501 }, 3123 {0x0000a258, 0x0ccb5380},
5091 { 0x0000a260, 0xdfa90f01 }, 3124 {0x0000a25c, 0x15151501},
5092 { 0x0000a268, 0x00000000 }, 3125 {0x0000a260, 0xdfa90f01},
5093 { 0x0000a26c, 0x0ebae9e6 }, 3126 {0x0000a268, 0x00000000},
5094 { 0x0000a388, 0x0c000000 }, 3127 {0x0000a26c, 0x0ebae9e6},
5095 { 0x0000a38c, 0x20202020 }, 3128 {0x0000a388, 0x0c000000},
5096 { 0x0000a390, 0x20202020 }, 3129 {0x0000a38c, 0x20202020},
5097 { 0x0000a39c, 0x00000001 }, 3130 {0x0000a390, 0x20202020},
5098 { 0x0000a3a0, 0x00000000 }, 3131 {0x0000a39c, 0x00000001},
5099 { 0x0000a3a4, 0x00000000 }, 3132 {0x0000a3a0, 0x00000000},
5100 { 0x0000a3a8, 0x00000000 }, 3133 {0x0000a3a4, 0x00000000},
5101 { 0x0000a3ac, 0x00000000 }, 3134 {0x0000a3a8, 0x00000000},
5102 { 0x0000a3b0, 0x00000000 }, 3135 {0x0000a3ac, 0x00000000},
5103 { 0x0000a3b4, 0x00000000 }, 3136 {0x0000a3b0, 0x00000000},
5104 { 0x0000a3b8, 0x00000000 }, 3137 {0x0000a3b4, 0x00000000},
5105 { 0x0000a3bc, 0x00000000 }, 3138 {0x0000a3b8, 0x00000000},
5106 { 0x0000a3c0, 0x00000000 }, 3139 {0x0000a3bc, 0x00000000},
5107 { 0x0000a3c4, 0x00000000 }, 3140 {0x0000a3c0, 0x00000000},
5108 { 0x0000a3cc, 0x20202020 }, 3141 {0x0000a3c4, 0x00000000},
5109 { 0x0000a3d0, 0x20202020 }, 3142 {0x0000a3cc, 0x20202020},
5110 { 0x0000a3d4, 0x20202020 }, 3143 {0x0000a3d0, 0x20202020},
5111 { 0x0000a3e4, 0x00000000 }, 3144 {0x0000a3d4, 0x20202020},
5112 { 0x0000a3e8, 0x18c43433 }, 3145 {0x0000a3e4, 0x00000000},
5113 { 0x0000a3ec, 0x00f70081 }, 3146 {0x0000a3e8, 0x18c43433},
5114 { 0x0000a3f0, 0x01036a2f }, 3147 {0x0000a3ec, 0x00f70081},
5115 { 0x0000a3f4, 0x00000000 }, 3148 {0x0000a3f0, 0x01036a2f},
5116 { 0x0000d270, 0x0d820820 }, 3149 {0x0000a3f4, 0x00000000},
5117 { 0x0000d35c, 0x07ffffef }, 3150 {0x0000d270, 0x0d820820},
5118 { 0x0000d360, 0x0fffffe7 }, 3151 {0x0000d35c, 0x07ffffef},
5119 { 0x0000d364, 0x17ffffe5 }, 3152 {0x0000d360, 0x0fffffe7},
5120 { 0x0000d368, 0x1fffffe4 }, 3153 {0x0000d364, 0x17ffffe5},
5121 { 0x0000d36c, 0x37ffffe3 }, 3154 {0x0000d368, 0x1fffffe4},
5122 { 0x0000d370, 0x3fffffe3 }, 3155 {0x0000d36c, 0x37ffffe3},
5123 { 0x0000d374, 0x57ffffe3 }, 3156 {0x0000d370, 0x3fffffe3},
5124 { 0x0000d378, 0x5fffffe2 }, 3157 {0x0000d374, 0x57ffffe3},
5125 { 0x0000d37c, 0x7fffffe2 }, 3158 {0x0000d378, 0x5fffffe2},
5126 { 0x0000d380, 0x7f3c7bba }, 3159 {0x0000d37c, 0x7fffffe2},
5127 { 0x0000d384, 0xf3307ff0 }, 3160 {0x0000d380, 0x7f3c7bba},
3161 {0x0000d384, 0xf3307ff0},
5128}; 3162};
5129 3163
5130static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = { 3164static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
5131 { 0x0000a1f4, 0x00fffeff }, 3165 /* Addr allmodes */
5132 { 0x0000a1f8, 0x00f5f9ff }, 3166 {0x0000a1f4, 0x00fffeff},
5133 { 0x0000a1fc, 0xb79f6427 }, 3167 {0x0000a1f8, 0x00f5f9ff},
3168 {0x0000a1fc, 0xb79f6427},
5134}; 3169};
5135 3170
5136static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = { 3171static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
5137 { 0x0000a1f4, 0x00000000 }, 3172 /* Addr allmodes */
5138 { 0x0000a1f8, 0xefff0301 }, 3173 {0x0000a1f4, 0x00000000},
5139 { 0x0000a1fc, 0xca9228ee }, 3174 {0x0000a1f8, 0xefff0301},
3175 {0x0000a1fc, 0xca9228ee},
5140}; 3176};
5141 3177
5142static const u32 ar9271Modes_9271_1_0_only[][6] = { 3178static const u32 ar9271Modes_9271_1_0_only[][6] = {
5143 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 }, 3179 {0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311},
5144 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 3180 {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
5145}; 3181};
5146 3182
5147static const u32 ar9271Modes_9271_ANI_reg[][6] = { 3183static const u32 ar9271Modes_9271_ANI_reg[][6] = {
5148 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, 3184 {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
5149 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, 3185 {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e},
5150 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 3186 {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
5151 { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 }, 3187 {0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881},
5152 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 3188 {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
5153 { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 }, 3189 {0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8},
5154 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, 3190 {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d},
5155 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 3191 {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
5156}; 3192};
5157 3193
5158static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = { 3194static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
5159 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3195 {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
5160 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, 3196 {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000},
5161 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, 3197 {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000},
5162 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, 3198 {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000},
5163 { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 }, 3199 {0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000},
5164 { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 }, 3200 {0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000},
5165 { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 }, 3201 {0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000},
5166 { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 }, 3202 {0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000},
5167 { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 }, 3203 {0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000},
5168 { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 }, 3204 {0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000},
5169 { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 }, 3205 {0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000},
5170 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 }, 3206 {0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000},
5171 { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 }, 3207 {0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000},
5172 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 }, 3208 {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000},
5173 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, 3209 {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
5174 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, 3210 {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
5175 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3211 {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5176 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3212 {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5177 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3213 {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5178 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3214 {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5179 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3215 {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5180 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3216 {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5181 { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 }, 3217 {0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029},
5182 { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff }, 3218 {0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff},
5183 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, 3219 {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
5184 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, 3220 {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
5185 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 }, 3221 {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652},
5186 { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd }, 3222 {0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
5187 { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd }, 3223 {0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd},
5188 { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd }, 3224 {0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
5189 { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd }, 3225 {0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
5190 { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd }, 3226 {0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
5191 { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd }, 3227 {0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
5192}; 3228};
5193 3229
5194static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = { 3230static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
5195 { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 }, 3231 {0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000},
5196 { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 }, 3232 {0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000},
5197 { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 }, 3233 {0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000},
5198 { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 }, 3234 {0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000},
5199 { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 }, 3235 {0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000},
5200 { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 }, 3236 {0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000},
5201 { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 }, 3237 {0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000},
5202 { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 }, 3238 {0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000},
5203 { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 }, 3239 {0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000},
5204 { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 }, 3240 {0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000},
5205 { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 }, 3241 {0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000},
5206 { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 }, 3242 {0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000},
5207 { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 }, 3243 {0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000},
5208 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 }, 3244 {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000},
5209 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, 3245 {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
5210 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, 3246 {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
5211 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3247 {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5212 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3248 {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5213 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3249 {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5214 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3250 {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5215 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3251 {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5216 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 3252 {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
5217 { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b }, 3253 {0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b},
5218 { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff }, 3254 {0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff},
5219 { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 }, 3255 {0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6},
5220 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, 3256 {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
5221 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 }, 3257 {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a214652, 0x0a214652, 0x0a22a652},
5222 { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, 3258 {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
5223 { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 }, 3259 {0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063},
5224 { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 }, 3260 {0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
5225 { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 }, 3261 {0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
5226 { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 }, 3262 {0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
5227 { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 }, 3263 {0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
5228}; 3264};
5229 3265
5230#endif /* INITVALS_9002_10_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 2be20d2070c4..50dda394f8be 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -287,6 +287,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
287 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt); 287 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
288 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt); 288 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
289 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt); 289 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
290 ts->tid = MS(ads->ds_txstatus9, AR_TxTid);
290 ts->ts_antenna = 0; 291 ts->ts_antenna = 0;
291 292
292 return 0; 293 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index ed314e89bfe1..adbf031fbc5a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -471,52 +471,47 @@ static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
471static void ar9002_hw_do_getnf(struct ath_hw *ah, 471static void ar9002_hw_do_getnf(struct ath_hw *ah,
472 int16_t nfarray[NUM_NF_READINGS]) 472 int16_t nfarray[NUM_NF_READINGS])
473{ 473{
474 struct ath_common *common = ath9k_hw_common(ah);
475 int16_t nf; 474 int16_t nf;
476 475
477 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); 476 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
478 477 nfarray[0] = sign_extend(nf, 9);
479 if (nf & 0x100)
480 nf = 0 - ((nf ^ 0x1ff) + 1);
481 ath_print(common, ATH_DBG_CALIBRATE,
482 "NF calibrated [ctl] [chain 0] is %d\n", nf);
483
484 if (AR_SREV_9271(ah) && (nf >= -114))
485 nf = -116;
486
487 nfarray[0] = nf;
488
489 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
490 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
491 AR9280_PHY_CH1_MINCCA_PWR);
492
493 if (nf & 0x100)
494 nf = 0 - ((nf ^ 0x1ff) + 1);
495 ath_print(common, ATH_DBG_CALIBRATE,
496 "NF calibrated [ctl] [chain 1] is %d\n", nf);
497 nfarray[1] = nf;
498 }
499 478
500 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR); 479 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
501 if (nf & 0x100) 480 if (IS_CHAN_HT40(ah->curchan))
502 nf = 0 - ((nf ^ 0x1ff) + 1); 481 nfarray[3] = sign_extend(nf, 9);
503 ath_print(common, ATH_DBG_CALIBRATE,
504 "NF calibrated [ext] [chain 0] is %d\n", nf);
505 482
506 if (AR_SREV_9271(ah) && (nf >= -114)) 483 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
507 nf = -116; 484 return;
508 485
509 nfarray[3] = nf; 486 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
487 nfarray[1] = sign_extend(nf, 9);
510 488
511 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) { 489 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR);
512 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), 490 if (IS_CHAN_HT40(ah->curchan))
513 AR9280_PHY_CH1_EXT_MINCCA_PWR); 491 nfarray[4] = sign_extend(nf, 9);
492}
514 493
515 if (nf & 0x100) 494static void ar9002_hw_set_nf_limits(struct ath_hw *ah)
516 nf = 0 - ((nf ^ 0x1ff) + 1); 495{
517 ath_print(common, ATH_DBG_CALIBRATE, 496 if (AR_SREV_9285(ah)) {
518 "NF calibrated [ext] [chain 1] is %d\n", nf); 497 ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ;
519 nfarray[4] = nf; 498 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ;
499 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9285_2GHZ;
500 } else if (AR_SREV_9287(ah)) {
501 ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ;
502 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ;
503 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9287_2GHZ;
504 } else if (AR_SREV_9271(ah)) {
505 ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ;
506 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ;
507 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9271_2GHZ;
508 } else {
509 ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ;
510 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ;
511 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9280_2GHZ;
512 ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ;
513 ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ;
514 ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9280_5GHZ;
520 } 515 }
521} 516}
522 517
@@ -532,4 +527,6 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
532 priv_ops->olc_init = ar9002_olc_init; 527 priv_ops->olc_init = ar9002_olc_init;
533 priv_ops->compute_pll_control = ar9002_hw_compute_pll_control; 528 priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
534 priv_ops->do_getnf = ar9002_hw_do_getnf; 529 priv_ops->do_getnf = ar9002_hw_do_getnf;
530
531 ar9002_hw_set_nf_limits(ah);
535} 532}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 81bf6e5840e1..c5151a4dd10b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -114,6 +114,10 @@
114#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000 114#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
115#define AR_PHY_FIND_SIG_FIRPWR_S 18 115#define AR_PHY_FIND_SIG_FIRPWR_S 18
116 116
117#define AR_PHY_FIND_SIG_LOW 0x9840
118#define AR_PHY_FIND_SIG_FIRSTEP_LOW 0x00000FC0L
119#define AR_PHY_FIND_SIG_FIRSTEP_LOW_S 6
120
117#define AR_PHY_AGC_CTL1 0x985C 121#define AR_PHY_AGC_CTL1 0x985C
118#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80 122#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
119#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7 123#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
@@ -325,6 +329,9 @@
325#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9 329#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
326#define AR_PHY_EXT_CCA_THRESH62 0x007F0000 330#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
327#define AR_PHY_EXT_CCA_THRESH62_S 16 331#define AR_PHY_EXT_CCA_THRESH62_S 16
332#define AR_PHY_EXT_TIMING5_CYCPWR_THR1 0x0000FE00L
333#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9
334
328#define AR_PHY_EXT_MINCCA_PWR 0xFF800000 335#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
329#define AR_PHY_EXT_MINCCA_PWR_S 23 336#define AR_PHY_EXT_MINCCA_PWR_S 23
330#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000 337#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
@@ -569,4 +576,30 @@
569#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000 576#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
570#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23 577#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
571 578
579#define AR_PHY_CCA_NOM_VAL_5416_2GHZ -90
580#define AR_PHY_CCA_NOM_VAL_5416_5GHZ -100
581#define AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ -100
582#define AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ -110
583#define AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ -80
584#define AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ -90
585
586#define AR_PHY_CCA_NOM_VAL_9280_2GHZ -112
587#define AR_PHY_CCA_NOM_VAL_9280_5GHZ -112
588#define AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ -127
589#define AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ -122
590#define AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ -97
591#define AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ -102
592
593#define AR_PHY_CCA_NOM_VAL_9285_2GHZ -118
594#define AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ -127
595#define AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ -108
596
597#define AR_PHY_CCA_NOM_VAL_9271_2GHZ -118
598#define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ -127
599#define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ -116
600
601#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -120
602#define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ -127
603#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -110
604
572#endif 605#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
index db019dd220b7..d3375fc4ce8b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
@@ -14,8 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#ifndef INITVALS_9003_H 17#ifndef INITVALS_9003_2P0_H
18#define INITVALS_9003_H 18#define INITVALS_9003_2P0_H
19 19
20/* AR9003 2.0 */ 20/* AR9003 2.0 */
21 21
@@ -835,71 +835,71 @@ static const u32 ar9300_2p0_baseband_core[][2] = {
835 835
836static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = { 836static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
838 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 838 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
839 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 839 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
840 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 840 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
841 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, 841 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
842 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200}, 842 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
843 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202}, 843 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
844 {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400}, 844 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
845 {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402}, 845 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
846 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404}, 846 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
847 {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603}, 847 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
848 {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02}, 848 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
849 {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04}, 849 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
850 {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20}, 850 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
851 {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20}, 851 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
852 {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22}, 852 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
853 {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24}, 853 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
854 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640}, 854 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
855 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, 855 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
856 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, 856 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
857 {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, 857 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
858 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 858 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
859 {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, 859 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
860 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, 860 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
861 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, 861 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
862 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, 862 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
863 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, 863 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
864 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 864 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
865 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 865 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
866 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 866 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
867 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 867 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
868 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 868 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
869 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 869 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
870 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 870 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
871 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, 871 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
872 {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002}, 872 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
873 {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004}, 873 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
874 {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200}, 874 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
875 {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202}, 875 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
876 {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400}, 876 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
877 {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402}, 877 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
878 {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404}, 878 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
879 {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603}, 879 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
880 {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02}, 880 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
881 {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04}, 881 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
882 {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20}, 882 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
883 {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20}, 883 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
884 {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22}, 884 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
885 {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24}, 885 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
886 {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640}, 886 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
887 {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660}, 887 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
888 {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861}, 888 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
889 {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81}, 889 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
890 {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83}, 890 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
891 {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84}, 891 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
892 {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3}, 892 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
893 {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5}, 893 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
894 {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9}, 894 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
895 {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb}, 895 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
896 {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 896 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
897 {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 897 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
898 {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 898 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
899 {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 899 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
900 {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 900 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
901 {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 901 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
902 {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 902 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
903 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6}, 903 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
904 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001}, 904 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
905 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c}, 905 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,71 +913,71 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
913 913
914static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = { 914static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
915 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 915 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
916 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 916 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
917 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 917 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
918 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 918 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
919 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, 919 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
920 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200}, 920 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
921 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202}, 921 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
922 {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400}, 922 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
923 {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402}, 923 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
924 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404}, 924 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
925 {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603}, 925 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
926 {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02}, 926 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
927 {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04}, 927 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
928 {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20}, 928 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
929 {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20}, 929 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
930 {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22}, 930 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
931 {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24}, 931 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
932 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640}, 932 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
933 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, 933 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
934 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, 934 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
935 {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, 935 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
936 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 936 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
937 {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, 937 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
938 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, 938 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
939 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, 939 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
940 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, 940 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
941 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, 941 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
942 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 942 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
943 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 943 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
944 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 944 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
945 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 945 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
946 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 946 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
947 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 947 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
948 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 948 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
949 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, 949 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
950 {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002}, 950 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
951 {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004}, 951 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
952 {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200}, 952 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
953 {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202}, 953 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
954 {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400}, 954 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
955 {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402}, 955 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
956 {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404}, 956 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
957 {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603}, 957 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
958 {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02}, 958 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
959 {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04}, 959 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
960 {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20}, 960 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
961 {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20}, 961 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
962 {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22}, 962 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
963 {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24}, 963 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
964 {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640}, 964 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
965 {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660}, 965 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
966 {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861}, 966 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
967 {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81}, 967 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
968 {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83}, 968 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
969 {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84}, 969 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
970 {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3}, 970 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
971 {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5}, 971 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
972 {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9}, 972 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
973 {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb}, 973 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
974 {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 974 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
975 {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 975 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
976 {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 976 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
977 {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 977 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
978 {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 978 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
979 {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 979 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
980 {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, 980 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
981 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4}, 981 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
982 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001}, 982 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
983 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 983 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1781,4 +1781,4 @@ static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
1781 {0x00004044, 0x00000000}, 1781 {0x00004044, 0x00000000},
1782}; 1782};
1783 1783
1784#endif /* INITVALS_9003_H */ 1784#endif /* INITVALS_9003_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
new file mode 100644
index 000000000000..ec98ab50748a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -0,0 +1,1785 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_9003_2P2_H
18#define INITVALS_9003_2P2_H
19
20/* AR9003 2.2 */
21
22static const u32 ar9300_2p2_radio_postamble[][5] = {
23 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
24 {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
25 {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
26 {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
27 {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
28 {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
29 {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
30 {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
31 {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
32 {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
33};
34
35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
37 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
38 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
39 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
40 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
41 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
42 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
43 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
44 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
45 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
46 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
47 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
48 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
49 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
50 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
51 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
52 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
53 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
54 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
55 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
56 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
57 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
58 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
59 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
60 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
61 {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
62 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
63 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
64 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
65 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
66 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
67 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
68 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
69 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
70 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
71 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
72 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
73 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
74 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
75 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
76 {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
77 {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
78 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
79 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
80 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
81 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
82 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
83 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
84 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
85 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
86 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
87 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
88 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
89 {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
90 {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
91 {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
92 {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
93 {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
94 {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
95 {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
96 {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
97 {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
98 {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
99 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
100 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
101 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
102 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
103 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
104 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
105 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
106 {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
107 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
108 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
109 {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
110 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
111};
112
113static const u32 ar9300Modes_fast_clock_2p2[][3] = {
114 /* Addr 5G_HT20 5G_HT40 */
115 {0x00001030, 0x00000268, 0x000004d0},
116 {0x00001070, 0x0000018c, 0x00000318},
117 {0x000010b0, 0x00000fd0, 0x00001fa0},
118 {0x00008014, 0x044c044c, 0x08980898},
119 {0x0000801c, 0x148ec02b, 0x148ec057},
120 {0x00008318, 0x000044c0, 0x00008980},
121 {0x00009e00, 0x03721821, 0x03721821},
122 {0x0000a230, 0x0000000b, 0x00000016},
123 {0x0000a254, 0x00000898, 0x00001130},
124};
125
126static const u32 ar9300_2p2_radio_core[][2] = {
127 /* Addr allmodes */
128 {0x00016000, 0x36db6db6},
129 {0x00016004, 0x6db6db40},
130 {0x00016008, 0x73f00000},
131 {0x0001600c, 0x00000000},
132 {0x00016040, 0x7f80fff8},
133 {0x0001604c, 0x76d005b5},
134 {0x00016050, 0x556cf031},
135 {0x00016054, 0x13449440},
136 {0x00016058, 0x0c51c92c},
137 {0x0001605c, 0x3db7fffc},
138 {0x00016060, 0xfffffffc},
139 {0x00016064, 0x000f0278},
140 {0x0001606c, 0x6db60000},
141 {0x00016080, 0x00000000},
142 {0x00016084, 0x0e48048c},
143 {0x00016088, 0x54214514},
144 {0x0001608c, 0x119f481e},
145 {0x00016090, 0x24926490},
146 {0x00016098, 0xd2888888},
147 {0x000160a0, 0x0a108ffe},
148 {0x000160a4, 0x812fc370},
149 {0x000160a8, 0x423c8000},
150 {0x000160b4, 0x92480080},
151 {0x000160c0, 0x00adb6d0},
152 {0x000160c4, 0x6db6db60},
153 {0x000160c8, 0x6db6db6c},
154 {0x000160cc, 0x01e6c000},
155 {0x00016100, 0x3fffbe01},
156 {0x00016104, 0xfff80000},
157 {0x00016108, 0x00080010},
158 {0x00016144, 0x02084080},
159 {0x00016148, 0x00000000},
160 {0x00016280, 0x058a0001},
161 {0x00016284, 0x3d840208},
162 {0x00016288, 0x05a20408},
163 {0x0001628c, 0x00038c07},
164 {0x00016290, 0x00000004},
165 {0x00016294, 0x458aa14f},
166 {0x00016380, 0x00000000},
167 {0x00016384, 0x00000000},
168 {0x00016388, 0x00800700},
169 {0x0001638c, 0x00800700},
170 {0x00016390, 0x00800700},
171 {0x00016394, 0x00000000},
172 {0x00016398, 0x00000000},
173 {0x0001639c, 0x00000000},
174 {0x000163a0, 0x00000001},
175 {0x000163a4, 0x00000001},
176 {0x000163a8, 0x00000000},
177 {0x000163ac, 0x00000000},
178 {0x000163b0, 0x00000000},
179 {0x000163b4, 0x00000000},
180 {0x000163b8, 0x00000000},
181 {0x000163bc, 0x00000000},
182 {0x000163c0, 0x000000a0},
183 {0x000163c4, 0x000c0000},
184 {0x000163c8, 0x14021402},
185 {0x000163cc, 0x00001402},
186 {0x000163d0, 0x00000000},
187 {0x000163d4, 0x00000000},
188 {0x00016400, 0x36db6db6},
189 {0x00016404, 0x6db6db40},
190 {0x00016408, 0x73f00000},
191 {0x0001640c, 0x00000000},
192 {0x00016440, 0x7f80fff8},
193 {0x0001644c, 0x76d005b5},
194 {0x00016450, 0x556cf031},
195 {0x00016454, 0x13449440},
196 {0x00016458, 0x0c51c92c},
197 {0x0001645c, 0x3db7fffc},
198 {0x00016460, 0xfffffffc},
199 {0x00016464, 0x000f0278},
200 {0x0001646c, 0x6db60000},
201 {0x00016500, 0x3fffbe01},
202 {0x00016504, 0xfff80000},
203 {0x00016508, 0x00080010},
204 {0x00016544, 0x02084080},
205 {0x00016548, 0x00000000},
206 {0x00016780, 0x00000000},
207 {0x00016784, 0x00000000},
208 {0x00016788, 0x00800700},
209 {0x0001678c, 0x00800700},
210 {0x00016790, 0x00800700},
211 {0x00016794, 0x00000000},
212 {0x00016798, 0x00000000},
213 {0x0001679c, 0x00000000},
214 {0x000167a0, 0x00000001},
215 {0x000167a4, 0x00000001},
216 {0x000167a8, 0x00000000},
217 {0x000167ac, 0x00000000},
218 {0x000167b0, 0x00000000},
219 {0x000167b4, 0x00000000},
220 {0x000167b8, 0x00000000},
221 {0x000167bc, 0x00000000},
222 {0x000167c0, 0x000000a0},
223 {0x000167c4, 0x000c0000},
224 {0x000167c8, 0x14021402},
225 {0x000167cc, 0x00001402},
226 {0x000167d0, 0x00000000},
227 {0x000167d4, 0x00000000},
228 {0x00016800, 0x36db6db6},
229 {0x00016804, 0x6db6db40},
230 {0x00016808, 0x73f00000},
231 {0x0001680c, 0x00000000},
232 {0x00016840, 0x7f80fff8},
233 {0x0001684c, 0x76d005b5},
234 {0x00016850, 0x556cf031},
235 {0x00016854, 0x13449440},
236 {0x00016858, 0x0c51c92c},
237 {0x0001685c, 0x3db7fffc},
238 {0x00016860, 0xfffffffc},
239 {0x00016864, 0x000f0278},
240 {0x0001686c, 0x6db60000},
241 {0x00016900, 0x3fffbe01},
242 {0x00016904, 0xfff80000},
243 {0x00016908, 0x00080010},
244 {0x00016944, 0x02084080},
245 {0x00016948, 0x00000000},
246 {0x00016b80, 0x00000000},
247 {0x00016b84, 0x00000000},
248 {0x00016b88, 0x00800700},
249 {0x00016b8c, 0x00800700},
250 {0x00016b90, 0x00800700},
251 {0x00016b94, 0x00000000},
252 {0x00016b98, 0x00000000},
253 {0x00016b9c, 0x00000000},
254 {0x00016ba0, 0x00000001},
255 {0x00016ba4, 0x00000001},
256 {0x00016ba8, 0x00000000},
257 {0x00016bac, 0x00000000},
258 {0x00016bb0, 0x00000000},
259 {0x00016bb4, 0x00000000},
260 {0x00016bb8, 0x00000000},
261 {0x00016bbc, 0x00000000},
262 {0x00016bc0, 0x000000a0},
263 {0x00016bc4, 0x000c0000},
264 {0x00016bc8, 0x14021402},
265 {0x00016bcc, 0x00001402},
266 {0x00016bd0, 0x00000000},
267 {0x00016bd4, 0x00000000},
268};
269
/*
 * Merlin-style common RX gain table for AR9300 chip rev 2.2:
 * {register address, value} pairs applied identically in all modes
 * (see "allmodes" column header below). The 0xa000-0xa1fc and
 * 0xb000-0xb1fc ranges carry the same value sequence — presumably one
 * range per RX chain; confirm against the chip register map.
 * Vendor-supplied calibration data; do not hand-edit values.
 */
270static const u32 ar9300Common_rx_gain_table_merlin_2p2[][2] = {
271 /* Addr allmodes */
272 {0x0000a000, 0x02000101},
273 {0x0000a004, 0x02000102},
274 {0x0000a008, 0x02000103},
275 {0x0000a00c, 0x02000104},
276 {0x0000a010, 0x02000200},
277 {0x0000a014, 0x02000201},
278 {0x0000a018, 0x02000202},
279 {0x0000a01c, 0x02000203},
280 {0x0000a020, 0x02000204},
281 {0x0000a024, 0x02000205},
282 {0x0000a028, 0x02000208},
283 {0x0000a02c, 0x02000302},
284 {0x0000a030, 0x02000303},
285 {0x0000a034, 0x02000304},
286 {0x0000a038, 0x02000400},
287 {0x0000a03c, 0x02010300},
288 {0x0000a040, 0x02010301},
289 {0x0000a044, 0x02010302},
290 {0x0000a048, 0x02000500},
291 {0x0000a04c, 0x02010400},
292 {0x0000a050, 0x02020300},
293 {0x0000a054, 0x02020301},
294 {0x0000a058, 0x02020302},
295 {0x0000a05c, 0x02020303},
296 {0x0000a060, 0x02020400},
297 {0x0000a064, 0x02030300},
298 {0x0000a068, 0x02030301},
299 {0x0000a06c, 0x02030302},
300 {0x0000a070, 0x02030303},
301 {0x0000a074, 0x02030400},
302 {0x0000a078, 0x02040300},
303 {0x0000a07c, 0x02040301},
304 {0x0000a080, 0x02040302},
305 {0x0000a084, 0x02040303},
306 {0x0000a088, 0x02030500},
307 {0x0000a08c, 0x02040400},
308 {0x0000a090, 0x02050203},
309 {0x0000a094, 0x02050204},
310 {0x0000a098, 0x02050205},
311 {0x0000a09c, 0x02040500},
312 {0x0000a0a0, 0x02050301},
313 {0x0000a0a4, 0x02050302},
314 {0x0000a0a8, 0x02050303},
315 {0x0000a0ac, 0x02050400},
316 {0x0000a0b0, 0x02050401},
317 {0x0000a0b4, 0x02050402},
318 {0x0000a0b8, 0x02050403},
319 {0x0000a0bc, 0x02050500},
320 {0x0000a0c0, 0x02050501},
321 {0x0000a0c4, 0x02050502},
322 {0x0000a0c8, 0x02050503},
323 {0x0000a0cc, 0x02050504},
324 {0x0000a0d0, 0x02050600},
325 {0x0000a0d4, 0x02050601},
326 {0x0000a0d8, 0x02050602},
327 {0x0000a0dc, 0x02050603},
328 {0x0000a0e0, 0x02050604},
329 {0x0000a0e4, 0x02050700},
330 {0x0000a0e8, 0x02050701},
331 {0x0000a0ec, 0x02050702},
332 {0x0000a0f0, 0x02050703},
333 {0x0000a0f4, 0x02050704},
334 {0x0000a0f8, 0x02050705},
335 {0x0000a0fc, 0x02050708},
336 {0x0000a100, 0x02050709},
337 {0x0000a104, 0x0205070a},
338 {0x0000a108, 0x0205070b},
339 {0x0000a10c, 0x0205070c},
340 {0x0000a110, 0x0205070d},
341 {0x0000a114, 0x02050710},
342 {0x0000a118, 0x02050711},
343 {0x0000a11c, 0x02050712},
344 {0x0000a120, 0x02050713},
345 {0x0000a124, 0x02050714},
346 {0x0000a128, 0x02050715},
347 {0x0000a12c, 0x02050730},
348 {0x0000a130, 0x02050731},
349 {0x0000a134, 0x02050732},
350 {0x0000a138, 0x02050733},
351 {0x0000a13c, 0x02050734},
352 {0x0000a140, 0x02050735},
353 {0x0000a144, 0x02050750},
354 {0x0000a148, 0x02050751},
355 {0x0000a14c, 0x02050752},
356 {0x0000a150, 0x02050753},
357 {0x0000a154, 0x02050754},
358 {0x0000a158, 0x02050755},
359 {0x0000a15c, 0x02050770},
360 {0x0000a160, 0x02050771},
361 {0x0000a164, 0x02050772},
362 {0x0000a168, 0x02050773},
363 {0x0000a16c, 0x02050774},
364 {0x0000a170, 0x02050775},
365 {0x0000a174, 0x00000776},
366 {0x0000a178, 0x00000776},
367 {0x0000a17c, 0x00000776},
368 {0x0000a180, 0x00000776},
369 {0x0000a184, 0x00000776},
370 {0x0000a188, 0x00000776},
371 {0x0000a18c, 0x00000776},
372 {0x0000a190, 0x00000776},
373 {0x0000a194, 0x00000776},
374 {0x0000a198, 0x00000776},
375 {0x0000a19c, 0x00000776},
376 {0x0000a1a0, 0x00000776},
377 {0x0000a1a4, 0x00000776},
378 {0x0000a1a8, 0x00000776},
379 {0x0000a1ac, 0x00000776},
380 {0x0000a1b0, 0x00000776},
381 {0x0000a1b4, 0x00000776},
382 {0x0000a1b8, 0x00000776},
383 {0x0000a1bc, 0x00000776},
384 {0x0000a1c0, 0x00000776},
385 {0x0000a1c4, 0x00000776},
386 {0x0000a1c8, 0x00000776},
387 {0x0000a1cc, 0x00000776},
388 {0x0000a1d0, 0x00000776},
389 {0x0000a1d4, 0x00000776},
390 {0x0000a1d8, 0x00000776},
391 {0x0000a1dc, 0x00000776},
392 {0x0000a1e0, 0x00000776},
393 {0x0000a1e4, 0x00000776},
394 {0x0000a1e8, 0x00000776},
395 {0x0000a1ec, 0x00000776},
396 {0x0000a1f0, 0x00000776},
397 {0x0000a1f4, 0x00000776},
398 {0x0000a1f8, 0x00000776},
399 {0x0000a1fc, 0x00000776},
400 {0x0000b000, 0x02000101},
401 {0x0000b004, 0x02000102},
402 {0x0000b008, 0x02000103},
403 {0x0000b00c, 0x02000104},
404 {0x0000b010, 0x02000200},
405 {0x0000b014, 0x02000201},
406 {0x0000b018, 0x02000202},
407 {0x0000b01c, 0x02000203},
408 {0x0000b020, 0x02000204},
409 {0x0000b024, 0x02000205},
410 {0x0000b028, 0x02000208},
411 {0x0000b02c, 0x02000302},
412 {0x0000b030, 0x02000303},
413 {0x0000b034, 0x02000304},
414 {0x0000b038, 0x02000400},
415 {0x0000b03c, 0x02010300},
416 {0x0000b040, 0x02010301},
417 {0x0000b044, 0x02010302},
418 {0x0000b048, 0x02000500},
419 {0x0000b04c, 0x02010400},
420 {0x0000b050, 0x02020300},
421 {0x0000b054, 0x02020301},
422 {0x0000b058, 0x02020302},
423 {0x0000b05c, 0x02020303},
424 {0x0000b060, 0x02020400},
425 {0x0000b064, 0x02030300},
426 {0x0000b068, 0x02030301},
427 {0x0000b06c, 0x02030302},
428 {0x0000b070, 0x02030303},
429 {0x0000b074, 0x02030400},
430 {0x0000b078, 0x02040300},
431 {0x0000b07c, 0x02040301},
432 {0x0000b080, 0x02040302},
433 {0x0000b084, 0x02040303},
434 {0x0000b088, 0x02030500},
435 {0x0000b08c, 0x02040400},
436 {0x0000b090, 0x02050203},
437 {0x0000b094, 0x02050204},
438 {0x0000b098, 0x02050205},
439 {0x0000b09c, 0x02040500},
440 {0x0000b0a0, 0x02050301},
441 {0x0000b0a4, 0x02050302},
442 {0x0000b0a8, 0x02050303},
443 {0x0000b0ac, 0x02050400},
444 {0x0000b0b0, 0x02050401},
445 {0x0000b0b4, 0x02050402},
446 {0x0000b0b8, 0x02050403},
447 {0x0000b0bc, 0x02050500},
448 {0x0000b0c0, 0x02050501},
449 {0x0000b0c4, 0x02050502},
450 {0x0000b0c8, 0x02050503},
451 {0x0000b0cc, 0x02050504},
452 {0x0000b0d0, 0x02050600},
453 {0x0000b0d4, 0x02050601},
454 {0x0000b0d8, 0x02050602},
455 {0x0000b0dc, 0x02050603},
456 {0x0000b0e0, 0x02050604},
457 {0x0000b0e4, 0x02050700},
458 {0x0000b0e8, 0x02050701},
459 {0x0000b0ec, 0x02050702},
460 {0x0000b0f0, 0x02050703},
461 {0x0000b0f4, 0x02050704},
462 {0x0000b0f8, 0x02050705},
463 {0x0000b0fc, 0x02050708},
464 {0x0000b100, 0x02050709},
465 {0x0000b104, 0x0205070a},
466 {0x0000b108, 0x0205070b},
467 {0x0000b10c, 0x0205070c},
468 {0x0000b110, 0x0205070d},
469 {0x0000b114, 0x02050710},
470 {0x0000b118, 0x02050711},
471 {0x0000b11c, 0x02050712},
472 {0x0000b120, 0x02050713},
473 {0x0000b124, 0x02050714},
474 {0x0000b128, 0x02050715},
475 {0x0000b12c, 0x02050730},
476 {0x0000b130, 0x02050731},
477 {0x0000b134, 0x02050732},
478 {0x0000b138, 0x02050733},
479 {0x0000b13c, 0x02050734},
480 {0x0000b140, 0x02050735},
481 {0x0000b144, 0x02050750},
482 {0x0000b148, 0x02050751},
483 {0x0000b14c, 0x02050752},
484 {0x0000b150, 0x02050753},
485 {0x0000b154, 0x02050754},
486 {0x0000b158, 0x02050755},
487 {0x0000b15c, 0x02050770},
488 {0x0000b160, 0x02050771},
489 {0x0000b164, 0x02050772},
490 {0x0000b168, 0x02050773},
491 {0x0000b16c, 0x02050774},
492 {0x0000b170, 0x02050775},
493 {0x0000b174, 0x00000776},
494 {0x0000b178, 0x00000776},
495 {0x0000b17c, 0x00000776},
496 {0x0000b180, 0x00000776},
497 {0x0000b184, 0x00000776},
498 {0x0000b188, 0x00000776},
499 {0x0000b18c, 0x00000776},
500 {0x0000b190, 0x00000776},
501 {0x0000b194, 0x00000776},
502 {0x0000b198, 0x00000776},
503 {0x0000b19c, 0x00000776},
504 {0x0000b1a0, 0x00000776},
505 {0x0000b1a4, 0x00000776},
506 {0x0000b1a8, 0x00000776},
507 {0x0000b1ac, 0x00000776},
508 {0x0000b1b0, 0x00000776},
509 {0x0000b1b4, 0x00000776},
510 {0x0000b1b8, 0x00000776},
511 {0x0000b1bc, 0x00000776},
512 {0x0000b1c0, 0x00000776},
513 {0x0000b1c4, 0x00000776},
514 {0x0000b1c8, 0x00000776},
515 {0x0000b1cc, 0x00000776},
516 {0x0000b1d0, 0x00000776},
517 {0x0000b1d4, 0x00000776},
518 {0x0000b1d8, 0x00000776},
519 {0x0000b1dc, 0x00000776},
520 {0x0000b1e0, 0x00000776},
521 {0x0000b1e4, 0x00000776},
522 {0x0000b1e8, 0x00000776},
523 {0x0000b1ec, 0x00000776},
524 {0x0000b1f0, 0x00000776},
525 {0x0000b1f4, 0x00000776},
526 {0x0000b1f8, 0x00000776},
527 {0x0000b1fc, 0x00000776},
528};
529
/*
 * MAC postamble register table for AR9300 chip rev 2.2.
 * Each row is {register address, value, value, value, value} with one
 * value per channel mode — 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 — as
 * given by the column-header comment below.
 * NOTE(review): presumably written to hardware during mode/channel
 * initialization by the driver's INI-loading path — confirm against the
 * ar9003 init code. Vendor-supplied constants; do not hand-edit values.
 */
530static const u32 ar9300_2p2_mac_postamble[][5] = {
531 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
532 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
533 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
534 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
535 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
536 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
537 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
538 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
539 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
540};
541
/*
 * SOC postamble register table for AR9300 chip rev 2.2: a single
 * {register address, per-mode values} row (5G_HT20, 5G_HT40, 2G_HT40,
 * 2G_HT20 columns). Here all four modes share the same value.
 * Vendor-supplied constant; do not hand-edit.
 */
542static const u32 ar9300_2p2_soc_postamble[][5] = {
543 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
544 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
545};
546
/*
 * Radio-core register table (Merlin, AR92xx-family) used with the
 * AR9300 2.2 init sequence: {register address, value} pairs applied
 * identically in all operating modes (see "allmodes" column header).
 * Vendor-supplied RF calibration constants; do not hand-edit values.
 */
547static const u32 ar9200_merlin_2p2_radio_core[][2] = {
548 /* Addr allmodes */
549 {0x00007800, 0x00040000},
550 {0x00007804, 0xdb005012},
551 {0x00007808, 0x04924914},
552 {0x0000780c, 0x21084210},
553 {0x00007810, 0x6d801300},
554 {0x00007814, 0x0019beff},
555 {0x00007818, 0x07e41000},
556 {0x0000781c, 0x00392000},
557 {0x00007820, 0x92592480},
558 {0x00007824, 0x00040000},
559 {0x00007828, 0xdb005012},
560 {0x0000782c, 0x04924914},
561 {0x00007830, 0x21084210},
562 {0x00007834, 0x6d801300},
563 {0x00007838, 0x0019beff},
564 {0x0000783c, 0x07e40000},
565 {0x00007840, 0x00392000},
566 {0x00007844, 0x92592480},
567 {0x00007848, 0x00100000},
568 {0x0000784c, 0x773f0567},
569 {0x00007850, 0x54214514},
570 {0x00007854, 0x12035828},
571 {0x00007858, 0x92592692},
572 {0x0000785c, 0x00000000},
573 {0x00007860, 0x56400000},
574 {0x00007864, 0x0a8e370e},
575 {0x00007868, 0xc0102850},
576 {0x0000786c, 0x812d4000},
577 {0x00007870, 0x807ec400},
578 {0x00007874, 0x001b6db0},
579 {0x00007878, 0x00376b63},
580 {0x0000787c, 0x06db6db6},
581 {0x00007880, 0x006d8000},
582 {0x00007884, 0xffeffffe},
583 {0x00007888, 0xffeffffe},
584 {0x0000788c, 0x00010000},
585 {0x00007890, 0x02060aeb},
586 {0x00007894, 0x5a108000},
587};
588
/*
 * Baseband postamble register table for AR9300 chip rev 2.2.
 * Each row is {register address, value per mode} with columns for
 * 5G_HT20, 5G_HT40, 2G_HT40 and 2G_HT20 (see header comment below).
 * NOTE(review): presumably applied after the baseband core table during
 * mode initialization — confirm against the ar9003 INI-loading code.
 * Vendor-supplied constants; do not hand-edit values.
 */
589static const u32 ar9300_2p2_baseband_postamble[][5] = {
590 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
591 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
592 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
593 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
594 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
595 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
596 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
597 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
598 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
599 {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
600 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
601 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
602 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
603 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
604 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
605 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
606 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
607 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
608 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
609 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
610 {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
611 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
612 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
613 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
614 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
615 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
616 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
617 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
618 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
619 {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
620 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
621 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
622 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
623 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
624 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
625 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
626 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
627 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
628 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
629 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
630 {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
631 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
632 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
633 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
634 {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
635 {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
636 {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
637 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
638 {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
639 {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
640 {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
641};
642
/*
 * Baseband core register table for AR9300 chip rev 2.2:
 * {register address, value} pairs applied identically in all modes
 * (see "allmodes" column header below). Parallel register groups at
 * 0xa2xx/0xb2xx/0xc2xx and 0xa4xx/0xb4xx/0xc4xx repeat the same value
 * sequence — presumably one group per chain; confirm against the chip
 * register map. Vendor-supplied constants; do not hand-edit values.
 */
643static const u32 ar9300_2p2_baseband_core[][2] = {
644 /* Addr allmodes */
645 {0x00009800, 0xafe68e30},
646 {0x00009804, 0xfd14e000},
647 {0x00009808, 0x9c0a9f6b},
648 {0x0000980c, 0x04900000},
649 {0x00009814, 0x9280c00a},
650 {0x00009818, 0x00000000},
651 {0x0000981c, 0x00020028},
652 {0x00009834, 0x5f3ca3de},
653 {0x00009838, 0x0108ecff},
654 {0x0000983c, 0x14750600},
655 {0x00009880, 0x201fff00},
656 {0x00009884, 0x00001042},
657 {0x000098a4, 0x00200400},
658 {0x000098b0, 0x52440bbe},
659 {0x000098d0, 0x004b6a8e},
660 {0x000098d4, 0x00000820},
661 {0x000098dc, 0x00000000},
662 {0x000098f0, 0x00000000},
663 {0x000098f4, 0x00000000},
664 {0x00009c04, 0xff55ff55},
665 {0x00009c08, 0x0320ff55},
666 {0x00009c0c, 0x00000000},
667 {0x00009c10, 0x00000000},
668 {0x00009c14, 0x00046384},
669 {0x00009c18, 0x05b6b440},
670 {0x00009c1c, 0x00b6b440},
671 {0x00009d00, 0xc080a333},
672 {0x00009d04, 0x40206c10},
673 {0x00009d08, 0x009c4060},
674 {0x00009d0c, 0x9883800a},
675 {0x00009d10, 0x01834061},
676 {0x00009d14, 0x00c0040b},
677 {0x00009d18, 0x00000000},
678 {0x00009e08, 0x0038230c},
679 {0x00009e24, 0x990bb515},
680 {0x00009e28, 0x0c6f0000},
681 {0x00009e30, 0x06336f77},
682 {0x00009e34, 0x6af6532f},
683 {0x00009e38, 0x0cc80c00},
684 {0x00009e3c, 0xcf946222},
685 {0x00009e40, 0x0d261820},
686 {0x00009e4c, 0x00001004},
687 {0x00009e50, 0x00ff03f1},
688 {0x00009e54, 0x00000000},
689 {0x00009fc0, 0x803e4788},
690 {0x00009fc4, 0x0001efb5},
691 {0x00009fcc, 0x40000014},
692 {0x00009fd0, 0x01193b93},
693 {0x0000a20c, 0x00000000},
694 {0x0000a220, 0x00000000},
695 {0x0000a224, 0x00000000},
696 {0x0000a228, 0x10002310},
697 {0x0000a22c, 0x01036a1e},
698 {0x0000a23c, 0x00000000},
699 {0x0000a244, 0x0c000000},
700 {0x0000a2a0, 0x00000001},
701 {0x0000a2c0, 0x00000001},
702 {0x0000a2c8, 0x00000000},
703 {0x0000a2cc, 0x18c43433},
704 {0x0000a2d4, 0x00000000},
705 {0x0000a2dc, 0x00000000},
706 {0x0000a2e0, 0x00000000},
707 {0x0000a2e4, 0x00000000},
708 {0x0000a2e8, 0x00000000},
709 {0x0000a2ec, 0x00000000},
710 {0x0000a2f0, 0x00000000},
711 {0x0000a2f4, 0x00000000},
712 {0x0000a2f8, 0x00000000},
713 {0x0000a344, 0x00000000},
714 {0x0000a34c, 0x00000000},
715 {0x0000a350, 0x0000a000},
716 {0x0000a364, 0x00000000},
717 {0x0000a370, 0x00000000},
718 {0x0000a390, 0x00000001},
719 {0x0000a394, 0x00000444},
720 {0x0000a398, 0x001f0e0f},
721 {0x0000a39c, 0x0075393f},
722 {0x0000a3a0, 0xb79f6427},
723 {0x0000a3a4, 0x00000000},
724 {0x0000a3a8, 0xaaaaaaaa},
725 {0x0000a3ac, 0x3c466478},
726 {0x0000a3c0, 0x20202020},
727 {0x0000a3c4, 0x22222220},
728 {0x0000a3c8, 0x20200020},
729 {0x0000a3cc, 0x20202020},
730 {0x0000a3d0, 0x20202020},
731 {0x0000a3d4, 0x20202020},
732 {0x0000a3d8, 0x20202020},
733 {0x0000a3dc, 0x20202020},
734 {0x0000a3e0, 0x20202020},
735 {0x0000a3e4, 0x20202020},
736 {0x0000a3e8, 0x20202020},
737 {0x0000a3ec, 0x20202020},
738 {0x0000a3f0, 0x00000000},
739 {0x0000a3f4, 0x00000246},
740 {0x0000a3f8, 0x0cdbd380},
741 {0x0000a3fc, 0x000f0f01},
742 {0x0000a400, 0x8fa91f01},
743 {0x0000a404, 0x00000000},
744 {0x0000a408, 0x0e79e5c6},
745 {0x0000a40c, 0x00820820},
746 {0x0000a414, 0x1ce739ce},
747 {0x0000a418, 0x2d001dce},
748 {0x0000a41c, 0x1ce739ce},
749 {0x0000a420, 0x000001ce},
750 {0x0000a424, 0x1ce739ce},
751 {0x0000a428, 0x000001ce},
752 {0x0000a42c, 0x1ce739ce},
753 {0x0000a430, 0x1ce739ce},
754 {0x0000a434, 0x00000000},
755 {0x0000a438, 0x00001801},
756 {0x0000a43c, 0x00000000},
757 {0x0000a440, 0x00000000},
758 {0x0000a444, 0x00000000},
759 {0x0000a448, 0x06000080},
760 {0x0000a44c, 0x00000001},
761 {0x0000a450, 0x00010000},
762 {0x0000a458, 0x00000000},
763 {0x0000a600, 0x00000000},
764 {0x0000a604, 0x00000000},
765 {0x0000a608, 0x00000000},
766 {0x0000a60c, 0x00000000},
767 {0x0000a610, 0x00000000},
768 {0x0000a614, 0x00000000},
769 {0x0000a618, 0x00000000},
770 {0x0000a61c, 0x00000000},
771 {0x0000a620, 0x00000000},
772 {0x0000a624, 0x00000000},
773 {0x0000a628, 0x00000000},
774 {0x0000a62c, 0x00000000},
775 {0x0000a630, 0x00000000},
776 {0x0000a634, 0x00000000},
777 {0x0000a638, 0x00000000},
778 {0x0000a63c, 0x00000000},
779 {0x0000a640, 0x00000000},
780 {0x0000a644, 0x3fad9d74},
781 {0x0000a648, 0x0048060a},
782 {0x0000a64c, 0x00000637},
783 {0x0000a670, 0x03020100},
784 {0x0000a674, 0x09080504},
785 {0x0000a678, 0x0d0c0b0a},
786 {0x0000a67c, 0x13121110},
787 {0x0000a680, 0x31301514},
788 {0x0000a684, 0x35343332},
789 {0x0000a688, 0x00000036},
790 {0x0000a690, 0x00000838},
791 {0x0000a7c0, 0x00000000},
792 {0x0000a7c4, 0xfffffffc},
793 {0x0000a7c8, 0x00000000},
794 {0x0000a7cc, 0x00000000},
795 {0x0000a7d0, 0x00000000},
796 {0x0000a7d4, 0x00000004},
797 {0x0000a7dc, 0x00000001},
798 {0x0000a8d0, 0x004b6a8e},
799 {0x0000a8d4, 0x00000820},
800 {0x0000a8dc, 0x00000000},
801 {0x0000a8f0, 0x00000000},
802 {0x0000a8f4, 0x00000000},
803 {0x0000b2d0, 0x00000080},
804 {0x0000b2d4, 0x00000000},
805 {0x0000b2dc, 0x00000000},
806 {0x0000b2e0, 0x00000000},
807 {0x0000b2e4, 0x00000000},
808 {0x0000b2e8, 0x00000000},
809 {0x0000b2ec, 0x00000000},
810 {0x0000b2f0, 0x00000000},
811 {0x0000b2f4, 0x00000000},
812 {0x0000b2f8, 0x00000000},
813 {0x0000b408, 0x0e79e5c0},
814 {0x0000b40c, 0x00820820},
815 {0x0000b420, 0x00000000},
816 {0x0000b8d0, 0x004b6a8e},
817 {0x0000b8d4, 0x00000820},
818 {0x0000b8dc, 0x00000000},
819 {0x0000b8f0, 0x00000000},
820 {0x0000b8f4, 0x00000000},
821 {0x0000c2d0, 0x00000080},
822 {0x0000c2d4, 0x00000000},
823 {0x0000c2dc, 0x00000000},
824 {0x0000c2e0, 0x00000000},
825 {0x0000c2e4, 0x00000000},
826 {0x0000c2e8, 0x00000000},
827 {0x0000c2ec, 0x00000000},
828 {0x0000c2f0, 0x00000000},
829 {0x0000c2f4, 0x00000000},
830 {0x0000c2f8, 0x00000000},
831 {0x0000c408, 0x0e79e5c0},
832 {0x0000c40c, 0x00820820},
833 {0x0000c420, 0x00000000},
834};
835
/*
 * High-power TX gain table for AR9300 chip rev 2.2.
 * Rows are {register address, value per mode} with columns 5G_HT20,
 * 5G_HT40, 2G_HT40, 2G_HT20 (see header comment below). Register ranges
 * 0xa500-0xa57c and 0xa580-0xa5fc form two parallel gain ladders; the
 * trailing 0x160xx rows carry per-chain radio settings.
 * NOTE(review): differs from the high_ob_db variant below only in the
 * final 0x160xx rows — presumably selected per board config; confirm
 * against the ar9003 EEPROM/tx-gain selection code. Vendor-supplied
 * calibration data; do not hand-edit values.
 */
836static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
838 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
839 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
840 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
841 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
842 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
843 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
844 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
845 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
846 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
847 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
848 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
849 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
850 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
851 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
852 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
853 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
854 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
855 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
856 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
857 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
858 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
859 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
860 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
861 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
862 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
863 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
864 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
865 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
866 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
867 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
868 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
869 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
870 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
871 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
872 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
873 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
874 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
875 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
876 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
877 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
878 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
879 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
880 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
881 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
882 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
883 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
884 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
885 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
886 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
887 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
888 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
889 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
890 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
891 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
892 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
893 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
894 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
895 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
896 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
897 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
898 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
899 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
900 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
901 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
902 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
903 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
904 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
905 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
906 {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
907 {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
908 {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
909 {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
910 {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
911 {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
912};
913
/*
 * High output-bias / drive-bias ("ob_db") TX gain table for AR9300 chip
 * rev 2.2. Rows are {register address, value per mode} with columns
 * 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 (see header comment below).
 * The 0xa4xx/0xa5xx rows are identical to
 * ar9300Modes_high_power_tx_gain_table_2p2; only the trailing 0x160xx
 * radio rows differ (e.g. 0x8e480001 vs 0xae480001 at 0x16048).
 * NOTE(review): presumably selected per board/EEPROM tx-gain type —
 * confirm against the ar9003 gain-table selection code.
 * Vendor-supplied calibration data; do not hand-edit values.
 */
914static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
915 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
916 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
917 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
918 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
919 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
920 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
921 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
922 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
923 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
924 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
925 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
926 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
927 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
928 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
929 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
930 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
931 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
932 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
933 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
934 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
935 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
936 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
937 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
938 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
939 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
940 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
941 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
942 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
943 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
944 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
945 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
946 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
947 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
948 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
949 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
950 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
951 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
952 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
953 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
954 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
955 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
956 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
957 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
958 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
959 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
960 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
961 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
962 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
963 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
964 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
965 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
966 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
967 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
968 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
969 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
970 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
971 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
972 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
973 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
974 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
975 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
976 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
977 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
978 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
979 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
980 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
981 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
982 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
983 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
984 {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
985 {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
986 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
987 {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
988 {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
989 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
990};
991
992static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
993 /* Addr allmodes */
994 {0x0000a000, 0x00010000},
995 {0x0000a004, 0x00030002},
996 {0x0000a008, 0x00050004},
997 {0x0000a00c, 0x00810080},
998 {0x0000a010, 0x00830082},
999 {0x0000a014, 0x01810180},
1000 {0x0000a018, 0x01830182},
1001 {0x0000a01c, 0x01850184},
1002 {0x0000a020, 0x01890188},
1003 {0x0000a024, 0x018b018a},
1004 {0x0000a028, 0x018d018c},
1005 {0x0000a02c, 0x01910190},
1006 {0x0000a030, 0x01930192},
1007 {0x0000a034, 0x01950194},
1008 {0x0000a038, 0x038a0196},
1009 {0x0000a03c, 0x038c038b},
1010 {0x0000a040, 0x0390038d},
1011 {0x0000a044, 0x03920391},
1012 {0x0000a048, 0x03940393},
1013 {0x0000a04c, 0x03960395},
1014 {0x0000a050, 0x00000000},
1015 {0x0000a054, 0x00000000},
1016 {0x0000a058, 0x00000000},
1017 {0x0000a05c, 0x00000000},
1018 {0x0000a060, 0x00000000},
1019 {0x0000a064, 0x00000000},
1020 {0x0000a068, 0x00000000},
1021 {0x0000a06c, 0x00000000},
1022 {0x0000a070, 0x00000000},
1023 {0x0000a074, 0x00000000},
1024 {0x0000a078, 0x00000000},
1025 {0x0000a07c, 0x00000000},
1026 {0x0000a080, 0x22222229},
1027 {0x0000a084, 0x1d1d1d1d},
1028 {0x0000a088, 0x1d1d1d1d},
1029 {0x0000a08c, 0x1d1d1d1d},
1030 {0x0000a090, 0x171d1d1d},
1031 {0x0000a094, 0x11111717},
1032 {0x0000a098, 0x00030311},
1033 {0x0000a09c, 0x00000000},
1034 {0x0000a0a0, 0x00000000},
1035 {0x0000a0a4, 0x00000000},
1036 {0x0000a0a8, 0x00000000},
1037 {0x0000a0ac, 0x00000000},
1038 {0x0000a0b0, 0x00000000},
1039 {0x0000a0b4, 0x00000000},
1040 {0x0000a0b8, 0x00000000},
1041 {0x0000a0bc, 0x00000000},
1042 {0x0000a0c0, 0x001f0000},
1043 {0x0000a0c4, 0x01000101},
1044 {0x0000a0c8, 0x011e011f},
1045 {0x0000a0cc, 0x011c011d},
1046 {0x0000a0d0, 0x02030204},
1047 {0x0000a0d4, 0x02010202},
1048 {0x0000a0d8, 0x021f0200},
1049 {0x0000a0dc, 0x0302021e},
1050 {0x0000a0e0, 0x03000301},
1051 {0x0000a0e4, 0x031e031f},
1052 {0x0000a0e8, 0x0402031d},
1053 {0x0000a0ec, 0x04000401},
1054 {0x0000a0f0, 0x041e041f},
1055 {0x0000a0f4, 0x0502041d},
1056 {0x0000a0f8, 0x05000501},
1057 {0x0000a0fc, 0x051e051f},
1058 {0x0000a100, 0x06010602},
1059 {0x0000a104, 0x061f0600},
1060 {0x0000a108, 0x061d061e},
1061 {0x0000a10c, 0x07020703},
1062 {0x0000a110, 0x07000701},
1063 {0x0000a114, 0x00000000},
1064 {0x0000a118, 0x00000000},
1065 {0x0000a11c, 0x00000000},
1066 {0x0000a120, 0x00000000},
1067 {0x0000a124, 0x00000000},
1068 {0x0000a128, 0x00000000},
1069 {0x0000a12c, 0x00000000},
1070 {0x0000a130, 0x00000000},
1071 {0x0000a134, 0x00000000},
1072 {0x0000a138, 0x00000000},
1073 {0x0000a13c, 0x00000000},
1074 {0x0000a140, 0x001f0000},
1075 {0x0000a144, 0x01000101},
1076 {0x0000a148, 0x011e011f},
1077 {0x0000a14c, 0x011c011d},
1078 {0x0000a150, 0x02030204},
1079 {0x0000a154, 0x02010202},
1080 {0x0000a158, 0x021f0200},
1081 {0x0000a15c, 0x0302021e},
1082 {0x0000a160, 0x03000301},
1083 {0x0000a164, 0x031e031f},
1084 {0x0000a168, 0x0402031d},
1085 {0x0000a16c, 0x04000401},
1086 {0x0000a170, 0x041e041f},
1087 {0x0000a174, 0x0502041d},
1088 {0x0000a178, 0x05000501},
1089 {0x0000a17c, 0x051e051f},
1090 {0x0000a180, 0x06010602},
1091 {0x0000a184, 0x061f0600},
1092 {0x0000a188, 0x061d061e},
1093 {0x0000a18c, 0x07020703},
1094 {0x0000a190, 0x07000701},
1095 {0x0000a194, 0x00000000},
1096 {0x0000a198, 0x00000000},
1097 {0x0000a19c, 0x00000000},
1098 {0x0000a1a0, 0x00000000},
1099 {0x0000a1a4, 0x00000000},
1100 {0x0000a1a8, 0x00000000},
1101 {0x0000a1ac, 0x00000000},
1102 {0x0000a1b0, 0x00000000},
1103 {0x0000a1b4, 0x00000000},
1104 {0x0000a1b8, 0x00000000},
1105 {0x0000a1bc, 0x00000000},
1106 {0x0000a1c0, 0x00000000},
1107 {0x0000a1c4, 0x00000000},
1108 {0x0000a1c8, 0x00000000},
1109 {0x0000a1cc, 0x00000000},
1110 {0x0000a1d0, 0x00000000},
1111 {0x0000a1d4, 0x00000000},
1112 {0x0000a1d8, 0x00000000},
1113 {0x0000a1dc, 0x00000000},
1114 {0x0000a1e0, 0x00000000},
1115 {0x0000a1e4, 0x00000000},
1116 {0x0000a1e8, 0x00000000},
1117 {0x0000a1ec, 0x00000000},
1118 {0x0000a1f0, 0x00000396},
1119 {0x0000a1f4, 0x00000396},
1120 {0x0000a1f8, 0x00000396},
1121 {0x0000a1fc, 0x00000196},
1122 {0x0000b000, 0x00010000},
1123 {0x0000b004, 0x00030002},
1124 {0x0000b008, 0x00050004},
1125 {0x0000b00c, 0x00810080},
1126 {0x0000b010, 0x00830082},
1127 {0x0000b014, 0x01810180},
1128 {0x0000b018, 0x01830182},
1129 {0x0000b01c, 0x01850184},
1130 {0x0000b020, 0x02810280},
1131 {0x0000b024, 0x02830282},
1132 {0x0000b028, 0x02850284},
1133 {0x0000b02c, 0x02890288},
1134 {0x0000b030, 0x028b028a},
1135 {0x0000b034, 0x0388028c},
1136 {0x0000b038, 0x038a0389},
1137 {0x0000b03c, 0x038c038b},
1138 {0x0000b040, 0x0390038d},
1139 {0x0000b044, 0x03920391},
1140 {0x0000b048, 0x03940393},
1141 {0x0000b04c, 0x03960395},
1142 {0x0000b050, 0x00000000},
1143 {0x0000b054, 0x00000000},
1144 {0x0000b058, 0x00000000},
1145 {0x0000b05c, 0x00000000},
1146 {0x0000b060, 0x00000000},
1147 {0x0000b064, 0x00000000},
1148 {0x0000b068, 0x00000000},
1149 {0x0000b06c, 0x00000000},
1150 {0x0000b070, 0x00000000},
1151 {0x0000b074, 0x00000000},
1152 {0x0000b078, 0x00000000},
1153 {0x0000b07c, 0x00000000},
1154 {0x0000b080, 0x32323232},
1155 {0x0000b084, 0x2f2f3232},
1156 {0x0000b088, 0x23282a2d},
1157 {0x0000b08c, 0x1c1e2123},
1158 {0x0000b090, 0x14171919},
1159 {0x0000b094, 0x0e0e1214},
1160 {0x0000b098, 0x03050707},
1161 {0x0000b09c, 0x00030303},
1162 {0x0000b0a0, 0x00000000},
1163 {0x0000b0a4, 0x00000000},
1164 {0x0000b0a8, 0x00000000},
1165 {0x0000b0ac, 0x00000000},
1166 {0x0000b0b0, 0x00000000},
1167 {0x0000b0b4, 0x00000000},
1168 {0x0000b0b8, 0x00000000},
1169 {0x0000b0bc, 0x00000000},
1170 {0x0000b0c0, 0x003f0020},
1171 {0x0000b0c4, 0x00400041},
1172 {0x0000b0c8, 0x0140005f},
1173 {0x0000b0cc, 0x0160015f},
1174 {0x0000b0d0, 0x017e017f},
1175 {0x0000b0d4, 0x02410242},
1176 {0x0000b0d8, 0x025f0240},
1177 {0x0000b0dc, 0x027f0260},
1178 {0x0000b0e0, 0x0341027e},
1179 {0x0000b0e4, 0x035f0340},
1180 {0x0000b0e8, 0x037f0360},
1181 {0x0000b0ec, 0x04400441},
1182 {0x0000b0f0, 0x0460045f},
1183 {0x0000b0f4, 0x0541047f},
1184 {0x0000b0f8, 0x055f0540},
1185 {0x0000b0fc, 0x057f0560},
1186 {0x0000b100, 0x06400641},
1187 {0x0000b104, 0x0660065f},
1188 {0x0000b108, 0x067e067f},
1189 {0x0000b10c, 0x07410742},
1190 {0x0000b110, 0x075f0740},
1191 {0x0000b114, 0x077f0760},
1192 {0x0000b118, 0x07800781},
1193 {0x0000b11c, 0x07a0079f},
1194 {0x0000b120, 0x07c107bf},
1195 {0x0000b124, 0x000007c0},
1196 {0x0000b128, 0x00000000},
1197 {0x0000b12c, 0x00000000},
1198 {0x0000b130, 0x00000000},
1199 {0x0000b134, 0x00000000},
1200 {0x0000b138, 0x00000000},
1201 {0x0000b13c, 0x00000000},
1202 {0x0000b140, 0x003f0020},
1203 {0x0000b144, 0x00400041},
1204 {0x0000b148, 0x0140005f},
1205 {0x0000b14c, 0x0160015f},
1206 {0x0000b150, 0x017e017f},
1207 {0x0000b154, 0x02410242},
1208 {0x0000b158, 0x025f0240},
1209 {0x0000b15c, 0x027f0260},
1210 {0x0000b160, 0x0341027e},
1211 {0x0000b164, 0x035f0340},
1212 {0x0000b168, 0x037f0360},
1213 {0x0000b16c, 0x04400441},
1214 {0x0000b170, 0x0460045f},
1215 {0x0000b174, 0x0541047f},
1216 {0x0000b178, 0x055f0540},
1217 {0x0000b17c, 0x057f0560},
1218 {0x0000b180, 0x06400641},
1219 {0x0000b184, 0x0660065f},
1220 {0x0000b188, 0x067e067f},
1221 {0x0000b18c, 0x07410742},
1222 {0x0000b190, 0x075f0740},
1223 {0x0000b194, 0x077f0760},
1224 {0x0000b198, 0x07800781},
1225 {0x0000b19c, 0x07a0079f},
1226 {0x0000b1a0, 0x07c107bf},
1227 {0x0000b1a4, 0x000007c0},
1228 {0x0000b1a8, 0x00000000},
1229 {0x0000b1ac, 0x00000000},
1230 {0x0000b1b0, 0x00000000},
1231 {0x0000b1b4, 0x00000000},
1232 {0x0000b1b8, 0x00000000},
1233 {0x0000b1bc, 0x00000000},
1234 {0x0000b1c0, 0x00000000},
1235 {0x0000b1c4, 0x00000000},
1236 {0x0000b1c8, 0x00000000},
1237 {0x0000b1cc, 0x00000000},
1238 {0x0000b1d0, 0x00000000},
1239 {0x0000b1d4, 0x00000000},
1240 {0x0000b1d8, 0x00000000},
1241 {0x0000b1dc, 0x00000000},
1242 {0x0000b1e0, 0x00000000},
1243 {0x0000b1e4, 0x00000000},
1244 {0x0000b1e8, 0x00000000},
1245 {0x0000b1ec, 0x00000000},
1246 {0x0000b1f0, 0x00000396},
1247 {0x0000b1f4, 0x00000396},
1248 {0x0000b1f8, 0x00000396},
1249 {0x0000b1fc, 0x00000196},
1250};
1251
/*
 * AR9300 chip rev 2.2: TX gain initvals for the "low OB/DB" (output
 * bias / driver bias) gain configuration.  Selected at runtime by
 * ar9003_tx_gain_table_apply() when the EEPROM TX gain index is 2.
 * Each row is { register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 }.
 * 0xa4xx/0xa5xx rows are per-rate gain entries; the 0x16xxx rows
 * program the radio.  Vendor-supplied values -- do not edit by hand.
 */
static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
	/* entries below saturate at the maximum gain value */
	{0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
	{0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
	{0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
	{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
	{0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
	{0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
	{0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
	{0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
	{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
	{0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
	{0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
	{0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
	{0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
	{0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
	{0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
	{0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
	{0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
	{0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
	{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
	/* radio-side settings, repeated per chain (0x160xx/0x164xx/0x168xx) */
	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
1329
/*
 * AR9300 chip rev 2.2: MAC core register initvals.  Single value
 * column applied in all operating modes; rows are { address, value }.
 * Loaded into ah->iniMac[ATH_INI_CORE] by the 2.2 init-mode-regs path.
 * Vendor-supplied values -- do not edit by hand.
 */
static const u32 ar9300_2p2_mac_core[][2] = {
	/* Addr      allmodes */
	{0x00000008, 0x00000000},
	{0x00000030, 0x00020085},
	{0x00000034, 0x00000005},
	{0x00000040, 0x00000000},
	{0x00000044, 0x00000000},
	{0x00000048, 0x00000008},
	{0x0000004c, 0x00000010},
	{0x00000050, 0x00000000},
	{0x00001040, 0x002ffc0f},
	{0x00001044, 0x002ffc0f},
	{0x00001048, 0x002ffc0f},
	{0x0000104c, 0x002ffc0f},
	{0x00001050, 0x002ffc0f},
	{0x00001054, 0x002ffc0f},
	{0x00001058, 0x002ffc0f},
	{0x0000105c, 0x002ffc0f},
	{0x00001060, 0x002ffc0f},
	{0x00001064, 0x002ffc0f},
	{0x000010f0, 0x00000100},
	{0x00001270, 0x00000000},
	{0x000012b0, 0x00000000},
	{0x000012f0, 0x00000000},
	{0x0000143c, 0x00000000},
	{0x0000147c, 0x00000000},
	{0x00008000, 0x00000000},
	{0x00008004, 0x00000000},
	{0x00008008, 0x00000000},
	{0x0000800c, 0x00000000},
	{0x00008018, 0x00000000},
	{0x00008020, 0x00000000},
	{0x00008038, 0x00000000},
	{0x0000803c, 0x00000000},
	{0x00008040, 0x00000000},
	{0x00008044, 0x00000000},
	{0x00008048, 0x00000000},
	{0x0000804c, 0xffffffff},
	{0x00008054, 0x00000000},
	{0x00008058, 0x00000000},
	{0x0000805c, 0x000fc78f},
	{0x00008060, 0x0000000f},
	{0x00008064, 0x00000000},
	{0x00008070, 0x00000310},
	{0x00008074, 0x00000020},
	{0x00008078, 0x00000000},
	{0x0000809c, 0x0000000f},
	{0x000080a0, 0x00000000},
	{0x000080a4, 0x02ff0000},
	{0x000080a8, 0x0e070605},
	{0x000080ac, 0x0000000d},
	{0x000080b0, 0x00000000},
	{0x000080b4, 0x00000000},
	{0x000080b8, 0x00000000},
	{0x000080bc, 0x00000000},
	{0x000080c0, 0x2a800000},
	{0x000080c4, 0x06900168},
	{0x000080c8, 0x13881c20},
	{0x000080cc, 0x01f40000},
	{0x000080d0, 0x00252500},
	{0x000080d4, 0x00a00000},
	{0x000080d8, 0x00400000},
	{0x000080dc, 0x00000000},
	{0x000080e0, 0xffffffff},
	{0x000080e4, 0x0000ffff},
	{0x000080e8, 0x3f3f3f3f},
	{0x000080ec, 0x00000000},
	{0x000080f0, 0x00000000},
	{0x000080f4, 0x00000000},
	{0x000080fc, 0x00020000},
	{0x00008100, 0x00000000},
	{0x00008108, 0x00000052},
	{0x0000810c, 0x00000000},
	{0x00008110, 0x00000000},
	{0x00008114, 0x000007ff},
	{0x00008118, 0x000000aa},
	{0x0000811c, 0x00003210},
	{0x00008124, 0x00000000},
	{0x00008128, 0x00000000},
	{0x0000812c, 0x00000000},
	{0x00008130, 0x00000000},
	{0x00008134, 0x00000000},
	{0x00008138, 0x00000000},
	{0x0000813c, 0x0000ffff},
	{0x00008144, 0xffffffff},
	{0x00008168, 0x00000000},
	{0x0000816c, 0x00000000},
	{0x00008170, 0x18486200},
	{0x00008174, 0x33332210},
	{0x00008178, 0x00000000},
	{0x0000817c, 0x00020000},
	{0x000081c0, 0x00000000},
	{0x000081c4, 0x33332210},
	{0x000081c8, 0x00000000},
	{0x000081cc, 0x00000000},
	{0x000081d4, 0x00000000},
	{0x000081ec, 0x00000000},
	{0x000081f0, 0x00000000},
	{0x000081f4, 0x00000000},
	{0x000081f8, 0x00000000},
	{0x000081fc, 0x00000000},
	{0x00008240, 0x00100000},
	{0x00008244, 0x0010f424},
	{0x00008248, 0x00000800},
	{0x0000824c, 0x0001e848},
	{0x00008250, 0x00000000},
	{0x00008254, 0x00000000},
	{0x00008258, 0x00000000},
	{0x0000825c, 0x40000000},
	{0x00008260, 0x00080922},
	{0x00008264, 0x9bc00010},
	{0x00008268, 0xffffffff},
	{0x0000826c, 0x0000ffff},
	{0x00008270, 0x00000000},
	{0x00008274, 0x40000000},
	{0x00008278, 0x003e4180},
	{0x0000827c, 0x00000004},
	{0x00008284, 0x0000002c},
	{0x00008288, 0x0000002c},
	{0x0000828c, 0x000000ff},
	{0x00008294, 0x00000000},
	{0x00008298, 0x00000000},
	{0x0000829c, 0x00000000},
	{0x00008300, 0x00000140},
	{0x00008314, 0x00000000},
	{0x0000831c, 0x0000010d},
	{0x00008328, 0x00000000},
	{0x0000832c, 0x00000007},
	{0x00008330, 0x00000302},
	{0x00008334, 0x00000700},
	{0x00008338, 0x00ff0000},
	{0x0000833c, 0x02400000},
	{0x00008340, 0x000107ff},
	{0x00008344, 0xaa48105b},
	{0x00008348, 0x008f0000},
	{0x0000835c, 0x00000000},
	{0x00008360, 0xffffffff},
	{0x00008364, 0xffffffff},
	{0x00008368, 0x00000000},
	{0x00008370, 0x00000000},
	{0x00008374, 0x000000ff},
	{0x00008378, 0x00000000},
	{0x0000837c, 0x00000000},
	{0x00008380, 0xffffffff},
	{0x00008384, 0xffffffff},
	{0x00008390, 0xffffffff},
	{0x00008394, 0xffffffff},
	{0x00008398, 0x00000000},
	{0x0000839c, 0x00000000},
	{0x000083a0, 0x00000000},
	{0x000083a4, 0x0000fa14},
	{0x000083a8, 0x000f0c00},
	{0x000083ac, 0x33332210},
	{0x000083b0, 0x33332210},
	{0x000083b4, 0x33332210},
	{0x000083b8, 0x33332210},
	{0x000083bc, 0x00000000},
	{0x000083c0, 0x00000000},
	{0x000083c4, 0x00000000},
	{0x000083c8, 0x00000000},
	{0x000083cc, 0x00000200},
	{0x000083d0, 0x000301ff},
};
1493
/*
 * AR9300 chip rev 2.2: RX gain table for the "wo_xlna" variant
 * (presumably boards without an external LNA, per the name -- TODO
 * confirm against the EEPROM antenna/gain documentation).  Selected by
 * ar9003_rx_gain_table_apply() when the EEPROM RX gain index is 1.
 * Rows are { address, value }, one value for all modes; 0xa0xx/0xa1xx
 * and 0xb0xx/0xb1xx mirror the same gain ladder on two register banks.
 * Vendor-supplied values -- do not edit by hand.
 */
static const u32 ar9300Common_wo_xlna_rx_gain_table_2p2[][2] = {
	/* Addr      allmodes */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x00830082},
	{0x0000a014, 0x01810180},
	{0x0000a018, 0x01830182},
	{0x0000a01c, 0x01850184},
	{0x0000a020, 0x01890188},
	{0x0000a024, 0x018b018a},
	{0x0000a028, 0x018d018c},
	{0x0000a02c, 0x03820190},
	{0x0000a030, 0x03840383},
	{0x0000a034, 0x03880385},
	{0x0000a038, 0x038a0389},
	{0x0000a03c, 0x038c038b},
	{0x0000a040, 0x0390038d},
	{0x0000a044, 0x03920391},
	{0x0000a048, 0x03940393},
	{0x0000a04c, 0x03960395},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x29292929},
	{0x0000a084, 0x29292929},
	{0x0000a088, 0x29292929},
	{0x0000a08c, 0x29292929},
	{0x0000a090, 0x22292929},
	{0x0000a094, 0x1d1d2222},
	{0x0000a098, 0x0c111117},
	{0x0000a09c, 0x00030303},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x001f0000},
	{0x0000a0c4, 0x01000101},
	{0x0000a0c8, 0x011e011f},
	{0x0000a0cc, 0x011c011d},
	{0x0000a0d0, 0x02030204},
	{0x0000a0d4, 0x02010202},
	{0x0000a0d8, 0x021f0200},
	{0x0000a0dc, 0x0302021e},
	{0x0000a0e0, 0x03000301},
	{0x0000a0e4, 0x031e031f},
	{0x0000a0e8, 0x0402031d},
	{0x0000a0ec, 0x04000401},
	{0x0000a0f0, 0x041e041f},
	{0x0000a0f4, 0x0502041d},
	{0x0000a0f8, 0x05000501},
	{0x0000a0fc, 0x051e051f},
	{0x0000a100, 0x06010602},
	{0x0000a104, 0x061f0600},
	{0x0000a108, 0x061d061e},
	{0x0000a10c, 0x07020703},
	{0x0000a110, 0x07000701},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x001f0000},
	{0x0000a144, 0x01000101},
	{0x0000a148, 0x011e011f},
	{0x0000a14c, 0x011c011d},
	{0x0000a150, 0x02030204},
	{0x0000a154, 0x02010202},
	{0x0000a158, 0x021f0200},
	{0x0000a15c, 0x0302021e},
	{0x0000a160, 0x03000301},
	{0x0000a164, 0x031e031f},
	{0x0000a168, 0x0402031d},
	{0x0000a16c, 0x04000401},
	{0x0000a170, 0x041e041f},
	{0x0000a174, 0x0502041d},
	{0x0000a178, 0x05000501},
	{0x0000a17c, 0x051e051f},
	{0x0000a180, 0x06010602},
	{0x0000a184, 0x061f0600},
	{0x0000a188, 0x061d061e},
	{0x0000a18c, 0x07020703},
	{0x0000a190, 0x07000701},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
1753
/*
 * AR9300 chip rev 2.2: SoC register preamble, written before the core
 * init tables (loaded into ah->iniSOC[ATH_INI_PRE] by the 2.2
 * init-mode-regs path).  Rows are { address, value }, all modes.
 * Vendor-supplied values -- do not edit by hand.
 */
static const u32 ar9300_2p2_soc_preamble[][2] = {
	/* Addr      allmodes */
	{0x000040a4, 0x00a0c1c9},
	{0x00007008, 0x00000000},
	{0x00007020, 0x00000000},
	{0x00007034, 0x00000002},
	{0x00007038, 0x000004c2},
	{0x00007048, 0x00000008},
};
1763
/*
 * AR9300 2.2: PCIe SERDES programming for the awake state (PLL on,
 * CLKREQ disabled in L1).  Loaded into ah->iniPcieSerdes.  Register
 * 0x4040 is deliberately listed twice -- the rows are presumably
 * applied in order as a two-step write sequence (verify against the
 * INI-array write path before reordering).
 */
static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
	/* Addr      allmodes */
	{0x00004040, 0x08212e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1770
/*
 * AR9300 2.2: PCIe SERDES programming for the low-power/sleep state
 * (CLKREQ enabled in L1).  Loaded into ah->iniPcieSerdesLowPower.
 * Differs from the awake table only in the first 0x4040 value.
 */
static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
	/* Addr      allmodes */
	{0x00004040, 0x08253e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1777
/*
 * AR9300 2.2: alternate PCIe SERDES programming with CLKREQ disabled
 * in L1 (PLL not forced on -- first 0x4040 value differs from the
 * pll_on variant above by a single bit).
 */
static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
	/* Addr      allmodes */
	{0x00004040, 0x08213e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1784
1785#endif /* INITVALS_9003_2P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 56a9e5fa6d66..5a0650399136 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -739,6 +739,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
739 */ 739 */
740 ar9003_hw_set_chain_masks(ah, 0x7, 0x7); 740 ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
741 741
742 /* Do Tx IQ Calibration */
743 ar9003_hw_tx_iq_cal(ah);
744 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
745 udelay(5);
746 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
747
742 /* Calibrate the AGC */ 748 /* Calibrate the AGC */
743 REG_WRITE(ah, AR_PHY_AGC_CONTROL, 749 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
744 REG_READ(ah, AR_PHY_AGC_CONTROL) | 750 REG_READ(ah, AR_PHY_AGC_CONTROL) |
@@ -753,10 +759,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
753 return false; 759 return false;
754 } 760 }
755 761
756 /* Do Tx IQ Calibration */
757 if (ah->config.tx_iq_calibration)
758 ar9003_hw_tx_iq_cal(ah);
759
760 /* Revert chainmasks to their original values before NF cal */ 762 /* Revert chainmasks to their original values before NF cal */
761 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 763 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
762 764
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 23eb60ea5455..ace8d2678b18 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -67,6 +67,7 @@ static const struct ar9300_eeprom ar9300_default = {
67 * bit2 - enable fastClock - enabled 67 * bit2 - enable fastClock - enabled
68 * bit3 - enable doubling - enabled 68 * bit3 - enable doubling - enabled
69 * bit4 - enable internal regulator - disabled 69 * bit4 - enable internal regulator - disabled
70 * bit5 - enable pa predistortion - disabled
70 */ 71 */
71 .miscConfiguration = 0, /* bit0 - turn down drivestrength */ 72 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
72 .eepromWriteEnableGpio = 3, 73 .eepromWriteEnableGpio = 3,
@@ -129,9 +130,11 @@ static const struct ar9300_eeprom ar9300_default = {
129 .txEndToRxOn = 0x2, 130 .txEndToRxOn = 0x2,
130 .txFrameToXpaOn = 0xe, 131 .txFrameToXpaOn = 0xe,
131 .thresh62 = 28, 132 .thresh62 = 28,
132 .futureModal = { /* [32] */ 133 .papdRateMaskHt20 = LE32(0x80c080),
134 .papdRateMaskHt40 = LE32(0x80c080),
135 .futureModal = {
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 137 0, 0, 0, 0, 0, 0, 0, 0
135 }, 138 },
136 }, 139 },
137 .calFreqPier2G = { 140 .calFreqPier2G = {
@@ -326,9 +329,11 @@ static const struct ar9300_eeprom ar9300_default = {
326 .txEndToRxOn = 0x2, 329 .txEndToRxOn = 0x2,
327 .txFrameToXpaOn = 0xe, 330 .txFrameToXpaOn = 0xe,
328 .thresh62 = 28, 331 .thresh62 = 28,
332 .papdRateMaskHt20 = LE32(0xf0e0e0),
333 .papdRateMaskHt40 = LE32(0xf0e0e0),
329 .futureModal = { 334 .futureModal = {
330 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 335 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
331 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 336 0, 0, 0, 0, 0, 0, 0, 0
332 }, 337 },
333 }, 338 },
334 .calFreqPier5G = { 339 .calFreqPier5G = {
@@ -644,6 +649,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
644 return (pBase->featureEnable & 0x10) >> 4; 649 return (pBase->featureEnable & 0x10) >> 4;
645 case EEP_SWREG: 650 case EEP_SWREG:
646 return le32_to_cpu(pBase->swreg); 651 return le32_to_cpu(pBase->swreg);
652 case EEP_PAPRD:
653 return !!(pBase->featureEnable & BIT(5));
647 default: 654 default:
648 return 0; 655 return 0;
649 } 656 }
@@ -944,7 +951,7 @@ static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
944 return 1; 951 return 1;
945} 952}
946 953
947static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah, 954static u32 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
948 struct ath9k_channel *chan) 955 struct ath9k_channel *chan)
949{ 956{
950 return -EINVAL; 957 return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 23fb353c3bba..3c533bb983c7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -234,7 +234,9 @@ struct ar9300_modal_eep_header {
234 u8 txEndToRxOn; 234 u8 txEndToRxOn;
235 u8 txFrameToXpaOn; 235 u8 txFrameToXpaOn;
236 u8 thresh62; 236 u8 thresh62;
237 u8 futureModal[32]; 237 __le32 papdRateMaskHt20;
238 __le32 papdRateMaskHt40;
239 u8 futureModal[24];
238} __packed; 240} __packed;
239 241
240struct ar9300_cal_data_per_freq_op_loop { 242struct ar9300_cal_data_per_freq_op_loop {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index b15309caf1da..064168909108 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -16,7 +16,8 @@
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9003_mac.h" 18#include "ar9003_mac.h"
19#include "ar9003_initvals.h" 19#include "ar9003_2p0_initvals.h"
20#include "ar9003_2p2_initvals.h"
20 21
21/* General hardware code for the AR9003 hadware family */ 22/* General hardware code for the AR9003 hadware family */
22 23
@@ -31,12 +32,8 @@ static bool ar9003_hw_macversion_supported(u32 macversion)
31 return false; 32 return false;
32} 33}
33 34
34/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */ 35/* AR9003 2.0 */
35/* 36static void ar9003_2p0_hw_init_mode_regs(struct ath_hw *ah)
36 * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
37 * ensuring it does not affect hardware bring up
38 */
39static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
40{ 37{
41 /* mac */ 38 /* mac */
42 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 39 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -106,27 +103,128 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
106 3); 103 3);
107} 104}
108 105
106/* AR9003 2.2 */
107static void ar9003_2p2_hw_init_mode_regs(struct ath_hw *ah)
108{
109 /* mac */
110 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
111 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
112 ar9300_2p2_mac_core,
113 ARRAY_SIZE(ar9300_2p2_mac_core), 2);
114 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
115 ar9300_2p2_mac_postamble,
116 ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
117
118 /* bb */
119 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
120 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
121 ar9300_2p2_baseband_core,
122 ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
123 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
124 ar9300_2p2_baseband_postamble,
125 ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
126
127 /* radio */
128 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
129 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
130 ar9300_2p2_radio_core,
131 ARRAY_SIZE(ar9300_2p2_radio_core), 2);
132 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
133 ar9300_2p2_radio_postamble,
134 ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
135
136 /* soc */
137 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
138 ar9300_2p2_soc_preamble,
139 ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
140 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
141 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
142 ar9300_2p2_soc_postamble,
143 ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
144
145 /* rx/tx gain */
146 INIT_INI_ARRAY(&ah->iniModesRxGain,
147 ar9300Common_rx_gain_table_2p2,
148 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
149 INIT_INI_ARRAY(&ah->iniModesTxGain,
150 ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
151 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
152 5);
153
154 /* Load PCIE SERDES settings from INI */
155
156 /* Awake Setting */
157
158 INIT_INI_ARRAY(&ah->iniPcieSerdes,
159 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
160 ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
161 2);
162
163 /* Sleep Setting */
164
165 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
166 ar9300PciePhy_clkreq_enable_L1_2p2,
167 ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
168 2);
169
170 /* Fast clock modal settings */
171 INIT_INI_ARRAY(&ah->iniModesAdditional,
172 ar9300Modes_fast_clock_2p2,
173 ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
174 3);
175}
176
177/*
178 * The AR9003 family uses a new INI format (pre, core, post
179 * arrays per subsystem).
180 */
181static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
182{
183 if (AR_SREV_9300_20(ah))
184 ar9003_2p0_hw_init_mode_regs(ah);
185 else
186 ar9003_2p2_hw_init_mode_regs(ah);
187}
188
109static void ar9003_tx_gain_table_apply(struct ath_hw *ah) 189static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
110{ 190{
111 switch (ar9003_hw_get_tx_gain_idx(ah)) { 191 switch (ar9003_hw_get_tx_gain_idx(ah)) {
112 case 0: 192 case 0:
113 default: 193 default:
114 INIT_INI_ARRAY(&ah->iniModesTxGain, 194 if (AR_SREV_9300_20(ah))
115 ar9300Modes_lowest_ob_db_tx_gain_table_2p0, 195 INIT_INI_ARRAY(&ah->iniModesTxGain,
116 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0), 196 ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
117 5); 197 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
198 5);
199 else
200 INIT_INI_ARRAY(&ah->iniModesTxGain,
201 ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
202 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
203 5);
118 break; 204 break;
119 case 1: 205 case 1:
120 INIT_INI_ARRAY(&ah->iniModesTxGain, 206 if (AR_SREV_9300_20(ah))
121 ar9300Modes_high_ob_db_tx_gain_table_2p0, 207 INIT_INI_ARRAY(&ah->iniModesTxGain,
122 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0), 208 ar9300Modes_high_ob_db_tx_gain_table_2p0,
123 5); 209 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
210 5);
211 else
212 INIT_INI_ARRAY(&ah->iniModesTxGain,
213 ar9300Modes_high_ob_db_tx_gain_table_2p2,
214 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
215 5);
124 break; 216 break;
125 case 2: 217 case 2:
126 INIT_INI_ARRAY(&ah->iniModesTxGain, 218 if (AR_SREV_9300_20(ah))
127 ar9300Modes_low_ob_db_tx_gain_table_2p0, 219 INIT_INI_ARRAY(&ah->iniModesTxGain,
128 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0), 220 ar9300Modes_low_ob_db_tx_gain_table_2p0,
129 5); 221 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
222 5);
223 else
224 INIT_INI_ARRAY(&ah->iniModesTxGain,
225 ar9300Modes_low_ob_db_tx_gain_table_2p2,
226 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
227 5);
130 break; 228 break;
131 } 229 }
132} 230}
@@ -136,15 +234,28 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
136 switch (ar9003_hw_get_rx_gain_idx(ah)) { 234 switch (ar9003_hw_get_rx_gain_idx(ah)) {
137 case 0: 235 case 0:
138 default: 236 default:
139 INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0, 237 if (AR_SREV_9300_20(ah))
140 ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 238 INIT_INI_ARRAY(&ah->iniModesRxGain,
141 2); 239 ar9300Common_rx_gain_table_2p0,
240 ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
241 2);
242 else
243 INIT_INI_ARRAY(&ah->iniModesRxGain,
244 ar9300Common_rx_gain_table_2p2,
245 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
246 2);
142 break; 247 break;
143 case 1: 248 case 1:
144 INIT_INI_ARRAY(&ah->iniModesRxGain, 249 if (AR_SREV_9300_20(ah))
145 ar9300Common_wo_xlna_rx_gain_table_2p0, 250 INIT_INI_ARRAY(&ah->iniModesRxGain,
146 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0), 251 ar9300Common_wo_xlna_rx_gain_table_2p0,
147 2); 252 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
253 2);
254 else
255 INIT_INI_ARRAY(&ah->iniModesRxGain,
256 ar9300Common_wo_xlna_rx_gain_table_2p2,
257 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
258 2);
148 break; 259 break;
149 } 260 }
150} 261}
@@ -184,6 +295,26 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
184 /* Several PCIe massages to ensure proper behaviour */ 295 /* Several PCIe massages to ensure proper behaviour */
185 if (ah->config.pcie_waen) 296 if (ah->config.pcie_waen)
186 REG_WRITE(ah, AR_WA, ah->config.pcie_waen); 297 REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
298 else
299 REG_WRITE(ah, AR_WA, ah->WARegVal);
300 }
301
302 /*
303 * Configire PCIE after Ini init. SERDES values now come from ini file
304 * This enables PCIe low power mode.
305 */
306 if (ah->config.pcieSerDesWrite) {
307 unsigned int i;
308 struct ar5416IniArray *array;
309
310 array = power_off ? &ah->iniPcieSerdes :
311 &ah->iniPcieSerdesLowPower;
312
313 for (i = 0; i < array->ia_rows; i++) {
314 REG_WRITE(ah,
315 INI_RA(array, i, 0),
316 INI_RA(array, i, 1));
317 }
187 } 318 }
188} 319}
189 320
@@ -202,4 +333,6 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
202 ar9003_hw_attach_phy_ops(ah); 333 ar9003_hw_attach_phy_ops(ah);
203 ar9003_hw_attach_calib_ops(ah); 334 ar9003_hw_attach_calib_ops(ah);
204 ar9003_hw_attach_mac_ops(ah); 335 ar9003_hw_attach_mac_ops(ah);
336
337 ath9k_hw_attach_ani_ops_new(ah);
205} 338}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 37ba37481a47..5b995bee70ae 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -90,6 +90,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
90 MAP_ISR_S2_CST); 90 MAP_ISR_S2_CST);
91 mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >> 91 mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
92 MAP_ISR_S2_TSFOOR); 92 MAP_ISR_S2_TSFOOR);
93 mask2 |= ((isr2 & AR_ISR_S2_BB_WATCHDOG) >>
94 MAP_ISR_S2_BB_WATCHDOG);
93 95
94 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { 96 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
95 REG_WRITE(ah, AR_ISR_S2, isr2); 97 REG_WRITE(ah, AR_ISR_S2, isr2);
@@ -167,6 +169,9 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
167 169
168 (void) REG_READ(ah, AR_ISR); 170 (void) REG_READ(ah, AR_ISR);
169 } 171 }
172
173 if (*masked & ATH9K_INT_BB_WATCHDOG)
174 ar9003_hw_bb_watchdog_read(ah);
170 } 175 }
171 176
172 if (sync_cause) { 177 if (sync_cause) {
@@ -465,6 +470,14 @@ static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
465 ads->ctl11 &= ~AR_VirtMoreFrag; 470 ads->ctl11 &= ~AR_VirtMoreFrag;
466} 471}
467 472
473void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains)
474{
475 struct ar9003_txc *ads = ds;
476
477 ads->ctl12 |= SM(chains, AR_PAPRDChainMask);
478}
479EXPORT_SYMBOL(ar9003_hw_set_paprd_txdesc);
480
468void ar9003_hw_attach_mac_ops(struct ath_hw *hw) 481void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
469{ 482{
470 struct ath_hw_ops *ops = ath9k_hw_ops(hw); 483 struct ath_hw_ops *ops = ath9k_hw_ops(hw);
@@ -566,12 +579,39 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
566 rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; 579 rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
567 580
568 if ((rxsp->status11 & AR_RxFrameOK) == 0) { 581 if ((rxsp->status11 & AR_RxFrameOK) == 0) {
582 /*
583 * AR_CRCErr will bet set to true if we're on the last
584 * subframe and the AR_PostDelimCRCErr is caught.
585 * In a way this also gives us a guarantee that when
586 * (!(AR_CRCErr) && (AR_PostDelimCRCErr)) we cannot
587 * possibly be reviewing the last subframe. AR_CRCErr
588 * is the CRC of the actual data.
589 */
569 if (rxsp->status11 & AR_CRCErr) { 590 if (rxsp->status11 & AR_CRCErr) {
570 rxs->rs_status |= ATH9K_RXERR_CRC; 591 rxs->rs_status |= ATH9K_RXERR_CRC;
571 } else if (rxsp->status11 & AR_PHYErr) { 592 } else if (rxsp->status11 & AR_PHYErr) {
572 rxs->rs_status |= ATH9K_RXERR_PHY;
573 phyerr = MS(rxsp->status11, AR_PHYErrCode); 593 phyerr = MS(rxsp->status11, AR_PHYErrCode);
574 rxs->rs_phyerr = phyerr; 594 /*
595 * If we reach a point here where AR_PostDelimCRCErr is
596 * true it implies we're *not* on the last subframe. In
597 * in that case that we know already that the CRC of
598 * the frame was OK, and MAC would send an ACK for that
599 * subframe, even if we did get a phy error of type
600 * ATH9K_PHYERR_OFDM_RESTART. This is only applicable
601 * to frame that are prior to the last subframe.
602 * The AR_PostDelimCRCErr is the CRC for the MPDU
603 * delimiter, which contains the 4 reserved bits,
604 * the MPDU length (12 bits), and follows the MPDU
605 * delimiter for an A-MPDU subframe (0x4E = 'N' ASCII).
606 */
607 if ((phyerr == ATH9K_PHYERR_OFDM_RESTART) &&
608 (rxsp->status11 & AR_PostDelimCRCErr)) {
609 rxs->rs_phyerr = 0;
610 } else {
611 rxs->rs_status |= ATH9K_RXERR_PHY;
612 rxs->rs_phyerr = phyerr;
613 }
614
575 } else if (rxsp->status11 & AR_DecryptCRCErr) { 615 } else if (rxsp->status11 & AR_DecryptCRCErr) {
576 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 616 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
577 } else if (rxsp->status11 & AR_MichaelErr) { 617 } else if (rxsp->status11 & AR_MichaelErr) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index f17558b14539..9f2cea70a840 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -33,13 +33,14 @@
33#define AR_TxDescId_S 16 33#define AR_TxDescId_S 16
34#define AR_TxPtrChkSum 0x0000ffff 34#define AR_TxPtrChkSum 0x0000ffff
35 35
36#define AR_TxTid 0xf0000000
37#define AR_TxTid_S 28
38
39#define AR_LowRxChain 0x00004000 36#define AR_LowRxChain 0x00004000
40 37
41#define AR_Not_Sounding 0x20000000 38#define AR_Not_Sounding 0x20000000
42 39
40/* ctl 12 */
41#define AR_PAPRDChainMask 0x00000e00
42#define AR_PAPRDChainMask_S 9
43
43#define MAP_ISR_S2_CST 6 44#define MAP_ISR_S2_CST 6
44#define MAP_ISR_S2_GTT 6 45#define MAP_ISR_S2_GTT 6
45#define MAP_ISR_S2_TIM 3 46#define MAP_ISR_S2_TIM 3
@@ -47,6 +48,7 @@
47#define MAP_ISR_S2_DTIMSYNC 7 48#define MAP_ISR_S2_DTIMSYNC 7
48#define MAP_ISR_S2_DTIM 7 49#define MAP_ISR_S2_DTIM 7
49#define MAP_ISR_S2_TSFOOR 4 50#define MAP_ISR_S2_TSFOOR 4
51#define MAP_ISR_S2_BB_WATCHDOG 6
50 52
51#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds) 53#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
52 54
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
new file mode 100644
index 000000000000..49e0c865ce5c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -0,0 +1,714 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar9003_phy.h"
19
20void ar9003_paprd_enable(struct ath_hw *ah, bool val)
21{
22 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
23 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
24 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
25 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
26 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
27 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
28}
29EXPORT_SYMBOL(ar9003_paprd_enable);
30
31static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
32{
33 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
34 struct ar9300_modal_eep_header *hdr;
35 const u32 ctrl0[3] = {
36 AR_PHY_PAPRD_CTRL0_B0,
37 AR_PHY_PAPRD_CTRL0_B1,
38 AR_PHY_PAPRD_CTRL0_B2
39 };
40 const u32 ctrl1[3] = {
41 AR_PHY_PAPRD_CTRL1_B0,
42 AR_PHY_PAPRD_CTRL1_B1,
43 AR_PHY_PAPRD_CTRL1_B2
44 };
45 u32 am_mask, ht40_mask;
46 int i;
47
48 if (ah->curchan && IS_CHAN_5GHZ(ah->curchan))
49 hdr = &eep->modalHeader5G;
50 else
51 hdr = &eep->modalHeader2G;
52
53 am_mask = le32_to_cpu(hdr->papdRateMaskHt20);
54 ht40_mask = le32_to_cpu(hdr->papdRateMaskHt40);
55
56 REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, am_mask);
57 REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, am_mask);
58 REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ht40_mask);
59
60 for (i = 0; i < 3; i++) {
61 REG_RMW_FIELD(ah, ctrl0[i],
62 AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
63 REG_RMW_FIELD(ah, ctrl1[i],
64 AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE, 1);
65 REG_RMW_FIELD(ah, ctrl1[i],
66 AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE, 1);
67 REG_RMW_FIELD(ah, ctrl1[i],
68 AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
69 REG_RMW_FIELD(ah, ctrl1[i],
70 AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK, 181);
71 REG_RMW_FIELD(ah, ctrl1[i],
72 AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT, 361);
73 REG_RMW_FIELD(ah, ctrl1[i],
74 AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
75 REG_RMW_FIELD(ah, ctrl0[i],
76 AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH, 3);
77 }
78
79 ar9003_paprd_enable(ah, false);
80
81 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
82 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30);
83 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
84 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1);
85 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
86 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1);
87 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
88 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0);
89 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
90 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0);
91 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
92 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
93 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
94 AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
95 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
96 AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, 147);
97 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
98 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
99 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
100 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4);
101 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
102 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
103 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
104 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
105 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
106 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
107 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
108 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
109 -15);
110 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
111 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
112 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
113 AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0);
114 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
115 AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400);
116 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
117 AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES,
118 100);
119 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0,
120 AR_PHY_PAPRD_PRE_POST_SCALING, 261376);
121 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_1_B0,
122 AR_PHY_PAPRD_PRE_POST_SCALING, 248079);
123 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_2_B0,
124 AR_PHY_PAPRD_PRE_POST_SCALING, 233759);
125 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_3_B0,
126 AR_PHY_PAPRD_PRE_POST_SCALING, 220464);
127 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_4_B0,
128 AR_PHY_PAPRD_PRE_POST_SCALING, 208194);
129 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_5_B0,
130 AR_PHY_PAPRD_PRE_POST_SCALING, 196949);
131 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_6_B0,
132 AR_PHY_PAPRD_PRE_POST_SCALING, 185706);
133 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0,
134 AR_PHY_PAPRD_PRE_POST_SCALING, 175487);
135}
136
137static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
138{
139 u32 *entry = ah->paprd_gain_table_entries;
140 u8 *index = ah->paprd_gain_table_index;
141 u32 reg = AR_PHY_TXGAIN_TABLE;
142 int i;
143
144 memset(entry, 0, sizeof(ah->paprd_gain_table_entries));
145 memset(index, 0, sizeof(ah->paprd_gain_table_index));
146
147 for (i = 0; i < 32; i++) {
148 entry[i] = REG_READ(ah, reg);
149 index[i] = (entry[i] >> 24) & 0xff;
150 reg += 4;
151 }
152}
153
154static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
155 int target_power)
156{
157 int olpc_gain_delta = 0;
158 int alpha_therm, alpha_volt;
159 int therm_cal_value, volt_cal_value;
160 int therm_value, volt_value;
161 int thermal_gain_corr, voltage_gain_corr;
162 int desired_scale, desired_gain = 0;
163 u32 reg;
164
165 REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
166 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
167 desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12,
168 AR_PHY_TPC_12_DESIRED_SCALE_HT40_5);
169 alpha_therm = REG_READ_FIELD(ah, AR_PHY_TPC_19,
170 AR_PHY_TPC_19_ALPHA_THERM);
171 alpha_volt = REG_READ_FIELD(ah, AR_PHY_TPC_19,
172 AR_PHY_TPC_19_ALPHA_VOLT);
173 therm_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
174 AR_PHY_TPC_18_THERM_CAL_VALUE);
175 volt_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
176 AR_PHY_TPC_18_VOLT_CAL_VALUE);
177 therm_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
178 AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE);
179 volt_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
180 AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE);
181
182 if (chain == 0)
183 reg = AR_PHY_TPC_11_B0;
184 else if (chain == 1)
185 reg = AR_PHY_TPC_11_B1;
186 else
187 reg = AR_PHY_TPC_11_B2;
188
189 olpc_gain_delta = REG_READ_FIELD(ah, reg,
190 AR_PHY_TPC_11_OLPC_GAIN_DELTA);
191
192 if (olpc_gain_delta >= 128)
193 olpc_gain_delta = olpc_gain_delta - 256;
194
195 thermal_gain_corr = (alpha_therm * (therm_value - therm_cal_value) +
196 (256 / 2)) / 256;
197 voltage_gain_corr = (alpha_volt * (volt_value - volt_cal_value) +
198 (128 / 2)) / 128;
199 desired_gain = target_power - olpc_gain_delta - thermal_gain_corr -
200 voltage_gain_corr + desired_scale;
201
202 return desired_gain;
203}
204
205static void ar9003_tx_force_gain(struct ath_hw *ah, unsigned int gain_index)
206{
207 int selected_gain_entry, txbb1dbgain, txbb6dbgain, txmxrgain;
208 int padrvgnA, padrvgnB, padrvgnC, padrvgnD;
209 u32 *gain_table_entries = ah->paprd_gain_table_entries;
210
211 selected_gain_entry = gain_table_entries[gain_index];
212 txbb1dbgain = selected_gain_entry & 0x7;
213 txbb6dbgain = (selected_gain_entry >> 3) & 0x3;
214 txmxrgain = (selected_gain_entry >> 5) & 0xf;
215 padrvgnA = (selected_gain_entry >> 9) & 0xf;
216 padrvgnB = (selected_gain_entry >> 13) & 0xf;
217 padrvgnC = (selected_gain_entry >> 17) & 0xf;
218 padrvgnD = (selected_gain_entry >> 21) & 0x3;
219
220 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
221 AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN, txbb1dbgain);
222 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
223 AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN, txbb6dbgain);
224 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
225 AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN, txmxrgain);
226 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
227 AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA, padrvgnA);
228 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
229 AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB, padrvgnB);
230 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
231 AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC, padrvgnC);
232 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
233 AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND, padrvgnD);
234 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
235 AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL, 0);
236 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
237 AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN, 0);
238 REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCED_DAC_GAIN, 0);
239 REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCE_DAC_GAIN, 0);
240}
241
242static inline int find_expn(int num)
243{
244 return fls(num) - 1;
245}
246
247static inline int find_proper_scale(int expn, int N)
248{
249 return (expn > N) ? expn - 10 : 0;
250}
251
252#define NUM_BIN 23
253
254static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
255{
256 unsigned int thresh_accum_cnt;
257 int x_est[NUM_BIN + 1], Y[NUM_BIN + 1], theta[NUM_BIN + 1];
258 int PA_in[NUM_BIN + 1];
259 int B1_tmp[NUM_BIN + 1], B2_tmp[NUM_BIN + 1];
260 unsigned int B1_abs_max, B2_abs_max;
261 int max_index, scale_factor;
262 int y_est[NUM_BIN + 1];
263 int x_est_fxp1_nonlin, x_tilde[NUM_BIN + 1];
264 unsigned int x_tilde_abs;
265 int G_fxp, Y_intercept, order_x_by_y, M, I, L, sum_y_sqr, sum_y_quad;
266 int Q_x, Q_B1, Q_B2, beta_raw, alpha_raw, scale_B;
267 int Q_scale_B, Q_beta, Q_alpha, alpha, beta, order_1, order_2;
268 int order1_5x, order2_3x, order1_5x_rem, order2_3x_rem;
269 int y5, y3, tmp;
270 int theta_low_bin = 0;
271 int i;
272
273 /* disregard any bin that contains <= 16 samples */
274 thresh_accum_cnt = 16;
275 scale_factor = 5;
276 max_index = 0;
277 memset(theta, 0, sizeof(theta));
278 memset(x_est, 0, sizeof(x_est));
279 memset(Y, 0, sizeof(Y));
280 memset(y_est, 0, sizeof(y_est));
281 memset(x_tilde, 0, sizeof(x_tilde));
282
283 for (i = 0; i < NUM_BIN; i++) {
284 s32 accum_cnt, accum_tx, accum_rx, accum_ang;
285
286 /* number of samples */
287 accum_cnt = data_L[i] & 0xffff;
288
289 if (accum_cnt <= thresh_accum_cnt)
290 continue;
291
292 /* sum(tx amplitude) */
293 accum_tx = ((data_L[i] >> 16) & 0xffff) |
294 ((data_U[i] & 0x7ff) << 16);
295
296 /* sum(rx amplitude distance to lower bin edge) */
297 accum_rx = ((data_U[i] >> 11) & 0x1f) |
298 ((data_L[i + 23] & 0xffff) << 5);
299
300 /* sum(angles) */
301 accum_ang = ((data_L[i + 23] >> 16) & 0xffff) |
302 ((data_U[i + 23] & 0x7ff) << 16);
303
304 accum_tx <<= scale_factor;
305 accum_rx <<= scale_factor;
306 x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
307 scale_factor;
308
309 Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
310 scale_factor) +
311 (1 << scale_factor) * max_index + 16;
312
313 if (accum_ang >= (1 << 26))
314 accum_ang -= 1 << 27;
315
316 theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) /
317 accum_cnt;
318
319 max_index++;
320 }
321
322 /*
323 * Find average theta of first 5 bin and all of those to same value.
324 * Curve is linear at that range.
325 */
326 for (i = 1; i < 6; i++)
327 theta_low_bin += theta[i];
328
329 theta_low_bin = theta_low_bin / 5;
330 for (i = 1; i < 6; i++)
331 theta[i] = theta_low_bin;
332
333 /* Set values at origin */
334 theta[0] = theta_low_bin;
335 for (i = 0; i <= max_index; i++)
336 theta[i] -= theta_low_bin;
337
338 x_est[0] = 0;
339 Y[0] = 0;
340 scale_factor = 8;
341
342 /* low signal gain */
343 if (x_est[6] == x_est[3])
344 return false;
345
346 G_fxp =
347 (((Y[6] - Y[3]) * 1 << scale_factor) +
348 (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
349
350 Y_intercept =
351 (G_fxp * (x_est[0] - x_est[3]) +
352 (1 << scale_factor)) / (1 << scale_factor) + Y[3];
353
354 for (i = 0; i <= max_index; i++)
355 y_est[i] = Y[i] - Y_intercept;
356
357 for (i = 0; i <= 3; i++) {
358 y_est[i] = i * 32;
359
360 /* prevent division by zero */
361 if (G_fxp == 0)
362 return false;
363
364 x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
365 }
366
367 x_est_fxp1_nonlin =
368 x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
369 G_fxp) / G_fxp;
370
371 order_x_by_y =
372 (x_est_fxp1_nonlin + y_est[max_index]) / y_est[max_index];
373
374 if (order_x_by_y == 0)
375 M = 10;
376 else if (order_x_by_y == 1)
377 M = 9;
378 else
379 M = 8;
380
381 I = (max_index > 15) ? 7 : max_index >> 1;
382 L = max_index - I;
383 scale_factor = 8;
384 sum_y_sqr = 0;
385 sum_y_quad = 0;
386 x_tilde_abs = 0;
387
388 for (i = 0; i <= L; i++) {
389 unsigned int y_sqr;
390 unsigned int y_quad;
391 unsigned int tmp_abs;
392
393 /* prevent division by zero */
394 if (y_est[i + I] == 0)
395 return false;
396
397 x_est_fxp1_nonlin =
398 x_est[i + I] - ((1 << scale_factor) * y_est[i + I] +
399 G_fxp) / G_fxp;
400
401 x_tilde[i] =
402 (x_est_fxp1_nonlin * (1 << M) + y_est[i + I]) / y_est[i +
403 I];
404 x_tilde[i] =
405 (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
406 x_tilde[i] =
407 (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
408 y_sqr =
409 (y_est[i + I] * y_est[i + I] +
410 (scale_factor * scale_factor)) / (scale_factor *
411 scale_factor);
412 tmp_abs = abs(x_tilde[i]);
413 if (tmp_abs > x_tilde_abs)
414 x_tilde_abs = tmp_abs;
415
416 y_quad = y_sqr * y_sqr;
417 sum_y_sqr = sum_y_sqr + y_sqr;
418 sum_y_quad = sum_y_quad + y_quad;
419 B1_tmp[i] = y_sqr * (L + 1);
420 B2_tmp[i] = y_sqr;
421 }
422
423 B1_abs_max = 0;
424 B2_abs_max = 0;
425 for (i = 0; i <= L; i++) {
426 int abs_val;
427
428 B1_tmp[i] -= sum_y_sqr;
429 B2_tmp[i] = sum_y_quad - sum_y_sqr * B2_tmp[i];
430
431 abs_val = abs(B1_tmp[i]);
432 if (abs_val > B1_abs_max)
433 B1_abs_max = abs_val;
434
435 abs_val = abs(B2_tmp[i]);
436 if (abs_val > B2_abs_max)
437 B2_abs_max = abs_val;
438 }
439
440 Q_x = find_proper_scale(find_expn(x_tilde_abs), 10);
441 Q_B1 = find_proper_scale(find_expn(B1_abs_max), 10);
442 Q_B2 = find_proper_scale(find_expn(B2_abs_max), 10);
443
444 beta_raw = 0;
445 alpha_raw = 0;
446 for (i = 0; i <= L; i++) {
447 x_tilde[i] = x_tilde[i] / (1 << Q_x);
448 B1_tmp[i] = B1_tmp[i] / (1 << Q_B1);
449 B2_tmp[i] = B2_tmp[i] / (1 << Q_B2);
450 beta_raw = beta_raw + B1_tmp[i] * x_tilde[i];
451 alpha_raw = alpha_raw + B2_tmp[i] * x_tilde[i];
452 }
453
454 scale_B =
455 ((sum_y_quad / scale_factor) * (L + 1) -
456 (sum_y_sqr / scale_factor) * sum_y_sqr) * scale_factor;
457
458 Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
459 scale_B = scale_B / (1 << Q_scale_B);
460 Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
461 Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
462 beta_raw = beta_raw / (1 << Q_beta);
463 alpha_raw = alpha_raw / (1 << Q_alpha);
464 alpha = (alpha_raw << 10) / scale_B;
465 beta = (beta_raw << 10) / scale_B;
466 order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B;
467 order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B;
468 order1_5x = order_1 / 5;
469 order2_3x = order_2 / 3;
470 order1_5x_rem = order_1 - 5 * order1_5x;
471 order2_3x_rem = order_2 - 3 * order2_3x;
472
473 for (i = 0; i < PAPRD_TABLE_SZ; i++) {
474 tmp = i * 32;
475 y5 = ((beta * tmp) >> 6) >> order1_5x;
476 y5 = (y5 * tmp) >> order1_5x;
477 y5 = (y5 * tmp) >> order1_5x;
478 y5 = (y5 * tmp) >> order1_5x;
479 y5 = (y5 * tmp) >> order1_5x;
480 y5 = y5 >> order1_5x_rem;
481 y3 = (alpha * tmp) >> order2_3x;
482 y3 = (y3 * tmp) >> order2_3x;
483 y3 = (y3 * tmp) >> order2_3x;
484 y3 = y3 >> order2_3x_rem;
485 PA_in[i] = y5 + y3 + (256 * tmp) / G_fxp;
486
487 if (i >= 2) {
488 tmp = PA_in[i] - PA_in[i - 1];
489 if (tmp < 0)
490 PA_in[i] =
491 PA_in[i - 1] + (PA_in[i - 1] -
492 PA_in[i - 2]);
493 }
494
495 PA_in[i] = (PA_in[i] < 1400) ? PA_in[i] : 1400;
496 }
497
498 beta_raw = 0;
499 alpha_raw = 0;
500
501 for (i = 0; i <= L; i++) {
502 int theta_tilde =
503 ((theta[i + I] << M) + y_est[i + I]) / y_est[i + I];
504 theta_tilde =
505 ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
506 theta_tilde =
507 ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
508 beta_raw = beta_raw + B1_tmp[i] * theta_tilde;
509 alpha_raw = alpha_raw + B2_tmp[i] * theta_tilde;
510 }
511
512 Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
513 Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
514 beta_raw = beta_raw / (1 << Q_beta);
515 alpha_raw = alpha_raw / (1 << Q_alpha);
516
517 alpha = (alpha_raw << 10) / scale_B;
518 beta = (beta_raw << 10) / scale_B;
519 order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B + 5;
520 order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B + 5;
521 order1_5x = order_1 / 5;
522 order2_3x = order_2 / 3;
523 order1_5x_rem = order_1 - 5 * order1_5x;
524 order2_3x_rem = order_2 - 3 * order2_3x;
525
526 for (i = 0; i < PAPRD_TABLE_SZ; i++) {
527 int PA_angle;
528
529 /* pa_table[4] is calculated from PA_angle for i=5 */
530 if (i == 4)
531 continue;
532
533 tmp = i * 32;
534 if (beta > 0)
535 y5 = (((beta * tmp - 64) >> 6) -
536 (1 << order1_5x)) / (1 << order1_5x);
537 else
538 y5 = ((((beta * tmp - 64) >> 6) +
539 (1 << order1_5x)) / (1 << order1_5x));
540
541 y5 = (y5 * tmp) / (1 << order1_5x);
542 y5 = (y5 * tmp) / (1 << order1_5x);
543 y5 = (y5 * tmp) / (1 << order1_5x);
544 y5 = (y5 * tmp) / (1 << order1_5x);
545 y5 = y5 / (1 << order1_5x_rem);
546
547 if (beta > 0)
548 y3 = (alpha * tmp -
549 (1 << order2_3x)) / (1 << order2_3x);
550 else
551 y3 = (alpha * tmp +
552 (1 << order2_3x)) / (1 << order2_3x);
553 y3 = (y3 * tmp) / (1 << order2_3x);
554 y3 = (y3 * tmp) / (1 << order2_3x);
555 y3 = y3 / (1 << order2_3x_rem);
556
557 if (i < 4) {
558 PA_angle = 0;
559 } else {
560 PA_angle = y5 + y3;
561 if (PA_angle < -150)
562 PA_angle = -150;
563 else if (PA_angle > 150)
564 PA_angle = 150;
565 }
566
567 pa_table[i] = ((PA_in[i] & 0x7ff) << 11) + (PA_angle & 0x7ff);
568 if (i == 5) {
569 PA_angle = (PA_angle + 2) >> 1;
570 pa_table[i - 1] = ((PA_in[i - 1] & 0x7ff) << 11) +
571 (PA_angle & 0x7ff);
572 }
573 }
574
575 *gain = G_fxp;
576 return true;
577}
578
579void ar9003_paprd_populate_single_table(struct ath_hw *ah,
580 struct ath9k_channel *chan, int chain)
581{
582 u32 *paprd_table_val = chan->pa_table[chain];
583 u32 small_signal_gain = chan->small_signal_gain[chain];
584 u32 training_power;
585 u32 reg = 0;
586 int i;
587
588 training_power =
589 REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
590 AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
591 training_power -= 4;
592
593 if (chain == 0)
594 reg = AR_PHY_PAPRD_MEM_TAB_B0;
595 else if (chain == 1)
596 reg = AR_PHY_PAPRD_MEM_TAB_B1;
597 else if (chain == 2)
598 reg = AR_PHY_PAPRD_MEM_TAB_B2;
599
600 for (i = 0; i < PAPRD_TABLE_SZ; i++) {
601 REG_WRITE(ah, reg, paprd_table_val[i]);
602 reg = reg + 4;
603 }
604
605 if (chain == 0)
606 reg = AR_PHY_PA_GAIN123_B0;
607 else if (chain == 1)
608 reg = AR_PHY_PA_GAIN123_B1;
609 else
610 reg = AR_PHY_PA_GAIN123_B2;
611
612 REG_RMW_FIELD(ah, reg, AR_PHY_PA_GAIN123_PA_GAIN1, small_signal_gain);
613
614 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B0,
615 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
616 training_power);
617
618 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
619 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
620 training_power);
621
622 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
623 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
624 training_power);
625}
626EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
627
628int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
629{
630
631 unsigned int i, desired_gain, gain_index;
632 unsigned int train_power;
633
634 train_power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
635 AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
636
637 train_power = train_power - 4;
638
639 desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
640
641 gain_index = 0;
642 for (i = 0; i < 32; i++) {
643 if (ah->paprd_gain_table_index[i] >= desired_gain)
644 break;
645 gain_index++;
646 }
647
648 ar9003_tx_force_gain(ah, gain_index);
649
650 REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
651 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
652
653 return 0;
654}
655EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
656
657int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
658 int chain)
659{
660 u16 *small_signal_gain = &chan->small_signal_gain[chain];
661 u32 *pa_table = chan->pa_table[chain];
662 u32 *data_L, *data_U;
663 int i, status = 0;
664 u32 *buf;
665 u32 reg;
666
667 memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain]));
668
669 buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC);
670 if (!buf)
671 return -ENOMEM;
672
673 data_L = &buf[0];
674 data_U = &buf[48];
675
676 REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
677 AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
678
679 reg = AR_PHY_CHAN_INFO_TAB_0;
680 for (i = 0; i < 48; i++)
681 data_L[i] = REG_READ(ah, reg + (i << 2));
682
683 REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
684 AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
685
686 for (i = 0; i < 48; i++)
687 data_U[i] = REG_READ(ah, reg + (i << 2));
688
689 if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain))
690 status = -2;
691
692 REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
693 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
694
695 kfree(buf);
696
697 return status;
698}
699EXPORT_SYMBOL(ar9003_paprd_create_curve);
700
701int ar9003_paprd_init_table(struct ath_hw *ah)
702{
703 ar9003_paprd_setup_single_table(ah);
704 ar9003_paprd_get_gain_table(ah);
705 return 0;
706}
707EXPORT_SYMBOL(ar9003_paprd_init_table);
708
709bool ar9003_paprd_is_done(struct ath_hw *ah)
710{
711 return !!REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
712 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
713}
714EXPORT_SYMBOL(ar9003_paprd_is_done);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 80431a2f6dc1..a753a431bb13 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -17,6 +17,28 @@
17#include "hw.h" 17#include "hw.h"
18#include "ar9003_phy.h" 18#include "ar9003_phy.h"
19 19
20static const int firstep_table[] =
21/* level: 0 1 2 3 4 5 6 7 8 */
22 { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
23
24static const int cycpwrThr1_table[] =
25/* level: 0 1 2 3 4 5 6 7 8 */
26 { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
27
28/*
29 * register values to turn OFDM weak signal detection OFF
30 */
31static const int m1ThreshLow_off = 127;
32static const int m2ThreshLow_off = 127;
33static const int m1Thresh_off = 127;
34static const int m2Thresh_off = 127;
35static const int m2CountThr_off = 31;
36static const int m2CountThrLow_off = 63;
37static const int m1ThreshLowExt_off = 127;
38static const int m2ThreshLowExt_off = 127;
39static const int m1ThreshExt_off = 127;
40static const int m2ThreshExt_off = 127;
41
20/** 42/**
21 * ar9003_hw_set_channel - set channel on single-chip device 43 * ar9003_hw_set_channel - set channel on single-chip device
22 * @ah: atheros hardware structure 44 * @ah: atheros hardware structure
@@ -94,7 +116,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
94} 116}
95 117
96/** 118/**
97 * ar9003_hw_spur_mitigate - convert baseband spur frequency 119 * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
98 * @ah: atheros hardware structure 120 * @ah: atheros hardware structure
99 * @chan: 121 * @chan:
100 * 122 *
@@ -521,15 +543,6 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
521 u32 val = INI_RA(iniArr, i, column); 543 u32 val = INI_RA(iniArr, i, column);
522 544
523 REG_WRITE(ah, reg, val); 545 REG_WRITE(ah, reg, val);
524
525 /*
526 * Determine if this is a shift register value, and insert the
527 * configured delay if so.
528 */
529 if (reg >= 0x16000 && reg < 0x17000
530 && ah->config.analog_shiftreg)
531 udelay(100);
532
533 DO_DELAY(regWrites); 546 DO_DELAY(regWrites);
534 } 547 }
535} 548}
@@ -732,71 +745,68 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
732{ 745{
733 struct ar5416AniState *aniState = ah->curani; 746 struct ar5416AniState *aniState = ah->curani;
734 struct ath_common *common = ath9k_hw_common(ah); 747 struct ath_common *common = ath9k_hw_common(ah);
748 struct ath9k_channel *chan = ah->curchan;
749 s32 value, value2;
735 750
736 switch (cmd & ah->ani_function) { 751 switch (cmd & ah->ani_function) {
737 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
738 u32 level = param;
739
740 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
741 ath_print(common, ATH_DBG_ANI,
742 "level out of range (%u > %u)\n",
743 level,
744 (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
745 return false;
746 }
747
748 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
749 AR_PHY_DESIRED_SZ_TOT_DES,
750 ah->totalSizeDesired[level]);
751 REG_RMW_FIELD(ah, AR_PHY_AGC,
752 AR_PHY_AGC_COARSE_LOW,
753 ah->coarse_low[level]);
754 REG_RMW_FIELD(ah, AR_PHY_AGC,
755 AR_PHY_AGC_COARSE_HIGH,
756 ah->coarse_high[level]);
757 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
758 AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
759
760 if (level > aniState->noiseImmunityLevel)
761 ah->stats.ast_ani_niup++;
762 else if (level < aniState->noiseImmunityLevel)
763 ah->stats.ast_ani_nidown++;
764 aniState->noiseImmunityLevel = level;
765 break;
766 }
767 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ 752 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
768 const int m1ThreshLow[] = { 127, 50 }; 753 /*
769 const int m2ThreshLow[] = { 127, 40 }; 754 * on == 1 means ofdm weak signal detection is ON
770 const int m1Thresh[] = { 127, 0x4d }; 755 * on == 1 is the default, for less noise immunity
771 const int m2Thresh[] = { 127, 0x40 }; 756 *
772 const int m2CountThr[] = { 31, 16 }; 757 * on == 0 means ofdm weak signal detection is OFF
773 const int m2CountThrLow[] = { 63, 48 }; 758 * on == 0 means more noise imm
759 */
774 u32 on = param ? 1 : 0; 760 u32 on = param ? 1 : 0;
761 /*
762 * make register setting for default
763 * (weak sig detect ON) come from INI file
764 */
765 int m1ThreshLow = on ?
766 aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
767 int m2ThreshLow = on ?
768 aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
769 int m1Thresh = on ?
770 aniState->iniDef.m1Thresh : m1Thresh_off;
771 int m2Thresh = on ?
772 aniState->iniDef.m2Thresh : m2Thresh_off;
773 int m2CountThr = on ?
774 aniState->iniDef.m2CountThr : m2CountThr_off;
775 int m2CountThrLow = on ?
776 aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
777 int m1ThreshLowExt = on ?
778 aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
779 int m2ThreshLowExt = on ?
780 aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
781 int m1ThreshExt = on ?
782 aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
783 int m2ThreshExt = on ?
784 aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
775 785
776 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, 786 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
777 AR_PHY_SFCORR_LOW_M1_THRESH_LOW, 787 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
778 m1ThreshLow[on]); 788 m1ThreshLow);
779 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, 789 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
780 AR_PHY_SFCORR_LOW_M2_THRESH_LOW, 790 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
781 m2ThreshLow[on]); 791 m2ThreshLow);
782 REG_RMW_FIELD(ah, AR_PHY_SFCORR, 792 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
783 AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]); 793 AR_PHY_SFCORR_M1_THRESH, m1Thresh);
784 REG_RMW_FIELD(ah, AR_PHY_SFCORR, 794 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
785 AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]); 795 AR_PHY_SFCORR_M2_THRESH, m2Thresh);
786 REG_RMW_FIELD(ah, AR_PHY_SFCORR, 796 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
787 AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]); 797 AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
788 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, 798 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
789 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, 799 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
790 m2CountThrLow[on]); 800 m2CountThrLow);
791 801
792 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, 802 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
793 AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]); 803 AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
794 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, 804 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
795 AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]); 805 AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
796 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, 806 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
797 AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]); 807 AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
798 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, 808 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
799 AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]); 809 AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
800 810
801 if (on) 811 if (on)
802 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, 812 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
@@ -806,6 +816,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
806 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 816 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
807 817
808 if (!on != aniState->ofdmWeakSigDetectOff) { 818 if (!on != aniState->ofdmWeakSigDetectOff) {
819 ath_print(common, ATH_DBG_ANI,
820 "** ch %d: ofdm weak signal: %s=>%s\n",
821 chan->channel,
822 !aniState->ofdmWeakSigDetectOff ?
823 "on" : "off",
824 on ? "on" : "off");
809 if (on) 825 if (on)
810 ah->stats.ast_ani_ofdmon++; 826 ah->stats.ast_ani_ofdmon++;
811 else 827 else
@@ -814,64 +830,167 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
814 } 830 }
815 break; 831 break;
816 } 832 }
817 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
818 const int weakSigThrCck[] = { 8, 6 };
819 u32 high = param ? 1 : 0;
820
821 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
822 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
823 weakSigThrCck[high]);
824 if (high != aniState->cckWeakSigThreshold) {
825 if (high)
826 ah->stats.ast_ani_cckhigh++;
827 else
828 ah->stats.ast_ani_ccklow++;
829 aniState->cckWeakSigThreshold = high;
830 }
831 break;
832 }
833 case ATH9K_ANI_FIRSTEP_LEVEL:{ 833 case ATH9K_ANI_FIRSTEP_LEVEL:{
834 const int firstep[] = { 0, 4, 8 };
835 u32 level = param; 834 u32 level = param;
836 835
837 if (level >= ARRAY_SIZE(firstep)) { 836 if (level >= ARRAY_SIZE(firstep_table)) {
838 ath_print(common, ATH_DBG_ANI, 837 ath_print(common, ATH_DBG_ANI,
839 "level out of range (%u > %u)\n", 838 "ATH9K_ANI_FIRSTEP_LEVEL: level "
839 "out of range (%u > %u)\n",
840 level, 840 level,
841 (unsigned) ARRAY_SIZE(firstep)); 841 (unsigned) ARRAY_SIZE(firstep_table));
842 return false; 842 return false;
843 } 843 }
844
845 /*
846 * make register setting relative to default
847 * from INI file & cap value
848 */
849 value = firstep_table[level] -
850 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
851 aniState->iniDef.firstep;
852 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
853 value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
854 if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
855 value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
844 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 856 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
845 AR_PHY_FIND_SIG_FIRSTEP, 857 AR_PHY_FIND_SIG_FIRSTEP,
846 firstep[level]); 858 value);
847 if (level > aniState->firstepLevel) 859 /*
848 ah->stats.ast_ani_stepup++; 860 * we need to set first step low register too
849 else if (level < aniState->firstepLevel) 861 * make register setting relative to default
850 ah->stats.ast_ani_stepdown++; 862 * from INI file & cap value
851 aniState->firstepLevel = level; 863 */
864 value2 = firstep_table[level] -
865 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
866 aniState->iniDef.firstepLow;
867 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
868 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
869 if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
870 value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
871
872 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
873 AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
874
875 if (level != aniState->firstepLevel) {
876 ath_print(common, ATH_DBG_ANI,
877 "** ch %d: level %d=>%d[def:%d] "
878 "firstep[level]=%d ini=%d\n",
879 chan->channel,
880 aniState->firstepLevel,
881 level,
882 ATH9K_ANI_FIRSTEP_LVL_NEW,
883 value,
884 aniState->iniDef.firstep);
885 ath_print(common, ATH_DBG_ANI,
886 "** ch %d: level %d=>%d[def:%d] "
887 "firstep_low[level]=%d ini=%d\n",
888 chan->channel,
889 aniState->firstepLevel,
890 level,
891 ATH9K_ANI_FIRSTEP_LVL_NEW,
892 value2,
893 aniState->iniDef.firstepLow);
894 if (level > aniState->firstepLevel)
895 ah->stats.ast_ani_stepup++;
896 else if (level < aniState->firstepLevel)
897 ah->stats.ast_ani_stepdown++;
898 aniState->firstepLevel = level;
899 }
852 break; 900 break;
853 } 901 }
854 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ 902 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
855 const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
856 u32 level = param; 903 u32 level = param;
857 904
858 if (level >= ARRAY_SIZE(cycpwrThr1)) { 905 if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
859 ath_print(common, ATH_DBG_ANI, 906 ath_print(common, ATH_DBG_ANI,
860 "level out of range (%u > %u)\n", 907 "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
908 "out of range (%u > %u)\n",
861 level, 909 level,
862 (unsigned) ARRAY_SIZE(cycpwrThr1)); 910 (unsigned) ARRAY_SIZE(cycpwrThr1_table));
863 return false; 911 return false;
864 } 912 }
913 /*
914 * make register setting relative to default
915 * from INI file & cap value
916 */
917 value = cycpwrThr1_table[level] -
918 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
919 aniState->iniDef.cycpwrThr1;
920 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
921 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
922 if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
923 value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
865 REG_RMW_FIELD(ah, AR_PHY_TIMING5, 924 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
866 AR_PHY_TIMING5_CYCPWR_THR1, 925 AR_PHY_TIMING5_CYCPWR_THR1,
867 cycpwrThr1[level]); 926 value);
868 if (level > aniState->spurImmunityLevel) 927
869 ah->stats.ast_ani_spurup++; 928 /*
870 else if (level < aniState->spurImmunityLevel) 929 * set AR_PHY_EXT_CCA for extension channel
871 ah->stats.ast_ani_spurdown++; 930 * make register setting relative to default
872 aniState->spurImmunityLevel = level; 931 * from INI file & cap value
932 */
933 value2 = cycpwrThr1_table[level] -
934 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
935 aniState->iniDef.cycpwrThr1Ext;
936 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
937 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
938 if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
939 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
940 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
941 AR_PHY_EXT_CYCPWR_THR1, value2);
942
943 if (level != aniState->spurImmunityLevel) {
944 ath_print(common, ATH_DBG_ANI,
945 "** ch %d: level %d=>%d[def:%d] "
946 "cycpwrThr1[level]=%d ini=%d\n",
947 chan->channel,
948 aniState->spurImmunityLevel,
949 level,
950 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
951 value,
952 aniState->iniDef.cycpwrThr1);
953 ath_print(common, ATH_DBG_ANI,
954 "** ch %d: level %d=>%d[def:%d] "
955 "cycpwrThr1Ext[level]=%d ini=%d\n",
956 chan->channel,
957 aniState->spurImmunityLevel,
958 level,
959 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
960 value2,
961 aniState->iniDef.cycpwrThr1Ext);
962 if (level > aniState->spurImmunityLevel)
963 ah->stats.ast_ani_spurup++;
964 else if (level < aniState->spurImmunityLevel)
965 ah->stats.ast_ani_spurdown++;
966 aniState->spurImmunityLevel = level;
967 }
873 break; 968 break;
874 } 969 }
970 case ATH9K_ANI_MRC_CCK:{
971 /*
972 * is_on == 1 means MRC CCK ON (default, less noise imm)
973 * is_on == 0 means MRC CCK is OFF (more noise imm)
974 */
975 bool is_on = param ? 1 : 0;
976 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
977 AR_PHY_MRC_CCK_ENABLE, is_on);
978 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
979 AR_PHY_MRC_CCK_MUX_REG, is_on);
980 if (!is_on != aniState->mrcCCKOff) {
981 ath_print(common, ATH_DBG_ANI,
982 "** ch %d: MRC CCK: %s=>%s\n",
983 chan->channel,
984 !aniState->mrcCCKOff ? "on" : "off",
985 is_on ? "on" : "off");
986 if (is_on)
987 ah->stats.ast_ani_ccklow++;
988 else
989 ah->stats.ast_ani_cckhigh++;
990 aniState->mrcCCKOff = !is_on;
991 }
992 break;
993 }
875 case ATH9K_ANI_PRESENT: 994 case ATH9K_ANI_PRESENT:
876 break; 995 break;
877 default: 996 default:
@@ -880,158 +999,126 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
880 return false; 999 return false;
881 } 1000 }
882 1001
883 ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
884 ath_print(common, ATH_DBG_ANI, 1002 ath_print(common, ATH_DBG_ANI,
885 "noiseImmunityLevel=%d, spurImmunityLevel=%d, " 1003 "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
886 "ofdmWeakSigDetectOff=%d\n", 1004 "MRCcck=%s listenTime=%d CC=%d listen=%d "
887 aniState->noiseImmunityLevel, 1005 "ofdmErrs=%d cckErrs=%d\n",
888 aniState->spurImmunityLevel, 1006 aniState->spurImmunityLevel,
889 !aniState->ofdmWeakSigDetectOff); 1007 !aniState->ofdmWeakSigDetectOff ? "on" : "off",
890 ath_print(common, ATH_DBG_ANI,
891 "cckWeakSigThreshold=%d, "
892 "firstepLevel=%d, listenTime=%d\n",
893 aniState->cckWeakSigThreshold,
894 aniState->firstepLevel, 1008 aniState->firstepLevel,
895 aniState->listenTime); 1009 !aniState->mrcCCKOff ? "on" : "off",
896 ath_print(common, ATH_DBG_ANI, 1010 aniState->listenTime,
897 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", 1011 aniState->cycleCount,
898 aniState->cycleCount, 1012 aniState->listenTime,
899 aniState->ofdmPhyErrCount, 1013 aniState->ofdmPhyErrCount,
900 aniState->cckPhyErrCount); 1014 aniState->cckPhyErrCount);
901
902 return true; 1015 return true;
903} 1016}
904 1017
905static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf)
906{
907 struct ath_common *common = ath9k_hw_common(ah);
908
909 if (*nf > ah->nf_2g_max) {
910 ath_print(common, ATH_DBG_CALIBRATE,
911 "2 GHz NF (%d) > MAX (%d), "
912 "correcting to MAX",
913 *nf, ah->nf_2g_max);
914 *nf = ah->nf_2g_max;
915 } else if (*nf < ah->nf_2g_min) {
916 ath_print(common, ATH_DBG_CALIBRATE,
917 "2 GHz NF (%d) < MIN (%d), "
918 "correcting to MIN",
919 *nf, ah->nf_2g_min);
920 *nf = ah->nf_2g_min;
921 }
922}
923
924static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf)
925{
926 struct ath_common *common = ath9k_hw_common(ah);
927
928 if (*nf > ah->nf_5g_max) {
929 ath_print(common, ATH_DBG_CALIBRATE,
930 "5 GHz NF (%d) > MAX (%d), "
931 "correcting to MAX",
932 *nf, ah->nf_5g_max);
933 *nf = ah->nf_5g_max;
934 } else if (*nf < ah->nf_5g_min) {
935 ath_print(common, ATH_DBG_CALIBRATE,
936 "5 GHz NF (%d) < MIN (%d), "
937 "correcting to MIN",
938 *nf, ah->nf_5g_min);
939 *nf = ah->nf_5g_min;
940 }
941}
942
943static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
944{
945 if (IS_CHAN_2GHZ(ah->curchan))
946 ar9003_hw_nf_sanitize_2g(ah, nf);
947 else
948 ar9003_hw_nf_sanitize_5g(ah, nf);
949}
950
951static void ar9003_hw_do_getnf(struct ath_hw *ah, 1018static void ar9003_hw_do_getnf(struct ath_hw *ah,
952 int16_t nfarray[NUM_NF_READINGS]) 1019 int16_t nfarray[NUM_NF_READINGS])
953{ 1020{
954 struct ath_common *common = ath9k_hw_common(ah);
955 int16_t nf; 1021 int16_t nf;
956 1022
957 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR); 1023 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
958 if (nf & 0x100) 1024 nfarray[0] = sign_extend(nf, 9);
959 nf = 0 - ((nf ^ 0x1ff) + 1);
960 ar9003_hw_nf_sanitize(ah, &nf);
961 ath_print(common, ATH_DBG_CALIBRATE,
962 "NF calibrated [ctl] [chain 0] is %d\n", nf);
963 nfarray[0] = nf;
964 1025
965 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR); 1026 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
966 if (nf & 0x100) 1027 nfarray[1] = sign_extend(nf, 9);
967 nf = 0 - ((nf ^ 0x1ff) + 1);
968 ar9003_hw_nf_sanitize(ah, &nf);
969 ath_print(common, ATH_DBG_CALIBRATE,
970 "NF calibrated [ctl] [chain 1] is %d\n", nf);
971 nfarray[1] = nf;
972 1028
973 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR); 1029 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
974 if (nf & 0x100) 1030 nfarray[2] = sign_extend(nf, 9);
975 nf = 0 - ((nf ^ 0x1ff) + 1); 1031
976 ar9003_hw_nf_sanitize(ah, &nf); 1032 if (!IS_CHAN_HT40(ah->curchan))
977 ath_print(common, ATH_DBG_CALIBRATE, 1033 return;
978 "NF calibrated [ctl] [chain 2] is %d\n", nf);
979 nfarray[2] = nf;
980 1034
981 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1035 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
982 if (nf & 0x100) 1036 nfarray[3] = sign_extend(nf, 9);
983 nf = 0 - ((nf ^ 0x1ff) + 1);
984 ar9003_hw_nf_sanitize(ah, &nf);
985 ath_print(common, ATH_DBG_CALIBRATE,
986 "NF calibrated [ext] [chain 0] is %d\n", nf);
987 nfarray[3] = nf;
988 1037
989 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR); 1038 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
990 if (nf & 0x100) 1039 nfarray[4] = sign_extend(nf, 9);
991 nf = 0 - ((nf ^ 0x1ff) + 1);
992 ar9003_hw_nf_sanitize(ah, &nf);
993 ath_print(common, ATH_DBG_CALIBRATE,
994 "NF calibrated [ext] [chain 1] is %d\n", nf);
995 nfarray[4] = nf;
996 1040
997 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR); 1041 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
998 if (nf & 0x100) 1042 nfarray[5] = sign_extend(nf, 9);
999 nf = 0 - ((nf ^ 0x1ff) + 1);
1000 ar9003_hw_nf_sanitize(ah, &nf);
1001 ath_print(common, ATH_DBG_CALIBRATE,
1002 "NF calibrated [ext] [chain 2] is %d\n", nf);
1003 nfarray[5] = nf;
1004} 1043}
1005 1044
1006void ar9003_hw_set_nf_limits(struct ath_hw *ah) 1045static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
1007{ 1046{
1008 ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ; 1047 ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
1009 ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ; 1048 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
1010 ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ; 1049 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
1011 ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ; 1050 ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
1051 ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
1052 ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9300_5GHZ;
1012} 1053}
1013 1054
1014/* 1055/*
1015 * Find out which of the RX chains are enabled 1056 * Initialize the ANI register values with default (ini) values.
1057 * This routine is called during a (full) hardware reset after
1058 * all the registers are initialised from the INI.
1016 */ 1059 */
1017static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah) 1060static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1018{ 1061{
1019 u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK); 1062 struct ar5416AniState *aniState;
1020 /* 1063 struct ath_common *common = ath9k_hw_common(ah);
1021 * The bits [2:0] indicate the rx chain mask and are to be 1064 struct ath9k_channel *chan = ah->curchan;
1022 * interpreted as follows: 1065 struct ath9k_ani_default *iniDef;
1023 * 00x => Only chain 0 is enabled 1066 int index;
1024 * 01x => Chain 1 and 0 enabled 1067 u32 val;
1025 * 1xx => Chain 2,1 and 0 enabled 1068
1026 */ 1069 index = ath9k_hw_get_ani_channel_idx(ah, chan);
1027 return chain & 0x7; 1070 aniState = &ah->ani[index];
1071 ah->curani = aniState;
1072 iniDef = &aniState->iniDef;
1073
1074 ath_print(common, ATH_DBG_ANI,
1075 "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
1076 ah->hw_version.macVersion,
1077 ah->hw_version.macRev,
1078 ah->opmode,
1079 chan->channel,
1080 chan->channelFlags);
1081
1082 val = REG_READ(ah, AR_PHY_SFCORR);
1083 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
1084 iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
1085 iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
1086
1087 val = REG_READ(ah, AR_PHY_SFCORR_LOW);
1088 iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
1089 iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
1090 iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
1091
1092 val = REG_READ(ah, AR_PHY_SFCORR_EXT);
1093 iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
1094 iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
1095 iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
1096 iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
1097 iniDef->firstep = REG_READ_FIELD(ah,
1098 AR_PHY_FIND_SIG,
1099 AR_PHY_FIND_SIG_FIRSTEP);
1100 iniDef->firstepLow = REG_READ_FIELD(ah,
1101 AR_PHY_FIND_SIG_LOW,
1102 AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW);
1103 iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
1104 AR_PHY_TIMING5,
1105 AR_PHY_TIMING5_CYCPWR_THR1);
1106 iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
1107 AR_PHY_EXT_CCA,
1108 AR_PHY_EXT_CYCPWR_THR1);
1109
1110 /* these levels just got reset to defaults by the INI */
1111 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
1112 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
1113 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
1114 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
1115
1116 aniState->cycleCount = 0;
1028} 1117}
1029 1118
1030static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) 1119void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1031{ 1120{
1032 struct ath9k_nfcal_hist *h; 1121 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1033 unsigned i, j;
1034 int32_t val;
1035 const u32 ar9300_cca_regs[6] = { 1122 const u32 ar9300_cca_regs[6] = {
1036 AR_PHY_CCA_0, 1123 AR_PHY_CCA_0,
1037 AR_PHY_CCA_1, 1124 AR_PHY_CCA_1,
@@ -1040,95 +1127,143 @@ static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
1040 AR_PHY_EXT_CCA_1, 1127 AR_PHY_EXT_CCA_1,
1041 AR_PHY_EXT_CCA_2, 1128 AR_PHY_EXT_CCA_2,
1042 }; 1129 };
1043 u8 chainmask, rx_chain_status;
1044 struct ath_common *common = ath9k_hw_common(ah);
1045 1130
1046 rx_chain_status = ar9003_hw_get_rx_chainmask(ah); 1131 priv_ops->rf_set_freq = ar9003_hw_set_channel;
1132 priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
1133 priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
1134 priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
1135 priv_ops->init_bb = ar9003_hw_init_bb;
1136 priv_ops->process_ini = ar9003_hw_process_ini;
1137 priv_ops->set_rfmode = ar9003_hw_set_rfmode;
1138 priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
1139 priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
1140 priv_ops->rfbus_req = ar9003_hw_rfbus_req;
1141 priv_ops->rfbus_done = ar9003_hw_rfbus_done;
1142 priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
1143 priv_ops->set_diversity = ar9003_hw_set_diversity;
1144 priv_ops->ani_control = ar9003_hw_ani_control;
1145 priv_ops->do_getnf = ar9003_hw_do_getnf;
1146 priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
1047 1147
1048 chainmask = 0x3F; 1148 ar9003_hw_set_nf_limits(ah);
1049 h = ah->nfCalHist; 1149 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
1150}
1050 1151
1051 for (i = 0; i < NUM_NF_READINGS; i++) { 1152void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
1052 if (chainmask & (1 << i)) { 1153{
1053 val = REG_READ(ah, ar9300_cca_regs[i]); 1154 struct ath_common *common = ath9k_hw_common(ah);
1054 val &= 0xFFFFFE00; 1155 u32 idle_tmo_ms = ah->bb_watchdog_timeout_ms;
1055 val |= (((u32) (h[i].privNF) << 1) & 0x1ff); 1156 u32 val, idle_count;
1056 REG_WRITE(ah, ar9300_cca_regs[i], val); 1157
1057 } 1158 if (!idle_tmo_ms) {
1159 /* disable IRQ, disable chip-reset for BB panic */
1160 REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
1161 REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) &
1162 ~(AR_PHY_WATCHDOG_RST_ENABLE |
1163 AR_PHY_WATCHDOG_IRQ_ENABLE));
1164
1165 /* disable watchdog in non-IDLE mode, disable in IDLE mode */
1166 REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
1167 REG_READ(ah, AR_PHY_WATCHDOG_CTL_1) &
1168 ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
1169 AR_PHY_WATCHDOG_IDLE_ENABLE));
1170
1171 ath_print(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
1172 return;
1058 } 1173 }
1059 1174
1175 /* enable IRQ, disable chip-reset for BB watchdog */
1176 val = REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & AR_PHY_WATCHDOG_CNTL2_MASK;
1177 REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
1178 (val | AR_PHY_WATCHDOG_IRQ_ENABLE) &
1179 ~AR_PHY_WATCHDOG_RST_ENABLE);
1180
1181 /* bound limit to 10 secs */
1182 if (idle_tmo_ms > 10000)
1183 idle_tmo_ms = 10000;
1184
1060 /* 1185 /*
1061 * Load software filtered NF value into baseband internal minCCApwr 1186 * The time unit for watchdog event is 2^15 44/88MHz cycles.
1062 * variable. 1187 *
1188 * For HT20 we have a time unit of 2^15/44 MHz = .74 ms per tick
1189 * For HT40 we have a time unit of 2^15/88 MHz = .37 ms per tick
1190 *
1191 * Given we use fast clock now in 5 GHz, these time units should
1192 * be common for both 2 GHz and 5 GHz.
1063 */ 1193 */
1064 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, 1194 idle_count = (100 * idle_tmo_ms) / 74;
1065 AR_PHY_AGC_CONTROL_ENABLE_NF); 1195 if (ah->curchan && IS_CHAN_HT40(ah->curchan))
1066 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, 1196 idle_count = (100 * idle_tmo_ms) / 37;
1067 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1068 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1069 1197
1070 /* 1198 /*
1071 * Wait for load to complete, should be fast, a few 10s of us. 1199 * enable watchdog in non-IDLE mode, disable in IDLE mode,
1072 * The max delay was changed from an original 250us to 10000us 1200 * set idle time-out.
1073 * since 250us often results in NF load timeout and causes deaf
1074 * condition during stress testing 12/12/2009
1075 */ 1201 */
1076 for (j = 0; j < 1000; j++) { 1202 REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
1077 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & 1203 AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
1078 AR_PHY_AGC_CONTROL_NF) == 0) 1204 AR_PHY_WATCHDOG_IDLE_MASK |
1079 break; 1205 (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
1080 udelay(10); 1206
1081 } 1207 ath_print(common, ATH_DBG_RESET,
1208 "Enabled BB Watchdog timeout (%u ms)\n",
1209 idle_tmo_ms);
1210}
1082 1211
1212void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
1213{
1083 /* 1214 /*
1084 * We timed out waiting for the noisefloor to load, probably due to an 1215 * we want to avoid printing in ISR context so we save the
1085 * in-progress rx. Simply return here and allow the load plenty of time 1216 * watchdog status to be printed later in bottom half context.
1086 * to complete before the next calibration interval. We need to avoid
1087 * trying to load -50 (which happens below) while the previous load is
1088 * still in progress as this can cause rx deafness. Instead by returning
1089 * here, the baseband nf cal will just be capped by our present
1090 * noisefloor until the next calibration timer.
1091 */ 1217 */
1092 if (j == 1000) { 1218 ah->bb_watchdog_last_status = REG_READ(ah, AR_PHY_WATCHDOG_STATUS);
1093 ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
1094 "to load: AR_PHY_AGC_CONTROL=0x%x\n",
1095 REG_READ(ah, AR_PHY_AGC_CONTROL));
1096 return;
1097 }
1098 1219
1099 /* 1220 /*
1100 * Restore maxCCAPower register parameter again so that we're not capped 1221 * the watchdog timer should reset on status read but to be sure
1101 * by the median we just loaded. This will be initial (and max) value 1222 * sure we write 0 to the watchdog status bit.
1102 * of next noise floor calibration the baseband does.
1103 */ 1223 */
1104 for (i = 0; i < NUM_NF_READINGS; i++) { 1224 REG_WRITE(ah, AR_PHY_WATCHDOG_STATUS,
1105 if (chainmask & (1 << i)) { 1225 ah->bb_watchdog_last_status & ~AR_PHY_WATCHDOG_STATUS_CLR);
1106 val = REG_READ(ah, ar9300_cca_regs[i]);
1107 val &= 0xFFFFFE00;
1108 val |= (((u32) (-50) << 1) & 0x1ff);
1109 REG_WRITE(ah, ar9300_cca_regs[i], val);
1110 }
1111 }
1112} 1226}
1113 1227
1114void ar9003_hw_attach_phy_ops(struct ath_hw *ah) 1228void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
1115{ 1229{
1116 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1230 struct ath_common *common = ath9k_hw_common(ah);
1231 u32 rxc_pcnt = 0, rxf_pcnt = 0, txf_pcnt = 0, status;
1117 1232
1118 priv_ops->rf_set_freq = ar9003_hw_set_channel; 1233 if (likely(!(common->debug_mask & ATH_DBG_RESET)))
1119 priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate; 1234 return;
1120 priv_ops->compute_pll_control = ar9003_hw_compute_pll_control; 1235
1121 priv_ops->set_channel_regs = ar9003_hw_set_channel_regs; 1236 status = ah->bb_watchdog_last_status;
1122 priv_ops->init_bb = ar9003_hw_init_bb; 1237 ath_print(common, ATH_DBG_RESET,
1123 priv_ops->process_ini = ar9003_hw_process_ini; 1238 "\n==== BB update: BB status=0x%08x ====\n", status);
1124 priv_ops->set_rfmode = ar9003_hw_set_rfmode; 1239 ath_print(common, ATH_DBG_RESET,
1125 priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive; 1240 "** BB state: wd=%u det=%u rdar=%u rOFDM=%d "
1126 priv_ops->set_delta_slope = ar9003_hw_set_delta_slope; 1241 "rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
1127 priv_ops->rfbus_req = ar9003_hw_rfbus_req; 1242 MS(status, AR_PHY_WATCHDOG_INFO),
1128 priv_ops->rfbus_done = ar9003_hw_rfbus_done; 1243 MS(status, AR_PHY_WATCHDOG_DET_HANG),
1129 priv_ops->enable_rfkill = ar9003_hw_enable_rfkill; 1244 MS(status, AR_PHY_WATCHDOG_RADAR_SM),
1130 priv_ops->set_diversity = ar9003_hw_set_diversity; 1245 MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
1131 priv_ops->ani_control = ar9003_hw_ani_control; 1246 MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
1132 priv_ops->do_getnf = ar9003_hw_do_getnf; 1247 MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
1133 priv_ops->loadnf = ar9003_hw_loadnf; 1248 MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
1249 MS(status, AR_PHY_WATCHDOG_AGC_SM),
1250 MS(status,AR_PHY_WATCHDOG_SRCH_SM));
1251
1252 ath_print(common, ATH_DBG_RESET,
1253 "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
1254 REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
1255 REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
1256 ath_print(common, ATH_DBG_RESET,
1257 "** BB mode: BB_gen_controls=0x%08x **\n",
1258 REG_READ(ah, AR_PHY_GEN_CTRL));
1259
1260 if (ath9k_hw_GetMibCycleCountsPct(ah, &rxc_pcnt, &rxf_pcnt, &txf_pcnt))
1261 ath_print(common, ATH_DBG_RESET,
1262 "** BB busy times: rx_clear=%d%%, "
1263 "rx_frame=%d%%, tx_frame=%d%% **\n",
1264 rxc_pcnt, rxf_pcnt, txf_pcnt);
1265
1266 ath_print(common, ATH_DBG_RESET,
1267 "==== BB update: done ====\n\n");
1134} 1268}
1269EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index f08cc8bda005..3394dfe52b42 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -149,6 +149,8 @@
149#define AR_PHY_EXT_CCA_THRESH62_S 16 149#define AR_PHY_EXT_CCA_THRESH62_S 16
150#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000 150#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
151#define AR_PHY_EXT_MINCCA_PWR_S 16 151#define AR_PHY_EXT_MINCCA_PWR_S 16
152#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
153#define AR_PHY_EXT_CYCPWR_THR1_S 9
152#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE 154#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
153#define AR_PHY_TIMING5_CYCPWR_THR1_S 1 155#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
154#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001 156#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
@@ -283,6 +285,12 @@
283#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00 285#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
284#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9 286#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
285 287
288#define AR_PHY_MRC_CCK_CTRL (AR_AGC_BASE + 0x1d0)
289#define AR_PHY_MRC_CCK_ENABLE 0x00000001
290#define AR_PHY_MRC_CCK_ENABLE_S 0
291#define AR_PHY_MRC_CCK_MUX_REG 0x00000002
292#define AR_PHY_MRC_CCK_MUX_REG_S 1
293
286#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200) 294#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
287 295
288#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110 296#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
@@ -451,7 +459,11 @@
451#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168) 459#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
452 460
453#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c) 461#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
454#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170) 462
463#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
464#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
465#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
466
455#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174) 467#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
456#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178) 468#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
457#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c) 469#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
@@ -467,30 +479,86 @@
467#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0) 479#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
468#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4) 480#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
469 481
470#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204) 482#define AR_PHY_TPC_1 (AR_SM_BASE + 0x1f8)
471#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208) 483#define AR_PHY_TPC_1_FORCED_DAC_GAIN 0x0000003e
472#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c) 484#define AR_PHY_TPC_1_FORCED_DAC_GAIN_S 1
473#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220) 485#define AR_PHY_TPC_1_FORCE_DAC_GAIN 0x00000001
474#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c) 486#define AR_PHY_TPC_1_FORCE_DAC_GAIN_S 0
475#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240) 487
488#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
489#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
490#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
491
492#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
493#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
494#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
495#define AR_PHY_TPC_11_OLPC_GAIN_DELTA 0x00ff0000
496#define AR_PHY_TPC_11_OLPC_GAIN_DELTA_S 16
497
498#define AR_PHY_TPC_12 (AR_SM_BASE + 0x224)
499#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5 0x3e000000
500#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5_S 25
501
502#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
503#define AR_PHY_TPC_18_THERM_CAL_VALUE 0x000000ff
504#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
505#define AR_PHY_TPC_18_VOLT_CAL_VALUE 0x0000ff00
506#define AR_PHY_TPC_18_VOLT_CAL_VALUE_S 8
507
508#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
509#define AR_PHY_TPC_19_ALPHA_VOLT 0x001f0000
510#define AR_PHY_TPC_19_ALPHA_VOLT_S 16
511#define AR_PHY_TPC_19_ALPHA_THERM 0xff
512#define AR_PHY_TPC_19_ALPHA_THERM_S 0
513
514#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
515#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN 0x00000001
516#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN_S 0
517#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN 0x0000000e
518#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_S 1
519#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN 0x00000030
520#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_S 4
521#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN 0x000003c0
522#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN_S 6
523#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA 0x00003c00
524#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA_S 10
525#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB 0x0003c000
526#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB_S 14
527#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC 0x003c0000
528#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC_S 18
529#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND 0x00c00000
530#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND_S 22
531#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL 0x01000000
532#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL_S 24
476 533
477#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
478 534
479#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280) 535#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
480 536
537#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
538
481#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448) 539#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
482#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440) 540#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
483#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c) 541#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
484#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450) 542#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
485 543
486#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0) 544#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0)
487#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4) 545#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4)
488#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8) 546#define AR_PHY_WATCHDOG_CTL_2 (AR_SM_BASE + 0x5c8)
489#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc) 547#define AR_PHY_WATCHDOG_CTL (AR_SM_BASE + 0x5cc)
490#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0) 548#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
491#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4) 549#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
492#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc) 550#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
493#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248) 551
552#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
553#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
554#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
555
556#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254)
557#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff
558#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0
559#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
560#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
561
494 562
495#define AR_PHY_65NM_CH0_SYNTH4 0x1608c 563#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
496#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002 564#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
@@ -660,17 +728,9 @@
660#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff 728#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
661#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0 729#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
662 730
663#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
664#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
665#define AR_PHY_TPC_19_ALPHA_THERM 0xff
666#define AR_PHY_TPC_19_ALPHA_THERM_S 0
667
668#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000 731#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
669#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28 732#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
670 733
671#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
672#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
673
674/* 734/*
675 * Channel 1 Register Map 735 * Channel 1 Register Map
676 */ 736 */
@@ -812,35 +872,173 @@
812#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i))) 872#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
813#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i))) 873#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
814 874
815#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001 875#define AR_PHY_WATCHDOG_NON_IDLE_ENABLE 0x00000001
816#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002 876#define AR_PHY_WATCHDOG_IDLE_ENABLE 0x00000002
817#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000 877#define AR_PHY_WATCHDOG_IDLE_MASK 0xFFFF0000
818#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC 878#define AR_PHY_WATCHDOG_NON_IDLE_MASK 0x0000FFFC
819 879
820#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002 880#define AR_PHY_WATCHDOG_RST_ENABLE 0x00000002
821#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004 881#define AR_PHY_WATCHDOG_IRQ_ENABLE 0x00000004
822#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9 882#define AR_PHY_WATCHDOG_CNTL2_MASK 0xFFFFFFF9
823 883
824#define AR_PHY_BB_WD_STATUS 0x00000007 884#define AR_PHY_WATCHDOG_INFO 0x00000007
825#define AR_PHY_BB_WD_STATUS_S 0 885#define AR_PHY_WATCHDOG_INFO_S 0
826#define AR_PHY_BB_WD_DET_HANG 0x00000008 886#define AR_PHY_WATCHDOG_DET_HANG 0x00000008
827#define AR_PHY_BB_WD_DET_HANG_S 3 887#define AR_PHY_WATCHDOG_DET_HANG_S 3
828#define AR_PHY_BB_WD_RADAR_SM 0x000000F0 888#define AR_PHY_WATCHDOG_RADAR_SM 0x000000F0
829#define AR_PHY_BB_WD_RADAR_SM_S 4 889#define AR_PHY_WATCHDOG_RADAR_SM_S 4
830#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00 890#define AR_PHY_WATCHDOG_RX_OFDM_SM 0x00000F00
831#define AR_PHY_BB_WD_RX_OFDM_SM_S 8 891#define AR_PHY_WATCHDOG_RX_OFDM_SM_S 8
832#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000 892#define AR_PHY_WATCHDOG_RX_CCK_SM 0x0000F000
833#define AR_PHY_BB_WD_RX_CCK_SM_S 12 893#define AR_PHY_WATCHDOG_RX_CCK_SM_S 12
834#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000 894#define AR_PHY_WATCHDOG_TX_OFDM_SM 0x000F0000
835#define AR_PHY_BB_WD_TX_OFDM_SM_S 16 895#define AR_PHY_WATCHDOG_TX_OFDM_SM_S 16
836#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000 896#define AR_PHY_WATCHDOG_TX_CCK_SM 0x00F00000
837#define AR_PHY_BB_WD_TX_CCK_SM_S 20 897#define AR_PHY_WATCHDOG_TX_CCK_SM_S 20
838#define AR_PHY_BB_WD_AGC_SM 0x0F000000 898#define AR_PHY_WATCHDOG_AGC_SM 0x0F000000
839#define AR_PHY_BB_WD_AGC_SM_S 24 899#define AR_PHY_WATCHDOG_AGC_SM_S 24
840#define AR_PHY_BB_WD_SRCH_SM 0xF0000000 900#define AR_PHY_WATCHDOG_SRCH_SM 0xF0000000
841#define AR_PHY_BB_WD_SRCH_SM_S 28 901#define AR_PHY_WATCHDOG_SRCH_SM_S 28
842 902
843#define AR_PHY_BB_WD_STATUS_CLR 0x00000008 903#define AR_PHY_WATCHDOG_STATUS_CLR 0x00000008
904
905/*
906 * PAPRD registers
907 */
908#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
909
910#define AR_PHY_PAPRD_AM2AM (AR_CHAN_BASE + 0xe4)
911#define AR_PHY_PAPRD_AM2AM_MASK 0x01ffffff
912#define AR_PHY_PAPRD_AM2AM_MASK_S 0
913
914#define AR_PHY_PAPRD_AM2PM (AR_CHAN_BASE + 0xe8)
915#define AR_PHY_PAPRD_AM2PM_MASK 0x01ffffff
916#define AR_PHY_PAPRD_AM2PM_MASK_S 0
917
918#define AR_PHY_PAPRD_HT40 (AR_CHAN_BASE + 0xec)
919#define AR_PHY_PAPRD_HT40_MASK 0x01ffffff
920#define AR_PHY_PAPRD_HT40_MASK_S 0
921
922#define AR_PHY_PAPRD_CTRL0_B0 (AR_CHAN_BASE + 0xf0)
923#define AR_PHY_PAPRD_CTRL0_B1 (AR_CHAN1_BASE + 0xf0)
924#define AR_PHY_PAPRD_CTRL0_B2 (AR_CHAN2_BASE + 0xf0)
925#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE 0x00000001
926#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE_S 0
927#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK 0x00000002
928#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK_S 1
929#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH 0xf8000000
930#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH_S 27
931
932#define AR_PHY_PAPRD_CTRL1_B0 (AR_CHAN_BASE + 0xf4)
933#define AR_PHY_PAPRD_CTRL1_B1 (AR_CHAN1_BASE + 0xf4)
934#define AR_PHY_PAPRD_CTRL1_B2 (AR_CHAN2_BASE + 0xf4)
935#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA 0x00000001
936#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA_S 0
937#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE 0x00000002
938#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE_S 1
939#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE 0x00000004
940#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE_S 2
941#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL 0x000001f8
942#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_S 3
943#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK 0x0001fe00
944#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK_S 9
945#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
946#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
947
948#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + 0x490)
949#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
950#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
951#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
952#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_S 1
953#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE 0x00000100
954#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_S 8
955#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE 0x00000200
956#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_S 9
957#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE 0x00000400
958#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_S 10
959#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE 0x00000800
960#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_S 11
961#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
962#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
963
964#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + 0x494)
965#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
966#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
967
968#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + 0x498)
969#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
970#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
971#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
972#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_S 6
973#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL 0x0001f000
974#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_S 12
975#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES 0x000e0000
976#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_S 17
977#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN 0x00f00000
978#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_S 20
979#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN 0x0f000000
980#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_S 24
981#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
982#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
983
984#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + 0x49c)
985#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
986#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
987#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
988#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_S 12
989#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR 0x00000fff
990#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_S 0
991
992#define AR_PHY_PAPRD_PRE_POST_SCALE_0_B0 (AR_CHAN_BASE + 0x100)
993#define AR_PHY_PAPRD_PRE_POST_SCALE_1_B0 (AR_CHAN_BASE + 0x104)
994#define AR_PHY_PAPRD_PRE_POST_SCALE_2_B0 (AR_CHAN_BASE + 0x108)
995#define AR_PHY_PAPRD_PRE_POST_SCALE_3_B0 (AR_CHAN_BASE + 0x10c)
996#define AR_PHY_PAPRD_PRE_POST_SCALE_4_B0 (AR_CHAN_BASE + 0x110)
997#define AR_PHY_PAPRD_PRE_POST_SCALE_5_B0 (AR_CHAN_BASE + 0x114)
998#define AR_PHY_PAPRD_PRE_POST_SCALE_6_B0 (AR_CHAN_BASE + 0x118)
999#define AR_PHY_PAPRD_PRE_POST_SCALE_7_B0 (AR_CHAN_BASE + 0x11c)
1000#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
1001#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
1002
1003#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + 0x4a0)
1004#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
1005#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
1006#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002
1007#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_S 1
1008#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR 0x00000004
1009#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_S 2
1010#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE 0x00000008
1011#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_S 3
1012#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX 0x000001f0
1013#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_S 4
1014#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
1015#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
1016
1017#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + 0x4a4)
1018#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
1019#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
1020#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000
1021#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_S 16
1022#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
1023#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
1024
1025#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + 0x4a8)
1026#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
1027#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
1028
1029#define AR_PHY_PAPRD_MEM_TAB_B0 (AR_CHAN_BASE + 0x120)
1030#define AR_PHY_PAPRD_MEM_TAB_B1 (AR_CHAN1_BASE + 0x120)
1031#define AR_PHY_PAPRD_MEM_TAB_B2 (AR_CHAN2_BASE + 0x120)
1032
1033#define AR_PHY_PA_GAIN123_B0 (AR_CHAN_BASE + 0xf8)
1034#define AR_PHY_PA_GAIN123_B1 (AR_CHAN1_BASE + 0xf8)
1035#define AR_PHY_PA_GAIN123_B2 (AR_CHAN2_BASE + 0xf8)
1036#define AR_PHY_PA_GAIN123_PA_GAIN1 0x3FF
1037#define AR_PHY_PA_GAIN123_PA_GAIN1_S 0
1038
1039#define AR_PHY_POWERTX_RATE5 (AR_SM_BASE + 0x1d0)
1040#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F
1041#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0
844 1042
845void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); 1043void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
846 1044
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 5ea87736a6ae..998ae2c49ed2 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -20,6 +20,7 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/leds.h> 22#include <linux/leds.h>
23#include <linux/completion.h>
23 24
24#include "debug.h" 25#include "debug.h"
25#include "common.h" 26#include "common.h"
@@ -136,6 +137,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
136#define ATH_MAX_ANTENNA 3 137#define ATH_MAX_ANTENNA 3
137#define ATH_RXBUF 512 138#define ATH_RXBUF 512
138#define ATH_TXBUF 512 139#define ATH_TXBUF 512
140#define ATH_TXBUF_RESERVE 5
141#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
139#define ATH_TXMAXTRY 13 142#define ATH_TXMAXTRY 13
140#define ATH_MGT_TXMAXTRY 4 143#define ATH_MGT_TXMAXTRY 4
141 144
@@ -192,6 +195,7 @@ enum ATH_AGGR_STATUS {
192 195
193#define ATH_TXFIFO_DEPTH 8 196#define ATH_TXFIFO_DEPTH 8
194struct ath_txq { 197struct ath_txq {
198 int axq_class;
195 u32 axq_qnum; 199 u32 axq_qnum;
196 u32 *axq_link; 200 u32 *axq_link;
197 struct list_head axq_q; 201 struct list_head axq_q;
@@ -206,6 +210,71 @@ struct ath_txq {
206 u8 txq_tailidx; 210 u8 txq_tailidx;
207}; 211};
208 212
213struct ath_atx_ac {
214 int sched;
215 int qnum;
216 struct list_head list;
217 struct list_head tid_q;
218};
219
220struct ath_buf_state {
221 int bfs_nframes;
222 u16 bfs_al;
223 u16 bfs_frmlen;
224 int bfs_seqno;
225 int bfs_tidno;
226 int bfs_retries;
227 u8 bf_type;
228 u8 bfs_paprd;
229 unsigned long bfs_paprd_timestamp;
230 u32 bfs_keyix;
231 enum ath9k_key_type bfs_keytype;
232};
233
234struct ath_buf {
235 struct list_head list;
236 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
237 an aggregate) */
238 struct ath_buf *bf_next; /* next subframe in the aggregate */
239 struct sk_buff *bf_mpdu; /* enclosing frame structure */
240 void *bf_desc; /* virtual addr of desc */
241 dma_addr_t bf_daddr; /* physical addr of desc */
242 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
243 bool bf_stale;
244 bool bf_isnullfunc;
245 bool bf_tx_aborted;
246 u16 bf_flags;
247 struct ath_buf_state bf_state;
248 dma_addr_t bf_dmacontext;
249 struct ath_wiphy *aphy;
250};
251
252struct ath_atx_tid {
253 struct list_head list;
254 struct list_head buf_q;
255 struct ath_node *an;
256 struct ath_atx_ac *ac;
257 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
258 u16 seq_start;
259 u16 seq_next;
260 u16 baw_size;
261 int tidno;
262 int baw_head; /* first un-acked tx buffer */
263 int baw_tail; /* next unused tx buffer slot */
264 int sched;
265 int paused;
266 u8 state;
267};
268
269struct ath_node {
270 struct ath_common *common;
271 struct ath_atx_tid tid[WME_NUM_TID];
272 struct ath_atx_ac ac[WME_NUM_AC];
273 u16 maxampdu;
274 u8 mpdudensity;
275 int last_rssi;
276};
277
209#define AGGR_CLEANUP BIT(1) 278#define AGGR_CLEANUP BIT(1)
210#define AGGR_ADDBA_COMPLETE BIT(2) 279#define AGGR_ADDBA_COMPLETE BIT(2)
211#define AGGR_ADDBA_PROGRESS BIT(3) 280#define AGGR_ADDBA_PROGRESS BIT(3)
@@ -214,6 +283,7 @@ struct ath_tx_control {
214 struct ath_txq *txq; 283 struct ath_txq *txq;
215 int if_id; 284 int if_id;
216 enum ath9k_internal_frame_type frame_type; 285 enum ath9k_internal_frame_type frame_type;
286 u8 paprd;
217}; 287};
218 288
219#define ATH_TX_ERROR 0x01 289#define ATH_TX_ERROR 0x01
@@ -223,11 +293,12 @@ struct ath_tx_control {
223struct ath_tx { 293struct ath_tx {
224 u16 seq_no; 294 u16 seq_no;
225 u32 txqsetup; 295 u32 txqsetup;
226 int hwq_map[ATH9K_WME_AC_VO+1]; 296 int hwq_map[WME_NUM_AC];
227 spinlock_t txbuflock; 297 spinlock_t txbuflock;
228 struct list_head txbuf; 298 struct list_head txbuf;
229 struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; 299 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
230 struct ath_descdma txdma; 300 struct ath_descdma txdma;
301 int pending_frames[WME_NUM_AC];
231}; 302};
232 303
233struct ath_rx_edma { 304struct ath_rx_edma {
@@ -267,7 +338,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
267void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); 338void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
268int ath_tx_init(struct ath_softc *sc, int nbufs); 339int ath_tx_init(struct ath_softc *sc, int nbufs);
269void ath_tx_cleanup(struct ath_softc *sc); 340void ath_tx_cleanup(struct ath_softc *sc);
270struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
271int ath_txq_update(struct ath_softc *sc, int qnum, 341int ath_txq_update(struct ath_softc *sc, int qnum,
272 struct ath9k_tx_queue_info *q); 342 struct ath9k_tx_queue_info *q);
273int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 343int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -351,10 +421,15 @@ int ath_beaconq_config(struct ath_softc *sc);
351 421
352#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ 422#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
353#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ 423#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
354#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */ 424#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */
425#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */
355#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 426#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
356#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 427#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
357 428
429#define ATH_PAPRD_TIMEOUT 100 /* msecs */
430
431void ath_hw_check(struct work_struct *work);
432void ath_paprd_calibrate(struct work_struct *work);
358void ath_ani_calibrate(unsigned long data); 433void ath_ani_calibrate(unsigned long data);
359 434
360/**********/ 435/**********/
@@ -487,6 +562,9 @@ struct ath_softc {
487 spinlock_t sc_serial_rw; 562 spinlock_t sc_serial_rw;
488 spinlock_t sc_pm_lock; 563 spinlock_t sc_pm_lock;
489 struct mutex mutex; 564 struct mutex mutex;
565 struct work_struct paprd_work;
566 struct work_struct hw_check_work;
567 struct completion paprd_complete;
490 568
491 u32 intrstatus; 569 u32 intrstatus;
492 u32 sc_flags; /* SC_OP_* */ 570 u32 sc_flags; /* SC_OP_* */
@@ -545,7 +623,6 @@ struct ath_wiphy {
545 623
546void ath9k_tasklet(unsigned long data); 624void ath9k_tasklet(unsigned long data);
547int ath_reset(struct ath_softc *sc, bool retry_tx); 625int ath_reset(struct ath_softc *sc, bool retry_tx);
548int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
549int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 626int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
550int ath_cabq_update(struct ath_softc *); 627int ath_cabq_update(struct ath_softc *);
551 628
@@ -556,13 +633,12 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
556 633
557extern struct ieee80211_ops ath9k_ops; 634extern struct ieee80211_ops ath9k_ops;
558extern int modparam_nohwcrypt; 635extern int modparam_nohwcrypt;
636extern int led_blink;
559 637
560irqreturn_t ath_isr(int irq, void *dev); 638irqreturn_t ath_isr(int irq, void *dev);
561int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, 639int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
562 const struct ath_bus_ops *bus_ops); 640 const struct ath_bus_ops *bus_ops);
563void ath9k_deinit_device(struct ath_softc *sc); 641void ath9k_deinit_device(struct ath_softc *sc);
564const char *ath_mac_bb_name(u32 mac_bb_version);
565const char *ath_rf_name(u16 rf_version);
566void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 642void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
567void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 643void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
568 struct ath9k_channel *ichan); 644 struct ath9k_channel *ichan);
@@ -611,9 +687,7 @@ bool ath9k_all_wiphys_idle(struct ath_softc *sc);
611void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle); 687void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
612 688
613void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue); 689void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
614void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); 690bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
615
616int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
617 691
618void ath_start_rfkill_poll(struct ath_softc *sc); 692void ath_start_rfkill_poll(struct ath_softc *sc);
619extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); 693extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index f43d85a302c4..4d4b22d52dfd 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -38,8 +38,7 @@ int ath_beaconq_config(struct ath_softc *sc)
38 qi.tqi_cwmax = 0; 38 qi.tqi_cwmax = 0;
39 } else { 39 } else {
40 /* Adhoc mode; important thing is to use 2x cwmin. */ 40 /* Adhoc mode; important thing is to use 2x cwmin. */
41 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, 41 qnum = sc->tx.hwq_map[WME_AC_BE];
42 ATH9K_WME_AC_BE);
43 ath9k_hw_get_txq_props(ah, qnum, &qi_be); 42 ath9k_hw_get_txq_props(ah, qnum, &qi_be);
44 qi.tqi_aifs = qi_be.tqi_aifs; 43 qi.tqi_aifs = qi_be.tqi_aifs;
45 qi.tqi_cwmin = 4*qi_be.tqi_cwmin; 44 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 07b8fa6fb62f..139289e4e933 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -74,13 +74,8 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
74 h[i].currIndex = 0; 74 h[i].currIndex = 0;
75 75
76 if (h[i].invalidNFcount > 0) { 76 if (h[i].invalidNFcount > 0) {
77 if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE || 77 h[i].invalidNFcount--;
78 nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) { 78 h[i].privNF = nfarray[i];
79 h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX;
80 } else {
81 h[i].invalidNFcount--;
82 h[i].privNF = nfarray[i];
83 }
84 } else { 79 } else {
85 h[i].privNF = 80 h[i].privNF =
86 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer); 81 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
@@ -172,6 +167,116 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah)
172 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 167 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
173} 168}
174 169
170void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
171{
172 struct ath9k_nfcal_hist *h;
173 unsigned i, j;
174 int32_t val;
175 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
176 struct ath_common *common = ath9k_hw_common(ah);
177
178 h = ah->nfCalHist;
179
180 for (i = 0; i < NUM_NF_READINGS; i++) {
181 if (chainmask & (1 << i)) {
182 val = REG_READ(ah, ah->nf_regs[i]);
183 val &= 0xFFFFFE00;
184 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
185 REG_WRITE(ah, ah->nf_regs[i], val);
186 }
187 }
188
189 /*
190 * Load software filtered NF value into baseband internal minCCApwr
191 * variable.
192 */
193 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
194 AR_PHY_AGC_CONTROL_ENABLE_NF);
195 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
196 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
197 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
198
199 /*
200 * Wait for load to complete, should be fast, a few 10s of us.
201 * The max delay was changed from an original 250us to 10000us
202 * since 250us often results in NF load timeout and causes deaf
203 * condition during stress testing 12/12/2009
204 */
205 for (j = 0; j < 1000; j++) {
206 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
207 AR_PHY_AGC_CONTROL_NF) == 0)
208 break;
209 udelay(10);
210 }
211
212 /*
213 * We timed out waiting for the noisefloor to load, probably due to an
214 * in-progress rx. Simply return here and allow the load plenty of time
215 * to complete before the next calibration interval. We need to avoid
216 * trying to load -50 (which happens below) while the previous load is
217 * still in progress as this can cause rx deafness. Instead by returning
218 * here, the baseband nf cal will just be capped by our present
219 * noisefloor until the next calibration timer.
220 */
221 if (j == 1000) {
222 ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
223 "to load: AR_PHY_AGC_CONTROL=0x%x\n",
224 REG_READ(ah, AR_PHY_AGC_CONTROL));
225 return;
226 }
227
228 /*
229 * Restore maxCCAPower register parameter again so that we're not capped
230 * by the median we just loaded. This will be initial (and max) value
231 * of next noise floor calibration the baseband does.
232 */
233 ENABLE_REGWRITE_BUFFER(ah);
234 for (i = 0; i < NUM_NF_READINGS; i++) {
235 if (chainmask & (1 << i)) {
236 val = REG_READ(ah, ah->nf_regs[i]);
237 val &= 0xFFFFFE00;
238 val |= (((u32) (-50) << 1) & 0x1ff);
239 REG_WRITE(ah, ah->nf_regs[i], val);
240 }
241 }
242 REGWRITE_BUFFER_FLUSH(ah);
243 DISABLE_REGWRITE_BUFFER(ah);
244}
245
246
247static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
248{
249 struct ath_common *common = ath9k_hw_common(ah);
250 struct ath_nf_limits *limit;
251 int i;
252
253 if (IS_CHAN_2GHZ(ah->curchan))
254 limit = &ah->nf_2g;
255 else
256 limit = &ah->nf_5g;
257
258 for (i = 0; i < NUM_NF_READINGS; i++) {
259 if (!nf[i])
260 continue;
261
262 ath_print(common, ATH_DBG_CALIBRATE,
263 "NF calibrated [%s] [chain %d] is %d\n",
264 (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
265
266 if (nf[i] > limit->max) {
267 ath_print(common, ATH_DBG_CALIBRATE,
268 "NF[%d] (%d) > MAX (%d), correcting to MAX",
269 i, nf[i], limit->max);
270 nf[i] = limit->max;
271 } else if (nf[i] < limit->min) {
272 ath_print(common, ATH_DBG_CALIBRATE,
273 "NF[%d] (%d) < MIN (%d), correcting to NOM",
274 i, nf[i], limit->min);
275 nf[i] = limit->nominal;
276 }
277 }
278}
279
175int16_t ath9k_hw_getnf(struct ath_hw *ah, 280int16_t ath9k_hw_getnf(struct ath_hw *ah,
176 struct ath9k_channel *chan) 281 struct ath9k_channel *chan)
177{ 282{
@@ -190,6 +295,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
190 return chan->rawNoiseFloor; 295 return chan->rawNoiseFloor;
191 } else { 296 } else {
192 ath9k_hw_do_getnf(ah, nfarray); 297 ath9k_hw_do_getnf(ah, nfarray);
298 ath9k_hw_nf_sanitize(ah, nfarray);
193 nf = nfarray[0]; 299 nf = nfarray[0];
194 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh) 300 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
195 && nf > nfThresh) { 301 && nf > nfThresh) {
@@ -211,25 +317,21 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
211 317
212void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah) 318void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
213{ 319{
320 struct ath_nf_limits *limit;
214 int i, j; 321 int i, j;
215 s16 noise_floor; 322
216 323 if (!ah->curchan || IS_CHAN_2GHZ(ah->curchan))
217 if (AR_SREV_9280(ah)) 324 limit = &ah->nf_2g;
218 noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
219 else if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
220 noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
221 else if (AR_SREV_9287(ah))
222 noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
223 else 325 else
224 noise_floor = AR_PHY_CCA_MAX_AR5416_GOOD_VALUE; 326 limit = &ah->nf_5g;
225 327
226 for (i = 0; i < NUM_NF_READINGS; i++) { 328 for (i = 0; i < NUM_NF_READINGS; i++) {
227 ah->nfCalHist[i].currIndex = 0; 329 ah->nfCalHist[i].currIndex = 0;
228 ah->nfCalHist[i].privNF = noise_floor; 330 ah->nfCalHist[i].privNF = limit->nominal;
229 ah->nfCalHist[i].invalidNFcount = 331 ah->nfCalHist[i].invalidNFcount =
230 AR_PHY_CCA_FILTERWINDOW_LENGTH; 332 AR_PHY_CCA_FILTERWINDOW_LENGTH;
231 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { 333 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
232 ah->nfCalHist[i].nfCalBuffer[j] = noise_floor; 334 ah->nfCalHist[i].nfCalBuffer[j] = limit->nominal;
233 } 335 }
234 } 336 }
235} 337}
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 24538bdb9126..cd60d09cdda7 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,12 +19,6 @@
19 19
20#include "hw.h" 20#include "hw.h"
21 21
22#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
23#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
24#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
25#define AR_PHY_CCA_MAX_AR9287_GOOD_VALUE -118
26#define AR_PHY_CCA_MAX_HIGH_VALUE -62
27#define AR_PHY_CCA_MIN_BAD_VALUE -140
28#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3 22#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
29#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 23#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
30 24
@@ -115,6 +109,7 @@ struct ath9k_pacal_info{
115 109
116bool ath9k_hw_reset_calvalid(struct ath_hw *ah); 110bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
117void ath9k_hw_start_nfcal(struct ath_hw *ah); 111void ath9k_hw_start_nfcal(struct ath_hw *ah);
112void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
118int16_t ath9k_hw_getnf(struct ath_hw *ah, 113int16_t ath9k_hw_getnf(struct ath_hw *ah,
119 struct ath9k_channel *chan); 114 struct ath9k_channel *chan);
120void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); 115void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 7707341cd0d3..c86f7d3593ab 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,270 +27,6 @@ MODULE_AUTHOR("Atheros Communications");
27MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards."); 27MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
28MODULE_LICENSE("Dual BSD/GPL"); 28MODULE_LICENSE("Dual BSD/GPL");
29 29
30/* Common RX processing */
31
32/* Assumes you've already done the endian to CPU conversion */
/*
 * Decide whether a received frame should be passed up to mac80211.
 *
 * Assumes the descriptor fields have already been converted to CPU
 * endianness.  Returns false for frames that are clearly corrupt or
 * unsupported (zero or oversized length, chained descriptors, PHY
 * errors, non-crypto error frames).  Decrypt and Michael MIC failures
 * are accepted but flagged in @rxs so mac80211 can handle and account
 * for them; in monitor mode CRC-errored frames are accepted as well.
 * *@decrypt_error is set to true when hardware decryption failed.
 */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct sk_buff *skb,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	__le16 fc;

	hdr = (struct ieee80211_hdr *) skb->data;
	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > common->rx_bufsize)
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
			else
				rxs->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
110
/*
 * Map the hardware rate code in @rx_stats to a mac80211 rate index in
 * @rxs.
 *
 * Rate codes with bit 7 set are HT (MCS) rates: the low 7 bits are the
 * MCS index, and the 40 MHz / short-GI flags are copied from the RX
 * descriptor flags.  Legacy rates are looked up in the current band's
 * bitrate table, matching either the long- or short-preamble hardware
 * value.
 *
 * Returns 0 on success, -EINVAL if the rate code is unknown (which
 * should not happen for frames the hardware accepted).
 */
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs,
			      struct sk_buff *skb)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
	if ((common->debug_mask & ATH_DBG_XMIT))
		print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);

	return -EINVAL;
}
158
/*
 * Smooth the per-frame RSSI with the per-station low-pass filter and
 * write the result back into @rx_stats->rs_rssi.
 *
 * When the transmitting station is known, its running RSSI average is
 * updated (skipping bad readings and non-final A-MPDU subframes) and the
 * filtered value replaces the raw one; the result is clamped to >= 0.
 * Beacon RSSI is additionally recorded for the ANI algorithm.
 */
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct sk_buff *skb,
			       struct ath_rx_status *rx_stats)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	rcu_read_lock();
	/*
	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
	 * under the current ath9k virtual wiphy implementation as we have
	 * no way of tying a vif to wiphy. Typically vifs are attached to
	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
	 * wiphy you'd have to iterate over every wiphy and each sdata.
	 */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
	if (sta) {
		an = (struct ath_node *) sta->drv_priv;
		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
		    !rx_stats->rs_moreaggr)
			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
		last_rssi = an->last_rssi;
	}
	rcu_read_unlock();

	/* Use the filtered average when one exists; clamp to >= 0. */
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	if (ieee80211_is_beacon(fc))
		ah->stats.avgbrssi = rx_stats->rs_rssi;
}
202
/*
 * First-stage RX processing shared by the ath9k drivers: validate the
 * frame, smooth its RSSI, and fill in mac80211's RX status (timestamp,
 * band, frequency, signal, antenna, rate).
 *
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This lets us keep statistics
 * there.
 *
 * Returns 0 when the frame should be passed up, -EINVAL when it must be
 * dropped.  *@decrypt_error is set when hardware decryption failed.
 */
int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
				struct ieee80211_hw *hw,
				struct sk_buff *skb,
				struct ath_rx_status *rx_stats,
				struct ieee80211_rx_status *rx_status,
				bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, skb, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
		return -EINVAL;

	rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}
EXPORT_SYMBOL(ath9k_cmn_rx_skb_preprocess);
241
/*
 * Second-stage RX processing: strip any hardware-inserted MAC header
 * padding and set mac80211's decryption flags.
 *
 * The frame is marked RX_FLAG_DECRYPTED when the hardware reported a
 * valid key index (or, for frames where it did not, when the key index
 * recovered from the IV is present in the key map) and no decrypt error
 * was reported.  When software crypto is required for management frames
 * (ah->sw_mgmt_crypto), the flag is cleared again for those frames.
 */
void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
				  struct sk_buff *skb,
				  struct ath_rx_status *rx_stats,
				  struct ieee80211_rx_status *rxs,
				  bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		/* Hardware gave no key index; recover it from the KeyID
		 * octet of the IV and check it against our key map. */
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
293
294int ath9k_cmn_padpos(__le16 frame_control) 30int ath9k_cmn_padpos(__le16 frame_control)
295{ 31{
296 int padpos = 24; 32 int padpos = 24;
@@ -475,10 +211,14 @@ static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
475 return -1; 211 return -1;
476} 212}
477 213
478static int ath_reserve_key_cache_slot(struct ath_common *common) 214static int ath_reserve_key_cache_slot(struct ath_common *common,
215 enum ieee80211_key_alg alg)
479{ 216{
480 int i; 217 int i;
481 218
219 if (alg == ALG_TKIP)
220 return ath_reserve_key_cache_slot_tkip(common);
221
482 /* First, try to find slots that would not be available for TKIP. */ 222 /* First, try to find slots that would not be available for TKIP. */
483 if (common->splitmic) { 223 if (common->splitmic) {
484 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) { 224 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
@@ -547,6 +287,7 @@ int ath9k_cmn_key_config(struct ath_common *common,
547 struct ath_hw *ah = common->ah; 287 struct ath_hw *ah = common->ah;
548 struct ath9k_keyval hk; 288 struct ath9k_keyval hk;
549 const u8 *mac = NULL; 289 const u8 *mac = NULL;
290 u8 gmac[ETH_ALEN];
550 int ret = 0; 291 int ret = 0;
551 int idx; 292 int idx;
552 293
@@ -570,9 +311,27 @@ int ath9k_cmn_key_config(struct ath_common *common,
570 memcpy(hk.kv_val, key->key, key->keylen); 311 memcpy(hk.kv_val, key->key, key->keylen);
571 312
572 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 313 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
573 /* For now, use the default keys for broadcast keys. This may 314 switch (vif->type) {
574 * need to change with virtual interfaces. */ 315 case NL80211_IFTYPE_AP:
575 idx = key->keyidx; 316 memcpy(gmac, vif->addr, ETH_ALEN);
317 gmac[0] |= 0x01;
318 mac = gmac;
319 idx = ath_reserve_key_cache_slot(common, key->alg);
320 break;
321 case NL80211_IFTYPE_ADHOC:
322 if (!sta) {
323 idx = key->keyidx;
324 break;
325 }
326 memcpy(gmac, sta->addr, ETH_ALEN);
327 gmac[0] |= 0x01;
328 mac = gmac;
329 idx = ath_reserve_key_cache_slot(common, key->alg);
330 break;
331 default:
332 idx = key->keyidx;
333 break;
334 }
576 } else if (key->keyidx) { 335 } else if (key->keyidx) {
577 if (WARN_ON(!sta)) 336 if (WARN_ON(!sta))
578 return -EOPNOTSUPP; 337 return -EOPNOTSUPP;
@@ -589,14 +348,12 @@ int ath9k_cmn_key_config(struct ath_common *common,
589 return -EOPNOTSUPP; 348 return -EOPNOTSUPP;
590 mac = sta->addr; 349 mac = sta->addr;
591 350
592 if (key->alg == ALG_TKIP) 351 idx = ath_reserve_key_cache_slot(common, key->alg);
593 idx = ath_reserve_key_cache_slot_tkip(common);
594 else
595 idx = ath_reserve_key_cache_slot(common);
596 if (idx < 0)
597 return -ENOSPC; /* no free key cache entries */
598 } 352 }
599 353
354 if (idx < 0)
355 return -ENOSPC; /* no free key cache entries */
356
600 if (key->alg == ALG_TKIP) 357 if (key->alg == ALG_TKIP)
601 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac, 358 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
602 vif->type == NL80211_IFTYPE_AP); 359 vif->type == NL80211_IFTYPE_AP);
@@ -644,6 +401,19 @@ void ath9k_cmn_key_delete(struct ath_common *common,
644} 401}
645EXPORT_SYMBOL(ath9k_cmn_key_delete); 402EXPORT_SYMBOL(ath9k_cmn_key_delete);
646 403
/*
 * Count the number of chains (set bits) in @chainmask, capped at @max.
 * Used to derive the number of spatial streams from a chain mask.
 * Note that a zero chainmask still yields 1.
 */
int ath9k_cmn_count_streams(unsigned int chainmask, int max)
{
	int nstreams = 0;

	/* Knock out the lowest set bit each pass; stop at the cap. */
	do {
		nstreams++;
		if (nstreams == max)
			break;
		chainmask &= chainmask - 1;
	} while (chainmask);

	return nstreams;
}
415EXPORT_SYMBOL(ath9k_cmn_count_streams);
416
647static int __init ath9k_cmn_init(void) 417static int __init ath9k_cmn_init(void)
648{ 418{
649 return 0; 419 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e08f7e5a26e0..97809d39c73f 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -52,82 +52,6 @@
52#define ATH_EP_RND(x, mul) \ 52#define ATH_EP_RND(x, mul) \
53 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 53 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
54 54
55struct ath_atx_ac {
56 int sched;
57 int qnum;
58 struct list_head list;
59 struct list_head tid_q;
60};
61
62struct ath_buf_state {
63 int bfs_nframes;
64 u16 bfs_al;
65 u16 bfs_frmlen;
66 int bfs_seqno;
67 int bfs_tidno;
68 int bfs_retries;
69 u8 bf_type;
70 u32 bfs_keyix;
71 enum ath9k_key_type bfs_keytype;
72};
73
74struct ath_buf {
75 struct list_head list;
76 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
77 an aggregate) */
78 struct ath_buf *bf_next; /* next subframe in the aggregate */
79 struct sk_buff *bf_mpdu; /* enclosing frame structure */
80 void *bf_desc; /* virtual addr of desc */
81 dma_addr_t bf_daddr; /* physical addr of desc */
82 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
83 bool bf_stale;
84 bool bf_isnullfunc;
85 bool bf_tx_aborted;
86 u16 bf_flags;
87 struct ath_buf_state bf_state;
88 dma_addr_t bf_dmacontext;
89 struct ath_wiphy *aphy;
90};
91
92struct ath_atx_tid {
93 struct list_head list;
94 struct list_head buf_q;
95 struct ath_node *an;
96 struct ath_atx_ac *ac;
97 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
98 u16 seq_start;
99 u16 seq_next;
100 u16 baw_size;
101 int tidno;
102 int baw_head; /* first un-acked tx buffer */
103 int baw_tail; /* next unused tx buffer slot */
104 int sched;
105 int paused;
106 u8 state;
107};
108
109struct ath_node {
110 struct ath_common *common;
111 struct ath_atx_tid tid[WME_NUM_TID];
112 struct ath_atx_ac ac[WME_NUM_AC];
113 u16 maxampdu;
114 u8 mpdudensity;
115 int last_rssi;
116};
117
118int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
119 struct ieee80211_hw *hw,
120 struct sk_buff *skb,
121 struct ath_rx_status *rx_stats,
122 struct ieee80211_rx_status *rx_status,
123 bool *decrypt_error);
124
125void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
126 struct sk_buff *skb,
127 struct ath_rx_status *rx_stats,
128 struct ieee80211_rx_status *rxs,
129 bool decrypt_error);
130
131int ath9k_cmn_padpos(__le16 frame_control); 55int ath9k_cmn_padpos(__le16 frame_control);
132int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 56int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
133void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, 57void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
@@ -140,3 +64,4 @@ int ath9k_cmn_key_config(struct ath_common *common,
140 struct ieee80211_key_conf *key); 64 struct ieee80211_key_conf *key);
141void ath9k_cmn_key_delete(struct ath_common *common, 65void ath9k_cmn_key_delete(struct ath_common *common,
142 struct ieee80211_key_conf *key); 66 struct ieee80211_key_conf *key);
67int ath9k_cmn_count_streams(unsigned int chainmask, int max);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 29898f8d1893..54aae931424e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -42,7 +42,7 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
42 char buf[32]; 42 char buf[32];
43 unsigned int len; 43 unsigned int len;
44 44
45 len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask); 45 len = sprintf(buf, "0x%08x\n", common->debug_mask);
46 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 46 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
47} 47}
48 48
@@ -57,7 +57,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
57 57
58 len = min(count, sizeof(buf) - 1); 58 len = min(count, sizeof(buf) - 1);
59 if (copy_from_user(buf, user_buf, len)) 59 if (copy_from_user(buf, user_buf, len))
60 return -EINVAL; 60 return -EFAULT;
61 61
62 buf[len] = '\0'; 62 buf[len] = '\0';
63 if (strict_strtoul(buf, 0, &mask)) 63 if (strict_strtoul(buf, 0, &mask))
@@ -86,7 +86,7 @@ static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
86 char buf[32]; 86 char buf[32];
87 unsigned int len; 87 unsigned int len;
88 88
89 len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask); 89 len = sprintf(buf, "0x%08x\n", common->tx_chainmask);
90 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 90 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
91} 91}
92 92
@@ -101,7 +101,7 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use
101 101
102 len = min(count, sizeof(buf) - 1); 102 len = min(count, sizeof(buf) - 1);
103 if (copy_from_user(buf, user_buf, len)) 103 if (copy_from_user(buf, user_buf, len))
104 return -EINVAL; 104 return -EFAULT;
105 105
106 buf[len] = '\0'; 106 buf[len] = '\0';
107 if (strict_strtoul(buf, 0, &mask)) 107 if (strict_strtoul(buf, 0, &mask))
@@ -128,7 +128,7 @@ static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
128 char buf[32]; 128 char buf[32];
129 unsigned int len; 129 unsigned int len;
130 130
131 len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask); 131 len = sprintf(buf, "0x%08x\n", common->rx_chainmask);
132 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 132 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
133} 133}
134 134
@@ -143,7 +143,7 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use
143 143
144 len = min(count, sizeof(buf) - 1); 144 len = min(count, sizeof(buf) - 1);
145 if (copy_from_user(buf, user_buf, len)) 145 if (copy_from_user(buf, user_buf, len))
146 return -EINVAL; 146 return -EFAULT;
147 147
148 buf[len] = '\0'; 148 buf[len] = '\0';
149 if (strict_strtoul(buf, 0, &mask)) 149 if (strict_strtoul(buf, 0, &mask))
@@ -176,7 +176,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
176 176
177 buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL); 177 buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
178 if (!buf) 178 if (!buf)
179 return 0; 179 return -ENOMEM;
180 180
181 ath9k_ps_wakeup(sc); 181 ath9k_ps_wakeup(sc);
182 182
@@ -248,6 +248,9 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
248 248
249 ath9k_ps_restore(sc); 249 ath9k_ps_restore(sc);
250 250
251 if (len > DMA_BUF_LEN)
252 len = DMA_BUF_LEN;
253
251 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); 254 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
252 kfree(buf); 255 kfree(buf);
253 return retval; 256 return retval;
@@ -269,6 +272,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
269 sc->debug.stats.istats.rxlp++; 272 sc->debug.stats.istats.rxlp++;
270 if (status & ATH9K_INT_RXHP) 273 if (status & ATH9K_INT_RXHP)
271 sc->debug.stats.istats.rxhp++; 274 sc->debug.stats.istats.rxhp++;
275 if (status & ATH9K_INT_BB_WATCHDOG)
276 sc->debug.stats.istats.bb_watchdog++;
272 } else { 277 } else {
273 if (status & ATH9K_INT_RX) 278 if (status & ATH9K_INT_RX)
274 sc->debug.stats.istats.rxok++; 279 sc->debug.stats.istats.rxok++;
@@ -319,6 +324,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
319 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp); 324 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
320 len += snprintf(buf + len, sizeof(buf) - len, 325 len += snprintf(buf + len, sizeof(buf) - len,
321 "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp); 326 "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
327 len += snprintf(buf + len, sizeof(buf) - len,
328 "%8s: %10u\n", "WATCHDOG",
329 sc->debug.stats.istats.bb_watchdog);
322 } else { 330 } else {
323 len += snprintf(buf + len, sizeof(buf) - len, 331 len += snprintf(buf + len, sizeof(buf) - len,
324 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok); 332 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
@@ -358,6 +366,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
358 len += snprintf(buf + len, sizeof(buf) - len, 366 len += snprintf(buf + len, sizeof(buf) - len,
359 "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); 367 "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total);
360 368
369 if (len > sizeof(buf))
370 len = sizeof(buf);
371
361 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 372 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
362} 373}
363 374
@@ -397,11 +408,10 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
397 if (sc->cur_rate_table == NULL) 408 if (sc->cur_rate_table == NULL)
398 return 0; 409 return 0;
399 410
400 max = 80 + sc->cur_rate_table->rate_cnt * 1024; 411 max = 80 + sc->cur_rate_table->rate_cnt * 1024 + 1;
401 buf = kmalloc(max + 1, GFP_KERNEL); 412 buf = kmalloc(max, GFP_KERNEL);
402 if (buf == NULL) 413 if (buf == NULL)
403 return 0; 414 return -ENOMEM;
404 buf[max] = 0;
405 415
406 len += sprintf(buf, "%6s %6s %6s " 416 len += sprintf(buf, "%6s %6s %6s "
407 "%10s %10s %10s %10s\n", 417 "%10s %10s %10s %10s\n",
@@ -443,6 +453,9 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
443 stats->per); 453 stats->per);
444 } 454 }
445 455
456 if (len > max)
457 len = max;
458
446 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); 459 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
447 kfree(buf); 460 kfree(buf);
448 return retval; 461 return retval;
@@ -505,6 +518,9 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
505 len += snprintf(buf + len, sizeof(buf) - len, 518 len += snprintf(buf + len, sizeof(buf) - len,
506 "addrmask: %pM\n", addr); 519 "addrmask: %pM\n", addr);
507 520
521 if (len > sizeof(buf))
522 len = sizeof(buf);
523
508 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 524 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
509} 525}
510 526
@@ -614,10 +630,10 @@ static const struct file_operations fops_wiphy = {
614 do { \ 630 do { \
615 len += snprintf(buf + len, size - len, \ 631 len += snprintf(buf + len, size - len, \
616 "%s%13u%11u%10u%10u\n", str, \ 632 "%s%13u%11u%10u%10u\n", str, \
617 sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BE]].elem, \ 633 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
618 sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BK]].elem, \ 634 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
619 sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VI]].elem, \ 635 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
620 sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VO]].elem); \ 636 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
621} while(0) 637} while(0)
622 638
623static ssize_t read_file_xmit(struct file *file, char __user *user_buf, 639static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -630,7 +646,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
630 646
631 buf = kzalloc(size, GFP_KERNEL); 647 buf = kzalloc(size, GFP_KERNEL);
632 if (buf == NULL) 648 if (buf == NULL)
633 return 0; 649 return -ENOMEM;
634 650
635 len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); 651 len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
636 652
@@ -648,6 +664,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
648 PR("DATA Underrun: ", data_underrun); 664 PR("DATA Underrun: ", data_underrun);
649 PR("DELIM Underrun: ", delim_underrun); 665 PR("DELIM Underrun: ", delim_underrun);
650 666
667 if (len > size)
668 len = size;
669
651 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); 670 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
652 kfree(buf); 671 kfree(buf);
653 672
@@ -700,7 +719,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
700 719
701 buf = kzalloc(size, GFP_KERNEL); 720 buf = kzalloc(size, GFP_KERNEL);
702 if (buf == NULL) 721 if (buf == NULL)
703 return 0; 722 return -ENOMEM;
704 723
705 len += snprintf(buf + len, size - len, 724 len += snprintf(buf + len, size - len,
706 "%18s : %10u\n", "CRC ERR", 725 "%18s : %10u\n", "CRC ERR",
@@ -751,6 +770,9 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
751 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); 770 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
752 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL); 771 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
753 772
773 if (len > size)
774 len = size;
775
754 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); 776 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
755 kfree(buf); 777 kfree(buf);
756 778
@@ -802,7 +824,7 @@ static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
802 char buf[32]; 824 char buf[32];
803 unsigned int len; 825 unsigned int len;
804 826
805 len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx); 827 len = sprintf(buf, "0x%08x\n", sc->debug.regidx);
806 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 828 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
807} 829}
808 830
@@ -816,7 +838,7 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
816 838
817 len = min(count, sizeof(buf) - 1); 839 len = min(count, sizeof(buf) - 1);
818 if (copy_from_user(buf, user_buf, len)) 840 if (copy_from_user(buf, user_buf, len))
819 return -EINVAL; 841 return -EFAULT;
820 842
821 buf[len] = '\0'; 843 buf[len] = '\0';
822 if (strict_strtoul(buf, 0, &regidx)) 844 if (strict_strtoul(buf, 0, &regidx))
@@ -843,7 +865,7 @@ static ssize_t read_file_regval(struct file *file, char __user *user_buf,
843 u32 regval; 865 u32 regval;
844 866
845 regval = REG_READ_D(ah, sc->debug.regidx); 867 regval = REG_READ_D(ah, sc->debug.regidx);
846 len = snprintf(buf, sizeof(buf), "0x%08x\n", regval); 868 len = sprintf(buf, "0x%08x\n", regval);
847 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 869 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
848} 870}
849 871
@@ -858,7 +880,7 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
858 880
859 len = min(count, sizeof(buf) - 1); 881 len = min(count, sizeof(buf) - 1);
860 if (copy_from_user(buf, user_buf, len)) 882 if (copy_from_user(buf, user_buf, len))
861 return -EINVAL; 883 return -EFAULT;
862 884
863 buf[len] = '\0'; 885 buf[len] = '\0';
864 if (strict_strtoul(buf, 0, &regval)) 886 if (strict_strtoul(buf, 0, &regval))
@@ -934,6 +956,10 @@ int ath9k_init_debug(struct ath_hw *ah)
934 sc->debug.debugfs_phy, sc, &fops_regval)) 956 sc->debug.debugfs_phy, sc, &fops_regval))
935 goto err; 957 goto err;
936 958
959 if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
960 sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
961 goto err;
962
937 sc->debug.regidx = 0; 963 sc->debug.regidx = 0;
938 return 0; 964 return 0;
939err: 965err:
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 5147b8709e10..5d21704e87ff 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -53,6 +53,7 @@ struct ath_buf;
53 * @cabend: RX End of CAB traffic 53 * @cabend: RX End of CAB traffic
54 * @dtimsync: DTIM sync lossage 54 * @dtimsync: DTIM sync lossage
55 * @dtim: RX Beacon with DTIM 55 * @dtim: RX Beacon with DTIM
56 * @bb_watchdog: Baseband watchdog
56 */ 57 */
57struct ath_interrupt_stats { 58struct ath_interrupt_stats {
58 u32 total; 59 u32 total;
@@ -76,6 +77,7 @@ struct ath_interrupt_stats {
76 u32 cabend; 77 u32 cabend;
77 u32 dtimsync; 78 u32 dtimsync;
78 u32 dtim; 79 u32 dtim;
80 u32 bb_watchdog;
79}; 81};
80 82
81struct ath_rc_stats { 83struct ath_rc_stats {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index ca8704a9d7ac..1266333f586d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -24,6 +24,14 @@ static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
24 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); 24 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
25} 25}
26 26
/*
 * Write @val to analog shift register @reg and, when the
 * analog_shiftreg config flag is set, wait 100 us for the value to
 * settle before the next register access.
 */
void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
{
	REG_WRITE(ah, reg, val);

	if (ah->config.analog_shiftreg)
		udelay(100);
}
34
27void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, 35void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
28 u32 shift, u32 val) 36 u32 shift, u32 val)
29{ 37{
@@ -250,6 +258,27 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
250 return twiceMaxEdgePower; 258 return twiceMaxEdgePower;
251} 259}
252 260
261void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
262{
263 struct ath_common *common = ath9k_hw_common(ah);
264 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
265
266 switch (ar5416_get_ntxchains(ah->txchainmask)) {
267 case 1:
268 break;
269 case 2:
270 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
271 break;
272 case 3:
273 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
274 break;
275 default:
276 ath_print(common, ATH_DBG_EEPROM,
277 "Invalid chainmask configuration\n");
278 break;
279 }
280}
281
253int ath9k_hw_eeprom_init(struct ath_hw *ah) 282int ath9k_hw_eeprom_init(struct ath_hw *ah)
254{ 283{
255 int status; 284 int status;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 21354c15a9a9..8750c558c221 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -263,7 +263,8 @@ enum eeprom_param {
263 EEP_PWR_TABLE_OFFSET, 263 EEP_PWR_TABLE_OFFSET,
264 EEP_DRIVE_STRENGTH, 264 EEP_DRIVE_STRENGTH,
265 EEP_INTERNAL_REGULATOR, 265 EEP_INTERNAL_REGULATOR,
266 EEP_SWREG 266 EEP_SWREG,
267 EEP_PAPRD,
267}; 268};
268 269
269enum ar5416_rates { 270enum ar5416_rates {
@@ -669,7 +670,7 @@ struct eeprom_ops {
669 int (*get_eeprom_ver)(struct ath_hw *hw); 670 int (*get_eeprom_ver)(struct ath_hw *hw);
670 int (*get_eeprom_rev)(struct ath_hw *hw); 671 int (*get_eeprom_rev)(struct ath_hw *hw);
671 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); 672 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band);
672 u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, 673 u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
673 struct ath9k_channel *chan); 674 struct ath9k_channel *chan);
674 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); 675 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
675 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); 676 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
@@ -679,6 +680,7 @@ struct eeprom_ops {
679 u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz); 680 u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
680}; 681};
681 682
683void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val);
682void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, 684void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
683 u32 shift, u32 val); 685 u32 shift, u32 val);
684int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, 686int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
@@ -704,6 +706,7 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah,
704 u16 numRates, bool isHt40Target); 706 u16 numRates, bool isHt40Target);
705u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, 707u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
706 bool is2GHz, int num_band_edges); 708 bool is2GHz, int num_band_edges);
709void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
707int ath9k_hw_eeprom_init(struct ath_hw *ah); 710int ath9k_hw_eeprom_init(struct ath_hw *ah);
708 711
709#define ar5416_get_ntxchains(_txchainmask) \ 712#define ar5416_get_ntxchains(_txchainmask) \
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 41a77d1bd439..9cccd12e8f21 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -222,7 +222,7 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
222 struct ath9k_channel *chan, 222 struct ath9k_channel *chan,
223 struct cal_data_per_freq_4k *pRawDataSet, 223 struct cal_data_per_freq_4k *pRawDataSet,
224 u8 *bChans, u16 availPiers, 224 u8 *bChans, u16 availPiers,
225 u16 tPdGainOverlap, int16_t *pMinCalPower, 225 u16 tPdGainOverlap,
226 u16 *pPdGainBoundaries, u8 *pPDADCValues, 226 u16 *pPdGainBoundaries, u8 *pPDADCValues,
227 u16 numXpdGains) 227 u16 numXpdGains)
228{ 228{
@@ -249,6 +249,7 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
249 struct chan_centers centers; 249 struct chan_centers centers;
250#define PD_GAIN_BOUNDARY_DEFAULT 58; 250#define PD_GAIN_BOUNDARY_DEFAULT 58;
251 251
252 memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
252 ath9k_hw_get_channel_centers(ah, chan, &centers); 253 ath9k_hw_get_channel_centers(ah, chan, &centers);
253 254
254 for (numPiers = 0; numPiers < availPiers; numPiers++) { 255 for (numPiers = 0; numPiers < availPiers; numPiers++) {
@@ -307,8 +308,6 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
307 } 308 }
308 } 309 }
309 310
310 *pMinCalPower = (int16_t)(minPwrT4[0] / 2);
311
312 k = 0; 311 k = 0;
313 312
314 for (i = 0; i < numXpdGains; i++) { 313 for (i = 0; i < numXpdGains; i++) {
@@ -398,7 +397,6 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
398 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; 397 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
399 u16 gainBoundaries[AR5416_EEP4K_PD_GAINS_IN_MASK]; 398 u16 gainBoundaries[AR5416_EEP4K_PD_GAINS_IN_MASK];
400 u16 numPiers, i, j; 399 u16 numPiers, i, j;
401 int16_t tMinCalPower;
402 u16 numXpdGain, xpdMask; 400 u16 numXpdGain, xpdMask;
403 u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; 401 u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 };
404 u32 reg32, regOffset, regChainOffset; 402 u32 reg32, regOffset, regChainOffset;
@@ -451,7 +449,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
451 ath9k_hw_get_4k_gain_boundaries_pdadcs(ah, chan, 449 ath9k_hw_get_4k_gain_boundaries_pdadcs(ah, chan,
452 pRawDataset, pCalBChans, 450 pRawDataset, pCalBChans,
453 numPiers, pdGainOverlap_t2, 451 numPiers, pdGainOverlap_t2,
454 &tMinCalPower, gainBoundaries, 452 gainBoundaries,
455 pdadcValues, numXpdGain); 453 pdadcValues, numXpdGain);
456 454
457 ENABLE_REGWRITE_BUFFER(ah); 455 ENABLE_REGWRITE_BUFFER(ah);
@@ -1149,13 +1147,13 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1149 } 1147 }
1150} 1148}
1151 1149
1152static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, 1150static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
1153 struct ath9k_channel *chan) 1151 struct ath9k_channel *chan)
1154{ 1152{
1155 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; 1153 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1156 struct modal_eep_4k_header *pModal = &eep->modalHeader; 1154 struct modal_eep_4k_header *pModal = &eep->modalHeader;
1157 1155
1158 return pModal->antCtrlCommon & 0xFFFF; 1156 return pModal->antCtrlCommon;
1159} 1157}
1160 1158
1161static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah, 1159static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index b471db5fb82d..4a52cf03808b 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,17 +17,19 @@
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h" 18#include "ar9002_phy.h"
19 19
20static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah) 20#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
21
22static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
21{ 23{
22 return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF; 24 return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF;
23} 25}
24 26
25static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah) 27static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
26{ 28{
27 return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; 29 return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
28} 30}
29 31
30static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah) 32static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
31{ 33{
32 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 34 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
33 struct ath_common *common = ath9k_hw_common(ah); 35 struct ath_common *common = ath9k_hw_common(ah);
@@ -40,20 +42,20 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
40 "Reading from EEPROM, not flash\n"); 42 "Reading from EEPROM, not flash\n");
41 } 43 }
42 44
43 for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16); 45 for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
44 addr++) { 46 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
45 if (!ath9k_hw_nvram_read(common, 47 eep_data)) {
46 addr + eep_start_loc, eep_data)) {
47 ath_print(common, ATH_DBG_EEPROM, 48 ath_print(common, ATH_DBG_EEPROM,
48 "Unable to read eeprom region\n"); 49 "Unable to read eeprom region\n");
49 return false; 50 return false;
50 } 51 }
51 eep_data++; 52 eep_data++;
52 } 53 }
54
53 return true; 55 return true;
54} 56}
55 57
56static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) 58static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
57{ 59{
58 u32 sum = 0, el, integer; 60 u32 sum = 0, el, integer;
59 u16 temp, word, magic, magic2, *eepdata; 61 u16 temp, word, magic, magic2, *eepdata;
@@ -63,8 +65,8 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
63 struct ath_common *common = ath9k_hw_common(ah); 65 struct ath_common *common = ath9k_hw_common(ah);
64 66
65 if (!ath9k_hw_use_flash(ah)) { 67 if (!ath9k_hw_use_flash(ah)) {
66 if (!ath9k_hw_nvram_read(common, 68 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
67 AR5416_EEPROM_MAGIC_OFFSET, &magic)) { 69 &magic)) {
68 ath_print(common, ATH_DBG_FATAL, 70 ath_print(common, ATH_DBG_FATAL,
69 "Reading Magic # failed\n"); 71 "Reading Magic # failed\n");
70 return false; 72 return false;
@@ -72,6 +74,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
72 74
73 ath_print(common, ATH_DBG_EEPROM, 75 ath_print(common, ATH_DBG_EEPROM,
74 "Read Magic = 0x%04X\n", magic); 76 "Read Magic = 0x%04X\n", magic);
77
75 if (magic != AR5416_EEPROM_MAGIC) { 78 if (magic != AR5416_EEPROM_MAGIC) {
76 magic2 = swab16(magic); 79 magic2 = swab16(magic);
77 80
@@ -79,9 +82,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
79 need_swap = true; 82 need_swap = true;
80 eepdata = (u16 *)(&ah->eeprom); 83 eepdata = (u16 *)(&ah->eeprom);
81 84
82 for (addr = 0; 85 for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
83 addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
84 addr++) {
85 temp = swab16(*eepdata); 86 temp = swab16(*eepdata);
86 *eepdata = temp; 87 *eepdata = temp;
87 eepdata++; 88 eepdata++;
@@ -89,13 +90,14 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
89 } else { 90 } else {
90 ath_print(common, ATH_DBG_FATAL, 91 ath_print(common, ATH_DBG_FATAL,
91 "Invalid EEPROM Magic. " 92 "Invalid EEPROM Magic. "
92 "endianness mismatch.\n"); 93 "Endianness mismatch.\n");
93 return -EINVAL; 94 return -EINVAL;
94 } 95 }
95 } 96 }
96 } 97 }
97 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? 98
98 "True" : "False"); 99 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
100 need_swap ? "True" : "False");
99 101
100 if (need_swap) 102 if (need_swap)
101 el = swab16(ah->eeprom.map9287.baseEepHeader.length); 103 el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -108,6 +110,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
108 el = el / sizeof(u16); 110 el = el / sizeof(u16);
109 111
110 eepdata = (u16 *)(&ah->eeprom); 112 eepdata = (u16 *)(&ah->eeprom);
113
111 for (i = 0; i < el; i++) 114 for (i = 0; i < el; i++)
112 sum ^= *eepdata++; 115 sum ^= *eepdata++;
113 116
@@ -161,7 +164,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
161 return 0; 164 return 0;
162} 165}
163 166
164static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah, 167static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
165 enum eeprom_param param) 168 enum eeprom_param param)
166{ 169{
167 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 170 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
@@ -170,6 +173,7 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
170 u16 ver_minor; 173 u16 ver_minor;
171 174
172 ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK; 175 ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK;
176
173 switch (param) { 177 switch (param) {
174 case EEP_NFTHRESH_2: 178 case EEP_NFTHRESH_2:
175 return pModal->noiseFloorThreshCh[0]; 179 return pModal->noiseFloorThreshCh[0];
@@ -214,29 +218,29 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
214 } 218 }
215} 219}
216 220
217 221static void ath9k_hw_get_ar9287_gain_boundaries_pdadcs(struct ath_hw *ah,
218static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, 222 struct ath9k_channel *chan,
219 struct ath9k_channel *chan, 223 struct cal_data_per_freq_ar9287 *pRawDataSet,
220 struct cal_data_per_freq_ar9287 *pRawDataSet, 224 u8 *bChans, u16 availPiers,
221 u8 *bChans, u16 availPiers, 225 u16 tPdGainOverlap,
222 u16 tPdGainOverlap, int16_t *pMinCalPower, 226 u16 *pPdGainBoundaries,
223 u16 *pPdGainBoundaries, u8 *pPDADCValues, 227 u8 *pPDADCValues,
224 u16 numXpdGains) 228 u16 numXpdGains)
225{ 229{
226#define TMP_VAL_VPD_TABLE \ 230#define TMP_VAL_VPD_TABLE \
227 ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep)); 231 ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
228 232
229 int i, j, k; 233 int i, j, k;
230 int16_t ss; 234 int16_t ss;
231 u16 idxL = 0, idxR = 0, numPiers; 235 u16 idxL = 0, idxR = 0, numPiers;
232 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; 236 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
233 u8 minPwrT4[AR9287_NUM_PD_GAINS]; 237 u8 minPwrT4[AR9287_NUM_PD_GAINS];
234 u8 maxPwrT4[AR9287_NUM_PD_GAINS]; 238 u8 maxPwrT4[AR9287_NUM_PD_GAINS];
235 int16_t vpdStep; 239 int16_t vpdStep;
236 int16_t tmpVal; 240 int16_t tmpVal;
237 u16 sizeCurrVpdTable, maxIndex, tgtIndex; 241 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
238 bool match; 242 bool match;
239 int16_t minDelta = 0; 243 int16_t minDelta = 0;
240 struct chan_centers centers; 244 struct chan_centers centers;
241 static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS] 245 static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
242 [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; 246 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
@@ -245,6 +249,7 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
245 static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS] 249 static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
246 [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; 250 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
247 251
252 memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
248 ath9k_hw_get_channel_centers(ah, chan, &centers); 253 ath9k_hw_get_channel_centers(ah, chan, &centers);
249 254
250 for (numPiers = 0; numPiers < availPiers; numPiers++) { 255 for (numPiers = 0; numPiers < availPiers; numPiers++) {
@@ -253,18 +258,18 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
253 } 258 }
254 259
255 match = ath9k_hw_get_lower_upper_index( 260 match = ath9k_hw_get_lower_upper_index(
256 (u8)FREQ2FBIN(centers.synth_center, 261 (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
257 IS_CHAN_2GHZ(chan)), bChans, numPiers, 262 bChans, numPiers, &idxL, &idxR);
258 &idxL, &idxR);
259 263
260 if (match) { 264 if (match) {
261 for (i = 0; i < numXpdGains; i++) { 265 for (i = 0; i < numXpdGains; i++) {
262 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0]; 266 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
263 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4]; 267 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
264 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], 268 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
265 pRawDataSet[idxL].pwrPdg[i], 269 pRawDataSet[idxL].pwrPdg[i],
266 pRawDataSet[idxL].vpdPdg[i], 270 pRawDataSet[idxL].vpdPdg[i],
267 AR9287_PD_GAIN_ICEPTS, vpdTableI[i]); 271 AR9287_PD_GAIN_ICEPTS,
272 vpdTableI[i]);
268 } 273 }
269 } else { 274 } else {
270 for (i = 0; i < numXpdGains; i++) { 275 for (i = 0; i < numXpdGains; i++) {
@@ -275,61 +280,58 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
275 280
276 minPwrT4[i] = max(pPwrL[0], pPwrR[0]); 281 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
277 282
278 maxPwrT4[i] = 283 maxPwrT4[i] = min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1],
279 min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1], 284 pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
280 pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
281 285
282 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], 286 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
283 pPwrL, pVpdL, 287 pPwrL, pVpdL,
284 AR9287_PD_GAIN_ICEPTS, 288 AR9287_PD_GAIN_ICEPTS,
285 vpdTableL[i]); 289 vpdTableL[i]);
286 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], 290 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
287 pPwrR, pVpdR, 291 pPwrR, pVpdR,
288 AR9287_PD_GAIN_ICEPTS, 292 AR9287_PD_GAIN_ICEPTS,
289 vpdTableR[i]); 293 vpdTableR[i]);
290 294
291 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { 295 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
292 vpdTableI[i][j] = 296 vpdTableI[i][j] = (u8)(ath9k_hw_interpolate(
293 (u8)(ath9k_hw_interpolate((u16) 297 (u16)FREQ2FBIN(centers. synth_center,
294 FREQ2FBIN(centers. synth_center, 298 IS_CHAN_2GHZ(chan)),
295 IS_CHAN_2GHZ(chan)), 299 bChans[idxL], bChans[idxR],
296 bChans[idxL], bChans[idxR], 300 vpdTableL[i][j], vpdTableR[i][j]));
297 vpdTableL[i][j], vpdTableR[i][j]));
298 } 301 }
299 } 302 }
300 } 303 }
301 *pMinCalPower = (int16_t)(minPwrT4[0] / 2);
302 304
303 k = 0; 305 k = 0;
306
304 for (i = 0; i < numXpdGains; i++) { 307 for (i = 0; i < numXpdGains; i++) {
305 if (i == (numXpdGains - 1)) 308 if (i == (numXpdGains - 1))
306 pPdGainBoundaries[i] = (u16)(maxPwrT4[i] / 2); 309 pPdGainBoundaries[i] =
310 (u16)(maxPwrT4[i] / 2);
307 else 311 else
308 pPdGainBoundaries[i] = (u16)((maxPwrT4[i] + 312 pPdGainBoundaries[i] =
309 minPwrT4[i+1]) / 4); 313 (u16)((maxPwrT4[i] + minPwrT4[i+1]) / 4);
310 314
311 pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER, 315 pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER,
312 pPdGainBoundaries[i]); 316 pPdGainBoundaries[i]);
313 317
314 318
315 if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) { 319 minDelta = 0;
316 minDelta = pPdGainBoundaries[0] - 23;
317 pPdGainBoundaries[0] = 23;
318 } else
319 minDelta = 0;
320 320
321 if (i == 0) { 321 if (i == 0) {
322 if (AR_SREV_9280_10_OR_LATER(ah)) 322 if (AR_SREV_9280_10_OR_LATER(ah))
323 ss = (int16_t)(0 - (minPwrT4[i] / 2)); 323 ss = (int16_t)(0 - (minPwrT4[i] / 2));
324 else 324 else
325 ss = 0; 325 ss = 0;
326 } else 326 } else {
327 ss = (int16_t)((pPdGainBoundaries[i-1] - 327 ss = (int16_t)((pPdGainBoundaries[i-1] -
328 (minPwrT4[i] / 2)) - 328 (minPwrT4[i] / 2)) -
329 tPdGainOverlap + 1 + minDelta); 329 tPdGainOverlap + 1 + minDelta);
330 }
330 331
331 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]); 332 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
332 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); 333 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
334
333 while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1))) { 335 while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1))) {
334 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); 336 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
335 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal); 337 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
@@ -348,12 +350,13 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
348 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] - 350 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
349 vpdTableI[i][sizeCurrVpdTable - 2]); 351 vpdTableI[i][sizeCurrVpdTable - 2]);
350 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); 352 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
353
351 if (tgtIndex > maxIndex) { 354 if (tgtIndex > maxIndex) {
352 while ((ss <= tgtIndex) && 355 while ((ss <= tgtIndex) &&
353 (k < (AR9287_NUM_PDADC_VALUES - 1))) { 356 (k < (AR9287_NUM_PDADC_VALUES - 1))) {
354 tmpVal = (int16_t) TMP_VAL_VPD_TABLE; 357 tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
355 pPDADCValues[k++] = (u8)((tmpVal > 255) ? 358 pPDADCValues[k++] =
356 255 : tmpVal); 359 (u8)((tmpVal > 255) ? 255 : tmpVal);
357 ss++; 360 ss++;
358 } 361 }
359 } 362 }
@@ -375,10 +378,9 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
375static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah, 378static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
376 struct ath9k_channel *chan, 379 struct ath9k_channel *chan,
377 struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop, 380 struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
378 u8 *pCalChans, u16 availPiers, 381 u8 *pCalChans, u16 availPiers, int8_t *pPwr)
379 int8_t *pPwr)
380{ 382{
381 u16 idxL = 0, idxR = 0, numPiers; 383 u16 idxL = 0, idxR = 0, numPiers;
382 bool match; 384 bool match;
383 struct chan_centers centers; 385 struct chan_centers centers;
384 386
@@ -390,15 +392,14 @@ static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
390 } 392 }
391 393
392 match = ath9k_hw_get_lower_upper_index( 394 match = ath9k_hw_get_lower_upper_index(
393 (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), 395 (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
394 pCalChans, numPiers, 396 pCalChans, numPiers, &idxL, &idxR);
395 &idxL, &idxR);
396 397
397 if (match) { 398 if (match) {
398 *pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0]; 399 *pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0];
399 } else { 400 } else {
400 *pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] + 401 *pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] +
401 (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2; 402 (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
402 } 403 }
403 404
404} 405}
@@ -409,16 +410,22 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
409 u32 tmpVal; 410 u32 tmpVal;
410 u32 a; 411 u32 a;
411 412
413 /* Enable OLPC for chain 0 */
414
412 tmpVal = REG_READ(ah, 0xa270); 415 tmpVal = REG_READ(ah, 0xa270);
413 tmpVal = tmpVal & 0xFCFFFFFF; 416 tmpVal = tmpVal & 0xFCFFFFFF;
414 tmpVal = tmpVal | (0x3 << 24); 417 tmpVal = tmpVal | (0x3 << 24);
415 REG_WRITE(ah, 0xa270, tmpVal); 418 REG_WRITE(ah, 0xa270, tmpVal);
416 419
420 /* Enable OLPC for chain 1 */
421
417 tmpVal = REG_READ(ah, 0xb270); 422 tmpVal = REG_READ(ah, 0xb270);
418 tmpVal = tmpVal & 0xFCFFFFFF; 423 tmpVal = tmpVal & 0xFCFFFFFF;
419 tmpVal = tmpVal | (0x3 << 24); 424 tmpVal = tmpVal | (0x3 << 24);
420 REG_WRITE(ah, 0xb270, tmpVal); 425 REG_WRITE(ah, 0xb270, tmpVal);
421 426
427 /* Write the OLPC ref power for chain 0 */
428
422 if (chain == 0) { 429 if (chain == 0) {
423 tmpVal = REG_READ(ah, 0xa398); 430 tmpVal = REG_READ(ah, 0xa398);
424 tmpVal = tmpVal & 0xff00ffff; 431 tmpVal = tmpVal & 0xff00ffff;
@@ -427,6 +434,8 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
427 REG_WRITE(ah, 0xa398, tmpVal); 434 REG_WRITE(ah, 0xa398, tmpVal);
428 } 435 }
429 436
437 /* Write the OLPC ref power for chain 1 */
438
430 if (chain == 1) { 439 if (chain == 1) {
431 tmpVal = REG_READ(ah, 0xb398); 440 tmpVal = REG_READ(ah, 0xb398);
432 tmpVal = tmpVal & 0xff00ffff; 441 tmpVal = tmpVal & 0xff00ffff;
@@ -436,28 +445,28 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
436 } 445 }
437} 446}
438 447
439static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah, 448static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
440 struct ath9k_channel *chan, 449 struct ath9k_channel *chan,
441 int16_t *pTxPowerIndexOffset) 450 int16_t *pTxPowerIndexOffset)
442{ 451{
443 struct ath_common *common = ath9k_hw_common(ah);
444 struct cal_data_per_freq_ar9287 *pRawDataset; 452 struct cal_data_per_freq_ar9287 *pRawDataset;
445 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop; 453 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
446 u8 *pCalBChans = NULL; 454 u8 *pCalBChans = NULL;
447 u16 pdGainOverlap_t2; 455 u16 pdGainOverlap_t2;
448 u8 pdadcValues[AR9287_NUM_PDADC_VALUES]; 456 u8 pdadcValues[AR9287_NUM_PDADC_VALUES];
449 u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK]; 457 u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK];
450 u16 numPiers = 0, i, j; 458 u16 numPiers = 0, i, j;
451 int16_t tMinCalPower;
452 u16 numXpdGain, xpdMask; 459 u16 numXpdGain, xpdMask;
453 u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0}; 460 u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0};
454 u32 reg32, regOffset, regChainOffset; 461 u32 reg32, regOffset, regChainOffset, regval;
455 int16_t modalIdx, diff = 0; 462 int16_t modalIdx, diff = 0;
456 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 463 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
464
457 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0; 465 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
458 xpdMask = pEepData->modalHeader.xpdGain; 466 xpdMask = pEepData->modalHeader.xpdGain;
467
459 if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= 468 if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
460 AR9287_EEP_MINOR_VER_2) 469 AR9287_EEP_MINOR_VER_2)
461 pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; 470 pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap;
462 else 471 else
463 pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), 472 pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
@@ -466,15 +475,16 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
466 if (IS_CHAN_2GHZ(chan)) { 475 if (IS_CHAN_2GHZ(chan)) {
467 pCalBChans = pEepData->calFreqPier2G; 476 pCalBChans = pEepData->calFreqPier2G;
468 numPiers = AR9287_NUM_2G_CAL_PIERS; 477 numPiers = AR9287_NUM_2G_CAL_PIERS;
469 if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { 478 if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
470 pRawDatasetOpenLoop = 479 pRawDatasetOpenLoop =
471 (struct cal_data_op_loop_ar9287 *) 480 (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[0];
472 pEepData->calPierData2G[0];
473 ah->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0]; 481 ah->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0];
474 } 482 }
475 } 483 }
476 484
477 numXpdGain = 0; 485 numXpdGain = 0;
486
487 /* Calculate the value of xpdgains from the xpdGain Mask */
478 for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) { 488 for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) {
479 if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) { 489 if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) {
480 if (numXpdGain >= AR9287_NUM_PD_GAINS) 490 if (numXpdGain >= AR9287_NUM_PD_GAINS)
@@ -496,99 +506,79 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
496 506
497 for (i = 0; i < AR9287_MAX_CHAINS; i++) { 507 for (i = 0; i < AR9287_MAX_CHAINS; i++) {
498 regChainOffset = i * 0x1000; 508 regChainOffset = i * 0x1000;
509
499 if (pEepData->baseEepHeader.txMask & (1 << i)) { 510 if (pEepData->baseEepHeader.txMask & (1 << i)) {
500 pRawDatasetOpenLoop = (struct cal_data_op_loop_ar9287 *) 511 pRawDatasetOpenLoop =
501 pEepData->calPierData2G[i]; 512 (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[i];
502 if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { 513
514 if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
503 int8_t txPower; 515 int8_t txPower;
504 ar9287_eeprom_get_tx_gain_index(ah, chan, 516 ar9287_eeprom_get_tx_gain_index(ah, chan,
505 pRawDatasetOpenLoop, 517 pRawDatasetOpenLoop,
506 pCalBChans, numPiers, 518 pCalBChans, numPiers,
507 &txPower); 519 &txPower);
508 ar9287_eeprom_olpc_set_pdadcs(ah, txPower, i); 520 ar9287_eeprom_olpc_set_pdadcs(ah, txPower, i);
509 } else { 521 } else {
510 pRawDataset = 522 pRawDataset =
511 (struct cal_data_per_freq_ar9287 *) 523 (struct cal_data_per_freq_ar9287 *)
512 pEepData->calPierData2G[i]; 524 pEepData->calPierData2G[i];
513 ath9k_hw_get_AR9287_gain_boundaries_pdadcs( 525
514 ah, chan, pRawDataset, 526 ath9k_hw_get_ar9287_gain_boundaries_pdadcs(ah, chan,
515 pCalBChans, numPiers, 527 pRawDataset,
516 pdGainOverlap_t2, 528 pCalBChans, numPiers,
517 &tMinCalPower, gainBoundaries, 529 pdGainOverlap_t2,
518 pdadcValues, numXpdGain); 530 gainBoundaries,
531 pdadcValues,
532 numXpdGain);
519 } 533 }
520 534
521 if (i == 0) { 535 if (i == 0) {
522 if (!ath9k_hw_AR9287_get_eeprom( 536 if (!ath9k_hw_ar9287_get_eeprom(ah,
523 ah, EEP_OL_PWRCTRL)) { 537 EEP_OL_PWRCTRL)) {
524 REG_WRITE(ah, AR_PHY_TPCRG5 + 538
525 regChainOffset, 539 regval = SM(pdGainOverlap_t2,
526 SM(pdGainOverlap_t2, 540 AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
527 AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | 541 | SM(gainBoundaries[0],
528 SM(gainBoundaries[0], 542 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
529 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) 543 | SM(gainBoundaries[1],
530 | SM(gainBoundaries[1], 544 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
531 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) 545 | SM(gainBoundaries[2],
532 | SM(gainBoundaries[2], 546 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
533 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) 547 | SM(gainBoundaries[3],
534 | SM(gainBoundaries[3], 548 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4);
535 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); 549
550 REG_WRITE(ah,
551 AR_PHY_TPCRG5 + regChainOffset,
552 regval);
536 } 553 }
537 } 554 }
538 555
539 if ((int32_t)AR9287_PWR_TABLE_OFFSET_DB != 556 if ((int32_t)AR9287_PWR_TABLE_OFFSET_DB !=
540 pEepData->baseEepHeader.pwrTableOffset) { 557 pEepData->baseEepHeader.pwrTableOffset) {
541 diff = (u16) 558 diff = (u16)(pEepData->baseEepHeader.pwrTableOffset -
542 (pEepData->baseEepHeader.pwrTableOffset 559 (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
543 - (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
544 diff *= 2; 560 diff *= 2;
545 561
546 for (j = 0; 562 for (j = 0; j < ((u16)AR9287_NUM_PDADC_VALUES-diff); j++)
547 j < ((u16)AR9287_NUM_PDADC_VALUES-diff);
548 j++)
549 pdadcValues[j] = pdadcValues[j+diff]; 563 pdadcValues[j] = pdadcValues[j+diff];
550 564
551 for (j = (u16)(AR9287_NUM_PDADC_VALUES-diff); 565 for (j = (u16)(AR9287_NUM_PDADC_VALUES-diff);
552 j < AR9287_NUM_PDADC_VALUES; j++) 566 j < AR9287_NUM_PDADC_VALUES; j++)
553 pdadcValues[j] = 567 pdadcValues[j] =
554 pdadcValues[ 568 pdadcValues[AR9287_NUM_PDADC_VALUES-diff];
555 AR9287_NUM_PDADC_VALUES-diff];
556 } 569 }
557 570
558 if (!ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { 571 if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
559 regOffset = AR_PHY_BASE + (672 << 2) + 572 regOffset = AR_PHY_BASE +
560 regChainOffset; 573 (672 << 2) + regChainOffset;
561 for (j = 0; j < 32; j++) {
562 reg32 = ((pdadcValues[4*j + 0]
563 & 0xFF) << 0) |
564 ((pdadcValues[4*j + 1]
565 & 0xFF) << 8) |
566 ((pdadcValues[4*j + 2]
567 & 0xFF) << 16) |
568 ((pdadcValues[4*j + 3]
569 & 0xFF) << 24) ;
570 REG_WRITE(ah, regOffset, reg32);
571 574
572 ath_print(common, ATH_DBG_EEPROM, 575 for (j = 0; j < 32; j++) {
573 "PDADC (%d,%4x): %4.4x " 576 reg32 = ((pdadcValues[4*j + 0] & 0xFF) << 0)
574 "%8.8x\n", 577 | ((pdadcValues[4*j + 1] & 0xFF) << 8)
575 i, regChainOffset, regOffset, 578 | ((pdadcValues[4*j + 2] & 0xFF) << 16)
576 reg32); 579 | ((pdadcValues[4*j + 3] & 0xFF) << 24);
577
578 ath_print(common, ATH_DBG_EEPROM,
579 "PDADC: Chain %d | "
580 "PDADC %3d Value %3d | "
581 "PDADC %3d Value %3d | "
582 "PDADC %3d Value %3d | "
583 "PDADC %3d Value %3d |\n",
584 i, 4 * j, pdadcValues[4 * j],
585 4 * j + 1,
586 pdadcValues[4 * j + 1],
587 4 * j + 2,
588 pdadcValues[4 * j + 2],
589 4 * j + 3,
590 pdadcValues[4 * j + 3]);
591 580
581 REG_WRITE(ah, regOffset, reg32);
592 regOffset += 4; 582 regOffset += 4;
593 } 583 }
594 } 584 }
@@ -598,30 +588,45 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
598 *pTxPowerIndexOffset = 0; 588 *pTxPowerIndexOffset = 0;
599} 589}
600 590
601static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, 591static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
602 struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl, 592 struct ath9k_channel *chan,
603 u16 AntennaReduction, u16 twiceMaxRegulatoryPower, 593 int16_t *ratesArray,
604 u16 powerLimit) 594 u16 cfgCtl,
595 u16 AntennaReduction,
596 u16 twiceMaxRegulatoryPower,
597 u16 powerLimit)
605{ 598{
599#define CMP_CTL \
600 (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
601 pEepData->ctlIndex[i])
602
603#define CMP_NO_CTL \
604 (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
605 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
606
606#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 607#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
607#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 608#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
609
608 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 610 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
609 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 611 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
610 static const u16 tpScaleReductionTable[5] = 612 static const u16 tpScaleReductionTable[5] =
611 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; 613 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
612 int i; 614 int i;
613 int16_t twiceLargestAntenna; 615 int16_t twiceLargestAntenna;
614 struct cal_ctl_data_ar9287 *rep; 616 struct cal_ctl_data_ar9287 *rep;
615 struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} }, 617 struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
616 targetPowerCck = {0, {0, 0, 0, 0} }; 618 targetPowerCck = {0, {0, 0, 0, 0} };
617 struct cal_target_power_leg targetPowerOfdmExt = {0, {0, 0, 0, 0} }, 619 struct cal_target_power_leg targetPowerOfdmExt = {0, {0, 0, 0, 0} },
618 targetPowerCckExt = {0, {0, 0, 0, 0} }; 620 targetPowerCckExt = {0, {0, 0, 0, 0} };
619 struct cal_target_power_ht targetPowerHt20, 621 struct cal_target_power_ht targetPowerHt20,
620 targetPowerHt40 = {0, {0, 0, 0, 0} }; 622 targetPowerHt40 = {0, {0, 0, 0, 0} };
621 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 623 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
622 u16 ctlModesFor11g[] = 624 u16 ctlModesFor11g[] = {CTL_11B,
623 {CTL_11B, CTL_11G, CTL_2GHT20, 625 CTL_11G,
624 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40}; 626 CTL_2GHT20,
627 CTL_11B_EXT,
628 CTL_11G_EXT,
629 CTL_2GHT40};
625 u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq; 630 u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq;
626 struct chan_centers centers; 631 struct chan_centers centers;
627 int tx_chainmask; 632 int tx_chainmask;
@@ -631,19 +636,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
631 636
632 ath9k_hw_get_channel_centers(ah, chan, &centers); 637 ath9k_hw_get_channel_centers(ah, chan, &centers);
633 638
639 /* Compute TxPower reduction due to Antenna Gain */
634 twiceLargestAntenna = max(pEepData->modalHeader.antennaGainCh[0], 640 twiceLargestAntenna = max(pEepData->modalHeader.antennaGainCh[0],
635 pEepData->modalHeader.antennaGainCh[1]); 641 pEepData->modalHeader.antennaGainCh[1]);
642 twiceLargestAntenna = (int16_t)min((AntennaReduction) -
643 twiceLargestAntenna, 0);
636 644
637 twiceLargestAntenna = (int16_t)min((AntennaReduction) - 645 /*
638 twiceLargestAntenna, 0); 646 * scaledPower is the minimum of the user input power level
639 647 * and the regulatory allowed power level.
648 */
640 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; 649 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
650
641 if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) 651 if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX)
642 maxRegAllowedPower -= 652 maxRegAllowedPower -=
643 (tpScaleReductionTable[(regulatory->tp_scale)] * 2); 653 (tpScaleReductionTable[(regulatory->tp_scale)] * 2);
644 654
645 scaledPower = min(powerLimit, maxRegAllowedPower); 655 scaledPower = min(powerLimit, maxRegAllowedPower);
646 656
657 /*
658 * Reduce scaled Power by number of chains active
659 * to get the per chain tx power level.
660 */
647 switch (ar5416_get_ntxchains(tx_chainmask)) { 661 switch (ar5416_get_ntxchains(tx_chainmask)) {
648 case 1: 662 case 1:
649 break; 663 break;
@@ -656,9 +670,14 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
656 } 670 }
657 scaledPower = max((u16)0, scaledPower); 671 scaledPower = max((u16)0, scaledPower);
658 672
673 /*
674 * Get TX power from EEPROM.
675 */
659 if (IS_CHAN_2GHZ(chan)) { 676 if (IS_CHAN_2GHZ(chan)) {
677 /* CTL_11B, CTL_11G, CTL_2GHT20 */
660 numCtlModes = 678 numCtlModes =
661 ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; 679 ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
680
662 pCtlMode = ctlModesFor11g; 681 pCtlMode = ctlModesFor11g;
663 682
664 ath9k_hw_get_legacy_target_powers(ah, chan, 683 ath9k_hw_get_legacy_target_powers(ah, chan,
@@ -675,6 +694,7 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
675 &targetPowerHt20, 8, false); 694 &targetPowerHt20, 8, false);
676 695
677 if (IS_CHAN_HT40(chan)) { 696 if (IS_CHAN_HT40(chan)) {
697 /* All 2G CTLs */
678 numCtlModes = ARRAY_SIZE(ctlModesFor11g); 698 numCtlModes = ARRAY_SIZE(ctlModesFor11g);
679 ath9k_hw_get_target_powers(ah, chan, 699 ath9k_hw_get_target_powers(ah, chan,
680 pEepData->calTargetPower2GHT40, 700 pEepData->calTargetPower2GHT40,
@@ -692,8 +712,9 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
692 } 712 }
693 713
694 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { 714 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
695 bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || 715 bool isHt40CtlMode =
696 (pCtlMode[ctlMode] == CTL_2GHT40); 716 (pCtlMode[ctlMode] == CTL_2GHT40) ? true : false;
717
697 if (isHt40CtlMode) 718 if (isHt40CtlMode)
698 freq = centers.synth_center; 719 freq = centers.synth_center;
699 else if (pCtlMode[ctlMode] & EXT_ADDITIVE) 720 else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
@@ -701,31 +722,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
701 else 722 else
702 freq = centers.ctl_center; 723 freq = centers.ctl_center;
703 724
704 if (ah->eep_ops->get_eeprom_ver(ah) == 14 && 725 /* Walk through the CTL indices stored in EEPROM */
705 ah->eep_ops->get_eeprom_rev(ah) <= 2)
706 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
707
708 for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { 726 for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
709 if ((((cfgCtl & ~CTL_MODE_M) | 727 struct cal_ctl_edges *pRdEdgesPower;
710 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
711 pEepData->ctlIndex[i]) ||
712 (((cfgCtl & ~CTL_MODE_M) |
713 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
714 ((pEepData->ctlIndex[i] &
715 CTL_MODE_M) | SD_NO_CTL))) {
716 728
729 /*
730 * Compare test group from regulatory channel list
731 * with test mode from pCtlMode list
732 */
733 if (CMP_CTL || CMP_NO_CTL) {
717 rep = &(pEepData->ctlData[i]); 734 rep = &(pEepData->ctlData[i]);
718 twiceMinEdgePower = ath9k_hw_get_max_edge_power( 735 pRdEdgesPower =
719 freq, 736 rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1];
720 rep->ctlEdges[ar5416_get_ntxchains( 737
721 tx_chainmask) - 1], 738 twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
722 IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES); 739 pRdEdgesPower,
723 740 IS_CHAN_2GHZ(chan),
724 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) 741 AR5416_NUM_BAND_EDGES);
725 twiceMaxEdgePower = min( 742
726 twiceMaxEdgePower, 743 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
727 twiceMinEdgePower); 744 twiceMaxEdgePower = min(twiceMaxEdgePower,
728 else { 745 twiceMinEdgePower);
746 } else {
729 twiceMaxEdgePower = twiceMinEdgePower; 747 twiceMaxEdgePower = twiceMinEdgePower;
730 break; 748 break;
731 } 749 }
@@ -734,55 +752,48 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
734 752
735 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); 753 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
736 754
755 /* Apply ctl mode to correct target power set */
737 switch (pCtlMode[ctlMode]) { 756 switch (pCtlMode[ctlMode]) {
738 case CTL_11B: 757 case CTL_11B:
739 for (i = 0; 758 for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
740 i < ARRAY_SIZE(targetPowerCck.tPow2x); 759 targetPowerCck.tPow2x[i] =
741 i++) { 760 (u8)min((u16)targetPowerCck.tPow2x[i],
742 targetPowerCck.tPow2x[i] = (u8)min( 761 minCtlPower);
743 (u16)targetPowerCck.tPow2x[i],
744 minCtlPower);
745 } 762 }
746 break; 763 break;
747 case CTL_11A: 764 case CTL_11A:
748 case CTL_11G: 765 case CTL_11G:
749 for (i = 0; 766 for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
750 i < ARRAY_SIZE(targetPowerOfdm.tPow2x); 767 targetPowerOfdm.tPow2x[i] =
751 i++) { 768 (u8)min((u16)targetPowerOfdm.tPow2x[i],
752 targetPowerOfdm.tPow2x[i] = (u8)min( 769 minCtlPower);
753 (u16)targetPowerOfdm.tPow2x[i],
754 minCtlPower);
755 } 770 }
756 break; 771 break;
757 case CTL_5GHT20: 772 case CTL_5GHT20:
758 case CTL_2GHT20: 773 case CTL_2GHT20:
759 for (i = 0; 774 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
760 i < ARRAY_SIZE(targetPowerHt20.tPow2x); 775 targetPowerHt20.tPow2x[i] =
761 i++) { 776 (u8)min((u16)targetPowerHt20.tPow2x[i],
762 targetPowerHt20.tPow2x[i] = (u8)min( 777 minCtlPower);
763 (u16)targetPowerHt20.tPow2x[i],
764 minCtlPower);
765 } 778 }
766 break; 779 break;
767 case CTL_11B_EXT: 780 case CTL_11B_EXT:
768 targetPowerCckExt.tPow2x[0] = (u8)min( 781 targetPowerCckExt.tPow2x[0] =
769 (u16)targetPowerCckExt.tPow2x[0], 782 (u8)min((u16)targetPowerCckExt.tPow2x[0],
770 minCtlPower); 783 minCtlPower);
771 break; 784 break;
772 case CTL_11A_EXT: 785 case CTL_11A_EXT:
773 case CTL_11G_EXT: 786 case CTL_11G_EXT:
774 targetPowerOfdmExt.tPow2x[0] = (u8)min( 787 targetPowerOfdmExt.tPow2x[0] =
775 (u16)targetPowerOfdmExt.tPow2x[0], 788 (u8)min((u16)targetPowerOfdmExt.tPow2x[0],
776 minCtlPower); 789 minCtlPower);
777 break; 790 break;
778 case CTL_5GHT40: 791 case CTL_5GHT40:
779 case CTL_2GHT40: 792 case CTL_2GHT40:
780 for (i = 0; 793 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
781 i < ARRAY_SIZE(targetPowerHt40.tPow2x); 794 targetPowerHt40.tPow2x[i] =
782 i++) { 795 (u8)min((u16)targetPowerHt40.tPow2x[i],
783 targetPowerHt40.tPow2x[i] = (u8)min( 796 minCtlPower);
784 (u16)targetPowerHt40.tPow2x[i],
785 minCtlPower);
786 } 797 }
787 break; 798 break;
788 default: 799 default:
@@ -790,12 +801,13 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
790 } 801 }
791 } 802 }
792 803
804 /* Now set the rates array */
805
793 ratesArray[rate6mb] = 806 ratesArray[rate6mb] =
794 ratesArray[rate9mb] = 807 ratesArray[rate9mb] =
795 ratesArray[rate12mb] = 808 ratesArray[rate12mb] =
796 ratesArray[rate18mb] = 809 ratesArray[rate18mb] =
797 ratesArray[rate24mb] = 810 ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0];
798 targetPowerOfdm.tPow2x[0];
799 811
800 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; 812 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
801 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; 813 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
@@ -807,12 +819,12 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
807 819
808 if (IS_CHAN_2GHZ(chan)) { 820 if (IS_CHAN_2GHZ(chan)) {
809 ratesArray[rate1l] = targetPowerCck.tPow2x[0]; 821 ratesArray[rate1l] = targetPowerCck.tPow2x[0];
810 ratesArray[rate2s] = ratesArray[rate2l] = 822 ratesArray[rate2s] =
811 targetPowerCck.tPow2x[1]; 823 ratesArray[rate2l] = targetPowerCck.tPow2x[1];
812 ratesArray[rate5_5s] = ratesArray[rate5_5l] = 824 ratesArray[rate5_5s] =
813 targetPowerCck.tPow2x[2]; 825 ratesArray[rate5_5l] = targetPowerCck.tPow2x[2];
814 ratesArray[rate11s] = ratesArray[rate11l] = 826 ratesArray[rate11s] =
815 targetPowerCck.tPow2x[3]; 827 ratesArray[rate11l] = targetPowerCck.tPow2x[3];
816 } 828 }
817 if (IS_CHAN_HT40(chan)) { 829 if (IS_CHAN_HT40(chan)) {
818 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) 830 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++)
@@ -821,28 +833,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
821 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; 833 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
822 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; 834 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
823 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; 835 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
836
824 if (IS_CHAN_2GHZ(chan)) 837 if (IS_CHAN_2GHZ(chan))
825 ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; 838 ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0];
826 } 839 }
827 840
841#undef CMP_CTL
842#undef CMP_NO_CTL
828#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN 843#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN
829#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN 844#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN
830} 845}
831 846
832static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, 847static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
833 struct ath9k_channel *chan, u16 cfgCtl, 848 struct ath9k_channel *chan, u16 cfgCtl,
834 u8 twiceAntennaReduction, 849 u8 twiceAntennaReduction,
835 u8 twiceMaxRegulatoryPower, 850 u8 twiceMaxRegulatoryPower,
836 u8 powerLimit) 851 u8 powerLimit)
837{ 852{
838#define INCREASE_MAXPOW_BY_TWO_CHAIN 6
839#define INCREASE_MAXPOW_BY_THREE_CHAIN 10
840 struct ath_common *common = ath9k_hw_common(ah);
841 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 853 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
842 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 854 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
843 struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader; 855 struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
844 int16_t ratesArray[Ar5416RateSize]; 856 int16_t ratesArray[Ar5416RateSize];
845 int16_t txPowerIndexOffset = 0; 857 int16_t txPowerIndexOffset = 0;
846 u8 ht40PowerIncForPdadc = 2; 858 u8 ht40PowerIncForPdadc = 2;
847 int i; 859 int i;
848 860
@@ -852,13 +864,13 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
852 AR9287_EEP_MINOR_VER_2) 864 AR9287_EEP_MINOR_VER_2)
853 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; 865 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
854 866
855 ath9k_hw_set_AR9287_power_per_rate_table(ah, chan, 867 ath9k_hw_set_ar9287_power_per_rate_table(ah, chan,
856 &ratesArray[0], cfgCtl, 868 &ratesArray[0], cfgCtl,
857 twiceAntennaReduction, 869 twiceAntennaReduction,
858 twiceMaxRegulatoryPower, 870 twiceMaxRegulatoryPower,
859 powerLimit); 871 powerLimit);
860 872
861 ath9k_hw_set_AR9287_power_cal_table(ah, chan, &txPowerIndexOffset); 873 ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
862 874
863 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 875 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
864 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 876 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
@@ -871,6 +883,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
871 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; 883 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
872 } 884 }
873 885
886 /* OFDM power per rate */
874 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 887 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
875 ATH9K_POW_SM(ratesArray[rate18mb], 24) 888 ATH9K_POW_SM(ratesArray[rate18mb], 24)
876 | ATH9K_POW_SM(ratesArray[rate12mb], 16) 889 | ATH9K_POW_SM(ratesArray[rate12mb], 16)
@@ -883,6 +896,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
883 | ATH9K_POW_SM(ratesArray[rate36mb], 8) 896 | ATH9K_POW_SM(ratesArray[rate36mb], 8)
884 | ATH9K_POW_SM(ratesArray[rate24mb], 0)); 897 | ATH9K_POW_SM(ratesArray[rate24mb], 0));
885 898
899 /* CCK power per rate */
886 if (IS_CHAN_2GHZ(chan)) { 900 if (IS_CHAN_2GHZ(chan)) {
887 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, 901 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
888 ATH9K_POW_SM(ratesArray[rate2s], 24) 902 ATH9K_POW_SM(ratesArray[rate2s], 24)
@@ -896,6 +910,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
896 | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); 910 | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
897 } 911 }
898 912
913 /* HT20 power per rate */
899 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, 914 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
900 ATH9K_POW_SM(ratesArray[rateHt20_3], 24) 915 ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
901 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) 916 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
@@ -908,8 +923,9 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
908 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) 923 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
909 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); 924 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
910 925
926 /* HT40 power per rate */
911 if (IS_CHAN_HT40(chan)) { 927 if (IS_CHAN_HT40(chan)) {
912 if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { 928 if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
913 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, 929 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
914 ATH9K_POW_SM(ratesArray[rateHt40_3], 24) 930 ATH9K_POW_SM(ratesArray[rateHt40_3], 24)
915 | ATH9K_POW_SM(ratesArray[rateHt40_2], 16) 931 | ATH9K_POW_SM(ratesArray[rateHt40_2], 16)
@@ -943,6 +959,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
943 ht40PowerIncForPdadc, 0)); 959 ht40PowerIncForPdadc, 0));
944 } 960 }
945 961
962 /* Dup/Ext power per rate */
946 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, 963 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
947 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) 964 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
948 | ATH9K_POW_SM(ratesArray[rateExtCck], 16) 965 | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
@@ -960,37 +977,20 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
960 ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2; 977 ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
961 else 978 else
962 regulatory->max_power_level = ratesArray[i]; 979 regulatory->max_power_level = ratesArray[i];
963
964 switch (ar5416_get_ntxchains(ah->txchainmask)) {
965 case 1:
966 break;
967 case 2:
968 regulatory->max_power_level +=
969 INCREASE_MAXPOW_BY_TWO_CHAIN;
970 break;
971 case 3:
972 regulatory->max_power_level +=
973 INCREASE_MAXPOW_BY_THREE_CHAIN;
974 break;
975 default:
976 ath_print(common, ATH_DBG_EEPROM,
977 "Invalid chainmask configuration\n");
978 break;
979 }
980} 980}
981 981
982static void ath9k_hw_AR9287_set_addac(struct ath_hw *ah, 982static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
983 struct ath9k_channel *chan) 983 struct ath9k_channel *chan)
984{ 984{
985} 985}
986 986
987static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah, 987static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
988 struct ath9k_channel *chan) 988 struct ath9k_channel *chan)
989{ 989{
990 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 990 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
991 struct modal_eep_ar9287_header *pModal = &eep->modalHeader; 991 struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
992 u16 antWrites[AR9287_ANT_16S]; 992 u16 antWrites[AR9287_ANT_16S];
993 u32 regChainOffset; 993 u32 regChainOffset, regval;
994 u8 txRxAttenLocal; 994 u8 txRxAttenLocal;
995 int i, j, offset_num; 995 int i, j, offset_num;
996 996
@@ -1077,42 +1077,37 @@ static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah,
1077 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, 1077 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
1078 AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62); 1078 AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62);
1079 1079
1080 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB1, 1080 regval = REG_READ(ah, AR9287_AN_RF2G3_CH0);
1081 AR9287_AN_RF2G3_DB1_S, pModal->db1); 1081 regval &= ~(AR9287_AN_RF2G3_DB1 |
1082 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB2, 1082 AR9287_AN_RF2G3_DB2 |
1083 AR9287_AN_RF2G3_DB2_S, pModal->db2); 1083 AR9287_AN_RF2G3_OB_CCK |
1084 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, 1084 AR9287_AN_RF2G3_OB_PSK |
1085 AR9287_AN_RF2G3_OB_CCK, 1085 AR9287_AN_RF2G3_OB_QAM |
1086 AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck); 1086 AR9287_AN_RF2G3_OB_PAL_OFF);
1087 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, 1087 regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
1088 AR9287_AN_RF2G3_OB_PSK, 1088 SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
1089 AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk); 1089 SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
1090 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, 1090 SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
1091 AR9287_AN_RF2G3_OB_QAM, 1091 SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
1092 AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam); 1092 SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
1093 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, 1093
1094 AR9287_AN_RF2G3_OB_PAL_OFF, 1094 ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH0, regval);
1095 AR9287_AN_RF2G3_OB_PAL_OFF_S, 1095
1096 pModal->ob_pal_off); 1096 regval = REG_READ(ah, AR9287_AN_RF2G3_CH1);
1097 1097 regval &= ~(AR9287_AN_RF2G3_DB1 |
1098 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, 1098 AR9287_AN_RF2G3_DB2 |
1099 AR9287_AN_RF2G3_DB1, AR9287_AN_RF2G3_DB1_S, 1099 AR9287_AN_RF2G3_OB_CCK |
1100 pModal->db1); 1100 AR9287_AN_RF2G3_OB_PSK |
1101 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, AR9287_AN_RF2G3_DB2, 1101 AR9287_AN_RF2G3_OB_QAM |
1102 AR9287_AN_RF2G3_DB2_S, pModal->db2); 1102 AR9287_AN_RF2G3_OB_PAL_OFF);
1103 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, 1103 regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
1104 AR9287_AN_RF2G3_OB_CCK, 1104 SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
1105 AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck); 1105 SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
1106 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, 1106 SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
1107 AR9287_AN_RF2G3_OB_PSK, 1107 SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
1108 AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk); 1108 SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
1109 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, 1109
1110 AR9287_AN_RF2G3_OB_QAM, 1110 ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH1, regval);
1111 AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam);
1112 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
1113 AR9287_AN_RF2G3_OB_PAL_OFF,
1114 AR9287_AN_RF2G3_OB_PAL_OFF_S,
1115 pModal->ob_pal_off);
1116 1111
1117 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, 1112 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
1118 AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); 1113 AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart);
@@ -1125,26 +1120,27 @@ static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah,
1125 pModal->xpaBiasLvl); 1120 pModal->xpaBiasLvl);
1126} 1121}
1127 1122
1128static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah, 1123static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah,
1129 enum ieee80211_band freq_band) 1124 enum ieee80211_band freq_band)
1130{ 1125{
1131 return 1; 1126 return 1;
1132} 1127}
1133 1128
1134static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah, 1129static u32 ath9k_hw_ar9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
1135 struct ath9k_channel *chan) 1130 struct ath9k_channel *chan)
1136{ 1131{
1137 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 1132 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
1138 struct modal_eep_ar9287_header *pModal = &eep->modalHeader; 1133 struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
1139 1134
1140 return pModal->antCtrlCommon & 0xFFFF; 1135 return pModal->antCtrlCommon;
1141} 1136}
1142 1137
1143static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah, 1138static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
1144 u16 i, bool is2GHz) 1139 u16 i, bool is2GHz)
1145{ 1140{
1146#define EEP_MAP9287_SPURCHAN \ 1141#define EEP_MAP9287_SPURCHAN \
1147 (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan) 1142 (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
1143
1148 struct ath_common *common = ath9k_hw_common(ah); 1144 struct ath_common *common = ath9k_hw_common(ah);
1149 u16 spur_val = AR_NO_SPUR; 1145 u16 spur_val = AR_NO_SPUR;
1150 1146
@@ -1171,15 +1167,15 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
1171} 1167}
1172 1168
1173const struct eeprom_ops eep_ar9287_ops = { 1169const struct eeprom_ops eep_ar9287_ops = {
1174 .check_eeprom = ath9k_hw_AR9287_check_eeprom, 1170 .check_eeprom = ath9k_hw_ar9287_check_eeprom,
1175 .get_eeprom = ath9k_hw_AR9287_get_eeprom, 1171 .get_eeprom = ath9k_hw_ar9287_get_eeprom,
1176 .fill_eeprom = ath9k_hw_AR9287_fill_eeprom, 1172 .fill_eeprom = ath9k_hw_ar9287_fill_eeprom,
1177 .get_eeprom_ver = ath9k_hw_AR9287_get_eeprom_ver, 1173 .get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver,
1178 .get_eeprom_rev = ath9k_hw_AR9287_get_eeprom_rev, 1174 .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev,
1179 .get_num_ant_config = ath9k_hw_AR9287_get_num_ant_config, 1175 .get_num_ant_config = ath9k_hw_ar9287_get_num_ant_config,
1180 .get_eeprom_antenna_cfg = ath9k_hw_AR9287_get_eeprom_antenna_cfg, 1176 .get_eeprom_antenna_cfg = ath9k_hw_ar9287_get_eeprom_antenna_cfg,
1181 .set_board_values = ath9k_hw_AR9287_set_board_values, 1177 .set_board_values = ath9k_hw_ar9287_set_board_values,
1182 .set_addac = ath9k_hw_AR9287_set_addac, 1178 .set_addac = ath9k_hw_ar9287_set_addac,
1183 .set_txpower = ath9k_hw_AR9287_set_txpower, 1179 .set_txpower = ath9k_hw_ar9287_set_txpower,
1184 .get_spur_channel = ath9k_hw_AR9287_get_spur_channel 1180 .get_spur_channel = ath9k_hw_ar9287_get_spur_channel
1185}; 1181};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 7e1ed78d0e64..afa2b73ddbdd 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -593,7 +593,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
593 struct ath9k_channel *chan, 593 struct ath9k_channel *chan,
594 struct cal_data_per_freq *pRawDataSet, 594 struct cal_data_per_freq *pRawDataSet,
595 u8 *bChans, u16 availPiers, 595 u8 *bChans, u16 availPiers,
596 u16 tPdGainOverlap, int16_t *pMinCalPower, 596 u16 tPdGainOverlap,
597 u16 *pPdGainBoundaries, u8 *pPDADCValues, 597 u16 *pPdGainBoundaries, u8 *pPDADCValues,
598 u16 numXpdGains) 598 u16 numXpdGains)
599{ 599{
@@ -617,6 +617,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
617 int16_t minDelta = 0; 617 int16_t minDelta = 0;
618 struct chan_centers centers; 618 struct chan_centers centers;
619 619
620 memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
620 ath9k_hw_get_channel_centers(ah, chan, &centers); 621 ath9k_hw_get_channel_centers(ah, chan, &centers);
621 622
622 for (numPiers = 0; numPiers < availPiers; numPiers++) { 623 for (numPiers = 0; numPiers < availPiers; numPiers++) {
@@ -674,8 +675,6 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
674 } 675 }
675 } 676 }
676 677
677 *pMinCalPower = (int16_t)(minPwrT4[0] / 2);
678
679 k = 0; 678 k = 0;
680 679
681 for (i = 0; i < numXpdGains; i++) { 680 for (i = 0; i < numXpdGains; i++) {
@@ -729,7 +728,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
729 vpdTableI[i][sizeCurrVpdTable - 2]); 728 vpdTableI[i][sizeCurrVpdTable - 2]);
730 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); 729 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
731 730
732 if (tgtIndex > maxIndex) { 731 if (tgtIndex >= maxIndex) {
733 while ((ss <= tgtIndex) && 732 while ((ss <= tgtIndex) &&
734 (k < (AR5416_NUM_PDADC_VALUES - 1))) { 733 (k < (AR5416_NUM_PDADC_VALUES - 1))) {
735 tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] + 734 tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
@@ -837,7 +836,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
837 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; 836 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
838 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; 837 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
839 u16 numPiers, i, j; 838 u16 numPiers, i, j;
840 int16_t tMinCalPower, diff = 0; 839 int16_t diff = 0;
841 u16 numXpdGain, xpdMask; 840 u16 numXpdGain, xpdMask;
842 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 }; 841 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
843 u32 reg32, regOffset, regChainOffset; 842 u32 reg32, regOffset, regChainOffset;
@@ -922,7 +921,6 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
922 chan, pRawDataset, 921 chan, pRawDataset,
923 pCalBChans, numPiers, 922 pCalBChans, numPiers,
924 pdGainOverlap_t2, 923 pdGainOverlap_t2,
925 &tMinCalPower,
926 gainBoundaries, 924 gainBoundaries,
927 pdadcValues, 925 pdadcValues,
928 numXpdGain); 926 numXpdGain);
@@ -1437,14 +1435,14 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
1437 return num_ant_config; 1435 return num_ant_config;
1438} 1436}
1439 1437
1440static u16 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah, 1438static u32 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
1441 struct ath9k_channel *chan) 1439 struct ath9k_channel *chan)
1442{ 1440{
1443 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1441 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1444 struct modal_eep_header *pModal = 1442 struct modal_eep_header *pModal =
1445 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); 1443 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
1446 1444
1447 return pModal->antCtrlCommon & 0xFFFF; 1445 return pModal->antCtrlCommon;
1448} 1446}
1449 1447
1450static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) 1448static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 0ee75e79fe35..3a8ee999da5d 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -76,7 +76,8 @@ static void ath_led_brightness(struct led_classdev *led_cdev,
76 case LED_FULL: 76 case LED_FULL:
77 if (led->led_type == ATH_LED_ASSOC) { 77 if (led->led_type == ATH_LED_ASSOC) {
78 sc->sc_flags |= SC_OP_LED_ASSOCIATED; 78 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
79 ieee80211_queue_delayed_work(sc->hw, 79 if (led_blink)
80 ieee80211_queue_delayed_work(sc->hw,
80 &sc->ath_led_blink_work, 0); 81 &sc->ath_led_blink_work, 0);
81 } else if (led->led_type == ATH_LED_RADIO) { 82 } else if (led->led_type == ATH_LED_RADIO) {
82 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); 83 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
@@ -143,7 +144,8 @@ void ath_init_leds(struct ath_softc *sc)
143 /* LED off, active low */ 144 /* LED off, active low */
144 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 145 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
145 146
146 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); 147 if (led_blink)
148 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
147 149
148 trigger = ieee80211_get_radio_led_name(sc->hw); 150 trigger = ieee80211_get_radio_led_name(sc->hw);
149 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), 151 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
@@ -180,7 +182,8 @@ void ath_init_leds(struct ath_softc *sc)
180 return; 182 return;
181 183
182fail: 184fail:
183 cancel_delayed_work_sync(&sc->ath_led_blink_work); 185 if (led_blink)
186 cancel_delayed_work_sync(&sc->ath_led_blink_work);
184 ath_deinit_leds(sc); 187 ath_deinit_leds(sc);
185} 188}
186 189
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 23c15aa9fbd5..61c1bee3f26a 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -16,12 +16,27 @@
16 16
17#include "htc.h" 17#include "htc.h"
18 18
19#define ATH9K_FW_USB_DEV(devid, fw) \ 19/* identify firmware images */
20 { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw } 20#define FIRMWARE_AR7010 "ar7010.fw"
21#define FIRMWARE_AR7010_1_1 "ar7010_1_1.fw"
22#define FIRMWARE_AR9271 "ar9271.fw"
23
24MODULE_FIRMWARE(FIRMWARE_AR7010);
25MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
26MODULE_FIRMWARE(FIRMWARE_AR9271);
21 27
22static struct usb_device_id ath9k_hif_usb_ids[] = { 28static struct usb_device_id ath9k_hif_usb_ids[] = {
23 ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"), 29 { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
24 ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"), 30 { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
31 { USB_DEVICE(0x0cf3, 0x7010) }, /* Atheros */
32 { USB_DEVICE(0x0cf3, 0x7015) }, /* Atheros */
33 { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
34 { USB_DEVICE(0x0846, 0x9018) }, /* Netgear WNDA3200 */
35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
38 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
39 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
25 { }, 40 { },
26}; 41};
27 42
@@ -760,6 +775,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
760 size_t len = hif_dev->firmware->size; 775 size_t len = hif_dev->firmware->size;
761 u32 addr = AR9271_FIRMWARE; 776 u32 addr = AR9271_FIRMWARE;
762 u8 *buf = kzalloc(4096, GFP_KERNEL); 777 u8 *buf = kzalloc(4096, GFP_KERNEL);
778 u32 firm_offset;
763 779
764 if (!buf) 780 if (!buf)
765 return -ENOMEM; 781 return -ENOMEM;
@@ -783,32 +799,37 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
783 } 799 }
784 kfree(buf); 800 kfree(buf);
785 801
802 if (hif_dev->device_id == 0x7010)
803 firm_offset = AR7010_FIRMWARE_TEXT;
804 else
805 firm_offset = AR9271_FIRMWARE_TEXT;
806
786 /* 807 /*
787 * Issue FW download complete command to firmware. 808 * Issue FW download complete command to firmware.
788 */ 809 */
789 err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0), 810 err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
790 FIRMWARE_DOWNLOAD_COMP, 811 FIRMWARE_DOWNLOAD_COMP,
791 0x40 | USB_DIR_OUT, 812 0x40 | USB_DIR_OUT,
792 AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ); 813 firm_offset >> 8, 0, NULL, 0, HZ);
793 if (err) 814 if (err)
794 return -EIO; 815 return -EIO;
795 816
796 dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n", 817 dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
797 "ar9271.fw", (unsigned long) hif_dev->firmware->size); 818 hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);
798 819
799 return 0; 820 return 0;
800} 821}
801 822
802static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, 823static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
803 const char *fw_name)
804{ 824{
805 int ret; 825 int ret;
806 826
807 /* Request firmware */ 827 /* Request firmware */
808 ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev); 828 ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name,
829 &hif_dev->udev->dev);
809 if (ret) { 830 if (ret) {
810 dev_err(&hif_dev->udev->dev, 831 dev_err(&hif_dev->udev->dev,
811 "ath9k_htc: Firmware - %s not found\n", fw_name); 832 "ath9k_htc: Firmware - %s not found\n", hif_dev->fw_name);
812 goto err_fw_req; 833 goto err_fw_req;
813 } 834 }
814 835
@@ -824,7 +845,8 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
824 ret = ath9k_hif_usb_download_fw(hif_dev); 845 ret = ath9k_hif_usb_download_fw(hif_dev);
825 if (ret) { 846 if (ret) {
826 dev_err(&hif_dev->udev->dev, 847 dev_err(&hif_dev->udev->dev,
827 "ath9k_htc: Firmware - %s download failed\n", fw_name); 848 "ath9k_htc: Firmware - %s download failed\n",
849 hif_dev->fw_name);
828 goto err_fw_download; 850 goto err_fw_download;
829 } 851 }
830 852
@@ -851,7 +873,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
851{ 873{
852 struct usb_device *udev = interface_to_usbdev(interface); 874 struct usb_device *udev = interface_to_usbdev(interface);
853 struct hif_device_usb *hif_dev; 875 struct hif_device_usb *hif_dev;
854 const char *fw_name = (const char *) id->driver_info;
855 int ret = 0; 876 int ret = 0;
856 877
857 hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL); 878 hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
@@ -876,7 +897,27 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
876 goto err_htc_hw_alloc; 897 goto err_htc_hw_alloc;
877 } 898 }
878 899
879 ret = ath9k_hif_usb_dev_init(hif_dev, fw_name); 900 /* Find out which firmware to load */
901
902 switch(hif_dev->device_id) {
903 case 0x7010:
904 case 0x9018:
905 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
906 hif_dev->fw_name = FIRMWARE_AR7010_1_1;
907 else
908 hif_dev->fw_name = FIRMWARE_AR7010;
909 break;
910 default:
911 hif_dev->fw_name = FIRMWARE_AR9271;
912 break;
913 }
914
915 if (!hif_dev->fw_name) {
916 dev_err(&udev->dev, "Can't determine firmware !\n");
917 goto err_htc_hw_alloc;
918 }
919
920 ret = ath9k_hif_usb_dev_init(hif_dev);
880 if (ret) { 921 if (ret) {
881 ret = -EINVAL; 922 ret = -EINVAL;
882 goto err_hif_init_usb; 923 goto err_hif_init_usb;
@@ -911,12 +952,10 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
911 void *buf; 952 void *buf;
912 int ret; 953 int ret;
913 954
914 buf = kmalloc(4, GFP_KERNEL); 955 buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL);
915 if (!buf) 956 if (!buf)
916 return; 957 return;
917 958
918 memcpy(buf, &reboot_cmd, 4);
919
920 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE), 959 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
921 buf, 4, NULL, HZ); 960 buf, 4, NULL, HZ);
922 if (ret) 961 if (ret)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 0aca49b6fcb6..2daf97b11c08 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -19,6 +19,7 @@
19 19
20#define AR9271_FIRMWARE 0x501000 20#define AR9271_FIRMWARE 0x501000
21#define AR9271_FIRMWARE_TEXT 0x903000 21#define AR9271_FIRMWARE_TEXT 0x903000
22#define AR7010_FIRMWARE_TEXT 0x906000
22 23
23#define FIRMWARE_DOWNLOAD 0x30 24#define FIRMWARE_DOWNLOAD 0x30
24#define FIRMWARE_DOWNLOAD_COMP 0x31 25#define FIRMWARE_DOWNLOAD_COMP 0x31
@@ -90,6 +91,7 @@ struct hif_device_usb {
90 struct usb_anchor regout_submitted; 91 struct usb_anchor regout_submitted;
91 struct usb_anchor rx_submitted; 92 struct usb_anchor rx_submitted;
92 struct sk_buff *remain_skb; 93 struct sk_buff *remain_skb;
94 const char *fw_name;
93 int rx_remain_len; 95 int rx_remain_len;
94 int rx_pkt_len; 96 int rx_pkt_len;
95 int rx_transfer_len; 97 int rx_transfer_len;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index c251603ab032..3756400e6bf9 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -223,15 +223,6 @@ struct ath9k_htc_sta {
223 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID]; 223 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
224}; 224};
225 225
226struct ath9k_htc_aggr_work {
227 u16 tid;
228 u8 sta_addr[ETH_ALEN];
229 struct ieee80211_hw *hw;
230 struct ieee80211_vif *vif;
231 enum ieee80211_ampdu_mlme_action action;
232 struct mutex mutex;
233};
234
235#define ATH9K_HTC_RXBUF 256 226#define ATH9K_HTC_RXBUF 256
236#define HTC_RX_FRAME_HEADER_SIZE 40 227#define HTC_RX_FRAME_HEADER_SIZE 40
237 228
@@ -257,12 +248,15 @@ struct ath9k_htc_tx_ctl {
257#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) 248#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
258#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++) 249#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
259 250
251#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
252
260struct ath_tx_stats { 253struct ath_tx_stats {
261 u32 buf_queued; 254 u32 buf_queued;
262 u32 buf_completed; 255 u32 buf_completed;
263 u32 skb_queued; 256 u32 skb_queued;
264 u32 skb_completed; 257 u32 skb_completed;
265 u32 skb_dropped; 258 u32 skb_dropped;
259 u32 queue_stats[WME_NUM_AC];
266}; 260};
267 261
268struct ath_rx_stats { 262struct ath_rx_stats {
@@ -286,11 +280,14 @@ struct ath9k_debug {
286#define TX_STAT_INC(c) do { } while (0) 280#define TX_STAT_INC(c) do { } while (0)
287#define RX_STAT_INC(c) do { } while (0) 281#define RX_STAT_INC(c) do { } while (0)
288 282
283#define TX_QSTAT_INC(c) do { } while (0)
284
289#endif /* CONFIG_ATH9K_HTC_DEBUGFS */ 285#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
290 286
291#define ATH_LED_PIN_DEF 1 287#define ATH_LED_PIN_DEF 1
292#define ATH_LED_PIN_9287 8 288#define ATH_LED_PIN_9287 8
293#define ATH_LED_PIN_9271 15 289#define ATH_LED_PIN_9271 15
290#define ATH_LED_PIN_7010 12
294#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */ 291#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
295#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */ 292#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
296 293
@@ -326,11 +323,10 @@ struct htc_beacon_config {
326#define OP_LED_ON BIT(4) 323#define OP_LED_ON BIT(4)
327#define OP_PREAMBLE_SHORT BIT(5) 324#define OP_PREAMBLE_SHORT BIT(5)
328#define OP_PROTECT_ENABLE BIT(6) 325#define OP_PROTECT_ENABLE BIT(6)
329#define OP_TXAGGR BIT(7) 326#define OP_ASSOCIATED BIT(7)
330#define OP_ASSOCIATED BIT(8) 327#define OP_ENABLE_BEACON BIT(8)
331#define OP_ENABLE_BEACON BIT(9) 328#define OP_LED_DEINIT BIT(9)
332#define OP_LED_DEINIT BIT(10) 329#define OP_UNPLUGGED BIT(10)
333#define OP_UNPLUGGED BIT(11)
334 330
335struct ath9k_htc_priv { 331struct ath9k_htc_priv {
336 struct device *dev; 332 struct device *dev;
@@ -371,8 +367,6 @@ struct ath9k_htc_priv {
371 struct ath9k_htc_rx rx; 367 struct ath9k_htc_rx rx;
372 struct tasklet_struct tx_tasklet; 368 struct tasklet_struct tx_tasklet;
373 struct sk_buff_head tx_queue; 369 struct sk_buff_head tx_queue;
374 struct ath9k_htc_aggr_work aggr_work;
375 struct delayed_work ath9k_aggr_work;
376 struct delayed_work ath9k_ani_work; 370 struct delayed_work ath9k_ani_work;
377 struct work_struct ps_work; 371 struct work_struct ps_work;
378 372
@@ -390,13 +384,14 @@ struct ath9k_htc_priv {
390 int led_off_duration; 384 int led_off_duration;
391 int led_on_cnt; 385 int led_on_cnt;
392 int led_off_cnt; 386 int led_off_cnt;
393 int hwq_map[ATH9K_WME_AC_VO+1]; 387
388 int beaconq;
389 int cabq;
390 int hwq_map[WME_NUM_AC];
394 391
395#ifdef CONFIG_ATH9K_HTC_DEBUGFS 392#ifdef CONFIG_ATH9K_HTC_DEBUGFS
396 struct ath9k_debug debug; 393 struct ath9k_debug debug;
397#endif 394#endif
398 struct ath9k_htc_target_rate tgt_rate;
399
400 struct mutex mutex; 395 struct mutex mutex;
401}; 396};
402 397
@@ -405,6 +400,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
405 common->bus_ops->read_cachesize(common, csz); 400 common->bus_ops->read_cachesize(common, csz);
406} 401}
407 402
403void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
408void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 404void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
409 struct ieee80211_vif *vif); 405 struct ieee80211_vif *vif);
410void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending); 406void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
@@ -424,8 +420,8 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv);
424void ath9k_tx_tasklet(unsigned long data); 420void ath9k_tx_tasklet(unsigned long data);
425int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb); 421int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
426void ath9k_tx_cleanup(struct ath9k_htc_priv *priv); 422void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
427bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, 423bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
428 enum ath9k_tx_queue_subtype qtype); 424int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv);
429int get_hw_qnum(u16 queue, int *hwq_map); 425int get_hw_qnum(u16 queue, int *hwq_map);
430int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum, 426int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
431 struct ath9k_tx_queue_info *qinfo); 427 struct ath9k_tx_queue_info *qinfo);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index c10c7d002eb7..bd1506e69105 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -222,6 +222,29 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
222 spin_unlock_bh(&priv->beacon_lock); 222 spin_unlock_bh(&priv->beacon_lock);
223} 223}
224 224
225/* Currently, only for IBSS */
226void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
227{
228 struct ath_hw *ah = priv->ah;
229 struct ath9k_tx_queue_info qi, qi_be;
230 int qnum = priv->hwq_map[WME_AC_BE];
231
232 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
233 memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
234
235 ath9k_hw_get_txq_props(ah, qnum, &qi_be);
236
237 qi.tqi_aifs = qi_be.tqi_aifs;
238 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
239 qi.tqi_cwmax = qi_be.tqi_cwmax;
240
241 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
242 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
243 "Unable to update beacon queue %u!\n", qnum);
244 } else {
245 ath9k_hw_resettxqueue(ah, priv->beaconq);
246 }
247}
225 248
226void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 249void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
227 struct ieee80211_vif *vif) 250 struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index dc015077a8d9..148b43317fdb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,13 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
34 .max_power = 20, \ 34 .max_power = 20, \
35} 35}
36 36
37#define CHAN5G(_freq, _idx) { \
38 .band = IEEE80211_BAND_5GHZ, \
39 .center_freq = (_freq), \
40 .hw_value = (_idx), \
41 .max_power = 20, \
42}
43
37static struct ieee80211_channel ath9k_2ghz_channels[] = { 44static struct ieee80211_channel ath9k_2ghz_channels[] = {
38 CHAN2G(2412, 0), /* Channel 1 */ 45 CHAN2G(2412, 0), /* Channel 1 */
39 CHAN2G(2417, 1), /* Channel 2 */ 46 CHAN2G(2417, 1), /* Channel 2 */
@@ -51,6 +58,37 @@ static struct ieee80211_channel ath9k_2ghz_channels[] = {
51 CHAN2G(2484, 13), /* Channel 14 */ 58 CHAN2G(2484, 13), /* Channel 14 */
52}; 59};
53 60
61static struct ieee80211_channel ath9k_5ghz_channels[] = {
62 /* _We_ call this UNII 1 */
63 CHAN5G(5180, 14), /* Channel 36 */
64 CHAN5G(5200, 15), /* Channel 40 */
65 CHAN5G(5220, 16), /* Channel 44 */
66 CHAN5G(5240, 17), /* Channel 48 */
67 /* _We_ call this UNII 2 */
68 CHAN5G(5260, 18), /* Channel 52 */
69 CHAN5G(5280, 19), /* Channel 56 */
70 CHAN5G(5300, 20), /* Channel 60 */
71 CHAN5G(5320, 21), /* Channel 64 */
72 /* _We_ call this "Middle band" */
73 CHAN5G(5500, 22), /* Channel 100 */
74 CHAN5G(5520, 23), /* Channel 104 */
75 CHAN5G(5540, 24), /* Channel 108 */
76 CHAN5G(5560, 25), /* Channel 112 */
77 CHAN5G(5580, 26), /* Channel 116 */
78 CHAN5G(5600, 27), /* Channel 120 */
79 CHAN5G(5620, 28), /* Channel 124 */
80 CHAN5G(5640, 29), /* Channel 128 */
81 CHAN5G(5660, 30), /* Channel 132 */
82 CHAN5G(5680, 31), /* Channel 136 */
83 CHAN5G(5700, 32), /* Channel 140 */
84 /* _We_ call this UNII 3 */
85 CHAN5G(5745, 33), /* Channel 149 */
86 CHAN5G(5765, 34), /* Channel 153 */
87 CHAN5G(5785, 35), /* Channel 157 */
88 CHAN5G(5805, 36), /* Channel 161 */
89 CHAN5G(5825, 37), /* Channel 165 */
90};
91
54/* Atheros hardware rate code addition for short premble */ 92/* Atheros hardware rate code addition for short premble */
55#define SHPCHECK(__hw_rate, __flags) \ 93#define SHPCHECK(__hw_rate, __flags) \
56 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0) 94 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
@@ -141,7 +179,7 @@ static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
141 return htc_connect_service(priv->htc, &req, ep_id); 179 return htc_connect_service(priv->htc, &req, ep_id);
142} 180}
143 181
144static int ath9k_init_htc_services(struct ath9k_htc_priv *priv) 182static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
145{ 183{
146 int ret; 184 int ret;
147 185
@@ -199,10 +237,28 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
199 if (ret) 237 if (ret)
200 goto err; 238 goto err;
201 239
240 /*
241 * Setup required credits before initializing HTC.
242 * This is a bit hacky, but, since queuing is done in
243 * the HIF layer, shouldn't matter much.
244 */
245
246 switch(devid) {
247 case 0x7010:
248 case 0x9018:
249 priv->htc->credits = 45;
250 break;
251 default:
252 priv->htc->credits = 33;
253 }
254
202 ret = htc_init(priv->htc); 255 ret = htc_init(priv->htc);
203 if (ret) 256 if (ret)
204 goto err; 257 goto err;
205 258
259 dev_info(priv->dev, "ath9k_htc: HTC initialized with %d credits\n",
260 priv->htc->credits);
261
206 return 0; 262 return 0;
207 263
208err: 264err:
@@ -398,17 +454,43 @@ static const struct ath_bus_ops ath9k_usb_bus_ops = {
398static void setup_ht_cap(struct ath9k_htc_priv *priv, 454static void setup_ht_cap(struct ath9k_htc_priv *priv,
399 struct ieee80211_sta_ht_cap *ht_info) 455 struct ieee80211_sta_ht_cap *ht_info)
400{ 456{
457 struct ath_common *common = ath9k_hw_common(priv->ah);
458 u8 tx_streams, rx_streams;
459 int i;
460
401 ht_info->ht_supported = true; 461 ht_info->ht_supported = true;
402 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 462 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
403 IEEE80211_HT_CAP_SM_PS | 463 IEEE80211_HT_CAP_SM_PS |
404 IEEE80211_HT_CAP_SGI_40 | 464 IEEE80211_HT_CAP_SGI_40 |
405 IEEE80211_HT_CAP_DSSSCCK40; 465 IEEE80211_HT_CAP_DSSSCCK40;
406 466
467 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
468 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
469
470 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
471
407 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 472 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
408 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 473 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
409 474
410 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 475 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
411 ht_info->mcs.rx_mask[0] = 0xff; 476
477 /* ath9k_htc supports only 1 or 2 stream devices */
478 tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2);
479 rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2);
480
481 ath_print(common, ATH_DBG_CONFIG,
482 "TX streams %d, RX streams: %d\n",
483 tx_streams, rx_streams);
484
485 if (tx_streams != rx_streams) {
486 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
487 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
488 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
489 }
490
491 for (i = 0; i < rx_streams; i++)
492 ht_info->mcs.rx_mask[i] = 0xff;
493
412 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 494 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
413} 495}
414 496
@@ -420,23 +502,37 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
420 for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++) 502 for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
421 priv->hwq_map[i] = -1; 503 priv->hwq_map[i] = -1;
422 504
423 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) { 505 priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
506 if (priv->beaconq == -1) {
507 ath_print(common, ATH_DBG_FATAL,
508 "Unable to setup BEACON xmit queue\n");
509 goto err;
510 }
511
512 priv->cabq = ath9k_htc_cabq_setup(priv);
513 if (priv->cabq == -1) {
514 ath_print(common, ATH_DBG_FATAL,
515 "Unable to setup CAB xmit queue\n");
516 goto err;
517 }
518
519 if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
424 ath_print(common, ATH_DBG_FATAL, 520 ath_print(common, ATH_DBG_FATAL,
425 "Unable to setup xmit queue for BE traffic\n"); 521 "Unable to setup xmit queue for BE traffic\n");
426 goto err; 522 goto err;
427 } 523 }
428 524
429 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) { 525 if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
430 ath_print(common, ATH_DBG_FATAL, 526 ath_print(common, ATH_DBG_FATAL,
431 "Unable to setup xmit queue for BK traffic\n"); 527 "Unable to setup xmit queue for BK traffic\n");
432 goto err; 528 goto err;
433 } 529 }
434 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) { 530 if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
435 ath_print(common, ATH_DBG_FATAL, 531 ath_print(common, ATH_DBG_FATAL,
436 "Unable to setup xmit queue for VI traffic\n"); 532 "Unable to setup xmit queue for VI traffic\n");
437 goto err; 533 goto err;
438 } 534 }
439 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) { 535 if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
440 ath_print(common, ATH_DBG_FATAL, 536 ath_print(common, ATH_DBG_FATAL,
441 "Unable to setup xmit queue for VO traffic\n"); 537 "Unable to setup xmit queue for VO traffic\n");
442 goto err; 538 goto err;
@@ -468,36 +564,6 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
468 */ 564 */
469 for (i = 0; i < common->keymax; i++) 565 for (i = 0; i < common->keymax; i++)
470 ath9k_hw_keyreset(priv->ah, (u16) i); 566 ath9k_hw_keyreset(priv->ah, (u16) i);
471
472 if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
473 ATH9K_CIPHER_TKIP, NULL)) {
474 /*
475 * Whether we should enable h/w TKIP MIC.
476 * XXX: if we don't support WME TKIP MIC, then we wouldn't
477 * report WMM capable, so it's always safe to turn on
478 * TKIP MIC in this case.
479 */
480 ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
481 }
482
483 /*
484 * Check whether the separate key cache entries
485 * are required to handle both tx+rx MIC keys.
486 * With split mic keys the number of stations is limited
487 * to 27 otherwise 59.
488 */
489 if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
490 ATH9K_CIPHER_TKIP, NULL)
491 && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
492 ATH9K_CIPHER_MIC, NULL)
493 && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
494 0, NULL))
495 common->splitmic = 1;
496
497 /* turn on mcast key search if possible */
498 if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
499 (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
500 1, 1, NULL);
501} 567}
502 568
503static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv) 569static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
@@ -512,6 +578,17 @@ static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
512 priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates = 578 priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
513 ARRAY_SIZE(ath9k_legacy_rates); 579 ARRAY_SIZE(ath9k_legacy_rates);
514 } 580 }
581
582 if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) {
583 priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
584 priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
585 priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
586 ARRAY_SIZE(ath9k_5ghz_channels);
587 priv->sbands[IEEE80211_BAND_5GHZ].bitrates =
588 ath9k_legacy_rates + 4;
589 priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
590 ARRAY_SIZE(ath9k_legacy_rates) - 4;
591 }
515} 592}
516 593
517static void ath9k_init_misc(struct ath9k_htc_priv *priv) 594static void ath9k_init_misc(struct ath9k_htc_priv *priv)
@@ -524,7 +601,6 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
524 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 601 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
525 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); 602 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
526 603
527 priv->op_flags |= OP_TXAGGR;
528 priv->ah->opmode = NL80211_IFTYPE_STATION; 604 priv->ah->opmode = NL80211_IFTYPE_STATION;
529} 605}
530 606
@@ -556,14 +632,12 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
556 spin_lock_init(&priv->beacon_lock); 632 spin_lock_init(&priv->beacon_lock);
557 spin_lock_init(&priv->tx_lock); 633 spin_lock_init(&priv->tx_lock);
558 mutex_init(&priv->mutex); 634 mutex_init(&priv->mutex);
559 mutex_init(&priv->aggr_work.mutex);
560 mutex_init(&priv->htc_pm_lock); 635 mutex_init(&priv->htc_pm_lock);
561 tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet, 636 tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
562 (unsigned long)priv); 637 (unsigned long)priv);
563 tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet, 638 tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
564 (unsigned long)priv); 639 (unsigned long)priv);
565 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv); 640 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
566 INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
567 INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work); 641 INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
568 INIT_WORK(&priv->ps_work, ath9k_ps_work); 642 INIT_WORK(&priv->ps_work, ath9k_ps_work);
569 643
@@ -643,11 +717,17 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
643 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) 717 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
644 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 718 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
645 &priv->sbands[IEEE80211_BAND_2GHZ]; 719 &priv->sbands[IEEE80211_BAND_2GHZ];
720 if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes))
721 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
722 &priv->sbands[IEEE80211_BAND_5GHZ];
646 723
647 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 724 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
648 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) 725 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
649 setup_ht_cap(priv, 726 setup_ht_cap(priv,
650 &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap); 727 &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
728 if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes))
729 setup_ht_cap(priv,
730 &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
651 } 731 }
652 732
653 SET_IEEE80211_PERM_ADDR(hw, common->macaddr); 733 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
@@ -747,7 +827,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
747 goto err_free; 827 goto err_free;
748 } 828 }
749 829
750 ret = ath9k_init_htc_services(priv); 830 ret = ath9k_init_htc_services(priv, devid);
751 if (ret) 831 if (ret)
752 goto err_init; 832 goto err_init;
753 833
@@ -790,7 +870,8 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
790 if (ret) 870 if (ret)
791 return ret; 871 return ret;
792 872
793 ret = ath9k_init_htc_services(htc_handle->drv_priv); 873 ret = ath9k_init_htc_services(htc_handle->drv_priv,
874 htc_handle->drv_priv->ah->hw_version.devid);
794 return ret; 875 return ret;
795} 876}
796#endif 877#endif
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9d371c18eb41..cf9bcc67ade2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -27,13 +27,11 @@ static struct dentry *ath9k_debugfs_root;
27static void ath_update_txpow(struct ath9k_htc_priv *priv) 27static void ath_update_txpow(struct ath9k_htc_priv *priv)
28{ 28{
29 struct ath_hw *ah = priv->ah; 29 struct ath_hw *ah = priv->ah;
30 u32 txpow;
31 30
32 if (priv->curtxpow != priv->txpowlimit) { 31 if (priv->curtxpow != priv->txpowlimit) {
33 ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit); 32 ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
34 /* read back in case value is clamped */ 33 /* read back in case value is clamped */
35 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); 34 priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
36 priv->curtxpow = txpow;
37 } 35 }
38} 36}
39 37
@@ -325,142 +323,129 @@ static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
325 tcap.flags_ext = 0x80601000; 323 tcap.flags_ext = 0x80601000;
326 tcap.ampdu_limit = 0xffff0000; 324 tcap.ampdu_limit = 0xffff0000;
327 tcap.ampdu_subframes = 20; 325 tcap.ampdu_subframes = 20;
328 tcap.tx_chainmask_legacy = 1; 326 tcap.tx_chainmask_legacy = priv->ah->caps.tx_chainmask;
329 tcap.protmode = 1; 327 tcap.protmode = 1;
330 tcap.tx_chainmask = 1; 328 tcap.tx_chainmask = priv->ah->caps.tx_chainmask;
331 329
332 WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap); 330 WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
333 331
334 return ret; 332 return ret;
335} 333}
336 334
337static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv, 335static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
338 struct ieee80211_vif *vif, 336 struct ieee80211_sta *sta,
339 struct ieee80211_sta *sta) 337 struct ath9k_htc_target_rate *trate)
340{ 338{
341 struct ath_common *common = ath9k_hw_common(priv->ah);
342 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv; 339 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
343 struct ieee80211_supported_band *sband; 340 struct ieee80211_supported_band *sband;
344 struct ath9k_htc_target_rate trate;
345 u32 caps = 0; 341 u32 caps = 0;
346 u8 cmd_rsp; 342 int i, j;
347 int i, j, ret;
348 343
349 memset(&trate, 0, sizeof(trate)); 344 sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band];
350
351 /* Only 2GHz is supported */
352 sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
353 345
354 for (i = 0, j = 0; i < sband->n_bitrates; i++) { 346 for (i = 0, j = 0; i < sband->n_bitrates; i++) {
355 if (sta->supp_rates[sband->band] & BIT(i)) { 347 if (sta->supp_rates[sband->band] & BIT(i)) {
356 priv->tgt_rate.rates.legacy_rates.rs_rates[j] 348 trate->rates.legacy_rates.rs_rates[j]
357 = (sband->bitrates[i].bitrate * 2) / 10; 349 = (sband->bitrates[i].bitrate * 2) / 10;
358 j++; 350 j++;
359 } 351 }
360 } 352 }
361 priv->tgt_rate.rates.legacy_rates.rs_nrates = j; 353 trate->rates.legacy_rates.rs_nrates = j;
362 354
363 if (sta->ht_cap.ht_supported) { 355 if (sta->ht_cap.ht_supported) {
364 for (i = 0, j = 0; i < 77; i++) { 356 for (i = 0, j = 0; i < 77; i++) {
365 if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8))) 357 if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
366 priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i; 358 trate->rates.ht_rates.rs_rates[j++] = i;
367 if (j == ATH_HTC_RATE_MAX) 359 if (j == ATH_HTC_RATE_MAX)
368 break; 360 break;
369 } 361 }
370 priv->tgt_rate.rates.ht_rates.rs_nrates = j; 362 trate->rates.ht_rates.rs_nrates = j;
371 363
372 caps = WLAN_RC_HT_FLAG; 364 caps = WLAN_RC_HT_FLAG;
365 if (sta->ht_cap.mcs.rx_mask[1])
366 caps |= WLAN_RC_DS_FLAG;
373 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) 367 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
374 caps |= WLAN_RC_40_FLAG; 368 caps |= WLAN_RC_40_FLAG;
375 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) 369 if (conf_is_ht40(&priv->hw->conf) &&
370 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
371 caps |= WLAN_RC_SGI_FLAG;
372 else if (conf_is_ht20(&priv->hw->conf) &&
373 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
376 caps |= WLAN_RC_SGI_FLAG; 374 caps |= WLAN_RC_SGI_FLAG;
377
378 } 375 }
379 376
380 priv->tgt_rate.sta_index = ista->index; 377 trate->sta_index = ista->index;
381 priv->tgt_rate.isnew = 1; 378 trate->isnew = 1;
382 trate = priv->tgt_rate; 379 trate->capflags = cpu_to_be32(caps);
383 priv->tgt_rate.capflags = cpu_to_be32(caps); 380}
384 trate.capflags = cpu_to_be32(caps);
385 381
386 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate); 382static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv,
383 struct ath9k_htc_target_rate *trate)
384{
385 struct ath_common *common = ath9k_hw_common(priv->ah);
386 int ret;
387 u8 cmd_rsp;
388
389 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate);
387 if (ret) { 390 if (ret) {
388 ath_print(common, ATH_DBG_FATAL, 391 ath_print(common, ATH_DBG_FATAL,
389 "Unable to initialize Rate information on target\n"); 392 "Unable to initialize Rate information on target\n");
390 return ret;
391 } 393 }
392 394
393 ath_print(common, ATH_DBG_CONFIG, 395 return ret;
394 "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
395 return 0;
396} 396}
397 397
398static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40) 398static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
399 struct ieee80211_sta *sta)
399{ 400{
400 struct ath9k_htc_priv *priv = hw->priv; 401 struct ath_common *common = ath9k_hw_common(priv->ah);
401 struct ieee80211_conf *conf = &hw->conf; 402 struct ath9k_htc_target_rate trate;
402 403 int ret;
403 if (!conf_is_ht(conf))
404 return false;
405
406 if (!(priv->op_flags & OP_ASSOCIATED) ||
407 (priv->op_flags & OP_SCANNING))
408 return false;
409 404
410 if (conf_is_ht40(conf)) { 405 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
411 if (priv->ah->curchan->chanmode & 406 ath9k_htc_setup_rate(priv, sta, &trate);
412 (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) { 407 ret = ath9k_htc_send_rate_cmd(priv, &trate);
413 return false; 408 if (!ret)
414 } else { 409 ath_print(common, ATH_DBG_CONFIG,
415 *cw40 = true; 410 "Updated target sta: %pM, rate caps: 0x%X\n",
416 return true; 411 sta->addr, be32_to_cpu(trate.capflags));
417 }
418 } else { /* ht20 */
419 if (priv->ah->curchan->chanmode & CHANNEL_HT20)
420 return false;
421 else
422 return true;
423 }
424} 412}
425 413
426static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40) 414static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
415 struct ieee80211_vif *vif,
416 struct ieee80211_bss_conf *bss_conf)
427{ 417{
428 struct ath9k_htc_target_rate trate;
429 struct ath_common *common = ath9k_hw_common(priv->ah); 418 struct ath_common *common = ath9k_hw_common(priv->ah);
419 struct ath9k_htc_target_rate trate;
420 struct ieee80211_sta *sta;
430 int ret; 421 int ret;
431 u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
432 u8 cmd_rsp;
433
434 memset(&trate, 0, sizeof(trate));
435
436 trate = priv->tgt_rate;
437
438 if (is_cw40)
439 caps |= WLAN_RC_40_FLAG;
440 else
441 caps &= ~WLAN_RC_40_FLAG;
442 422
443 priv->tgt_rate.capflags = cpu_to_be32(caps); 423 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
444 trate.capflags = cpu_to_be32(caps);
445 424
446 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate); 425 rcu_read_lock();
447 if (ret) { 426 sta = ieee80211_find_sta(vif, bss_conf->bssid);
448 ath_print(common, ATH_DBG_FATAL, 427 if (!sta) {
449 "Unable to update Rate information on target\n"); 428 rcu_read_unlock();
450 return; 429 return;
451 } 430 }
431 ath9k_htc_setup_rate(priv, sta, &trate);
432 rcu_read_unlock();
452 433
453 ath_print(common, ATH_DBG_CONFIG, "Rate control updated with " 434 ret = ath9k_htc_send_rate_cmd(priv, &trate);
454 "caps:0x%x on target\n", priv->tgt_rate.capflags); 435 if (!ret)
436 ath_print(common, ATH_DBG_CONFIG,
437 "Updated target sta: %pM, rate caps: 0x%X\n",
438 bss_conf->bssid, be32_to_cpu(trate.capflags));
455} 439}
456 440
457static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv, 441static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
458 struct ieee80211_vif *vif, 442 struct ieee80211_vif *vif,
459 u8 *sta_addr, u8 tid, bool oper) 443 struct ieee80211_sta *sta,
444 enum ieee80211_ampdu_mlme_action action,
445 u16 tid)
460{ 446{
461 struct ath_common *common = ath9k_hw_common(priv->ah); 447 struct ath_common *common = ath9k_hw_common(priv->ah);
462 struct ath9k_htc_target_aggr aggr; 448 struct ath9k_htc_target_aggr aggr;
463 struct ieee80211_sta *sta = NULL;
464 struct ath9k_htc_sta *ista; 449 struct ath9k_htc_sta *ista;
465 int ret = 0; 450 int ret = 0;
466 u8 cmd_rsp; 451 u8 cmd_rsp;
@@ -469,72 +454,28 @@ static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
469 return -EINVAL; 454 return -EINVAL;
470 455
471 memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr)); 456 memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
472
473 rcu_read_lock();
474
475 /* Check if we are able to retrieve the station */
476 sta = ieee80211_find_sta(vif, sta_addr);
477 if (!sta) {
478 rcu_read_unlock();
479 return -EINVAL;
480 }
481
482 ista = (struct ath9k_htc_sta *) sta->drv_priv; 457 ista = (struct ath9k_htc_sta *) sta->drv_priv;
483 458
484 if (oper)
485 ista->tid_state[tid] = AGGR_START;
486 else
487 ista->tid_state[tid] = AGGR_STOP;
488
489 aggr.sta_index = ista->index; 459 aggr.sta_index = ista->index;
490 460 aggr.tidno = tid & 0xf;
491 rcu_read_unlock(); 461 aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START) ? true : false;
492
493 aggr.tidno = tid;
494 aggr.aggr_enable = oper;
495 462
496 WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr); 463 WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
497 if (ret) 464 if (ret)
498 ath_print(common, ATH_DBG_CONFIG, 465 ath_print(common, ATH_DBG_CONFIG,
499 "Unable to %s TX aggregation for (%pM, %d)\n", 466 "Unable to %s TX aggregation for (%pM, %d)\n",
500 (oper) ? "start" : "stop", sta->addr, tid); 467 (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
501 else 468 else
502 ath_print(common, ATH_DBG_CONFIG, 469 ath_print(common, ATH_DBG_CONFIG,
503 "%s aggregation for (%pM, %d)\n", 470 "%s TX aggregation for (%pM, %d)\n",
504 (oper) ? "Starting" : "Stopping", sta->addr, tid); 471 (aggr.aggr_enable) ? "Starting" : "Stopping",
505 472 sta->addr, tid);
506 return ret;
507}
508 473
509void ath9k_htc_aggr_work(struct work_struct *work) 474 spin_lock_bh(&priv->tx_lock);
510{ 475 ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
511 int ret = 0; 476 spin_unlock_bh(&priv->tx_lock);
512 struct ath9k_htc_priv *priv =
513 container_of(work, struct ath9k_htc_priv,
514 ath9k_aggr_work.work);
515 struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
516
517 mutex_lock(&wk->mutex);
518
519 switch (wk->action) {
520 case IEEE80211_AMPDU_TX_START:
521 ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
522 wk->tid, true);
523 if (!ret)
524 ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
525 wk->tid);
526 break;
527 case IEEE80211_AMPDU_TX_STOP:
528 ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
529 wk->tid, false);
530 ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
531 break;
532 default:
533 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
534 "Unknown AMPDU action\n");
535 }
536 477
537 mutex_unlock(&wk->mutex); 478 return ret;
538} 479}
539 480
540/*********/ 481/*********/
@@ -552,8 +493,7 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file)
552static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, 493static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
553 size_t count, loff_t *ppos) 494 size_t count, loff_t *ppos)
554{ 495{
555 struct ath9k_htc_priv *priv = 496 struct ath9k_htc_priv *priv = file->private_data;
556 (struct ath9k_htc_priv *) file->private_data;
557 struct ath9k_htc_target_stats cmd_rsp; 497 struct ath9k_htc_target_stats cmd_rsp;
558 char buf[512]; 498 char buf[512];
559 unsigned int len = 0; 499 unsigned int len = 0;
@@ -584,6 +524,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
584 len += snprintf(buf + len, sizeof(buf) - len, 524 len += snprintf(buf + len, sizeof(buf) - len,
585 "%19s : %10u\n", "TX Rate", priv->debug.txrate); 525 "%19s : %10u\n", "TX Rate", priv->debug.txrate);
586 526
527 if (len > sizeof(buf))
528 len = sizeof(buf);
529
587 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 530 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
588} 531}
589 532
@@ -596,8 +539,7 @@ static const struct file_operations fops_tgt_stats = {
596static ssize_t read_file_xmit(struct file *file, char __user *user_buf, 539static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
597 size_t count, loff_t *ppos) 540 size_t count, loff_t *ppos)
598{ 541{
599 struct ath9k_htc_priv *priv = 542 struct ath9k_htc_priv *priv = file->private_data;
600 (struct ath9k_htc_priv *) file->private_data;
601 char buf[512]; 543 char buf[512];
602 unsigned int len = 0; 544 unsigned int len = 0;
603 545
@@ -617,6 +559,22 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
617 "%20s : %10u\n", "SKBs dropped", 559 "%20s : %10u\n", "SKBs dropped",
618 priv->debug.tx_stats.skb_dropped); 560 priv->debug.tx_stats.skb_dropped);
619 561
562 len += snprintf(buf + len, sizeof(buf) - len,
563 "%20s : %10u\n", "BE queued",
564 priv->debug.tx_stats.queue_stats[WME_AC_BE]);
565 len += snprintf(buf + len, sizeof(buf) - len,
566 "%20s : %10u\n", "BK queued",
567 priv->debug.tx_stats.queue_stats[WME_AC_BK]);
568 len += snprintf(buf + len, sizeof(buf) - len,
569 "%20s : %10u\n", "VI queued",
570 priv->debug.tx_stats.queue_stats[WME_AC_VI]);
571 len += snprintf(buf + len, sizeof(buf) - len,
572 "%20s : %10u\n", "VO queued",
573 priv->debug.tx_stats.queue_stats[WME_AC_VO]);
574
575 if (len > sizeof(buf))
576 len = sizeof(buf);
577
620 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 578 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
621} 579}
622 580
@@ -629,8 +587,7 @@ static const struct file_operations fops_xmit = {
629static ssize_t read_file_recv(struct file *file, char __user *user_buf, 587static ssize_t read_file_recv(struct file *file, char __user *user_buf,
630 size_t count, loff_t *ppos) 588 size_t count, loff_t *ppos)
631{ 589{
632 struct ath9k_htc_priv *priv = 590 struct ath9k_htc_priv *priv = file->private_data;
633 (struct ath9k_htc_priv *) file->private_data;
634 char buf[512]; 591 char buf[512];
635 unsigned int len = 0; 592 unsigned int len = 0;
636 593
@@ -644,6 +601,9 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
644 "%20s : %10u\n", "SKBs Dropped", 601 "%20s : %10u\n", "SKBs Dropped",
645 priv->debug.rx_stats.skb_dropped); 602 priv->debug.rx_stats.skb_dropped);
646 603
604 if (len > sizeof(buf))
605 len = sizeof(buf);
606
647 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 607 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
648} 608}
649 609
@@ -978,6 +938,8 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
978 priv->ah->led_pin = ATH_LED_PIN_9287; 938 priv->ah->led_pin = ATH_LED_PIN_9287;
979 else if (AR_SREV_9271(priv->ah)) 939 else if (AR_SREV_9271(priv->ah))
980 priv->ah->led_pin = ATH_LED_PIN_9271; 940 priv->ah->led_pin = ATH_LED_PIN_9271;
941 else if (AR_DEVID_7010(priv->ah))
942 priv->ah->led_pin = ATH_LED_PIN_7010;
981 else 943 else
982 priv->ah->led_pin = ATH_LED_PIN_DEF; 944 priv->ah->led_pin = ATH_LED_PIN_DEF;
983 945
@@ -1054,6 +1016,95 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
1054 wiphy_rfkill_start_polling(priv->hw->wiphy); 1016 wiphy_rfkill_start_polling(priv->hw->wiphy);
1055} 1017}
1056 1018
1019static void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
1020{
1021 struct ath9k_htc_priv *priv = hw->priv;
1022 struct ath_hw *ah = priv->ah;
1023 struct ath_common *common = ath9k_hw_common(ah);
1024 int ret;
1025 u8 cmd_rsp;
1026
1027 if (!ah->curchan)
1028 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
1029
1030 /* Reset the HW */
1031 ret = ath9k_hw_reset(ah, ah->curchan, false);
1032 if (ret) {
1033 ath_print(common, ATH_DBG_FATAL,
1034 "Unable to reset hardware; reset status %d "
1035 "(freq %u MHz)\n", ret, ah->curchan->channel);
1036 }
1037
1038 ath_update_txpow(priv);
1039
1040 /* Start RX */
1041 WMI_CMD(WMI_START_RECV_CMDID);
1042 ath9k_host_rx_init(priv);
1043
1044 /* Start TX */
1045 htc_start(priv->htc);
1046 spin_lock_bh(&priv->tx_lock);
1047 priv->tx_queues_stop = false;
1048 spin_unlock_bh(&priv->tx_lock);
1049 ieee80211_wake_queues(hw);
1050
1051 WMI_CMD(WMI_ENABLE_INTR_CMDID);
1052
1053 /* Enable LED */
1054 ath9k_hw_cfg_output(ah, ah->led_pin,
1055 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1056 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
1057}
1058
1059static void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
1060{
1061 struct ath9k_htc_priv *priv = hw->priv;
1062 struct ath_hw *ah = priv->ah;
1063 struct ath_common *common = ath9k_hw_common(ah);
1064 int ret;
1065 u8 cmd_rsp;
1066
1067 ath9k_htc_ps_wakeup(priv);
1068
1069 /* Disable LED */
1070 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
1071 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
1072
1073 WMI_CMD(WMI_DISABLE_INTR_CMDID);
1074
1075 /* Stop TX */
1076 ieee80211_stop_queues(hw);
1077 htc_stop(priv->htc);
1078 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
1079 skb_queue_purge(&priv->tx_queue);
1080
1081 /* Stop RX */
1082 WMI_CMD(WMI_STOP_RECV_CMDID);
1083
1084 /*
1085 * The MIB counters have to be disabled here,
1086 * since the target doesn't do it.
1087 */
1088 ath9k_hw_disable_mib_counters(ah);
1089
1090 if (!ah->curchan)
1091 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
1092
1093 /* Reset the HW */
1094 ret = ath9k_hw_reset(ah, ah->curchan, false);
1095 if (ret) {
1096 ath_print(common, ATH_DBG_FATAL,
1097 "Unable to reset hardware; reset status %d "
1098 "(freq %u MHz)\n", ret, ah->curchan->channel);
1099 }
1100
1101 /* Disable the PHY */
1102 ath9k_hw_phy_disable(ah);
1103
1104 ath9k_htc_ps_restore(priv);
1105 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1106}
1107
1057/**********************/ 1108/**********************/
1058/* mac80211 Callbacks */ 1109/* mac80211 Callbacks */
1059/**********************/ 1110/**********************/
@@ -1099,7 +1150,7 @@ fail_tx:
1099 return 0; 1150 return 0;
1100} 1151}
1101 1152
1102static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led) 1153static int ath9k_htc_start(struct ieee80211_hw *hw)
1103{ 1154{
1104 struct ath9k_htc_priv *priv = hw->priv; 1155 struct ath9k_htc_priv *priv = hw->priv;
1105 struct ath_hw *ah = priv->ah; 1156 struct ath_hw *ah = priv->ah;
@@ -1111,10 +1162,16 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
1111 __be16 htc_mode; 1162 __be16 htc_mode;
1112 u8 cmd_rsp; 1163 u8 cmd_rsp;
1113 1164
1165 mutex_lock(&priv->mutex);
1166
1114 ath_print(common, ATH_DBG_CONFIG, 1167 ath_print(common, ATH_DBG_CONFIG,
1115 "Starting driver with initial channel: %d MHz\n", 1168 "Starting driver with initial channel: %d MHz\n",
1116 curchan->center_freq); 1169 curchan->center_freq);
1117 1170
1171 /* Ensure that HW is awake before flushing RX */
1172 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
1173 WMI_CMD(WMI_FLUSH_RECV_CMDID);
1174
1118 /* setup initial channel */ 1175 /* setup initial channel */
1119 init_channel = ath9k_cmn_get_curchannel(hw, ah); 1176 init_channel = ath9k_cmn_get_curchannel(hw, ah);
1120 1177
@@ -1127,6 +1184,7 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
1127 ath_print(common, ATH_DBG_FATAL, 1184 ath_print(common, ATH_DBG_FATAL,
1128 "Unable to reset hardware; reset status %d " 1185 "Unable to reset hardware; reset status %d "
1129 "(freq %u MHz)\n", ret, curchan->center_freq); 1186 "(freq %u MHz)\n", ret, curchan->center_freq);
1187 mutex_unlock(&priv->mutex);
1130 return ret; 1188 return ret;
1131 } 1189 }
1132 1190
@@ -1147,31 +1205,14 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
1147 priv->tx_queues_stop = false; 1205 priv->tx_queues_stop = false;
1148 spin_unlock_bh(&priv->tx_lock); 1206 spin_unlock_bh(&priv->tx_lock);
1149 1207
1150 if (led) {
1151 /* Enable LED */
1152 ath9k_hw_cfg_output(ah, ah->led_pin,
1153 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1154 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
1155 }
1156
1157 ieee80211_wake_queues(hw); 1208 ieee80211_wake_queues(hw);
1158 1209
1159 return ret;
1160}
1161
1162static int ath9k_htc_start(struct ieee80211_hw *hw)
1163{
1164 struct ath9k_htc_priv *priv = hw->priv;
1165 int ret = 0;
1166
1167 mutex_lock(&priv->mutex);
1168 ret = ath9k_htc_radio_enable(hw, false);
1169 mutex_unlock(&priv->mutex); 1210 mutex_unlock(&priv->mutex);
1170 1211
1171 return ret; 1212 return ret;
1172} 1213}
1173 1214
1174static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led) 1215static void ath9k_htc_stop(struct ieee80211_hw *hw)
1175{ 1216{
1176 struct ath9k_htc_priv *priv = hw->priv; 1217 struct ath9k_htc_priv *priv = hw->priv;
1177 struct ath_hw *ah = priv->ah; 1218 struct ath_hw *ah = priv->ah;
@@ -1179,21 +1220,17 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
1179 int ret = 0; 1220 int ret = 0;
1180 u8 cmd_rsp; 1221 u8 cmd_rsp;
1181 1222
1223 mutex_lock(&priv->mutex);
1224
1182 if (priv->op_flags & OP_INVALID) { 1225 if (priv->op_flags & OP_INVALID) {
1183 ath_print(common, ATH_DBG_ANY, "Device not present\n"); 1226 ath_print(common, ATH_DBG_ANY, "Device not present\n");
1227 mutex_unlock(&priv->mutex);
1184 return; 1228 return;
1185 } 1229 }
1186 1230
1187 if (led) {
1188 /* Disable LED */
1189 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
1190 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
1191 }
1192
1193 /* Cancel all the running timers/work .. */ 1231 /* Cancel all the running timers/work .. */
1194 cancel_work_sync(&priv->ps_work); 1232 cancel_work_sync(&priv->ps_work);
1195 cancel_delayed_work_sync(&priv->ath9k_ani_work); 1233 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1196 cancel_delayed_work_sync(&priv->ath9k_aggr_work);
1197 cancel_delayed_work_sync(&priv->ath9k_led_blink_work); 1234 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1198 ath9k_led_stop_brightness(priv); 1235 ath9k_led_stop_brightness(priv);
1199 1236
@@ -1202,12 +1239,6 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
1202 WMI_CMD(WMI_DISABLE_INTR_CMDID); 1239 WMI_CMD(WMI_DISABLE_INTR_CMDID);
1203 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 1240 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
1204 WMI_CMD(WMI_STOP_RECV_CMDID); 1241 WMI_CMD(WMI_STOP_RECV_CMDID);
1205 ath9k_hw_phy_disable(ah);
1206 ath9k_hw_disable(ah);
1207 ath9k_hw_configpcipowersave(ah, 1, 1);
1208 ath9k_htc_ps_restore(priv);
1209 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1210
1211 skb_queue_purge(&priv->tx_queue); 1242 skb_queue_purge(&priv->tx_queue);
1212 1243
1213 /* Remove monitor interface here */ 1244 /* Remove monitor interface here */
@@ -1220,21 +1251,18 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
1220 "Monitor interface removed\n"); 1251 "Monitor interface removed\n");
1221 } 1252 }
1222 1253
1254 ath9k_hw_phy_disable(ah);
1255 ath9k_hw_disable(ah);
1256 ath9k_hw_configpcipowersave(ah, 1, 1);
1257 ath9k_htc_ps_restore(priv);
1258 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1259
1223 priv->op_flags |= OP_INVALID; 1260 priv->op_flags |= OP_INVALID;
1224 1261
1225 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); 1262 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
1226}
1227
1228static void ath9k_htc_stop(struct ieee80211_hw *hw)
1229{
1230 struct ath9k_htc_priv *priv = hw->priv;
1231
1232 mutex_lock(&priv->mutex);
1233 ath9k_htc_radio_disable(hw, false);
1234 mutex_unlock(&priv->mutex); 1263 mutex_unlock(&priv->mutex);
1235} 1264}
1236 1265
1237
1238static int ath9k_htc_add_interface(struct ieee80211_hw *hw, 1266static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1239 struct ieee80211_vif *vif) 1267 struct ieee80211_vif *vif)
1240{ 1268{
@@ -1302,6 +1330,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1302out: 1330out:
1303 ath9k_htc_ps_restore(priv); 1331 ath9k_htc_ps_restore(priv);
1304 mutex_unlock(&priv->mutex); 1332 mutex_unlock(&priv->mutex);
1333
1305 return ret; 1334 return ret;
1306} 1335}
1307 1336
@@ -1318,6 +1347,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1318 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1347 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
1319 1348
1320 mutex_lock(&priv->mutex); 1349 mutex_lock(&priv->mutex);
1350 ath9k_htc_ps_wakeup(priv);
1321 1351
1322 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 1352 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
1323 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN); 1353 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
@@ -1328,6 +1358,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1328 ath9k_htc_remove_station(priv, vif, NULL); 1358 ath9k_htc_remove_station(priv, vif, NULL);
1329 priv->vif = NULL; 1359 priv->vif = NULL;
1330 1360
1361 ath9k_htc_ps_restore(priv);
1331 mutex_unlock(&priv->mutex); 1362 mutex_unlock(&priv->mutex);
1332} 1363}
1333 1364
@@ -1343,30 +1374,27 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1343 bool enable_radio = false; 1374 bool enable_radio = false;
1344 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1375 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1345 1376
1377 mutex_lock(&priv->htc_pm_lock);
1346 if (!idle && priv->ps_idle) 1378 if (!idle && priv->ps_idle)
1347 enable_radio = true; 1379 enable_radio = true;
1348
1349 priv->ps_idle = idle; 1380 priv->ps_idle = idle;
1381 mutex_unlock(&priv->htc_pm_lock);
1350 1382
1351 if (enable_radio) { 1383 if (enable_radio) {
1352 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
1353 ath9k_htc_radio_enable(hw, true);
1354 ath_print(common, ATH_DBG_CONFIG, 1384 ath_print(common, ATH_DBG_CONFIG,
1355 "not-idle: enabling radio\n"); 1385 "not-idle: enabling radio\n");
1386 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
1387 ath9k_htc_radio_enable(hw);
1356 } 1388 }
1357 } 1389 }
1358 1390
1359 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1391 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1360 struct ieee80211_channel *curchan = hw->conf.channel; 1392 struct ieee80211_channel *curchan = hw->conf.channel;
1361 int pos = curchan->hw_value; 1393 int pos = curchan->hw_value;
1362 bool is_cw40 = false;
1363 1394
1364 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 1395 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1365 curchan->center_freq); 1396 curchan->center_freq);
1366 1397
1367 if (check_rc_update(hw, &is_cw40))
1368 ath9k_htc_rc_update(priv, is_cw40);
1369
1370 ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]); 1398 ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
1371 1399
1372 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1400 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
@@ -1399,14 +1427,21 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1399 } 1427 }
1400 } 1428 }
1401 1429
1402 if (priv->ps_idle) { 1430 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1431 mutex_lock(&priv->htc_pm_lock);
1432 if (!priv->ps_idle) {
1433 mutex_unlock(&priv->htc_pm_lock);
1434 goto out;
1435 }
1436 mutex_unlock(&priv->htc_pm_lock);
1437
1403 ath_print(common, ATH_DBG_CONFIG, 1438 ath_print(common, ATH_DBG_CONFIG,
1404 "idle: disabling radio\n"); 1439 "idle: disabling radio\n");
1405 ath9k_htc_radio_disable(hw, true); 1440 ath9k_htc_radio_disable(hw);
1406 } 1441 }
1407 1442
1443out:
1408 mutex_unlock(&priv->mutex); 1444 mutex_unlock(&priv->mutex);
1409
1410 return 0; 1445 return 0;
1411} 1446}
1412 1447
@@ -1428,8 +1463,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1428 u32 rfilt; 1463 u32 rfilt;
1429 1464
1430 mutex_lock(&priv->mutex); 1465 mutex_lock(&priv->mutex);
1431
1432 ath9k_htc_ps_wakeup(priv); 1466 ath9k_htc_ps_wakeup(priv);
1467
1433 changed_flags &= SUPPORTED_FILTERS; 1468 changed_flags &= SUPPORTED_FILTERS;
1434 *total_flags &= SUPPORTED_FILTERS; 1469 *total_flags &= SUPPORTED_FILTERS;
1435 1470
@@ -1444,30 +1479,38 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1444 mutex_unlock(&priv->mutex); 1479 mutex_unlock(&priv->mutex);
1445} 1480}
1446 1481
1447static void ath9k_htc_sta_notify(struct ieee80211_hw *hw, 1482static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
1448 struct ieee80211_vif *vif, 1483 struct ieee80211_vif *vif,
1449 enum sta_notify_cmd cmd, 1484 struct ieee80211_sta *sta)
1450 struct ieee80211_sta *sta)
1451{ 1485{
1452 struct ath9k_htc_priv *priv = hw->priv; 1486 struct ath9k_htc_priv *priv = hw->priv;
1453 int ret; 1487 int ret;
1454 1488
1455 mutex_lock(&priv->mutex); 1489 mutex_lock(&priv->mutex);
1490 ath9k_htc_ps_wakeup(priv);
1491 ret = ath9k_htc_add_station(priv, vif, sta);
1492 if (!ret)
1493 ath9k_htc_init_rate(priv, sta);
1494 ath9k_htc_ps_restore(priv);
1495 mutex_unlock(&priv->mutex);
1456 1496
1457 switch (cmd) { 1497 return ret;
1458 case STA_NOTIFY_ADD: 1498}
1459 ret = ath9k_htc_add_station(priv, vif, sta); 1499
1460 if (!ret) 1500static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
1461 ath9k_htc_init_rate(priv, vif, sta); 1501 struct ieee80211_vif *vif,
1462 break; 1502 struct ieee80211_sta *sta)
1463 case STA_NOTIFY_REMOVE: 1503{
1464 ath9k_htc_remove_station(priv, vif, sta); 1504 struct ath9k_htc_priv *priv = hw->priv;
1465 break; 1505 int ret;
1466 default:
1467 break;
1468 }
1469 1506
1507 mutex_lock(&priv->mutex);
1508 ath9k_htc_ps_wakeup(priv);
1509 ret = ath9k_htc_remove_station(priv, vif, sta);
1510 ath9k_htc_ps_restore(priv);
1470 mutex_unlock(&priv->mutex); 1511 mutex_unlock(&priv->mutex);
1512
1513 return ret;
1471} 1514}
1472 1515
1473static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue, 1516static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -1482,6 +1525,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
1482 return 0; 1525 return 0;
1483 1526
1484 mutex_lock(&priv->mutex); 1527 mutex_lock(&priv->mutex);
1528 ath9k_htc_ps_wakeup(priv);
1485 1529
1486 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 1530 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
1487 1531
@@ -1499,9 +1543,16 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
1499 params->cw_max, params->txop); 1543 params->cw_max, params->txop);
1500 1544
1501 ret = ath_htc_txq_update(priv, qnum, &qi); 1545 ret = ath_htc_txq_update(priv, qnum, &qi);
1502 if (ret) 1546 if (ret) {
1503 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); 1547 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
1548 goto out;
1549 }
1504 1550
1551 if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
1552 (qnum == priv->hwq_map[WME_AC_BE]))
1553 ath9k_htc_beaconq_config(priv);
1554out:
1555 ath9k_htc_ps_restore(priv);
1505 mutex_unlock(&priv->mutex); 1556 mutex_unlock(&priv->mutex);
1506 1557
1507 return ret; 1558 return ret;
@@ -1574,7 +1625,6 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1574 ath_start_ani(priv); 1625 ath_start_ani(priv);
1575 } else { 1626 } else {
1576 priv->op_flags &= ~OP_ASSOCIATED; 1627 priv->op_flags &= ~OP_ASSOCIATED;
1577 cancel_work_sync(&priv->ps_work);
1578 cancel_delayed_work_sync(&priv->ath9k_ani_work); 1628 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1579 } 1629 }
1580 } 1630 }
@@ -1631,6 +1681,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1631 ath9k_hw_init_global_settings(ah); 1681 ath9k_hw_init_global_settings(ah);
1632 } 1682 }
1633 1683
1684 if (changed & BSS_CHANGED_HT)
1685 ath9k_htc_update_rate(priv, vif, bss_conf);
1686
1634 ath9k_htc_ps_restore(priv); 1687 ath9k_htc_ps_restore(priv);
1635 mutex_unlock(&priv->mutex); 1688 mutex_unlock(&priv->mutex);
1636} 1689}
@@ -1641,7 +1694,9 @@ static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
1641 u64 tsf; 1694 u64 tsf;
1642 1695
1643 mutex_lock(&priv->mutex); 1696 mutex_lock(&priv->mutex);
1697 ath9k_htc_ps_wakeup(priv);
1644 tsf = ath9k_hw_gettsf64(priv->ah); 1698 tsf = ath9k_hw_gettsf64(priv->ah);
1699 ath9k_htc_ps_restore(priv);
1645 mutex_unlock(&priv->mutex); 1700 mutex_unlock(&priv->mutex);
1646 1701
1647 return tsf; 1702 return tsf;
@@ -1652,7 +1707,9 @@ static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
1652 struct ath9k_htc_priv *priv = hw->priv; 1707 struct ath9k_htc_priv *priv = hw->priv;
1653 1708
1654 mutex_lock(&priv->mutex); 1709 mutex_lock(&priv->mutex);
1710 ath9k_htc_ps_wakeup(priv);
1655 ath9k_hw_settsf64(priv->ah, tsf); 1711 ath9k_hw_settsf64(priv->ah, tsf);
1712 ath9k_htc_ps_restore(priv);
1656 mutex_unlock(&priv->mutex); 1713 mutex_unlock(&priv->mutex);
1657} 1714}
1658 1715
@@ -1660,11 +1717,11 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
1660{ 1717{
1661 struct ath9k_htc_priv *priv = hw->priv; 1718 struct ath9k_htc_priv *priv = hw->priv;
1662 1719
1663 ath9k_htc_ps_wakeup(priv);
1664 mutex_lock(&priv->mutex); 1720 mutex_lock(&priv->mutex);
1721 ath9k_htc_ps_wakeup(priv);
1665 ath9k_hw_reset_tsf(priv->ah); 1722 ath9k_hw_reset_tsf(priv->ah);
1666 mutex_unlock(&priv->mutex);
1667 ath9k_htc_ps_restore(priv); 1723 ath9k_htc_ps_restore(priv);
1724 mutex_unlock(&priv->mutex);
1668} 1725}
1669 1726
1670static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, 1727static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
@@ -1674,8 +1731,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1674 u16 tid, u16 *ssn) 1731 u16 tid, u16 *ssn)
1675{ 1732{
1676 struct ath9k_htc_priv *priv = hw->priv; 1733 struct ath9k_htc_priv *priv = hw->priv;
1677 struct ath9k_htc_aggr_work *work = &priv->aggr_work;
1678 struct ath9k_htc_sta *ista; 1734 struct ath9k_htc_sta *ista;
1735 int ret = 0;
1679 1736
1680 switch (action) { 1737 switch (action) {
1681 case IEEE80211_AMPDU_RX_START: 1738 case IEEE80211_AMPDU_RX_START:
@@ -1683,26 +1740,26 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1683 case IEEE80211_AMPDU_RX_STOP: 1740 case IEEE80211_AMPDU_RX_STOP:
1684 break; 1741 break;
1685 case IEEE80211_AMPDU_TX_START: 1742 case IEEE80211_AMPDU_TX_START:
1743 ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
1744 if (!ret)
1745 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1746 break;
1686 case IEEE80211_AMPDU_TX_STOP: 1747 case IEEE80211_AMPDU_TX_STOP:
1687 if (!(priv->op_flags & OP_TXAGGR)) 1748 ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
1688 return -ENOTSUPP; 1749 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1689 memcpy(work->sta_addr, sta->addr, ETH_ALEN);
1690 work->hw = hw;
1691 work->vif = vif;
1692 work->action = action;
1693 work->tid = tid;
1694 ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
1695 break; 1750 break;
1696 case IEEE80211_AMPDU_TX_OPERATIONAL: 1751 case IEEE80211_AMPDU_TX_OPERATIONAL:
1697 ista = (struct ath9k_htc_sta *) sta->drv_priv; 1752 ista = (struct ath9k_htc_sta *) sta->drv_priv;
1753 spin_lock_bh(&priv->tx_lock);
1698 ista->tid_state[tid] = AGGR_OPERATIONAL; 1754 ista->tid_state[tid] = AGGR_OPERATIONAL;
1755 spin_unlock_bh(&priv->tx_lock);
1699 break; 1756 break;
1700 default: 1757 default:
1701 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL, 1758 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
1702 "Unknown AMPDU action\n"); 1759 "Unknown AMPDU action\n");
1703 } 1760 }
1704 1761
1705 return 0; 1762 return ret;
1706} 1763}
1707 1764
1708static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw) 1765static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
@@ -1722,8 +1779,8 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1722{ 1779{
1723 struct ath9k_htc_priv *priv = hw->priv; 1780 struct ath9k_htc_priv *priv = hw->priv;
1724 1781
1725 ath9k_htc_ps_wakeup(priv);
1726 mutex_lock(&priv->mutex); 1782 mutex_lock(&priv->mutex);
1783 ath9k_htc_ps_wakeup(priv);
1727 spin_lock_bh(&priv->beacon_lock); 1784 spin_lock_bh(&priv->beacon_lock);
1728 priv->op_flags &= ~OP_SCANNING; 1785 priv->op_flags &= ~OP_SCANNING;
1729 spin_unlock_bh(&priv->beacon_lock); 1786 spin_unlock_bh(&priv->beacon_lock);
@@ -1731,8 +1788,8 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1731 if (priv->op_flags & OP_ASSOCIATED) 1788 if (priv->op_flags & OP_ASSOCIATED)
1732 ath9k_htc_beacon_config(priv, priv->vif); 1789 ath9k_htc_beacon_config(priv, priv->vif);
1733 ath_start_ani(priv); 1790 ath_start_ani(priv);
1734 mutex_unlock(&priv->mutex);
1735 ath9k_htc_ps_restore(priv); 1791 ath9k_htc_ps_restore(priv);
1792 mutex_unlock(&priv->mutex);
1736} 1793}
1737 1794
1738static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 1795static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
@@ -1746,8 +1803,10 @@ static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
1746 struct ath9k_htc_priv *priv = hw->priv; 1803 struct ath9k_htc_priv *priv = hw->priv;
1747 1804
1748 mutex_lock(&priv->mutex); 1805 mutex_lock(&priv->mutex);
1806 ath9k_htc_ps_wakeup(priv);
1749 priv->ah->coverage_class = coverage_class; 1807 priv->ah->coverage_class = coverage_class;
1750 ath9k_hw_init_global_settings(priv->ah); 1808 ath9k_hw_init_global_settings(priv->ah);
1809 ath9k_htc_ps_restore(priv);
1751 mutex_unlock(&priv->mutex); 1810 mutex_unlock(&priv->mutex);
1752} 1811}
1753 1812
@@ -1759,7 +1818,8 @@ struct ieee80211_ops ath9k_htc_ops = {
1759 .remove_interface = ath9k_htc_remove_interface, 1818 .remove_interface = ath9k_htc_remove_interface,
1760 .config = ath9k_htc_config, 1819 .config = ath9k_htc_config,
1761 .configure_filter = ath9k_htc_configure_filter, 1820 .configure_filter = ath9k_htc_configure_filter,
1762 .sta_notify = ath9k_htc_sta_notify, 1821 .sta_add = ath9k_htc_sta_add,
1822 .sta_remove = ath9k_htc_sta_remove,
1763 .conf_tx = ath9k_htc_conf_tx, 1823 .conf_tx = ath9k_htc_conf_tx,
1764 .bss_info_changed = ath9k_htc_bss_info_changed, 1824 .bss_info_changed = ath9k_htc_bss_info_changed,
1765 .set_key = ath9k_htc_set_key, 1825 .set_key = ath9k_htc_set_key,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2571b443ac82..bd0b4acc3ece 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -20,19 +20,29 @@
20/* TX */ 20/* TX */
21/******/ 21/******/
22 22
23#define ATH9K_HTC_INIT_TXQ(subtype) do { \
24 qi.tqi_subtype = subtype; \
25 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \
26 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \
27 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \
28 qi.tqi_physCompBuf = 0; \
29 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | \
30 TXQ_FLAG_TXDESCINT_ENABLE; \
31 } while (0)
32
23int get_hw_qnum(u16 queue, int *hwq_map) 33int get_hw_qnum(u16 queue, int *hwq_map)
24{ 34{
25 switch (queue) { 35 switch (queue) {
26 case 0: 36 case 0:
27 return hwq_map[ATH9K_WME_AC_VO]; 37 return hwq_map[WME_AC_VO];
28 case 1: 38 case 1:
29 return hwq_map[ATH9K_WME_AC_VI]; 39 return hwq_map[WME_AC_VI];
30 case 2: 40 case 2:
31 return hwq_map[ATH9K_WME_AC_BE]; 41 return hwq_map[WME_AC_BE];
32 case 3: 42 case 3:
33 return hwq_map[ATH9K_WME_AC_BK]; 43 return hwq_map[WME_AC_BK];
34 default: 44 default:
35 return hwq_map[ATH9K_WME_AC_BE]; 45 return hwq_map[WME_AC_BE];
36 } 46 }
37} 47}
38 48
@@ -71,7 +81,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
71 struct ath9k_htc_vif *avp; 81 struct ath9k_htc_vif *avp;
72 struct ath9k_htc_tx_ctl tx_ctl; 82 struct ath9k_htc_tx_ctl tx_ctl;
73 enum htc_endpoint_id epid; 83 enum htc_endpoint_id epid;
74 u16 qnum, hw_qnum; 84 u16 qnum;
75 __le16 fc; 85 __le16 fc;
76 u8 *tx_fhdr; 86 u8 *tx_fhdr;
77 u8 sta_idx; 87 u8 sta_idx;
@@ -131,20 +141,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
131 memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr)); 141 memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
132 142
133 qnum = skb_get_queue_mapping(skb); 143 qnum = skb_get_queue_mapping(skb);
134 hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
135 144
136 switch (hw_qnum) { 145 switch (qnum) {
137 case 0: 146 case 0:
138 epid = priv->data_be_ep; 147 TX_QSTAT_INC(WME_AC_VO);
148 epid = priv->data_vo_ep;
139 break; 149 break;
140 case 2: 150 case 1:
151 TX_QSTAT_INC(WME_AC_VI);
141 epid = priv->data_vi_ep; 152 epid = priv->data_vi_ep;
142 break; 153 break;
143 case 3: 154 case 2:
144 epid = priv->data_vo_ep; 155 TX_QSTAT_INC(WME_AC_BE);
156 epid = priv->data_be_ep;
145 break; 157 break;
146 case 1: 158 case 3:
147 default: 159 default:
160 TX_QSTAT_INC(WME_AC_BK);
148 epid = priv->data_bk_ep; 161 epid = priv->data_bk_ep;
149 break; 162 break;
150 } 163 }
@@ -174,6 +187,19 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
174 return htc_send(priv->htc, skb, epid, &tx_ctl); 187 return htc_send(priv->htc, skb, epid, &tx_ctl);
175} 188}
176 189
190static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
191 struct ath9k_htc_sta *ista, u8 tid)
192{
193 bool ret = false;
194
195 spin_lock_bh(&priv->tx_lock);
196 if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
197 ret = true;
198 spin_unlock_bh(&priv->tx_lock);
199
200 return ret;
201}
202
177void ath9k_tx_tasklet(unsigned long data) 203void ath9k_tx_tasklet(unsigned long data)
178{ 204{
179 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 205 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
@@ -203,8 +229,7 @@ void ath9k_tx_tasklet(unsigned long data)
203 /* Check if we need to start aggregation */ 229 /* Check if we need to start aggregation */
204 230
205 if (sta && conf_is_ht(&priv->hw->conf) && 231 if (sta && conf_is_ht(&priv->hw->conf) &&
206 (priv->op_flags & OP_TXAGGR) 232 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
207 && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
208 if (ieee80211_is_data_qos(fc)) { 233 if (ieee80211_is_data_qos(fc)) {
209 u8 *qc, tid; 234 u8 *qc, tid;
210 struct ath9k_htc_sta *ista; 235 struct ath9k_htc_sta *ista;
@@ -213,10 +238,11 @@ void ath9k_tx_tasklet(unsigned long data)
213 tid = qc[0] & 0xf; 238 tid = qc[0] & 0xf;
214 ista = (struct ath9k_htc_sta *)sta->drv_priv; 239 ista = (struct ath9k_htc_sta *)sta->drv_priv;
215 240
216 if ((tid < ATH9K_HTC_MAX_TID) && 241 if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
217 ista->tid_state[tid] == AGGR_STOP) {
218 ieee80211_start_tx_ba_session(sta, tid); 242 ieee80211_start_tx_ba_session(sta, tid);
243 spin_lock_bh(&priv->tx_lock);
219 ista->tid_state[tid] = AGGR_PROGRESS; 244 ista->tid_state[tid] = AGGR_PROGRESS;
245 spin_unlock_bh(&priv->tx_lock);
220 } 246 }
221 } 247 }
222 } 248 }
@@ -284,8 +310,7 @@ void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
284 310
285} 311}
286 312
287bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, 313bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
288 enum ath9k_tx_queue_subtype subtype)
289{ 314{
290 struct ath_hw *ah = priv->ah; 315 struct ath_hw *ah = priv->ah;
291 struct ath_common *common = ath9k_hw_common(ah); 316 struct ath_common *common = ath9k_hw_common(ah);
@@ -293,13 +318,7 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
293 int qnum; 318 int qnum;
294 319
295 memset(&qi, 0, sizeof(qi)); 320 memset(&qi, 0, sizeof(qi));
296 321 ATH9K_HTC_INIT_TXQ(subtype);
297 qi.tqi_subtype = subtype;
298 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
299 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
300 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
301 qi.tqi_physCompBuf = 0;
302 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
303 322
304 qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi); 323 qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
305 if (qnum == -1) 324 if (qnum == -1)
@@ -317,6 +336,16 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
317 return true; 336 return true;
318} 337}
319 338
339int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv)
340{
341 struct ath9k_tx_queue_info qi;
342
343 memset(&qi, 0, sizeof(qi));
344 ATH9K_HTC_INIT_TXQ(0);
345
346 return ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_CAB, &qi);
347}
348
320/******/ 349/******/
321/* RX */ 350/* RX */
322/******/ 351/******/
@@ -387,9 +416,6 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
387 /* configure operational mode */ 416 /* configure operational mode */
388 ath9k_hw_setopmode(ah); 417 ath9k_hw_setopmode(ah);
389 418
390 /* Handle any link-level address change. */
391 ath9k_hw_setmac(ah, common->macaddr);
392
393 /* calculate and install multicast filter */ 419 /* calculate and install multicast filter */
394 mfilt[0] = mfilt[1] = ~0; 420 mfilt[0] = mfilt[1] = ~0;
395 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 421 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -399,7 +425,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
399{ 425{
400 ath9k_hw_rxena(priv->ah); 426 ath9k_hw_rxena(priv->ah);
401 ath9k_htc_opmode_init(priv); 427 ath9k_htc_opmode_init(priv);
402 ath9k_hw_startpcureceive(priv->ah); 428 ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
403 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER; 429 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
404} 430}
405 431
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 064397fd738e..705c0f342e1c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -89,7 +89,6 @@ static void htc_process_target_rdy(struct htc_target *target,
89 struct htc_endpoint *endpoint; 89 struct htc_endpoint *endpoint;
90 struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf; 90 struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
91 91
92 target->credits = be16_to_cpu(htc_ready_msg->credits);
93 target->credit_size = be16_to_cpu(htc_ready_msg->credit_size); 92 target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
94 93
95 endpoint = &target->endpoint[ENDPOINT0]; 94 endpoint = &target->endpoint[ENDPOINT0];
@@ -159,7 +158,7 @@ static int htc_config_pipe_credits(struct htc_target *target)
159 158
160 cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID); 159 cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
161 cp_msg->pipe_id = USB_WLAN_TX_PIPE; 160 cp_msg->pipe_id = USB_WLAN_TX_PIPE;
162 cp_msg->credits = 28; 161 cp_msg->credits = target->credits;
163 162
164 target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS; 163 target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
165 164
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 624422a8169e..ffecbadaea4a 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -128,6 +128,17 @@ static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
128 ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf); 128 ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
129} 129}
130 130
131static inline void ath9k_hw_procmibevent(struct ath_hw *ah)
132{
133 ath9k_hw_ops(ah)->ani_proc_mib_event(ah);
134}
135
136static inline void ath9k_hw_ani_monitor(struct ath_hw *ah,
137 struct ath9k_channel *chan)
138{
139 ath9k_hw_ops(ah)->ani_monitor(ah, chan);
140}
141
131/* Private hardware call ops */ 142/* Private hardware call ops */
132 143
133/* PHY ops */ 144/* PHY ops */
@@ -253,12 +264,6 @@ static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
253 ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray); 264 ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
254} 265}
255 266
256static inline void ath9k_hw_loadnf(struct ath_hw *ah,
257 struct ath9k_channel *chan)
258{
259 ath9k_hw_private_ops(ah)->loadnf(ah, chan);
260}
261
262static inline bool ath9k_hw_init_cal(struct ath_hw *ah, 267static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
263 struct ath9k_channel *chan) 268 struct ath9k_channel *chan)
264{ 269{
@@ -277,4 +282,9 @@ static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
277 return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType); 282 return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
278} 283}
279 284
285static inline void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
286{
287 ath9k_hw_private_ops(ah)->ani_reset(ah, is_scanning);
288}
289
280#endif /* ATH9K_HW_OPS_H */ 290#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c33f17dbe6f1..8d291ccf5c88 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,11 +23,6 @@
23#include "rc.h" 23#include "rc.h"
24#include "ar9003_mac.h" 24#include "ar9003_mac.h"
25 25
26#define ATH9K_CLOCK_RATE_CCK 22
27#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
28#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
29#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
30
31static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 26static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
32 27
33MODULE_AUTHOR("Atheros Communications"); 28MODULE_AUTHOR("Atheros Communications");
@@ -80,6 +75,15 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
80 ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah); 75 ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
81} 76}
82 77
78static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
79{
80 /* You will not have this callback if using the old ANI */
81 if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
82 return;
83
84 ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
85}
86
83/********************/ 87/********************/
84/* Helper Functions */ 88/* Helper Functions */
85/********************/ 89/********************/
@@ -371,13 +375,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
371 ah->config.ofdm_trig_high = 500; 375 ah->config.ofdm_trig_high = 500;
372 ah->config.cck_trig_high = 200; 376 ah->config.cck_trig_high = 200;
373 ah->config.cck_trig_low = 100; 377 ah->config.cck_trig_low = 100;
374 378 ah->config.enable_ani = true;
375 /*
376 * For now ANI is disabled for AR9003, it is still
377 * being tested.
378 */
379 if (!AR_SREV_9300_20_OR_LATER(ah))
380 ah->config.enable_ani = 1;
381 379
382 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 380 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
383 ah->config.spurchans[i][0] = AR_NO_SPUR; 381 ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -390,12 +388,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
390 ah->config.ht_enable = 0; 388 ah->config.ht_enable = 0;
391 389
392 ah->config.rx_intr_mitigation = true; 390 ah->config.rx_intr_mitigation = true;
393 391 ah->config.pcieSerDesWrite = true;
394 /*
395 * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
396 * used by AR9003, but it is showing reliability issues.
397 * It will take a while to fix so this is currently disabled.
398 */
399 392
400 /* 393 /*
401 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 394 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -433,7 +426,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
433 ah->ah_flags = AH_USE_EEPROM; 426 ah->ah_flags = AH_USE_EEPROM;
434 427
435 ah->atim_window = 0; 428 ah->atim_window = 0;
436 ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE; 429 ah->sta_id1_defaults =
430 AR_STA_ID1_CRPT_MIC_ENABLE |
431 AR_STA_ID1_MCAST_KSRCH;
437 ah->beacon_interval = 100; 432 ah->beacon_interval = 100;
438 ah->enable_32kHz_clock = DONT_USE_32KHZ; 433 ah->enable_32kHz_clock = DONT_USE_32KHZ;
439 ah->slottime = (u32) -1; 434 ah->slottime = (u32) -1;
@@ -537,7 +532,8 @@ static int __ath9k_hw_init(struct ath_hw *ah)
537 532
538 if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) { 533 if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
539 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || 534 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
540 (AR_SREV_9280(ah) && !ah->is_pciexpress)) { 535 ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
536 !ah->is_pciexpress)) {
541 ah->config.serialize_regmode = 537 ah->config.serialize_regmode =
542 SER_REG_MODE_ON; 538 SER_REG_MODE_ON;
543 } else { 539 } else {
@@ -571,28 +567,19 @@ static int __ath9k_hw_init(struct ath_hw *ah)
571 ah->ani_function = ATH9K_ANI_ALL; 567 ah->ani_function = ATH9K_ANI_ALL;
572 if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah)) 568 if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
573 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 569 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
570 if (!AR_SREV_9300_20_OR_LATER(ah))
571 ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
574 572
575 ath9k_hw_init_mode_regs(ah); 573 ath9k_hw_init_mode_regs(ah);
576 574
577 /* 575 /*
578 * Configire PCIE after Ini init. SERDES values now come from ini file 576 * Read back AR_WA into a permanent copy and set bits 14 and 17.
579 * This enables PCIe low power mode. 577 * We need to do this to avoid RMW of this register. We cannot
578 * read the reg when chip is asleep.
580 */ 579 */
581 if (AR_SREV_9300_20_OR_LATER(ah)) { 580 ah->WARegVal = REG_READ(ah, AR_WA);
582 u32 regval; 581 ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
583 unsigned int i; 582 AR_WA_ASPM_TIMER_BASED_DISABLE);
584
585 /* Set Bits 16 and 17 in the AR_WA register. */
586 regval = REG_READ(ah, AR_WA);
587 regval |= 0x00030000;
588 REG_WRITE(ah, AR_WA, regval);
589
590 for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
591 REG_WRITE(ah,
592 INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
593 INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
594 }
595 }
596 583
597 if (ah->is_pciexpress) 584 if (ah->is_pciexpress)
598 ath9k_hw_configpcipowersave(ah, 0, 0); 585 ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -623,10 +610,8 @@ static int __ath9k_hw_init(struct ath_hw *ah)
623 else 610 else
624 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); 611 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
625 612
626 if (AR_SREV_9300_20_OR_LATER(ah))
627 ar9003_hw_set_nf_limits(ah);
628
629 ath9k_init_nfcal_hist_buffer(ah); 613 ath9k_init_nfcal_hist_buffer(ah);
614 ah->bb_watchdog_timeout_ms = 25;
630 615
631 common->state = ATH_HW_INITIALIZED; 616 common->state = ATH_HW_INITIALIZED;
632 617
@@ -1012,6 +997,11 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1012 997
1013 ENABLE_REGWRITE_BUFFER(ah); 998 ENABLE_REGWRITE_BUFFER(ah);
1014 999
1000 if (AR_SREV_9300_20_OR_LATER(ah)) {
1001 REG_WRITE(ah, AR_WA, ah->WARegVal);
1002 udelay(10);
1003 }
1004
1015 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1005 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1016 AR_RTC_FORCE_WAKE_ON_INT); 1006 AR_RTC_FORCE_WAKE_ON_INT);
1017 1007
@@ -1066,6 +1056,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1066{ 1056{
1067 ENABLE_REGWRITE_BUFFER(ah); 1057 ENABLE_REGWRITE_BUFFER(ah);
1068 1058
1059 if (AR_SREV_9300_20_OR_LATER(ah)) {
1060 REG_WRITE(ah, AR_WA, ah->WARegVal);
1061 udelay(10);
1062 }
1063
1069 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1064 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1070 AR_RTC_FORCE_WAKE_ON_INT); 1065 AR_RTC_FORCE_WAKE_ON_INT);
1071 1066
@@ -1073,6 +1068,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1073 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1068 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1074 1069
1075 REG_WRITE(ah, AR_RTC_RESET, 0); 1070 REG_WRITE(ah, AR_RTC_RESET, 0);
1071 udelay(2);
1076 1072
1077 REGWRITE_BUFFER_FLUSH(ah); 1073 REGWRITE_BUFFER_FLUSH(ah);
1078 DISABLE_REGWRITE_BUFFER(ah); 1074 DISABLE_REGWRITE_BUFFER(ah);
@@ -1102,6 +1098,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1102 1098
1103static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) 1099static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1104{ 1100{
1101 if (AR_SREV_9300_20_OR_LATER(ah)) {
1102 REG_WRITE(ah, AR_WA, ah->WARegVal);
1103 udelay(10);
1104 }
1105
1105 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 1106 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1106 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); 1107 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1107 1108
@@ -1232,9 +1233,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1232 1233
1233 if (!ah->chip_fullsleep) { 1234 if (!ah->chip_fullsleep) {
1234 ath9k_hw_abortpcurecv(ah); 1235 ath9k_hw_abortpcurecv(ah);
1235 if (!ath9k_hw_stopdmarecv(ah)) 1236 if (!ath9k_hw_stopdmarecv(ah)) {
1236 ath_print(common, ATH_DBG_XMIT, 1237 ath_print(common, ATH_DBG_XMIT,
1237 "Failed to stop receive dma\n"); 1238 "Failed to stop receive dma\n");
1239 bChannelChange = false;
1240 }
1238 } 1241 }
1239 1242
1240 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1243 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
@@ -1265,7 +1268,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1265 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; 1268 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
1266 1269
1267 /* For chips on which RTC reset is done, save TSF before it gets cleared */ 1270 /* For chips on which RTC reset is done, save TSF before it gets cleared */
1268 if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) 1271 if (AR_SREV_9100(ah) ||
1272 (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
1269 tsf = ath9k_hw_gettsf64(ah); 1273 tsf = ath9k_hw_gettsf64(ah);
1270 1274
1271 saveLedState = REG_READ(ah, AR_CFG_LED) & 1275 saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -1297,16 +1301,30 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1297 } 1301 }
1298 1302
1299 /* Restore TSF */ 1303 /* Restore TSF */
1300 if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) 1304 if (tsf)
1301 ath9k_hw_settsf64(ah, tsf); 1305 ath9k_hw_settsf64(ah, tsf);
1302 1306
1303 if (AR_SREV_9280_10_OR_LATER(ah)) 1307 if (AR_SREV_9280_10_OR_LATER(ah))
1304 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); 1308 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
1305 1309
1310 if (!AR_SREV_9300_20_OR_LATER(ah))
1311 ar9002_hw_enable_async_fifo(ah);
1312
1306 r = ath9k_hw_process_ini(ah, chan); 1313 r = ath9k_hw_process_ini(ah, chan);
1307 if (r) 1314 if (r)
1308 return r; 1315 return r;
1309 1316
1317 /*
1318 * Some AR91xx SoC devices frequently fail to accept TSF writes
1319 * right after the chip reset. When that happens, write a new
1320 * value after the initvals have been applied, with an offset
1321 * based on measured time difference
1322 */
1323 if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
1324 tsf += 1500;
1325 ath9k_hw_settsf64(ah, tsf);
1326 }
1327
1310 /* Setup MFP options for CCMP */ 1328 /* Setup MFP options for CCMP */
1311 if (AR_SREV_9280_20_OR_LATER(ah)) { 1329 if (AR_SREV_9280_20_OR_LATER(ah)) {
1312 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt 1330 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
@@ -1367,6 +1385,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1367 ath9k_hw_resettxqueue(ah, i); 1385 ath9k_hw_resettxqueue(ah, i);
1368 1386
1369 ath9k_hw_init_interrupt_masks(ah, ah->opmode); 1387 ath9k_hw_init_interrupt_masks(ah, ah->opmode);
1388 ath9k_hw_ani_cache_ini_regs(ah);
1370 ath9k_hw_init_qos(ah); 1389 ath9k_hw_init_qos(ah);
1371 1390
1372 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1391 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
@@ -1375,7 +1394,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1375 ath9k_hw_init_global_settings(ah); 1394 ath9k_hw_init_global_settings(ah);
1376 1395
1377 if (!AR_SREV_9300_20_OR_LATER(ah)) { 1396 if (!AR_SREV_9300_20_OR_LATER(ah)) {
1378 ar9002_hw_enable_async_fifo(ah); 1397 ar9002_hw_update_async_fifo(ah);
1379 ar9002_hw_enable_wep_aggregation(ah); 1398 ar9002_hw_enable_wep_aggregation(ah);
1380 } 1399 }
1381 1400
@@ -1426,9 +1445,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1426 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); 1445 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
1427 } 1446 }
1428 } else { 1447 } else {
1429 /* Configure AR9271 target WLAN */ 1448 if (common->bus_ops->ath_bus_type == ATH_USB) {
1430 if (AR_SREV_9271(ah)) 1449 /* Configure AR9271 target WLAN */
1431 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB); 1450 if (AR_SREV_9271(ah))
1451 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
1452 else
1453 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1454 }
1432#ifdef __BIG_ENDIAN 1455#ifdef __BIG_ENDIAN
1433 else 1456 else
1434 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 1457 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1441,6 +1464,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1441 if (AR_SREV_9300_20_OR_LATER(ah)) { 1464 if (AR_SREV_9300_20_OR_LATER(ah)) {
1442 ath9k_hw_loadnf(ah, curchan); 1465 ath9k_hw_loadnf(ah, curchan);
1443 ath9k_hw_start_nfcal(ah); 1466 ath9k_hw_start_nfcal(ah);
1467 ar9003_hw_bb_watchdog_config(ah);
1444 } 1468 }
1445 1469
1446 return 0; 1470 return 0;
@@ -1486,9 +1510,10 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
1486} 1510}
1487EXPORT_SYMBOL(ath9k_hw_keyreset); 1511EXPORT_SYMBOL(ath9k_hw_keyreset);
1488 1512
1489bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) 1513static bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
1490{ 1514{
1491 u32 macHi, macLo; 1515 u32 macHi, macLo;
1516 u32 unicast_flag = AR_KEYTABLE_VALID;
1492 1517
1493 if (entry >= ah->caps.keycache_size) { 1518 if (entry >= ah->caps.keycache_size) {
1494 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 1519 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
@@ -1497,6 +1522,16 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
1497 } 1522 }
1498 1523
1499 if (mac != NULL) { 1524 if (mac != NULL) {
1525 /*
1526 * AR_KEYTABLE_VALID indicates that the address is a unicast
1527 * address, which must match the transmitter address for
1528 * decrypting frames.
1529 * Not setting this bit allows the hardware to use the key
1530 * for multicast frame decryption.
1531 */
1532 if (mac[0] & 0x01)
1533 unicast_flag = 0;
1534
1500 macHi = (mac[5] << 8) | mac[4]; 1535 macHi = (mac[5] << 8) | mac[4];
1501 macLo = (mac[3] << 24) | 1536 macLo = (mac[3] << 24) |
1502 (mac[2] << 16) | 1537 (mac[2] << 16) |
@@ -1509,11 +1544,10 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
1509 macLo = macHi = 0; 1544 macLo = macHi = 0;
1510 } 1545 }
1511 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); 1546 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
1512 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID); 1547 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
1513 1548
1514 return true; 1549 return true;
1515} 1550}
1516EXPORT_SYMBOL(ath9k_hw_keysetmac);
1517 1551
1518bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, 1552bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
1519 const struct ath9k_keyval *k, 1553 const struct ath9k_keyval *k,
@@ -1714,17 +1748,6 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
1714} 1748}
1715EXPORT_SYMBOL(ath9k_hw_set_keycache_entry); 1749EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
1716 1750
1717bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
1718{
1719 if (entry < ah->caps.keycache_size) {
1720 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
1721 if (val & AR_KEYTABLE_VALID)
1722 return true;
1723 }
1724 return false;
1725}
1726EXPORT_SYMBOL(ath9k_hw_keyisvalid);
1727
1728/******************************/ 1751/******************************/
1729/* Power Management (Chipset) */ 1752/* Power Management (Chipset) */
1730/******************************/ 1753/******************************/
@@ -1751,6 +1774,11 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
1751 REG_CLR_BIT(ah, (AR_RTC_RESET), 1774 REG_CLR_BIT(ah, (AR_RTC_RESET),
1752 AR_RTC_RESET_EN); 1775 AR_RTC_RESET_EN);
1753 } 1776 }
1777
1778 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
1779 if (AR_SREV_9300_20_OR_LATER(ah))
1780 REG_WRITE(ah, AR_WA,
1781 ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
1754} 1782}
1755 1783
1756/* 1784/*
@@ -1777,6 +1805,10 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
1777 AR_RTC_FORCE_WAKE_EN); 1805 AR_RTC_FORCE_WAKE_EN);
1778 } 1806 }
1779 } 1807 }
1808
1809 /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
1810 if (AR_SREV_9300_20_OR_LATER(ah))
1811 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
1780} 1812}
1781 1813
1782static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) 1814static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
@@ -1784,6 +1816,12 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
1784 u32 val; 1816 u32 val;
1785 int i; 1817 int i;
1786 1818
1819 /* Set Bits 14 and 17 of AR_WA before powering on the chip. */
1820 if (AR_SREV_9300_20_OR_LATER(ah)) {
1821 REG_WRITE(ah, AR_WA, ah->WARegVal);
1822 udelay(10);
1823 }
1824
1787 if (setChip) { 1825 if (setChip) {
1788 if ((REG_READ(ah, AR_RTC_STATUS) & 1826 if ((REG_READ(ah, AR_RTC_STATUS) &
1789 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { 1827 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
@@ -2138,6 +2176,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2138 2176
2139 if (AR_SREV_9271(ah)) 2177 if (AR_SREV_9271(ah))
2140 pCap->num_gpio_pins = AR9271_NUM_GPIO; 2178 pCap->num_gpio_pins = AR9271_NUM_GPIO;
2179 else if (AR_DEVID_7010(ah))
2180 pCap->num_gpio_pins = AR7010_NUM_GPIO;
2141 else if (AR_SREV_9285_10_OR_LATER(ah)) 2181 else if (AR_SREV_9285_10_OR_LATER(ah))
2142 pCap->num_gpio_pins = AR9285_NUM_GPIO; 2182 pCap->num_gpio_pins = AR9285_NUM_GPIO;
2143 else if (AR_SREV_9280_10_OR_LATER(ah)) 2183 else if (AR_SREV_9280_10_OR_LATER(ah))
@@ -2165,7 +2205,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2165 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; 2205 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
2166 } 2206 }
2167#endif 2207#endif
2168 if (AR_SREV_9271(ah)) 2208 if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah))
2169 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP; 2209 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
2170 else 2210 else
2171 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; 2211 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
@@ -2220,6 +2260,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2220 pCap->rx_status_len = sizeof(struct ar9003_rxs); 2260 pCap->rx_status_len = sizeof(struct ar9003_rxs);
2221 pCap->tx_desc_len = sizeof(struct ar9003_txc); 2261 pCap->tx_desc_len = sizeof(struct ar9003_txc);
2222 pCap->txs_len = sizeof(struct ar9003_txs); 2262 pCap->txs_len = sizeof(struct ar9003_txs);
2263 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
2264 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
2223 } else { 2265 } else {
2224 pCap->tx_desc_len = sizeof(struct ath_desc); 2266 pCap->tx_desc_len = sizeof(struct ath_desc);
2225 if (AR_SREV_9280_20(ah) && 2267 if (AR_SREV_9280_20(ah) &&
@@ -2232,100 +2274,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2232 if (AR_SREV_9300_20_OR_LATER(ah)) 2274 if (AR_SREV_9300_20_OR_LATER(ah))
2233 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED; 2275 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
2234 2276
2235 return 0; 2277 if (AR_SREV_9287_10_OR_LATER(ah) || AR_SREV_9271(ah))
2236} 2278 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
2237
2238bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
2239 u32 capability, u32 *result)
2240{
2241 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
2242 switch (type) {
2243 case ATH9K_CAP_CIPHER:
2244 switch (capability) {
2245 case ATH9K_CIPHER_AES_CCM:
2246 case ATH9K_CIPHER_AES_OCB:
2247 case ATH9K_CIPHER_TKIP:
2248 case ATH9K_CIPHER_WEP:
2249 case ATH9K_CIPHER_MIC:
2250 case ATH9K_CIPHER_CLR:
2251 return true;
2252 default:
2253 return false;
2254 }
2255 case ATH9K_CAP_TKIP_MIC:
2256 switch (capability) {
2257 case 0:
2258 return true;
2259 case 1:
2260 return (ah->sta_id1_defaults &
2261 AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
2262 false;
2263 }
2264 case ATH9K_CAP_TKIP_SPLIT:
2265 return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
2266 false : true;
2267 case ATH9K_CAP_MCAST_KEYSRCH:
2268 switch (capability) {
2269 case 0:
2270 return true;
2271 case 1:
2272 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
2273 return false;
2274 } else {
2275 return (ah->sta_id1_defaults &
2276 AR_STA_ID1_MCAST_KSRCH) ? true :
2277 false;
2278 }
2279 }
2280 return false;
2281 case ATH9K_CAP_TXPOW:
2282 switch (capability) {
2283 case 0:
2284 return 0;
2285 case 1:
2286 *result = regulatory->power_limit;
2287 return 0;
2288 case 2:
2289 *result = regulatory->max_power_level;
2290 return 0;
2291 case 3:
2292 *result = regulatory->tp_scale;
2293 return 0;
2294 }
2295 return false;
2296 case ATH9K_CAP_DS:
2297 return (AR_SREV_9280_20_OR_LATER(ah) &&
2298 (ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1))
2299 ? false : true;
2300 default:
2301 return false;
2302 }
2303}
2304EXPORT_SYMBOL(ath9k_hw_getcapability);
2305 2279
2306bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, 2280 return 0;
2307 u32 capability, u32 setting, int *status)
2308{
2309 switch (type) {
2310 case ATH9K_CAP_TKIP_MIC:
2311 if (setting)
2312 ah->sta_id1_defaults |=
2313 AR_STA_ID1_CRPT_MIC_ENABLE;
2314 else
2315 ah->sta_id1_defaults &=
2316 ~AR_STA_ID1_CRPT_MIC_ENABLE;
2317 return true;
2318 case ATH9K_CAP_MCAST_KEYSRCH:
2319 if (setting)
2320 ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
2321 else
2322 ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH;
2323 return true;
2324 default:
2325 return false;
2326 }
2327} 2281}
2328EXPORT_SYMBOL(ath9k_hw_setcapability);
2329 2282
2330/****************************/ 2283/****************************/
2331/* GPIO / RFKILL / Antennae */ 2284/* GPIO / RFKILL / Antennae */
@@ -2365,8 +2318,15 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
2365 2318
2366 BUG_ON(gpio >= ah->caps.num_gpio_pins); 2319 BUG_ON(gpio >= ah->caps.num_gpio_pins);
2367 2320
2368 gpio_shift = gpio << 1; 2321 if (AR_DEVID_7010(ah)) {
2322 gpio_shift = gpio;
2323 REG_RMW(ah, AR7010_GPIO_OE,
2324 (AR7010_GPIO_OE_AS_INPUT << gpio_shift),
2325 (AR7010_GPIO_OE_MASK << gpio_shift));
2326 return;
2327 }
2369 2328
2329 gpio_shift = gpio << 1;
2370 REG_RMW(ah, 2330 REG_RMW(ah,
2371 AR_GPIO_OE_OUT, 2331 AR_GPIO_OE_OUT,
2372 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), 2332 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
@@ -2382,7 +2342,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
2382 if (gpio >= ah->caps.num_gpio_pins) 2342 if (gpio >= ah->caps.num_gpio_pins)
2383 return 0xffffffff; 2343 return 0xffffffff;
2384 2344
2385 if (AR_SREV_9300_20_OR_LATER(ah)) 2345 if (AR_DEVID_7010(ah)) {
2346 u32 val;
2347 val = REG_READ(ah, AR7010_GPIO_IN);
2348 return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
2349 } else if (AR_SREV_9300_20_OR_LATER(ah))
2386 return MS_REG_READ(AR9300, gpio) != 0; 2350 return MS_REG_READ(AR9300, gpio) != 0;
2387 else if (AR_SREV_9271(ah)) 2351 else if (AR_SREV_9271(ah))
2388 return MS_REG_READ(AR9271, gpio) != 0; 2352 return MS_REG_READ(AR9271, gpio) != 0;
@@ -2402,10 +2366,16 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
2402{ 2366{
2403 u32 gpio_shift; 2367 u32 gpio_shift;
2404 2368
2405 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); 2369 if (AR_DEVID_7010(ah)) {
2370 gpio_shift = gpio;
2371 REG_RMW(ah, AR7010_GPIO_OE,
2372 (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
2373 (AR7010_GPIO_OE_MASK << gpio_shift));
2374 return;
2375 }
2406 2376
2377 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2407 gpio_shift = 2 * gpio; 2378 gpio_shift = 2 * gpio;
2408
2409 REG_RMW(ah, 2379 REG_RMW(ah,
2410 AR_GPIO_OE_OUT, 2380 AR_GPIO_OE_OUT,
2411 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), 2381 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
@@ -2415,6 +2385,13 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
2415 2385
2416void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) 2386void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
2417{ 2387{
2388 if (AR_DEVID_7010(ah)) {
2389 val = val ? 0 : 1;
2390 REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
2391 AR_GPIO_BIT(gpio));
2392 return;
2393 }
2394
2418 if (AR_SREV_9271(ah)) 2395 if (AR_SREV_9271(ah))
2419 val = ~val; 2396 val = ~val;
2420 2397
@@ -2520,12 +2497,6 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
2520} 2497}
2521EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); 2498EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
2522 2499
2523void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
2524{
2525 memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
2526}
2527EXPORT_SYMBOL(ath9k_hw_setmac);
2528
2529void ath9k_hw_setopmode(struct ath_hw *ah) 2500void ath9k_hw_setopmode(struct ath_hw *ah)
2530{ 2501{
2531 ath9k_hw_set_operating_mode(ah, ah->opmode); 2502 ath9k_hw_set_operating_mode(ah, ah->opmode);
@@ -2598,21 +2569,6 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
2598} 2569}
2599EXPORT_SYMBOL(ath9k_hw_set_tsfadjust); 2570EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
2600 2571
2601/*
2602 * Extend 15-bit time stamp from rx descriptor to
2603 * a full 64-bit TSF using the current h/w TSF.
2604*/
2605u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
2606{
2607 u64 tsf;
2608
2609 tsf = ath9k_hw_gettsf64(ah);
2610 if ((tsf & 0x7fff) < rstamp)
2611 tsf -= 0x8000;
2612 return (tsf & ~0x7fff) | rstamp;
2613}
2614EXPORT_SYMBOL(ath9k_hw_extend_tsf);
2615
2616void ath9k_hw_set11nmac2040(struct ath_hw *ah) 2572void ath9k_hw_set11nmac2040(struct ath_hw *ah)
2617{ 2573{
2618 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 2574 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 77245dff5993..2d30efc0b94f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -158,6 +158,9 @@
158#define ATH9K_HW_RX_HP_QDEPTH 16 158#define ATH9K_HW_RX_HP_QDEPTH 16
159#define ATH9K_HW_RX_LP_QDEPTH 128 159#define ATH9K_HW_RX_LP_QDEPTH 128
160 160
161#define PAPRD_GAIN_TABLE_ENTRIES 32
162#define PAPRD_TABLE_SZ 24
163
161enum ath_ini_subsys { 164enum ath_ini_subsys {
162 ATH_INI_PRE = 0, 165 ATH_INI_PRE = 0,
163 ATH_INI_CORE, 166 ATH_INI_CORE,
@@ -199,15 +202,8 @@ enum ath9k_hw_caps {
199 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18), 202 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
200 ATH9K_HW_CAP_LDPC = BIT(19), 203 ATH9K_HW_CAP_LDPC = BIT(19),
201 ATH9K_HW_CAP_FASTCLOCK = BIT(20), 204 ATH9K_HW_CAP_FASTCLOCK = BIT(20),
202}; 205 ATH9K_HW_CAP_SGI_20 = BIT(21),
203 206 ATH9K_HW_CAP_PAPRD = BIT(22),
204enum ath9k_capability_type {
205 ATH9K_CAP_CIPHER = 0,
206 ATH9K_CAP_TKIP_MIC,
207 ATH9K_CAP_TKIP_SPLIT,
208 ATH9K_CAP_TXPOW,
209 ATH9K_CAP_MCAST_KEYSRCH,
210 ATH9K_CAP_DS
211}; 207};
212 208
213struct ath9k_hw_capabilities { 209struct ath9k_hw_capabilities {
@@ -237,8 +233,9 @@ struct ath9k_ops_config {
237 int sw_beacon_response_time; 233 int sw_beacon_response_time;
238 int additional_swba_backoff; 234 int additional_swba_backoff;
239 int ack_6mb; 235 int ack_6mb;
240 int cwm_ignore_extcca; 236 u32 cwm_ignore_extcca;
241 u8 pcie_powersave_enable; 237 u8 pcie_powersave_enable;
238 bool pcieSerDesWrite;
242 u8 pcie_clock_req; 239 u8 pcie_clock_req;
243 u32 pcie_waen; 240 u32 pcie_waen;
244 u8 analog_shiftreg; 241 u8 analog_shiftreg;
@@ -262,10 +259,10 @@ struct ath9k_ops_config {
262#define AR_BASE_FREQ_5GHZ 4900 259#define AR_BASE_FREQ_5GHZ 4900
263#define AR_SPUR_FEEQ_BOUND_HT40 19 260#define AR_SPUR_FEEQ_BOUND_HT40 19
264#define AR_SPUR_FEEQ_BOUND_HT20 10 261#define AR_SPUR_FEEQ_BOUND_HT20 10
265 bool tx_iq_calibration; /* Only available for >= AR9003 */
266 int spurmode; 262 int spurmode;
267 u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; 263 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
268 u8 max_txtrig_level; 264 u8 max_txtrig_level;
265 u16 ani_poll_interval; /* ANI poll interval in ms */
269}; 266};
270 267
271enum ath9k_int { 268enum ath9k_int {
@@ -279,6 +276,7 @@ enum ath9k_int {
279 ATH9K_INT_TX = 0x00000040, 276 ATH9K_INT_TX = 0x00000040,
280 ATH9K_INT_TXDESC = 0x00000080, 277 ATH9K_INT_TXDESC = 0x00000080,
281 ATH9K_INT_TIM_TIMER = 0x00000100, 278 ATH9K_INT_TIM_TIMER = 0x00000100,
279 ATH9K_INT_BB_WATCHDOG = 0x00000400,
282 ATH9K_INT_TXURN = 0x00000800, 280 ATH9K_INT_TXURN = 0x00000800,
283 ATH9K_INT_MIB = 0x00001000, 281 ATH9K_INT_MIB = 0x00001000,
284 ATH9K_INT_RXPHY = 0x00004000, 282 ATH9K_INT_RXPHY = 0x00004000,
@@ -358,6 +356,9 @@ struct ath9k_channel {
358 int8_t iCoff; 356 int8_t iCoff;
359 int8_t qCoff; 357 int8_t qCoff;
360 int16_t rawNoiseFloor; 358 int16_t rawNoiseFloor;
359 bool paprd_done;
360 u16 small_signal_gain[AR9300_MAX_CHAINS];
361 u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
361}; 362};
362 363
363#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ 364#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
@@ -459,7 +460,7 @@ struct ath9k_hw_version {
459#define AR_GENTMR_BIT(_index) (1 << (_index)) 460#define AR_GENTMR_BIT(_index) (1 << (_index))
460 461
461/* 462/*
462 * Using de Bruijin sequence to to look up 1's index in a 32 bit number 463 * Using de Bruijin sequence to look up 1's index in a 32 bit number
463 * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001 464 * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
464 */ 465 */
465#define debruijn32 0x077CB531U 466#define debruijn32 0x077CB531U
@@ -509,7 +510,17 @@ struct ath_gen_timer_table {
509 * AR_RTC_PLL_CONTROL for a given channel 510 * AR_RTC_PLL_CONTROL for a given channel
510 * @setup_calibration: set up calibration 511 * @setup_calibration: set up calibration
511 * @iscal_supported: used to query if a type of calibration is supported 512 * @iscal_supported: used to query if a type of calibration is supported
512 * @loadnf: load noise floor read from each chain on the CCA registers 513 *
514 * @ani_reset: reset ANI parameters to default values
515 * @ani_lower_immunity: lower the noise immunity level. The level controls
516 * the power-based packet detection on hardware. If a power jump is
517 * detected the adapter takes it as an indication that a packet has
518 * arrived. The level ranges from 0-5. Each level corresponds to a
519 * few dB more of noise immunity. If you have a strong time-varying
520 * interference that is causing false detections (OFDM timing errors or
521 * CCK timing errors) the level can be increased.
522 * @ani_cache_ini_regs: cache the values for ANI from the initial
523 * register settings through the register initialization.
513 */ 524 */
514struct ath_hw_private_ops { 525struct ath_hw_private_ops {
515 /* Calibration ops */ 526 /* Calibration ops */
@@ -552,7 +563,11 @@ struct ath_hw_private_ops {
552 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd, 563 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
553 int param); 564 int param);
554 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]); 565 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
555 void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan); 566
567 /* ANI */
568 void (*ani_reset)(struct ath_hw *ah, bool is_scanning);
569 void (*ani_lower_immunity)(struct ath_hw *ah);
570 void (*ani_cache_ini_regs)(struct ath_hw *ah);
556}; 571};
557 572
558/** 573/**
@@ -563,6 +578,11 @@ struct ath_hw_private_ops {
563 * 578 *
564 * @config_pci_powersave: 579 * @config_pci_powersave:
565 * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC 580 * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
581 *
582 * @ani_proc_mib_event: process MIB events, this would happen upon specific ANI
583 * thresholds being reached or having overflowed.
584 * @ani_monitor: called periodically by the core driver to collect
585 * MIB stats and adjust ANI if specific thresholds have been reached.
566 */ 586 */
567struct ath_hw_ops { 587struct ath_hw_ops {
568 void (*config_pci_powersave)(struct ath_hw *ah, 588 void (*config_pci_powersave)(struct ath_hw *ah,
@@ -603,6 +623,15 @@ struct ath_hw_ops {
603 u32 burstDuration); 623 u32 burstDuration);
604 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds, 624 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
605 u32 vmf); 625 u32 vmf);
626
627 void (*ani_proc_mib_event)(struct ath_hw *ah);
628 void (*ani_monitor)(struct ath_hw *ah, struct ath9k_channel *chan);
629};
630
631struct ath_nf_limits {
632 s16 max;
633 s16 min;
634 s16 nominal;
606}; 635};
607 636
608struct ath_hw { 637struct ath_hw {
@@ -626,10 +655,10 @@ struct ath_hw {
626 bool is_pciexpress; 655 bool is_pciexpress;
627 bool need_an_top2_fixup; 656 bool need_an_top2_fixup;
628 u16 tx_trig_level; 657 u16 tx_trig_level;
629 s16 nf_2g_max; 658
630 s16 nf_2g_min; 659 u32 nf_regs[6];
631 s16 nf_5g_max; 660 struct ath_nf_limits nf_2g;
632 s16 nf_5g_min; 661 struct ath_nf_limits nf_5g;
633 u16 rfsilent; 662 u16 rfsilent;
634 u32 rfkill_gpio; 663 u32 rfkill_gpio;
635 u32 rfkill_polarity; 664 u32 rfkill_polarity;
@@ -789,6 +818,18 @@ struct ath_hw {
789 u32 ts_paddr_end; 818 u32 ts_paddr_end;
790 u16 ts_tail; 819 u16 ts_tail;
791 u8 ts_size; 820 u8 ts_size;
821
822 u32 bb_watchdog_last_status;
823 u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
824
825 u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES];
826 u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES];
827 /*
828 * Store the permanent value of Reg 0x4004in WARegVal
829 * so we dont have to R/M/W. We should not be reading
830 * this register when in sleep states.
831 */
832 u32 WARegVal;
792}; 833};
793 834
794static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 835static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -811,6 +852,12 @@ static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
811 return &ah->ops; 852 return &ah->ops;
812} 853}
813 854
855static inline int sign_extend(int val, const int nbits)
856{
857 int order = BIT(nbits-1);
858 return (val ^ order) - order;
859}
860
814/* Initialization, Detach, Reset */ 861/* Initialization, Detach, Reset */
815const char *ath9k_hw_probe(u16 vendorid, u16 devid); 862const char *ath9k_hw_probe(u16 vendorid, u16 devid);
816void ath9k_hw_deinit(struct ath_hw *ah); 863void ath9k_hw_deinit(struct ath_hw *ah);
@@ -818,19 +865,13 @@ int ath9k_hw_init(struct ath_hw *ah);
818int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 865int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
819 bool bChannelChange); 866 bool bChannelChange);
820int ath9k_hw_fill_cap_info(struct ath_hw *ah); 867int ath9k_hw_fill_cap_info(struct ath_hw *ah);
821bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
822 u32 capability, u32 *result);
823bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
824 u32 capability, u32 setting, int *status);
825u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); 868u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
826 869
827/* Key Cache Management */ 870/* Key Cache Management */
828bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry); 871bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
829bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac);
830bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, 872bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
831 const struct ath9k_keyval *k, 873 const struct ath9k_keyval *k,
832 const u8 *mac); 874 const u8 *mac);
833bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry);
834 875
835/* GPIO / RFKILL / Antennae */ 876/* GPIO / RFKILL / Antennae */
836void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio); 877void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
@@ -856,7 +897,6 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
856bool ath9k_hw_phy_disable(struct ath_hw *ah); 897bool ath9k_hw_phy_disable(struct ath_hw *ah);
857bool ath9k_hw_disable(struct ath_hw *ah); 898bool ath9k_hw_disable(struct ath_hw *ah);
858void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit); 899void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
859void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
860void ath9k_hw_setopmode(struct ath_hw *ah); 900void ath9k_hw_setopmode(struct ath_hw *ah);
861void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); 901void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
862void ath9k_hw_setbssidmask(struct ath_hw *ah); 902void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -865,7 +905,6 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah);
865void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); 905void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
866void ath9k_hw_reset_tsf(struct ath_hw *ah); 906void ath9k_hw_reset_tsf(struct ath_hw *ah);
867void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 907void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
868u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
869void ath9k_hw_init_global_settings(struct ath_hw *ah); 908void ath9k_hw_init_global_settings(struct ath_hw *ah);
870void ath9k_hw_set11nmac2040(struct ath_hw *ah); 909void ath9k_hw_set11nmac2040(struct ath_hw *ah);
871void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 910void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
@@ -907,13 +946,25 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
907void ar9002_hw_cck_chan14_spread(struct ath_hw *ah); 946void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
908int ar9002_hw_rf_claim(struct ath_hw *ah); 947int ar9002_hw_rf_claim(struct ath_hw *ah);
909void ar9002_hw_enable_async_fifo(struct ath_hw *ah); 948void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
949void ar9002_hw_update_async_fifo(struct ath_hw *ah);
910void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah); 950void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
911 951
912/* 952/*
913 * Code specifric to AR9003, we stuff these here to avoid callbacks 953 * Code specific to AR9003, we stuff these here to avoid callbacks
914 * for older families 954 * for older families
915 */ 955 */
916void ar9003_hw_set_nf_limits(struct ath_hw *ah); 956void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
957void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
958void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
959void ar9003_paprd_enable(struct ath_hw *ah, bool val);
960void ar9003_paprd_populate_single_table(struct ath_hw *ah,
961 struct ath9k_channel *chan, int chain);
962int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
963 int chain);
964int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
965int ar9003_paprd_init_table(struct ath_hw *ah);
966bool ar9003_paprd_is_done(struct ath_hw *ah);
967void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
917 968
918/* Hardware family op attach helpers */ 969/* Hardware family op attach helpers */
919void ar5008_hw_attach_phy_ops(struct ath_hw *ah); 970void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
@@ -926,8 +977,24 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
926void ar9002_hw_attach_ops(struct ath_hw *ah); 977void ar9002_hw_attach_ops(struct ath_hw *ah);
927void ar9003_hw_attach_ops(struct ath_hw *ah); 978void ar9003_hw_attach_ops(struct ath_hw *ah);
928 979
980/*
981 * ANI work can be shared between all families but a next
982 * generation implementation of ANI will be used only for AR9003 only
983 * for now as the other families still need to be tested with the same
984 * next generation ANI. Feel free to start testing it though for the
985 * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani.
986 */
987extern int modparam_force_new_ani;
988void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah);
989void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah);
990
929#define ATH_PCIE_CAP_LINK_CTRL 0x70 991#define ATH_PCIE_CAP_LINK_CTRL 0x70
930#define ATH_PCIE_CAP_LINK_L0S 1 992#define ATH_PCIE_CAP_LINK_L0S 1
931#define ATH_PCIE_CAP_LINK_L1 2 993#define ATH_PCIE_CAP_LINK_L1 2
932 994
995#define ATH9K_CLOCK_RATE_CCK 22
996#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
997#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
998#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
999
933#endif 1000#endif
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d457cb3bd772..243c1775f343 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -33,6 +33,10 @@ int modparam_nohwcrypt;
33module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 33module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 35
36int led_blink = 1;
37module_param_named(blink, led_blink, int, 0444);
38MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39
36/* We use the hw_value as an index into our private channel structure */ 40/* We use the hw_value as an index into our private channel structure */
37 41
38#define CHAN2G(_freq, _idx) { \ 42#define CHAN2G(_freq, _idx) { \
@@ -175,18 +179,6 @@ static const struct ath_ops ath9k_common_ops = {
175 .write = ath9k_iowrite32, 179 .write = ath9k_iowrite32,
176}; 180};
177 181
178static int count_streams(unsigned int chainmask, int max)
179{
180 int streams = 0;
181
182 do {
183 if (++streams == max)
184 break;
185 } while ((chainmask = chainmask & (chainmask - 1)));
186
187 return streams;
188}
189
190/**************************/ 182/**************************/
191/* Initialization */ 183/* Initialization */
192/**************************/ 184/**************************/
@@ -208,6 +200,9 @@ static void setup_ht_cap(struct ath_softc *sc,
208 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC) 200 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
209 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; 201 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
210 202
203 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
204 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
205
211 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 206 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
212 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 207 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
213 208
@@ -224,8 +219,8 @@ static void setup_ht_cap(struct ath_softc *sc,
224 219
225 /* set up supported mcs set */ 220 /* set up supported mcs set */
226 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 221 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
227 tx_streams = count_streams(common->tx_chainmask, max_streams); 222 tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
228 rx_streams = count_streams(common->rx_chainmask, max_streams); 223 rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
229 224
230 ath_print(common, ATH_DBG_CONFIG, 225 ath_print(common, ATH_DBG_CONFIG,
231 "TX streams %d, RX streams: %d\n", 226 "TX streams %d, RX streams: %d\n",
@@ -388,36 +383,14 @@ static void ath9k_init_crypto(struct ath_softc *sc)
388 for (i = 0; i < common->keymax; i++) 383 for (i = 0; i < common->keymax; i++)
389 ath9k_hw_keyreset(sc->sc_ah, (u16) i); 384 ath9k_hw_keyreset(sc->sc_ah, (u16) i);
390 385
391 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
392 ATH9K_CIPHER_TKIP, NULL)) {
393 /*
394 * Whether we should enable h/w TKIP MIC.
395 * XXX: if we don't support WME TKIP MIC, then we wouldn't
396 * report WMM capable, so it's always safe to turn on
397 * TKIP MIC in this case.
398 */
399 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
400 }
401
402 /* 386 /*
403 * Check whether the separate key cache entries 387 * Check whether the separate key cache entries
404 * are required to handle both tx+rx MIC keys. 388 * are required to handle both tx+rx MIC keys.
405 * With split mic keys the number of stations is limited 389 * With split mic keys the number of stations is limited
406 * to 27 otherwise 59. 390 * to 27 otherwise 59.
407 */ 391 */
408 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER, 392 if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA))
409 ATH9K_CIPHER_TKIP, NULL)
410 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
411 ATH9K_CIPHER_MIC, NULL)
412 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
413 0, NULL))
414 common->splitmic = 1; 393 common->splitmic = 1;
415
416 /* turn on mcast key search if possible */
417 if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
418 (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
419 1, 1, NULL);
420
421} 394}
422 395
423static int ath9k_init_btcoex(struct ath_softc *sc) 396static int ath9k_init_btcoex(struct ath_softc *sc)
@@ -435,7 +408,7 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
435 r = ath_init_btcoex_timer(sc); 408 r = ath_init_btcoex_timer(sc);
436 if (r) 409 if (r)
437 return -1; 410 return -1;
438 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); 411 qnum = sc->tx.hwq_map[WME_AC_BE];
439 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum); 412 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
440 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 413 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
441 break; 414 break;
@@ -472,23 +445,23 @@ static int ath9k_init_queues(struct ath_softc *sc)
472 sc->config.cabqReadytime = ATH_CABQ_READY_TIME; 445 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
473 ath_cabq_update(sc); 446 ath_cabq_update(sc);
474 447
475 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) { 448 if (!ath_tx_setup(sc, WME_AC_BK)) {
476 ath_print(common, ATH_DBG_FATAL, 449 ath_print(common, ATH_DBG_FATAL,
477 "Unable to setup xmit queue for BK traffic\n"); 450 "Unable to setup xmit queue for BK traffic\n");
478 goto err; 451 goto err;
479 } 452 }
480 453
481 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) { 454 if (!ath_tx_setup(sc, WME_AC_BE)) {
482 ath_print(common, ATH_DBG_FATAL, 455 ath_print(common, ATH_DBG_FATAL,
483 "Unable to setup xmit queue for BE traffic\n"); 456 "Unable to setup xmit queue for BE traffic\n");
484 goto err; 457 goto err;
485 } 458 }
486 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) { 459 if (!ath_tx_setup(sc, WME_AC_VI)) {
487 ath_print(common, ATH_DBG_FATAL, 460 ath_print(common, ATH_DBG_FATAL,
488 "Unable to setup xmit queue for VI traffic\n"); 461 "Unable to setup xmit queue for VI traffic\n");
489 goto err; 462 goto err;
490 } 463 }
491 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) { 464 if (!ath_tx_setup(sc, WME_AC_VO)) {
492 ath_print(common, ATH_DBG_FATAL, 465 ath_print(common, ATH_DBG_FATAL,
493 "Unable to setup xmit queue for VO traffic\n"); 466 "Unable to setup xmit queue for VO traffic\n");
494 goto err; 467 goto err;
@@ -745,6 +718,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
745 goto error_world; 718 goto error_world;
746 } 719 }
747 720
721 INIT_WORK(&sc->hw_check_work, ath_hw_check);
722 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
748 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); 723 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
749 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); 724 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
750 sc->wiphy_scheduler_int = msecs_to_jiffies(500); 725 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
@@ -812,12 +787,12 @@ void ath9k_deinit_device(struct ath_softc *sc)
812 ieee80211_unregister_hw(aphy->hw); 787 ieee80211_unregister_hw(aphy->hw);
813 ieee80211_free_hw(aphy->hw); 788 ieee80211_free_hw(aphy->hw);
814 } 789 }
815 kfree(sc->sec_wiphy);
816 790
817 ieee80211_unregister_hw(hw); 791 ieee80211_unregister_hw(hw);
818 ath_rx_cleanup(sc); 792 ath_rx_cleanup(sc);
819 ath_tx_cleanup(sc); 793 ath_tx_cleanup(sc);
820 ath9k_deinit_softc(sc); 794 ath9k_deinit_softc(sc);
795 kfree(sc->sec_wiphy);
821} 796}
822 797
823void ath_descdma_cleanup(struct ath_softc *sc, 798void ath_descdma_cleanup(struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 0e425cb4bbb1..e955bb9d98cb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h"
18 19
19static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, 20static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
20 struct ath9k_tx_queue_info *qi) 21 struct ath9k_tx_queue_info *qi)
@@ -554,8 +555,13 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
554 REGWRITE_BUFFER_FLUSH(ah); 555 REGWRITE_BUFFER_FLUSH(ah);
555 DISABLE_REGWRITE_BUFFER(ah); 556 DISABLE_REGWRITE_BUFFER(ah);
556 557
557 /* cwmin and cwmax should be 0 for beacon queue */ 558 /*
558 if (AR_SREV_9300_20_OR_LATER(ah)) { 559 * cwmin and cwmax should be 0 for beacon queue
560 * but not for IBSS as we would create an imbalance
561 * on beaconing fairness for participating nodes.
562 */
563 if (AR_SREV_9300_20_OR_LATER(ah) &&
564 ah->opmode != NL80211_IFTYPE_ADHOC) {
559 REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN) 565 REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
560 | SM(0, AR_D_LCL_IFS_CWMAX) 566 | SM(0, AR_D_LCL_IFS_CWMAX)
561 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); 567 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
@@ -756,11 +762,11 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
756} 762}
757EXPORT_SYMBOL(ath9k_hw_putrxbuf); 763EXPORT_SYMBOL(ath9k_hw_putrxbuf);
758 764
759void ath9k_hw_startpcureceive(struct ath_hw *ah) 765void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
760{ 766{
761 ath9k_enable_mib_counters(ah); 767 ath9k_enable_mib_counters(ah);
762 768
763 ath9k_ani_reset(ah); 769 ath9k_ani_reset(ah, is_scanning);
764 770
765 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 771 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
766} 772}
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 00f3e0c7528a..2633896d3998 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -485,6 +485,9 @@ struct ar5416_desc {
485#define AR_TxRSSICombined 0xff000000 485#define AR_TxRSSICombined 0xff000000
486#define AR_TxRSSICombined_S 24 486#define AR_TxRSSICombined_S 24
487 487
488#define AR_TxTid 0xf0000000
489#define AR_TxTid_S 28
490
488#define AR_TxEVM0 ds_txstatus5 491#define AR_TxEVM0 ds_txstatus5
489#define AR_TxEVM1 ds_txstatus6 492#define AR_TxEVM1 ds_txstatus6
490#define AR_TxEVM2 ds_txstatus7 493#define AR_TxEVM2 ds_txstatus7
@@ -577,13 +580,8 @@ enum ath9k_tx_queue {
577 580
578#define ATH9K_NUM_TX_QUEUES 10 581#define ATH9K_NUM_TX_QUEUES 10
579 582
580enum ath9k_tx_queue_subtype { 583/* Used as a queue subtype instead of a WMM AC */
581 ATH9K_WME_AC_BK = 0, 584#define ATH9K_WME_UPSD 4
582 ATH9K_WME_AC_BE,
583 ATH9K_WME_AC_VI,
584 ATH9K_WME_AC_VO,
585 ATH9K_WME_UPSD
586};
587 585
588enum ath9k_tx_queue_flags { 586enum ath9k_tx_queue_flags {
589 TXQ_FLAG_TXOKINT_ENABLE = 0x0001, 587 TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
@@ -617,7 +615,7 @@ enum ath9k_pkt_type {
617struct ath9k_tx_queue_info { 615struct ath9k_tx_queue_info {
618 u32 tqi_ver; 616 u32 tqi_ver;
619 enum ath9k_tx_queue tqi_type; 617 enum ath9k_tx_queue tqi_type;
620 enum ath9k_tx_queue_subtype tqi_subtype; 618 int tqi_subtype;
621 enum ath9k_tx_queue_flags tqi_qflags; 619 enum ath9k_tx_queue_flags tqi_qflags;
622 u32 tqi_priority; 620 u32 tqi_priority;
623 u32 tqi_aifs; 621 u32 tqi_aifs;
@@ -715,7 +713,7 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
715 u32 size, u32 flags); 713 u32 size, u32 flags);
716bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); 714bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
717void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 715void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
718void ath9k_hw_startpcureceive(struct ath_hw *ah); 716void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
719void ath9k_hw_stoppcurecv(struct ath_hw *ah); 717void ath9k_hw_stoppcurecv(struct ath_hw *ah);
720void ath9k_hw_abortpcurecv(struct ath_hw *ah); 718void ath9k_hw_abortpcurecv(struct ath_hw *ah);
721bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 719bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1e2a68ea9355..0429dda0961f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -51,13 +51,11 @@ static void ath_cache_conf_rate(struct ath_softc *sc,
51static void ath_update_txpow(struct ath_softc *sc) 51static void ath_update_txpow(struct ath_softc *sc)
52{ 52{
53 struct ath_hw *ah = sc->sc_ah; 53 struct ath_hw *ah = sc->sc_ah;
54 u32 txpow;
55 54
56 if (sc->curtxpow != sc->config.txpowlimit) { 55 if (sc->curtxpow != sc->config.txpowlimit) {
57 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit); 56 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
58 /* read back in case value is clamped */ 57 /* read back in case value is clamped */
59 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); 58 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
60 sc->curtxpow = txpow;
61 } 59 }
62} 60}
63 61
@@ -232,6 +230,114 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
232 return r; 230 return r;
233} 231}
234 232
233static void ath_paprd_activate(struct ath_softc *sc)
234{
235 struct ath_hw *ah = sc->sc_ah;
236 int chain;
237
238 if (!ah->curchan->paprd_done)
239 return;
240
241 ath9k_ps_wakeup(sc);
242 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
243 if (!(ah->caps.tx_chainmask & BIT(chain)))
244 continue;
245
246 ar9003_paprd_populate_single_table(ah, ah->curchan, chain);
247 }
248
249 ar9003_paprd_enable(ah, true);
250 ath9k_ps_restore(sc);
251}
252
253void ath_paprd_calibrate(struct work_struct *work)
254{
255 struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
256 struct ieee80211_hw *hw = sc->hw;
257 struct ath_hw *ah = sc->sc_ah;
258 struct ieee80211_hdr *hdr;
259 struct sk_buff *skb = NULL;
260 struct ieee80211_tx_info *tx_info;
261 int band = hw->conf.channel->band;
262 struct ieee80211_supported_band *sband = &sc->sbands[band];
263 struct ath_tx_control txctl;
264 int qnum, ftype;
265 int chain_ok = 0;
266 int chain;
267 int len = 1800;
268 int time_left;
269 int i;
270
271 skb = alloc_skb(len, GFP_KERNEL);
272 if (!skb)
273 return;
274
275 tx_info = IEEE80211_SKB_CB(skb);
276
277 skb_put(skb, len);
278 memset(skb->data, 0, len);
279 hdr = (struct ieee80211_hdr *)skb->data;
280 ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
281 hdr->frame_control = cpu_to_le16(ftype);
282 hdr->duration_id = cpu_to_le16(10);
283 memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
284 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
285 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
286
287 memset(&txctl, 0, sizeof(txctl));
288 qnum = sc->tx.hwq_map[WME_AC_BE];
289 txctl.txq = &sc->tx.txq[qnum];
290
291 ath9k_ps_wakeup(sc);
292 ar9003_paprd_init_table(ah);
293 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
294 if (!(ah->caps.tx_chainmask & BIT(chain)))
295 continue;
296
297 chain_ok = 0;
298 memset(tx_info, 0, sizeof(*tx_info));
299 tx_info->band = band;
300
301 for (i = 0; i < 4; i++) {
302 tx_info->control.rates[i].idx = sband->n_bitrates - 1;
303 tx_info->control.rates[i].count = 6;
304 }
305
306 init_completion(&sc->paprd_complete);
307 ar9003_paprd_setup_gain_table(ah, chain);
308 txctl.paprd = BIT(chain);
309 if (ath_tx_start(hw, skb, &txctl) != 0)
310 break;
311
312 time_left = wait_for_completion_timeout(&sc->paprd_complete,
313 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
314 if (!time_left) {
315 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
316 "Timeout waiting for paprd training on "
317 "TX chain %d\n",
318 chain);
319 goto fail_paprd;
320 }
321
322 if (!ar9003_paprd_is_done(ah))
323 break;
324
325 if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0)
326 break;
327
328 chain_ok = 1;
329 }
330 kfree_skb(skb);
331
332 if (chain_ok) {
333 ah->curchan->paprd_done = true;
334 ath_paprd_activate(sc);
335 }
336
337fail_paprd:
338 ath9k_ps_restore(sc);
339}
340
235/* 341/*
236 * This routine performs the periodic noise floor calibration function 342 * This routine performs the periodic noise floor calibration function
237 * that is used to adjust and optimize the chip performance. This 343 * that is used to adjust and optimize the chip performance. This
@@ -285,7 +391,8 @@ void ath_ani_calibrate(unsigned long data)
285 } 391 }
286 392
287 /* Verify whether we must check ANI */ 393 /* Verify whether we must check ANI */
288 if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { 394 if ((timestamp - common->ani.checkani_timer) >=
395 ah->config.ani_poll_interval) {
289 aniflag = true; 396 aniflag = true;
290 common->ani.checkani_timer = timestamp; 397 common->ani.checkani_timer = timestamp;
291 } 398 }
@@ -326,15 +433,24 @@ set_timer:
326 */ 433 */
327 cal_interval = ATH_LONG_CALINTERVAL; 434 cal_interval = ATH_LONG_CALINTERVAL;
328 if (sc->sc_ah->config.enable_ani) 435 if (sc->sc_ah->config.enable_ani)
329 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); 436 cal_interval = min(cal_interval,
437 (u32)ah->config.ani_poll_interval);
330 if (!common->ani.caldone) 438 if (!common->ani.caldone)
331 cal_interval = min(cal_interval, (u32)short_cal_interval); 439 cal_interval = min(cal_interval, (u32)short_cal_interval);
332 440
333 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 441 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
442 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) &&
443 !(sc->sc_flags & SC_OP_SCANNING)) {
444 if (!sc->sc_ah->curchan->paprd_done)
445 ieee80211_queue_work(sc->hw, &sc->paprd_work);
446 else
447 ath_paprd_activate(sc);
448 }
334} 449}
335 450
336static void ath_start_ani(struct ath_common *common) 451static void ath_start_ani(struct ath_common *common)
337{ 452{
453 struct ath_hw *ah = common->ah;
338 unsigned long timestamp = jiffies_to_msecs(jiffies); 454 unsigned long timestamp = jiffies_to_msecs(jiffies);
339 struct ath_softc *sc = (struct ath_softc *) common->priv; 455 struct ath_softc *sc = (struct ath_softc *) common->priv;
340 456
@@ -346,7 +462,8 @@ static void ath_start_ani(struct ath_common *common)
346 common->ani.checkani_timer = timestamp; 462 common->ani.checkani_timer = timestamp;
347 463
348 mod_timer(&common->ani.timer, 464 mod_timer(&common->ani.timer,
349 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 465 jiffies +
466 msecs_to_jiffies((u32)ah->config.ani_poll_interval));
350} 467}
351 468
352/* 469/*
@@ -398,6 +515,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
398 ath_tx_node_cleanup(sc, an); 515 ath_tx_node_cleanup(sc, an);
399} 516}
400 517
518void ath_hw_check(struct work_struct *work)
519{
520 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
521 int i;
522
523 ath9k_ps_wakeup(sc);
524
525 for (i = 0; i < 3; i++) {
526 if (ath9k_hw_check_alive(sc->sc_ah))
527 goto out;
528
529 msleep(1);
530 }
531 ath_reset(sc, false);
532
533out:
534 ath9k_ps_restore(sc);
535}
536
401void ath9k_tasklet(unsigned long data) 537void ath9k_tasklet(unsigned long data)
402{ 538{
403 struct ath_softc *sc = (struct ath_softc *)data; 539 struct ath_softc *sc = (struct ath_softc *)data;
@@ -409,13 +545,15 @@ void ath9k_tasklet(unsigned long data)
409 545
410 ath9k_ps_wakeup(sc); 546 ath9k_ps_wakeup(sc);
411 547
412 if ((status & ATH9K_INT_FATAL) || 548 if (status & ATH9K_INT_FATAL) {
413 !ath9k_hw_check_alive(ah)) {
414 ath_reset(sc, false); 549 ath_reset(sc, false);
415 ath9k_ps_restore(sc); 550 ath9k_ps_restore(sc);
416 return; 551 return;
417 } 552 }
418 553
554 if (!ath9k_hw_check_alive(ah))
555 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
556
419 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 557 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
420 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL | 558 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
421 ATH9K_INT_RXORN); 559 ATH9K_INT_RXORN);
@@ -524,6 +662,12 @@ irqreturn_t ath_isr(int irq, void *dev)
524 !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))) 662 !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
525 goto chip_reset; 663 goto chip_reset;
526 664
665 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
666 (status & ATH9K_INT_BB_WATCHDOG)) {
667 ar9003_hw_bb_watchdog_dbg_info(ah);
668 goto chip_reset;
669 }
670
527 if (status & ATH9K_INT_SWBA) 671 if (status & ATH9K_INT_SWBA)
528 tasklet_schedule(&sc->bcon_tasklet); 672 tasklet_schedule(&sc->bcon_tasklet);
529 673
@@ -619,234 +763,6 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
619 return chanmode; 763 return chanmode;
620} 764}
621 765
622static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
623 struct ath9k_keyval *hk, const u8 *addr,
624 bool authenticator)
625{
626 struct ath_hw *ah = common->ah;
627 const u8 *key_rxmic;
628 const u8 *key_txmic;
629
630 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
631 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
632
633 if (addr == NULL) {
634 /*
635 * Group key installation - only two key cache entries are used
636 * regardless of splitmic capability since group key is only
637 * used either for TX or RX.
638 */
639 if (authenticator) {
640 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
641 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
642 } else {
643 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
644 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
645 }
646 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
647 }
648 if (!common->splitmic) {
649 /* TX and RX keys share the same key cache entry. */
650 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
651 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
652 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
653 }
654
655 /* Separate key cache entries for TX and RX */
656
657 /* TX key goes at first index, RX key at +32. */
658 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
659 if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
660 /* TX MIC entry failed. No need to proceed further */
661 ath_print(common, ATH_DBG_FATAL,
662 "Setting TX MIC Key Failed\n");
663 return 0;
664 }
665
666 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
667 /* XXX delete tx key on failure? */
668 return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
669}
670
671static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
672{
673 int i;
674
675 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
676 if (test_bit(i, common->keymap) ||
677 test_bit(i + 64, common->keymap))
678 continue; /* At least one part of TKIP key allocated */
679 if (common->splitmic &&
680 (test_bit(i + 32, common->keymap) ||
681 test_bit(i + 64 + 32, common->keymap)))
682 continue; /* At least one part of TKIP key allocated */
683
684 /* Found a free slot for a TKIP key */
685 return i;
686 }
687 return -1;
688}
689
690static int ath_reserve_key_cache_slot(struct ath_common *common)
691{
692 int i;
693
694 /* First, try to find slots that would not be available for TKIP. */
695 if (common->splitmic) {
696 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
697 if (!test_bit(i, common->keymap) &&
698 (test_bit(i + 32, common->keymap) ||
699 test_bit(i + 64, common->keymap) ||
700 test_bit(i + 64 + 32, common->keymap)))
701 return i;
702 if (!test_bit(i + 32, common->keymap) &&
703 (test_bit(i, common->keymap) ||
704 test_bit(i + 64, common->keymap) ||
705 test_bit(i + 64 + 32, common->keymap)))
706 return i + 32;
707 if (!test_bit(i + 64, common->keymap) &&
708 (test_bit(i , common->keymap) ||
709 test_bit(i + 32, common->keymap) ||
710 test_bit(i + 64 + 32, common->keymap)))
711 return i + 64;
712 if (!test_bit(i + 64 + 32, common->keymap) &&
713 (test_bit(i, common->keymap) ||
714 test_bit(i + 32, common->keymap) ||
715 test_bit(i + 64, common->keymap)))
716 return i + 64 + 32;
717 }
718 } else {
719 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
720 if (!test_bit(i, common->keymap) &&
721 test_bit(i + 64, common->keymap))
722 return i;
723 if (test_bit(i, common->keymap) &&
724 !test_bit(i + 64, common->keymap))
725 return i + 64;
726 }
727 }
728
729 /* No partially used TKIP slots, pick any available slot */
730 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
731 /* Do not allow slots that could be needed for TKIP group keys
732 * to be used. This limitation could be removed if we know that
733 * TKIP will not be used. */
734 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
735 continue;
736 if (common->splitmic) {
737 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
738 continue;
739 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
740 continue;
741 }
742
743 if (!test_bit(i, common->keymap))
744 return i; /* Found a free slot for a key */
745 }
746
747 /* No free slot found */
748 return -1;
749}
750
751static int ath_key_config(struct ath_common *common,
752 struct ieee80211_vif *vif,
753 struct ieee80211_sta *sta,
754 struct ieee80211_key_conf *key)
755{
756 struct ath_hw *ah = common->ah;
757 struct ath9k_keyval hk;
758 const u8 *mac = NULL;
759 int ret = 0;
760 int idx;
761
762 memset(&hk, 0, sizeof(hk));
763
764 switch (key->alg) {
765 case ALG_WEP:
766 hk.kv_type = ATH9K_CIPHER_WEP;
767 break;
768 case ALG_TKIP:
769 hk.kv_type = ATH9K_CIPHER_TKIP;
770 break;
771 case ALG_CCMP:
772 hk.kv_type = ATH9K_CIPHER_AES_CCM;
773 break;
774 default:
775 return -EOPNOTSUPP;
776 }
777
778 hk.kv_len = key->keylen;
779 memcpy(hk.kv_val, key->key, key->keylen);
780
781 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
782 /* For now, use the default keys for broadcast keys. This may
783 * need to change with virtual interfaces. */
784 idx = key->keyidx;
785 } else if (key->keyidx) {
786 if (WARN_ON(!sta))
787 return -EOPNOTSUPP;
788 mac = sta->addr;
789
790 if (vif->type != NL80211_IFTYPE_AP) {
791 /* Only keyidx 0 should be used with unicast key, but
792 * allow this for client mode for now. */
793 idx = key->keyidx;
794 } else
795 return -EIO;
796 } else {
797 if (WARN_ON(!sta))
798 return -EOPNOTSUPP;
799 mac = sta->addr;
800
801 if (key->alg == ALG_TKIP)
802 idx = ath_reserve_key_cache_slot_tkip(common);
803 else
804 idx = ath_reserve_key_cache_slot(common);
805 if (idx < 0)
806 return -ENOSPC; /* no free key cache entries */
807 }
808
809 if (key->alg == ALG_TKIP)
810 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
811 vif->type == NL80211_IFTYPE_AP);
812 else
813 ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
814
815 if (!ret)
816 return -EIO;
817
818 set_bit(idx, common->keymap);
819 if (key->alg == ALG_TKIP) {
820 set_bit(idx + 64, common->keymap);
821 if (common->splitmic) {
822 set_bit(idx + 32, common->keymap);
823 set_bit(idx + 64 + 32, common->keymap);
824 }
825 }
826
827 return idx;
828}
829
830static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
831{
832 struct ath_hw *ah = common->ah;
833
834 ath9k_hw_keyreset(ah, key->hw_key_idx);
835 if (key->hw_key_idx < IEEE80211_WEP_NKID)
836 return;
837
838 clear_bit(key->hw_key_idx, common->keymap);
839 if (key->alg != ALG_TKIP)
840 return;
841
842 clear_bit(key->hw_key_idx + 64, common->keymap);
843 if (common->splitmic) {
844 ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
845 clear_bit(key->hw_key_idx + 32, common->keymap);
846 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
847 }
848}
849
850static void ath9k_bss_assoc_info(struct ath_softc *sc, 766static void ath9k_bss_assoc_info(struct ath_softc *sc,
851 struct ieee80211_vif *vif, 767 struct ieee80211_vif *vif,
852 struct ieee80211_bss_conf *bss_conf) 768 struct ieee80211_bss_conf *bss_conf)
@@ -941,9 +857,14 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
941 ath9k_ps_wakeup(sc); 857 ath9k_ps_wakeup(sc);
942 ieee80211_stop_queues(hw); 858 ieee80211_stop_queues(hw);
943 859
944 /* Disable LED */ 860 /*
945 ath9k_hw_set_gpio(ah, ah->led_pin, 1); 861 * Keep the LED on when the radio is disabled
946 ath9k_hw_cfg_gpio_input(ah, ah->led_pin); 862 * during idle unassociated state.
863 */
864 if (!sc->ps_idle) {
865 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
866 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
867 }
947 868
948 /* Disable interrupts */ 869 /* Disable interrupts */
949 ath9k_hw_set_interrupts(ah, 0); 870 ath9k_hw_set_interrupts(ah, 0);
@@ -1032,25 +953,25 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1032 return r; 953 return r;
1033} 954}
1034 955
1035int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) 956static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1036{ 957{
1037 int qnum; 958 int qnum;
1038 959
1039 switch (queue) { 960 switch (queue) {
1040 case 0: 961 case 0:
1041 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO]; 962 qnum = sc->tx.hwq_map[WME_AC_VO];
1042 break; 963 break;
1043 case 1: 964 case 1:
1044 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI]; 965 qnum = sc->tx.hwq_map[WME_AC_VI];
1045 break; 966 break;
1046 case 2: 967 case 2:
1047 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; 968 qnum = sc->tx.hwq_map[WME_AC_BE];
1048 break; 969 break;
1049 case 3: 970 case 3:
1050 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK]; 971 qnum = sc->tx.hwq_map[WME_AC_BK];
1051 break; 972 break;
1052 default: 973 default:
1053 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; 974 qnum = sc->tx.hwq_map[WME_AC_BE];
1054 break; 975 break;
1055 } 976 }
1056 977
@@ -1062,16 +983,16 @@ int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1062 int qnum; 983 int qnum;
1063 984
1064 switch (queue) { 985 switch (queue) {
1065 case ATH9K_WME_AC_VO: 986 case WME_AC_VO:
1066 qnum = 0; 987 qnum = 0;
1067 break; 988 break;
1068 case ATH9K_WME_AC_VI: 989 case WME_AC_VI:
1069 qnum = 1; 990 qnum = 1;
1070 break; 991 break;
1071 case ATH9K_WME_AC_BE: 992 case WME_AC_BE:
1072 qnum = 2; 993 qnum = 2;
1073 break; 994 break;
1074 case ATH9K_WME_AC_BK: 995 case WME_AC_BK:
1075 qnum = 3; 996 qnum = 3;
1076 break; 997 break;
1077 default: 998 default:
@@ -1201,7 +1122,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
1201 ATH9K_INT_GLOBAL; 1122 ATH9K_INT_GLOBAL;
1202 1123
1203 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 1124 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
1204 ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP; 1125 ah->imask |= ATH9K_INT_RXHP |
1126 ATH9K_INT_RXLP |
1127 ATH9K_INT_BB_WATCHDOG;
1205 else 1128 else
1206 ah->imask |= ATH9K_INT_RX; 1129 ah->imask |= ATH9K_INT_RX;
1207 1130
@@ -1251,6 +1174,7 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1251 struct ath_tx_control txctl; 1174 struct ath_tx_control txctl;
1252 int padpos, padsize; 1175 int padpos, padsize;
1253 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1176 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1177 int qnum;
1254 1178
1255 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { 1179 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
1256 ath_print(common, ATH_DBG_XMIT, 1180 ath_print(common, ATH_DBG_XMIT,
@@ -1280,7 +1204,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1280 * completed and if needed, also for RX of buffered frames. 1204 * completed and if needed, also for RX of buffered frames.
1281 */ 1205 */
1282 ath9k_ps_wakeup(sc); 1206 ath9k_ps_wakeup(sc);
1283 ath9k_hw_setrxabort(sc->sc_ah, 0); 1207 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
1208 ath9k_hw_setrxabort(sc->sc_ah, 0);
1284 if (ieee80211_is_pspoll(hdr->frame_control)) { 1209 if (ieee80211_is_pspoll(hdr->frame_control)) {
1285 ath_print(common, ATH_DBG_PS, 1210 ath_print(common, ATH_DBG_PS,
1286 "Sending PS-Poll to pick a buffered frame\n"); 1211 "Sending PS-Poll to pick a buffered frame\n");
@@ -1322,11 +1247,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1322 memmove(skb->data, skb->data + padsize, padpos); 1247 memmove(skb->data, skb->data + padsize, padpos);
1323 } 1248 }
1324 1249
1325 /* Check if a tx queue is available */ 1250 qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
1326 1251 txctl.txq = &sc->tx.txq[qnum];
1327 txctl.txq = ath_test_get_txq(sc, skb);
1328 if (!txctl.txq)
1329 goto exit;
1330 1252
1331 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb); 1253 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
1332 1254
@@ -1347,15 +1269,25 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1347 struct ath_softc *sc = aphy->sc; 1269 struct ath_softc *sc = aphy->sc;
1348 struct ath_hw *ah = sc->sc_ah; 1270 struct ath_hw *ah = sc->sc_ah;
1349 struct ath_common *common = ath9k_hw_common(ah); 1271 struct ath_common *common = ath9k_hw_common(ah);
1272 int i;
1350 1273
1351 mutex_lock(&sc->mutex); 1274 mutex_lock(&sc->mutex);
1352 1275
1353 aphy->state = ATH_WIPHY_INACTIVE; 1276 aphy->state = ATH_WIPHY_INACTIVE;
1354 1277
1355 cancel_delayed_work_sync(&sc->ath_led_blink_work); 1278 if (led_blink)
1279 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1280
1356 cancel_delayed_work_sync(&sc->tx_complete_work); 1281 cancel_delayed_work_sync(&sc->tx_complete_work);
1282 cancel_work_sync(&sc->paprd_work);
1283 cancel_work_sync(&sc->hw_check_work);
1284
1285 for (i = 0; i < sc->num_sec_wiphy; i++) {
1286 if (sc->sec_wiphy[i])
1287 break;
1288 }
1357 1289
1358 if (!sc->num_sec_wiphy) { 1290 if (i == sc->num_sec_wiphy) {
1359 cancel_delayed_work_sync(&sc->wiphy_work); 1291 cancel_delayed_work_sync(&sc->wiphy_work);
1360 cancel_work_sync(&sc->chan_work); 1292 cancel_work_sync(&sc->chan_work);
1361 } 1293 }
@@ -1547,8 +1479,8 @@ void ath9k_enable_ps(struct ath_softc *sc)
1547 ah->imask |= ATH9K_INT_TIM_TIMER; 1479 ah->imask |= ATH9K_INT_TIM_TIMER;
1548 ath9k_hw_set_interrupts(ah, ah->imask); 1480 ath9k_hw_set_interrupts(ah, ah->imask);
1549 } 1481 }
1482 ath9k_hw_setrxabort(ah, 1);
1550 } 1483 }
1551 ath9k_hw_setrxabort(ah, 1);
1552} 1484}
1553 1485
1554static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1486static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1785,7 +1717,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1785 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); 1717 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
1786 1718
1787 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) 1719 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
1788 if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret) 1720 if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
1789 ath_beaconq_config(sc); 1721 ath_beaconq_config(sc);
1790 1722
1791 mutex_unlock(&sc->mutex); 1723 mutex_unlock(&sc->mutex);
@@ -1813,7 +1745,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1813 1745
1814 switch (cmd) { 1746 switch (cmd) {
1815 case SET_KEY: 1747 case SET_KEY:
1816 ret = ath_key_config(common, vif, sta, key); 1748 ret = ath9k_cmn_key_config(common, vif, sta, key);
1817 if (ret >= 0) { 1749 if (ret >= 0) {
1818 key->hw_key_idx = ret; 1750 key->hw_key_idx = ret;
1819 /* push IV and Michael MIC generation to stack */ 1751 /* push IV and Michael MIC generation to stack */
@@ -1826,7 +1758,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1826 } 1758 }
1827 break; 1759 break;
1828 case DISABLE_KEY: 1760 case DISABLE_KEY:
1829 ath_key_delete(common, key); 1761 ath9k_cmn_key_delete(common, key);
1830 break; 1762 break;
1831 default: 1763 default:
1832 ret = -EINVAL; 1764 ret = -EINVAL;
@@ -1999,6 +1931,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1999 struct ath_softc *sc = aphy->sc; 1931 struct ath_softc *sc = aphy->sc;
2000 int ret = 0; 1932 int ret = 0;
2001 1933
1934 local_bh_disable();
1935
2002 switch (action) { 1936 switch (action) {
2003 case IEEE80211_AMPDU_RX_START: 1937 case IEEE80211_AMPDU_RX_START:
2004 if (!(sc->sc_flags & SC_OP_RXAGGR)) 1938 if (!(sc->sc_flags & SC_OP_RXAGGR))
@@ -2028,6 +1962,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2028 "Unknown AMPDU action\n"); 1962 "Unknown AMPDU action\n");
2029 } 1963 }
2030 1964
1965 local_bh_enable();
1966
2031 return ret; 1967 return ret;
2032} 1968}
2033 1969
@@ -2058,11 +1994,12 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2058 1994
2059 mutex_lock(&sc->mutex); 1995 mutex_lock(&sc->mutex);
2060 if (ath9k_wiphy_scanning(sc)) { 1996 if (ath9k_wiphy_scanning(sc)) {
2061 printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the "
2062 "same time\n");
2063 /* 1997 /*
2064 * Do not allow the concurrent scanning state for now. This 1998 * There is a race here in mac80211 but fixing it requires
2065 * could be improved with scanning control moved into ath9k. 1999 * we revisit how we handle the scan complete callback.
2000 * After mac80211 fixes we will not have configured hardware
2001 * to the home channel nor would we have configured the RX
2002 * filter yet.
2066 */ 2003 */
2067 mutex_unlock(&sc->mutex); 2004 mutex_unlock(&sc->mutex);
2068 return; 2005 return;
@@ -2072,10 +2009,16 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2072 ath9k_wiphy_pause_all_forced(sc, aphy); 2009 ath9k_wiphy_pause_all_forced(sc, aphy);
2073 sc->sc_flags |= SC_OP_SCANNING; 2010 sc->sc_flags |= SC_OP_SCANNING;
2074 del_timer_sync(&common->ani.timer); 2011 del_timer_sync(&common->ani.timer);
2012 cancel_work_sync(&sc->paprd_work);
2013 cancel_work_sync(&sc->hw_check_work);
2075 cancel_delayed_work_sync(&sc->tx_complete_work); 2014 cancel_delayed_work_sync(&sc->tx_complete_work);
2076 mutex_unlock(&sc->mutex); 2015 mutex_unlock(&sc->mutex);
2077} 2016}
2078 2017
2018/*
2019 * XXX: this requires a revisit after the driver
2020 * scan_complete gets moved to another place/removed in mac80211.
2021 */
2079static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) 2022static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2080{ 2023{
2081 struct ath_wiphy *aphy = hw->priv; 2024 struct ath_wiphy *aphy = hw->priv;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 1ec836cf1c0d..b5b651413e77 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
31 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
31 { 0 } 32 { 0 }
32}; 33};
33 34
@@ -208,11 +209,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
208 } 209 }
209 210
210 ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); 211 ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
211 printk(KERN_INFO 212 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
212 "%s: %s mem=0x%lx, irq=%d\n", 213 hw_name, (unsigned long)mem, pdev->irq);
213 wiphy_name(hw->wiphy),
214 hw_name,
215 (unsigned long)mem, pdev->irq);
216 214
217 return 0; 215 return 0;
218 216
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 8519452c95f1..e49be733d546 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -20,93 +20,145 @@
20#include "ath9k.h" 20#include "ath9k.h"
21 21
22static const struct ath_rate_table ar5416_11na_ratetable = { 22static const struct ath_rate_table ar5416_11na_ratetable = {
23 42, 23 68,
24 8, /* MCS start */ 24 8, /* MCS start */
25 { 25 {
26 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 26 [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
27 5400, 0, 12, 0, 0, 0, 0, 0 }, 27 5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */
28 { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 28 [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
29 7800, 1, 18, 0, 1, 1, 1, 1 }, 29 7800, 1, 18, 0, 1, 1, 1 }, /* 9 Mb */
30 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 30 [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
31 10000, 2, 24, 2, 2, 2, 2, 2 }, 31 10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */
32 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 32 [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
33 13900, 3, 36, 2, 3, 3, 3, 3 }, 33 13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */
34 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 34 [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
35 17300, 4, 48, 4, 4, 4, 4, 4 }, 35 17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */
36 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 36 [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
37 23000, 5, 72, 4, 5, 5, 5, 5 }, 37 23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */
38 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 38 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
39 27400, 6, 96, 4, 6, 6, 6, 6 }, 39 27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */
40 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 40 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
41 29300, 7, 108, 4, 7, 7, 7, 7 }, 41 29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */
42 { VALID_2040, VALID_2040, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */ 42 [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
43 6400, 0, 0, 0, 8, 24, 8, 24 }, 43 6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */
44 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */ 44 [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
45 12700, 1, 1, 2, 9, 25, 9, 25 }, 45 12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */
46 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */ 46 [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
47 18800, 2, 2, 2, 10, 26, 10, 26 }, 47 18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */
48 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */ 48 [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
49 25000, 3, 3, 4, 11, 27, 11, 27 }, 49 25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */
50 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */ 50 [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
51 36700, 4, 4, 4, 12, 28, 12, 28 }, 51 36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */
52 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */ 52 [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
53 48100, 5, 5, 4, 13, 29, 13, 29 }, 53 48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */
54 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */ 54 [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
55 53500, 6, 6, 4, 14, 30, 14, 30 }, 55 53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */
56 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */ 56 [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
57 59000, 7, 7, 4, 15, 31, 15, 32 }, 57 59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */
58 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */ 58 [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
59 12700, 8, 8, 3, 16, 33, 16, 33 }, 59 65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */
60 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */ 60 [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
61 24800, 9, 9, 2, 17, 34, 17, 34 }, 61 12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */
62 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */ 62 [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
63 36600, 10, 10, 2, 18, 35, 18, 35 }, 63 24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */
64 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */ 64 [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
65 48100, 11, 11, 4, 19, 36, 19, 36 }, 65 36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */
66 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */ 66 [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
67 69500, 12, 12, 4, 20, 37, 20, 37 }, 67 48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */
68 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */ 68 [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
69 89500, 13, 13, 4, 21, 38, 21, 38 }, 69 69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */
70 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */ 70 [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
71 98900, 14, 14, 4, 22, 39, 22, 39 }, 71 89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */
72 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */ 72 [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
73 108300, 15, 15, 4, 23, 40, 23, 41 }, 73 98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */
74 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */ 74 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
75 13200, 0, 0, 0, 8, 24, 24, 24 }, 75 108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */
76 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */ 76 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
77 25900, 1, 1, 2, 9, 25, 25, 25 }, 77 120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */
78 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */ 78 [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
79 38600, 2, 2, 2, 10, 26, 26, 26 }, 79 17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */
80 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */ 80 [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
81 49800, 3, 3, 4, 11, 27, 27, 27 }, 81 35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */
82 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */ 82 [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
83 72200, 4, 4, 4, 12, 28, 28, 28 }, 83 52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */
84 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */ 84 [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
85 92900, 5, 5, 4, 13, 29, 29, 29 }, 85 70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */
86 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */ 86 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
87 102700, 6, 6, 4, 14, 30, 30, 30 }, 87 104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */
88 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */ 88 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
89 112000, 7, 7, 4, 15, 31, 32, 32 }, 89 115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/
90 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ 90 [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
91 122000, 7, 7, 4, 15, 31, 32, 32 }, 91 137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */
92 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */ 92 [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
93 25800, 8, 8, 0, 16, 33, 33, 33 }, 93 151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */
94 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */ 94 [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
95 49800, 9, 9, 2, 17, 34, 34, 34 }, 95 152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */
96 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */ 96 [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
97 71900, 10, 10, 2, 18, 35, 35, 35 }, 97 168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/
98 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */ 98 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
99 92500, 11, 11, 4, 19, 36, 36, 36 }, 99 168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */
100 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */ 100 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
101 130300, 12, 12, 4, 20, 37, 37, 37 }, 101 185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */
102 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */ 102 [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
103 162800, 13, 13, 4, 21, 38, 38, 38 }, 103 13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/
104 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */ 104 [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
105 178200, 14, 14, 4, 22, 39, 39, 39 }, 105 25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/
106 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */ 106 [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
107 192100, 15, 15, 4, 23, 40, 41, 41 }, 107 38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/
108 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ 108 [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
109 207000, 15, 15, 4, 23, 40, 41, 41 }, 109 49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */
110 [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
111 72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */
112 [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
113 92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */
114 [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
115 102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/
116 [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
117 112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */
118 [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
119 122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */
120 [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
121 25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */
122 [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
123 49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */
124 [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
125 71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */
126 [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
127 92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */
128 [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
129 130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */
130 [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
131 162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */
132 [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
133 178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */
134 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
135 192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */
136 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
137 207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */
138 [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
139 36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */
140 [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
141 72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */
142 [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
143 108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */
144 [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
145 142000, 19, 19, 4, 59, 59, 59 }, /* 162 Mb */
146 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
147 205100, 20, 20, 4, 60, 61, 61 }, /* 243 Mb */
148 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
149 224700, 20, 20, 4, 60, 61, 61 }, /* 270 Mb */
150 [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
151 263100, 21, 21, 4, 62, 63, 63 }, /* 324 Mb */
152 [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
153 288000, 21, 21, 4, 62, 63, 63 }, /* 360 Mb */
154 [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
155 290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */
156 [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
157 317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */
158 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
159 317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */
160 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
161 346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */
110 }, 162 },
111 50, /* probe interval */ 163 50, /* probe interval */
112 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ 164 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -116,101 +168,153 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
116 * for HT are the 64K max aggregate limit */ 168 * for HT are the 64K max aggregate limit */
117 169
118static const struct ath_rate_table ar5416_11ng_ratetable = { 170static const struct ath_rate_table ar5416_11ng_ratetable = {
119 46, 171 72,
120 12, /* MCS start */ 172 12, /* MCS start */
121 { 173 {
122 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ 174 [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
123 900, 0, 2, 0, 0, 0, 0, 0 }, 175 900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */
124 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ 176 [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
125 1900, 1, 4, 1, 1, 1, 1, 1 }, 177 1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */
126 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ 178 [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
127 4900, 2, 11, 2, 2, 2, 2, 2 }, 179 4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */
128 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ 180 [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
129 8100, 3, 22, 3, 3, 3, 3, 3 }, 181 8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */
130 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 182 [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
131 5400, 4, 12, 4, 4, 4, 4, 4 }, 183 5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */
132 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 184 [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
133 7800, 5, 18, 4, 5, 5, 5, 5 }, 185 7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */
134 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 186 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
135 10100, 6, 24, 6, 6, 6, 6, 6 }, 187 10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */
136 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 188 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
137 14100, 7, 36, 6, 7, 7, 7, 7 }, 189 14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */
138 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 190 [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
139 17700, 8, 48, 8, 8, 8, 8, 8 }, 191 17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */
140 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 192 [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
141 23700, 9, 72, 8, 9, 9, 9, 9 }, 193 23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */
142 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 194 [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
143 27400, 10, 96, 8, 10, 10, 10, 10 }, 195 27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */
144 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 196 [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
145 30900, 11, 108, 8, 11, 11, 11, 11 }, 197 30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */
146 { INVALID, INVALID, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */ 198 [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
147 6400, 0, 0, 4, 12, 28, 12, 28 }, 199 6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */
148 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */ 200 [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
149 12700, 1, 1, 6, 13, 29, 13, 29 }, 201 12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */
150 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */ 202 [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
151 18800, 2, 2, 6, 14, 30, 14, 30 }, 203 18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/
152 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */ 204 [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
153 25000, 3, 3, 8, 15, 31, 15, 31 }, 205 25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */
154 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */ 206 [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
155 36700, 4, 4, 8, 16, 32, 16, 32 }, 207 36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */
156 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */ 208 [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
157 48100, 5, 5, 8, 17, 33, 17, 33 }, 209 48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */
158 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */ 210 [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
159 53500, 6, 6, 8, 18, 34, 18, 34 }, 211 53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */
160 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */ 212 [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
161 59000, 7, 7, 8, 19, 35, 19, 36 }, 213 59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */
162 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */ 214 [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
163 12700, 8, 8, 4, 20, 37, 20, 37 }, 215 65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/
164 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */ 216 [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
165 24800, 9, 9, 6, 21, 38, 21, 38 }, 217 12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */
166 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */ 218 [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
167 36600, 10, 10, 6, 22, 39, 22, 39 }, 219 24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */
168 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */ 220 [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
169 48100, 11, 11, 8, 23, 40, 23, 40 }, 221 36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */
170 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */ 222 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
171 69500, 12, 12, 8, 24, 41, 24, 41 }, 223 48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */
172 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */ 224 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
173 89500, 13, 13, 8, 25, 42, 25, 42 }, 225 69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */
174 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */ 226 [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
175 98900, 14, 14, 8, 26, 43, 26, 44 }, 227 89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */
176 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */ 228 [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
177 108300, 15, 15, 8, 27, 44, 27, 45 }, 229 98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */
178 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */ 230 [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
179 13200, 0, 0, 8, 12, 28, 28, 28 }, 231 108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */
180 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */ 232 [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
181 25900, 1, 1, 8, 13, 29, 29, 29 }, 233 120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */
182 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */ 234 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
183 38600, 2, 2, 8, 14, 30, 30, 30 }, 235 17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */
184 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */ 236 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
185 49800, 3, 3, 8, 15, 31, 31, 31 }, 237 35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */
186 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */ 238 [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
187 72200, 4, 4, 8, 16, 32, 32, 32 }, 239 52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */
188 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */ 240 [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
189 92900, 5, 5, 8, 17, 33, 33, 33 }, 241 70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */
190 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */ 242 [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
191 102700, 6, 6, 8, 18, 34, 34, 34 }, 243 104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */
192 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */ 244 [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
193 112000, 7, 7, 8, 19, 35, 36, 36 }, 245 115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */
194 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ 246 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
195 122000, 7, 7, 8, 19, 35, 36, 36 }, 247 137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */
196 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */ 248 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
197 25800, 8, 8, 8, 20, 37, 37, 37 }, 249 151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */
198 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */ 250 [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
199 49800, 9, 9, 8, 21, 38, 38, 38 }, 251 152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */
200 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */ 252 [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
201 71900, 10, 10, 8, 22, 39, 39, 39 }, 253 168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */
202 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */ 254 [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
203 92500, 11, 11, 8, 23, 40, 40, 40 }, 255 168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */
204 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */ 256 [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
205 130300, 12, 12, 8, 24, 41, 41, 41 }, 257 185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */
206 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */ 258 [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
207 162800, 13, 13, 8, 25, 42, 42, 42 }, 259 13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */
208 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */ 260 [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
209 178200, 14, 14, 8, 26, 43, 43, 43 }, 261 25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */
210 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */ 262 [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
211 192100, 15, 15, 8, 27, 44, 45, 45 }, 263 38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */
212 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ 264 [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
213 207000, 15, 15, 8, 27, 44, 45, 45 }, 265 49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */
266 [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
267 72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */
268 [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
269 92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */
270 [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
271 102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */
272 [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
273 112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */
274 [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
275 122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */
276 [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
277 25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */
278 [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
279 49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */
280 [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
281 71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */
282 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
283 92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */
284 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
285 130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */
286 [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
287 162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */
288 [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
289 178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */
290 [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
291 192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */
292 [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
293 207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */
294 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
295 36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */
296 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
297 72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */
298 [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
299 108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */
300 [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
301 142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */
302 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
303 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */
304 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
305 224700, 20, 20, 8, 64, 65, 65 }, /* 170 Mb */
306 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
307 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */
308 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
309 288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */
310 [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
311 290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */
312 [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
313 317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */
314 [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
315 317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */
316 [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
317 346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */
214 }, 318 },
215 50, /* probe interval */ 319 50, /* probe interval */
216 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ 320 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -220,22 +324,22 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
220 8, 324 8,
221 0, 325 0,
222 { 326 {
223 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 327 { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
224 5400, 0, 12, 0, 0, 0 }, 328 5400, 0, 12, 0},
225 { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 329 { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
226 7800, 1, 18, 0, 1, 0 }, 330 7800, 1, 18, 0},
227 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 331 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
228 10000, 2, 24, 2, 2, 0 }, 332 10000, 2, 24, 2},
229 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 333 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
230 13900, 3, 36, 2, 3, 0 }, 334 13900, 3, 36, 2},
231 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 335 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
232 17300, 4, 48, 4, 4, 0 }, 336 17300, 4, 48, 4},
233 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 337 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
234 23000, 5, 72, 4, 5, 0 }, 338 23000, 5, 72, 4},
235 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 339 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
236 27400, 6, 96, 4, 6, 0 }, 340 27400, 6, 96, 4},
237 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 341 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
238 29300, 7, 108, 4, 7, 0 }, 342 29300, 7, 108, 4},
239 }, 343 },
240 50, /* probe interval */ 344 50, /* probe interval */
241 0, /* Phy rates allowed initially */ 345 0, /* Phy rates allowed initially */
@@ -245,30 +349,30 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
245 12, 349 12,
246 0, 350 0,
247 { 351 {
248 { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ 352 { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
249 900, 0, 2, 0, 0, 0 }, 353 900, 0, 2, 0},
250 { VALID, VALID, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ 354 { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
251 1900, 1, 4, 1, 1, 0 }, 355 1900, 1, 4, 1},
252 { VALID, VALID, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ 356 { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
253 4900, 2, 11, 2, 2, 0 }, 357 4900, 2, 11, 2},
254 { VALID, VALID, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ 358 { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
255 8100, 3, 22, 3, 3, 0 }, 359 8100, 3, 22, 3},
256 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 360 { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
257 5400, 4, 12, 4, 4, 0 }, 361 5400, 4, 12, 4},
258 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 362 { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
259 7800, 5, 18, 4, 5, 0 }, 363 7800, 5, 18, 4},
260 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 364 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
261 10000, 6, 24, 6, 6, 0 }, 365 10000, 6, 24, 6},
262 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 366 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
263 13900, 7, 36, 6, 7, 0 }, 367 13900, 7, 36, 6},
264 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 368 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
265 17300, 8, 48, 8, 8, 0 }, 369 17300, 8, 48, 8},
266 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 370 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
267 23000, 9, 72, 8, 9, 0 }, 371 23000, 9, 72, 8},
268 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 372 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
269 27400, 10, 96, 8, 10, 0 }, 373 27400, 10, 96, 8},
270 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 374 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
271 29300, 11, 108, 8, 11, 0 }, 375 29300, 11, 108, 8},
272 }, 376 },
273 50, /* probe interval */ 377 50, /* probe interval */
274 0, /* Phy rates allowed initially */ 378 0, /* Phy rates allowed initially */
@@ -338,7 +442,7 @@ static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv,
338 u8 index, int valid_tx_rate) 442 u8 index, int valid_tx_rate)
339{ 443{
340 BUG_ON(index > ath_rc_priv->rate_table_size); 444 BUG_ON(index > ath_rc_priv->rate_table_size);
341 ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0; 445 ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
342} 446}
343 447
344static inline 448static inline
@@ -370,6 +474,8 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
370 return 0; 474 return 0;
371 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) 475 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
372 return 0; 476 return 0;
477 if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG))
478 return 0;
373 if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG)) 479 if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
374 return 0; 480 return 0;
375 if (!ignore_cw && WLAN_RC_PHY_HT(phy)) 481 if (!ignore_cw && WLAN_RC_PHY_HT(phy))
@@ -400,13 +506,9 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
400 u32 capflag) 506 u32 capflag)
401{ 507{
402 u8 i, hi = 0; 508 u8 i, hi = 0;
403 u32 valid;
404 509
405 for (i = 0; i < rate_table->rate_cnt; i++) { 510 for (i = 0; i < rate_table->rate_cnt; i++) {
406 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? 511 if (rate_table->info[i].rate_flags & RC_LEGACY) {
407 rate_table->info[i].valid_single_stream :
408 rate_table->info[i].valid);
409 if (valid == 1) {
410 u32 phy = rate_table->info[i].phy; 512 u32 phy = rate_table->info[i].phy;
411 u8 valid_rate_count = 0; 513 u8 valid_rate_count = 0;
412 514
@@ -418,7 +520,7 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
418 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i; 520 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
419 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 521 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
420 ath_rc_set_valid_txmask(ath_rc_priv, i, 1); 522 ath_rc_set_valid_txmask(ath_rc_priv, i, 1);
421 hi = A_MAX(hi, i); 523 hi = i;
422 } 524 }
423 } 525 }
424 526
@@ -436,9 +538,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
436 for (i = 0; i < rateset->rs_nrates; i++) { 538 for (i = 0; i < rateset->rs_nrates; i++) {
437 for (j = 0; j < rate_table->rate_cnt; j++) { 539 for (j = 0; j < rate_table->rate_cnt; j++) {
438 u32 phy = rate_table->info[j].phy; 540 u32 phy = rate_table->info[j].phy;
439 u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? 541 u16 rate_flags = rate_table->info[i].rate_flags;
440 rate_table->info[j].valid_single_stream :
441 rate_table->info[j].valid);
442 u8 rate = rateset->rs_rates[i]; 542 u8 rate = rateset->rs_rates[i];
443 u8 dot11rate = rate_table->info[j].dot11rate; 543 u8 dot11rate = rate_table->info[j].dot11rate;
444 544
@@ -447,8 +547,9 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
447 * (VALID/VALID_20/VALID_40) flags */ 547 * (VALID/VALID_20/VALID_40) flags */
448 548
449 if ((rate == dot11rate) && 549 if ((rate == dot11rate) &&
450 ((valid & WLAN_RC_CAP_MODE(capflag)) == 550 (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
451 WLAN_RC_CAP_MODE(capflag)) && 551 WLAN_RC_CAP_MODE(capflag) &&
552 (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
452 !WLAN_RC_PHY_HT(phy)) { 553 !WLAN_RC_PHY_HT(phy)) {
453 u8 valid_rate_count = 0; 554 u8 valid_rate_count = 0;
454 555
@@ -482,14 +583,13 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
482 for (i = 0; i < rateset->rs_nrates; i++) { 583 for (i = 0; i < rateset->rs_nrates; i++) {
483 for (j = 0; j < rate_table->rate_cnt; j++) { 584 for (j = 0; j < rate_table->rate_cnt; j++) {
484 u32 phy = rate_table->info[j].phy; 585 u32 phy = rate_table->info[j].phy;
485 u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? 586 u16 rate_flags = rate_table->info[j].rate_flags;
486 rate_table->info[j].valid_single_stream :
487 rate_table->info[j].valid);
488 u8 rate = rateset->rs_rates[i]; 587 u8 rate = rateset->rs_rates[i];
489 u8 dot11rate = rate_table->info[j].dot11rate; 588 u8 dot11rate = rate_table->info[j].dot11rate;
490 589
491 if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) || 590 if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) ||
492 !WLAN_RC_PHY_HT_VALID(valid, capflag)) 591 !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) ||
592 !WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
493 continue; 593 continue;
494 594
495 if (!ath_rc_valid_phyrate(phy, capflag, 0)) 595 if (!ath_rc_valid_phyrate(phy, capflag, 0))
@@ -585,12 +685,15 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
585 if (rate > (ath_rc_priv->rate_table_size - 1)) 685 if (rate > (ath_rc_priv->rate_table_size - 1))
586 rate = ath_rc_priv->rate_table_size - 1; 686 rate = ath_rc_priv->rate_table_size - 1;
587 687
588 if (rate_table->info[rate].valid && 688 if (RC_TS_ONLY(rate_table->info[rate].rate_flags) &&
589 (ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)) 689 (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG))
690 return rate;
691
692 if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) &&
693 (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG)))
590 return rate; 694 return rate;
591 695
592 if (rate_table->info[rate].valid_single_stream && 696 if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags))
593 !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG))
594 return rate; 697 return rate;
595 698
596 /* This should not happen */ 699 /* This should not happen */
@@ -1003,12 +1106,19 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1003static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, 1106static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
1004 struct ieee80211_tx_rate *rate) 1107 struct ieee80211_tx_rate *rate)
1005{ 1108{
1006 int rix; 1109 int rix = 0, i = 0;
1110 int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
1007 1111
1008 if (!(rate->flags & IEEE80211_TX_RC_MCS)) 1112 if (!(rate->flags & IEEE80211_TX_RC_MCS))
1009 return rate->idx; 1113 return rate->idx;
1010 1114
1011 rix = rate->idx + rate_table->mcs_start; 1115 while (rate->idx > mcs_rix_off[i] &&
1116 i < sizeof(mcs_rix_off)/sizeof(int)) {
1117 rix++; i++;
1118 }
1119
1120 rix += rate->idx + rate_table->mcs_start;
1121
1012 if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && 1122 if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
1013 (rate->flags & IEEE80211_TX_RC_SHORT_GI)) 1123 (rate->flags & IEEE80211_TX_RC_SHORT_GI))
1014 rix = rate_table->info[rix].ht_index; 1124 rix = rate_table->info[rix].ht_index;
@@ -1016,8 +1126,6 @@ static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
1016 rix = rate_table->info[rix].sgi_index; 1126 rix = rate_table->info[rix].sgi_index;
1017 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 1127 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1018 rix = rate_table->info[rix].cw40index; 1128 rix = rate_table->info[rix].cw40index;
1019 else
1020 rix = rate_table->info[rix].base_index;
1021 1129
1022 return rix; 1130 return rix;
1023} 1131}
@@ -1193,20 +1301,19 @@ static void ath_rc_init(struct ath_softc *sc,
1193} 1301}
1194 1302
1195static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, 1303static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
1196 bool is_cw40, bool is_sgi40) 1304 bool is_cw40, bool is_sgi)
1197{ 1305{
1198 u8 caps = 0; 1306 u8 caps = 0;
1199 1307
1200 if (sta->ht_cap.ht_supported) { 1308 if (sta->ht_cap.ht_supported) {
1201 caps = WLAN_RC_HT_FLAG; 1309 caps = WLAN_RC_HT_FLAG;
1202 if (sc->sc_ah->caps.tx_chainmask != 1 && 1310 if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2])
1203 ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) { 1311 caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
1204 if (sta->ht_cap.mcs.rx_mask[1]) 1312 else if (sta->ht_cap.mcs.rx_mask[1])
1205 caps |= WLAN_RC_DS_FLAG; 1313 caps |= WLAN_RC_DS_FLAG;
1206 }
1207 if (is_cw40) 1314 if (is_cw40)
1208 caps |= WLAN_RC_40_FLAG; 1315 caps |= WLAN_RC_40_FLAG;
1209 if (is_sgi40) 1316 if (is_sgi)
1210 caps |= WLAN_RC_SGI_FLAG; 1317 caps |= WLAN_RC_SGI_FLAG;
1211 } 1318 }
1212 1319
@@ -1300,7 +1407,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1300 struct ath_softc *sc = priv; 1407 struct ath_softc *sc = priv;
1301 struct ath_rate_priv *ath_rc_priv = priv_sta; 1408 struct ath_rate_priv *ath_rc_priv = priv_sta;
1302 const struct ath_rate_table *rate_table; 1409 const struct ath_rate_table *rate_table;
1303 bool is_cw40, is_sgi40; 1410 bool is_cw40, is_sgi = false;
1304 int i, j = 0; 1411 int i, j = 0;
1305 1412
1306 for (i = 0; i < sband->n_bitrates; i++) { 1413 for (i = 0; i < sband->n_bitrates; i++) {
@@ -1323,7 +1430,11 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1323 } 1430 }
1324 1431
1325 is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; 1432 is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1326 is_sgi40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; 1433
1434 if (is_cw40)
1435 is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
1436 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
1437 is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
1327 1438
1328 /* Choose rate table first */ 1439 /* Choose rate table first */
1329 1440
@@ -1336,7 +1447,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1336 rate_table = hw_rate_table[sc->cur_rate_mode]; 1447 rate_table = hw_rate_table[sc->cur_rate_mode];
1337 } 1448 }
1338 1449
1339 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40); 1450 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
1340 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1451 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1341} 1452}
1342 1453
@@ -1347,10 +1458,10 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1347 struct ath_softc *sc = priv; 1458 struct ath_softc *sc = priv;
1348 struct ath_rate_priv *ath_rc_priv = priv_sta; 1459 struct ath_rate_priv *ath_rc_priv = priv_sta;
1349 const struct ath_rate_table *rate_table = NULL; 1460 const struct ath_rate_table *rate_table = NULL;
1350 bool oper_cw40 = false, oper_sgi40; 1461 bool oper_cw40 = false, oper_sgi;
1351 bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ? 1462 bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ?
1352 true : false; 1463 true : false;
1353 bool local_sgi40 = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ? 1464 bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
1354 true : false; 1465 true : false;
1355 1466
1356 /* FIXME: Handle AP mode later when we support CWM */ 1467 /* FIXME: Handle AP mode later when we support CWM */
@@ -1363,15 +1474,21 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1363 oper_chan_type == NL80211_CHAN_HT40PLUS) 1474 oper_chan_type == NL80211_CHAN_HT40PLUS)
1364 oper_cw40 = true; 1475 oper_cw40 = true;
1365 1476
1366 oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1477 if (oper_cw40)
1367 true : false; 1478 oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1479 true : false;
1480 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
1481 oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1482 true : false;
1483 else
1484 oper_sgi = false;
1368 1485
1369 if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) { 1486 if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
1370 rate_table = ath_choose_rate_table(sc, sband->band, 1487 rate_table = ath_choose_rate_table(sc, sband->band,
1371 sta->ht_cap.ht_supported, 1488 sta->ht_cap.ht_supported,
1372 oper_cw40); 1489 oper_cw40);
1373 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, 1490 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
1374 oper_cw40, oper_sgi40); 1491 oper_cw40, oper_sgi);
1375 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1492 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1376 1493
1377 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, 1494 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 3d8d40cdc99e..dc1082654501 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -24,32 +24,63 @@
24struct ath_softc; 24struct ath_softc;
25 25
26#define ATH_RATE_MAX 30 26#define ATH_RATE_MAX 30
27#define RATE_TABLE_SIZE 64 27#define RATE_TABLE_SIZE 72
28#define MAX_TX_RATE_PHY 48 28#define MAX_TX_RATE_PHY 48
29 29
30/* VALID_ALL - valid for 20/40/Legacy,
31 * VALID - Legacy only,
32 * VALID_20 - HT 20 only,
33 * VALID_40 - HT 40 only */
34 30
35#define INVALID 0x0 31#define RC_INVALID 0x0000
36#define VALID 0x1 32#define RC_LEGACY 0x0001
37#define VALID_20 0x2 33#define RC_SS 0x0002
38#define VALID_40 0x4 34#define RC_DS 0x0004
39#define VALID_2040 (VALID_20|VALID_40) 35#define RC_TS 0x0008
40#define VALID_ALL (VALID_2040|VALID) 36#define RC_HT_20 0x0010
37#define RC_HT_40 0x0020
38
39#define RC_STREAM_MASK 0xe
40#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \
41 (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
42#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS)
43#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY))
44
45#define RC_HT_2040 (RC_HT_20 | RC_HT_40)
46#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS)
47#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS)
48#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS)
49#define RC_HT_S_20 (RC_HT_20 | RC_SS)
50#define RC_HT_D_20 (RC_HT_20 | RC_DS)
51#define RC_HT_T_20 (RC_HT_20 | RC_TS)
52#define RC_HT_S_40 (RC_HT_40 | RC_SS)
53#define RC_HT_D_40 (RC_HT_40 | RC_DS)
54#define RC_HT_T_40 (RC_HT_40 | RC_TS)
55
56#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS)
57#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS)
58#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS)
59#define RC_HT_DT_40 (RC_HT_40 | RC_DS | RC_TS)
60
61#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS)
62#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
63
64#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS)
65#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS)
66
67#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
41 68
42enum { 69enum {
43 WLAN_RC_PHY_OFDM, 70 WLAN_RC_PHY_OFDM,
44 WLAN_RC_PHY_CCK, 71 WLAN_RC_PHY_CCK,
45 WLAN_RC_PHY_HT_20_SS, 72 WLAN_RC_PHY_HT_20_SS,
46 WLAN_RC_PHY_HT_20_DS, 73 WLAN_RC_PHY_HT_20_DS,
74 WLAN_RC_PHY_HT_20_TS,
47 WLAN_RC_PHY_HT_40_SS, 75 WLAN_RC_PHY_HT_40_SS,
48 WLAN_RC_PHY_HT_40_DS, 76 WLAN_RC_PHY_HT_40_DS,
77 WLAN_RC_PHY_HT_40_TS,
49 WLAN_RC_PHY_HT_20_SS_HGI, 78 WLAN_RC_PHY_HT_20_SS_HGI,
50 WLAN_RC_PHY_HT_20_DS_HGI, 79 WLAN_RC_PHY_HT_20_DS_HGI,
80 WLAN_RC_PHY_HT_20_TS_HGI,
51 WLAN_RC_PHY_HT_40_SS_HGI, 81 WLAN_RC_PHY_HT_40_SS_HGI,
52 WLAN_RC_PHY_HT_40_DS_HGI, 82 WLAN_RC_PHY_HT_40_DS_HGI,
83 WLAN_RC_PHY_HT_40_TS_HGI,
53 WLAN_RC_PHY_MAX 84 WLAN_RC_PHY_MAX
54}; 85};
55 86
@@ -57,36 +88,50 @@ enum {
57 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 88 || (_phy == WLAN_RC_PHY_HT_40_DS) \
58 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 89 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
59 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 90 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
91#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \
92 || (_phy == WLAN_RC_PHY_HT_40_TS) \
93 || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
94 || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
60#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \ 95#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
61 || (_phy == WLAN_RC_PHY_HT_20_DS) \ 96 || (_phy == WLAN_RC_PHY_HT_20_DS) \
97 || (_phy == WLAN_RC_PHY_HT_20_TS) \
62 || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ 98 || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
63 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI)) 99 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
100 || (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
64#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ 101#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
65 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 102 || (_phy == WLAN_RC_PHY_HT_40_DS) \
103 || (_phy == WLAN_RC_PHY_HT_40_TS) \
66 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 104 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
67 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 105 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
106 || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
68#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ 107#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
69 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 108 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
109 || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
70 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 110 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
71 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 111 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
112 || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
72 113
73#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS) 114#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
74 115
75#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \ 116#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
76 (capflag & WLAN_RC_40_FLAG) ? VALID_40 : VALID_20 : VALID)) 117 ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
118
119#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
120 (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
77 121
78/* Return TRUE if flag supports HT20 && client supports HT20 or 122/* Return TRUE if flag supports HT20 && client supports HT20 or
79 * return TRUE if flag supports HT40 && client supports HT40. 123 * return TRUE if flag supports HT40 && client supports HT40.
 80 * This is used because some rates overlap between HT20/HT40. 124 * This is used because some rates overlap between HT20/HT40.
81 */ 125 */
82#define WLAN_RC_PHY_HT_VALID(flag, capflag) \ 126#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
83 (((flag & VALID_20) && !(capflag & WLAN_RC_40_FLAG)) || \ 127 (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
84 ((flag & VALID_40) && (capflag & WLAN_RC_40_FLAG))) 128 ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
85 129
86#define WLAN_RC_DS_FLAG (0x01) 130#define WLAN_RC_DS_FLAG (0x01)
87#define WLAN_RC_40_FLAG (0x02) 131#define WLAN_RC_TS_FLAG (0x02)
88#define WLAN_RC_SGI_FLAG (0x04) 132#define WLAN_RC_40_FLAG (0x04)
89#define WLAN_RC_HT_FLAG (0x08) 133#define WLAN_RC_SGI_FLAG (0x08)
134#define WLAN_RC_HT_FLAG (0x10)
90 135
91/** 136/**
92 * struct ath_rate_table - Rate Control table 137 * struct ath_rate_table - Rate Control table
@@ -110,15 +155,13 @@ struct ath_rate_table {
110 int rate_cnt; 155 int rate_cnt;
111 int mcs_start; 156 int mcs_start;
112 struct { 157 struct {
113 u8 valid; 158 u16 rate_flags;
114 u8 valid_single_stream;
115 u8 phy; 159 u8 phy;
116 u32 ratekbps; 160 u32 ratekbps;
117 u32 user_ratekbps; 161 u32 user_ratekbps;
118 u8 ratecode; 162 u8 ratecode;
119 u8 dot11rate; 163 u8 dot11rate;
120 u8 ctrl_rate; 164 u8 ctrl_rate;
121 u8 base_index;
122 u8 cw40index; 165 u8 cw40index;
123 u8 sgi_index; 166 u8 sgi_index;
124 u8 ht_index; 167 u8 ht_index;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index e3e52913d83a..da0cfe90c38a 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -116,9 +116,6 @@ static void ath_opmode_init(struct ath_softc *sc)
116 /* configure operational mode */ 116 /* configure operational mode */
117 ath9k_hw_setopmode(ah); 117 ath9k_hw_setopmode(ah);
118 118
119 /* Handle any link-level address change. */
120 ath9k_hw_setmac(ah, common->macaddr);
121
122 /* calculate and install multicast filter */ 119 /* calculate and install multicast filter */
123 mfilt[0] = mfilt[1] = ~0; 120 mfilt[0] = mfilt[1] = ~0;
124 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 121 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -295,7 +292,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
295 292
296 ath_opmode_init(sc); 293 ath_opmode_init(sc);
297 294
298 ath9k_hw_startpcureceive(sc->sc_ah); 295 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
299} 296}
300 297
301static void ath_edma_stop_recv(struct ath_softc *sc) 298static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -501,7 +498,7 @@ int ath_startrecv(struct ath_softc *sc)
501start_recv: 498start_recv:
502 spin_unlock_bh(&sc->rx.rxbuflock); 499 spin_unlock_bh(&sc->rx.rxbuflock);
503 ath_opmode_init(sc); 500 ath_opmode_init(sc);
504 ath9k_hw_startpcureceive(ah); 501 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));
505 502
506 return 0; 503 return 0;
507} 504}
@@ -700,12 +697,16 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
700 bf = SKB_CB_ATHBUF(skb); 697 bf = SKB_CB_ATHBUF(skb);
701 BUG_ON(!bf); 698 BUG_ON(!bf);
702 699
703 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 700 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
704 common->rx_bufsize, DMA_FROM_DEVICE); 701 common->rx_bufsize, DMA_FROM_DEVICE);
705 702
706 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data); 703 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
707 if (ret == -EINPROGRESS) 704 if (ret == -EINPROGRESS) {
 705 /* let the device regain ownership of the buffer */
706 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
707 common->rx_bufsize, DMA_FROM_DEVICE);
708 return false; 708 return false;
709 }
709 710
710 __skb_unlink(skb, &rx_edma->rx_fifo); 711 __skb_unlink(skb, &rx_edma->rx_fifo);
711 if (ret == -EINVAL) { 712 if (ret == -EINVAL) {
@@ -814,13 +815,263 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
814 * 1. accessing the frame 815 * 1. accessing the frame
815 * 2. requeueing the same buffer to h/w 816 * 2. requeueing the same buffer to h/w
816 */ 817 */
817 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 818 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
818 common->rx_bufsize, 819 common->rx_bufsize,
819 DMA_FROM_DEVICE); 820 DMA_FROM_DEVICE);
820 821
821 return bf; 822 return bf;
822} 823}
823 824
825/* Assumes you've already done the endian to CPU conversion */
826static bool ath9k_rx_accept(struct ath_common *common,
827 struct ieee80211_hdr *hdr,
828 struct ieee80211_rx_status *rxs,
829 struct ath_rx_status *rx_stats,
830 bool *decrypt_error)
831{
832 struct ath_hw *ah = common->ah;
833 __le16 fc;
834 u8 rx_status_len = ah->caps.rx_status_len;
835
836 fc = hdr->frame_control;
837
838 if (!rx_stats->rs_datalen)
839 return false;
840 /*
841 * rs_status follows rs_datalen so if rs_datalen is too large
842 * we can take a hint that hardware corrupted it, so ignore
843 * those frames.
844 */
845 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
846 return false;
847
848 /*
849 * rs_more indicates chained descriptors which can be used
850 * to link buffers together for a sort of scatter-gather
851 * operation.
852 * reject the frame, we don't support scatter-gather yet and
853 * the frame is probably corrupt anyway
854 */
855 if (rx_stats->rs_more)
856 return false;
857
858 /*
859 * The rx_stats->rs_status will not be set until the end of the
860 * chained descriptors so it can be ignored if rs_more is set. The
861 * rs_more will be false at the last element of the chained
862 * descriptors.
863 */
864 if (rx_stats->rs_status != 0) {
865 if (rx_stats->rs_status & ATH9K_RXERR_CRC)
866 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
867 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
868 return false;
869
870 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
871 *decrypt_error = true;
872 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
873 if (ieee80211_is_ctl(fc))
874 /*
875 * Sometimes, we get invalid
876 * MIC failures on valid control frames.
877 * Remove these mic errors.
878 */
879 rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
880 else
881 rxs->flag |= RX_FLAG_MMIC_ERROR;
882 }
883 /*
884 * Reject error frames with the exception of
885 * decryption and MIC failures. For monitor mode,
886 * we also ignore the CRC error.
887 */
888 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
889 if (rx_stats->rs_status &
890 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
891 ATH9K_RXERR_CRC))
892 return false;
893 } else {
894 if (rx_stats->rs_status &
895 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
896 return false;
897 }
898 }
899 }
900 return true;
901}
902
903static int ath9k_process_rate(struct ath_common *common,
904 struct ieee80211_hw *hw,
905 struct ath_rx_status *rx_stats,
906 struct ieee80211_rx_status *rxs)
907{
908 struct ieee80211_supported_band *sband;
909 enum ieee80211_band band;
910 unsigned int i = 0;
911
912 band = hw->conf.channel->band;
913 sband = hw->wiphy->bands[band];
914
915 if (rx_stats->rs_rate & 0x80) {
916 /* HT rate */
917 rxs->flag |= RX_FLAG_HT;
918 if (rx_stats->rs_flags & ATH9K_RX_2040)
919 rxs->flag |= RX_FLAG_40MHZ;
920 if (rx_stats->rs_flags & ATH9K_RX_GI)
921 rxs->flag |= RX_FLAG_SHORT_GI;
922 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
923 return 0;
924 }
925
926 for (i = 0; i < sband->n_bitrates; i++) {
927 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
928 rxs->rate_idx = i;
929 return 0;
930 }
931 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
932 rxs->flag |= RX_FLAG_SHORTPRE;
933 rxs->rate_idx = i;
934 return 0;
935 }
936 }
937
938 /*
939 * No valid hardware bitrate found -- we should not get here
940 * because hardware has already validated this frame as OK.
941 */
942 ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
943 "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
944
945 return -EINVAL;
946}
947
948static void ath9k_process_rssi(struct ath_common *common,
949 struct ieee80211_hw *hw,
950 struct ieee80211_hdr *hdr,
951 struct ath_rx_status *rx_stats)
952{
953 struct ath_hw *ah = common->ah;
954 struct ieee80211_sta *sta;
955 struct ath_node *an;
956 int last_rssi = ATH_RSSI_DUMMY_MARKER;
957 __le16 fc;
958
959 fc = hdr->frame_control;
960
961 rcu_read_lock();
962 /*
963 * XXX: use ieee80211_find_sta! This requires quite a bit of work
964 * under the current ath9k virtual wiphy implementation as we have
965 * no way of tying a vif to wiphy. Typically vifs are attached to
966 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
967 * wiphy you'd have to iterate over every wiphy and each sdata.
968 */
969 sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
970 if (sta) {
971 an = (struct ath_node *) sta->drv_priv;
972 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
973 !rx_stats->rs_moreaggr)
974 ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
975 last_rssi = an->last_rssi;
976 }
977 rcu_read_unlock();
978
979 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
980 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
981 ATH_RSSI_EP_MULTIPLIER);
982 if (rx_stats->rs_rssi < 0)
983 rx_stats->rs_rssi = 0;
984
985 /* Update Beacon RSSI, this is used by ANI. */
986 if (ieee80211_is_beacon(fc))
987 ah->stats.avgbrssi = rx_stats->rs_rssi;
988}
989
990/*
991 * For Decrypt or Demic errors, we only mark packet status here and always push
992 * up the frame up to let mac80211 handle the actual error case, be it no
993 * decryption key or real decryption error. This let us keep statistics there.
994 */
995static int ath9k_rx_skb_preprocess(struct ath_common *common,
996 struct ieee80211_hw *hw,
997 struct ieee80211_hdr *hdr,
998 struct ath_rx_status *rx_stats,
999 struct ieee80211_rx_status *rx_status,
1000 bool *decrypt_error)
1001{
1002 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
1003
1004 /*
1005 * everything but the rate is checked here, the rate check is done
1006 * separately to avoid doing two lookups for a rate for each frame.
1007 */
1008 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
1009 return -EINVAL;
1010
1011 ath9k_process_rssi(common, hw, hdr, rx_stats);
1012
1013 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
1014 return -EINVAL;
1015
1016 rx_status->band = hw->conf.channel->band;
1017 rx_status->freq = hw->conf.channel->center_freq;
1018 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
1019 rx_status->antenna = rx_stats->rs_antenna;
1020 rx_status->flag |= RX_FLAG_TSFT;
1021
1022 return 0;
1023}
1024
1025static void ath9k_rx_skb_postprocess(struct ath_common *common,
1026 struct sk_buff *skb,
1027 struct ath_rx_status *rx_stats,
1028 struct ieee80211_rx_status *rxs,
1029 bool decrypt_error)
1030{
1031 struct ath_hw *ah = common->ah;
1032 struct ieee80211_hdr *hdr;
1033 int hdrlen, padpos, padsize;
1034 u8 keyix;
1035 __le16 fc;
1036
1037 /* see if any padding is done by the hw and remove it */
1038 hdr = (struct ieee80211_hdr *) skb->data;
1039 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1040 fc = hdr->frame_control;
1041 padpos = ath9k_cmn_padpos(hdr->frame_control);
1042
1043 /* The MAC header is padded to have 32-bit boundary if the
1044 * packet payload is non-zero. The general calculation for
1045 * padsize would take into account odd header lengths:
1046 * padsize = (4 - padpos % 4) % 4; However, since only
1047 * even-length headers are used, padding can only be 0 or 2
1048 * bytes and we can optimize this a bit. In addition, we must
1049 * not try to remove padding from short control frames that do
1050 * not have payload. */
1051 padsize = padpos & 3;
1052 if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
1053 memmove(skb->data + padsize, skb->data, padpos);
1054 skb_pull(skb, padsize);
1055 }
1056
1057 keyix = rx_stats->rs_keyix;
1058
1059 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
1060 ieee80211_has_protected(fc)) {
1061 rxs->flag |= RX_FLAG_DECRYPTED;
1062 } else if (ieee80211_has_protected(fc)
1063 && !decrypt_error && skb->len >= hdrlen + 4) {
1064 keyix = skb->data[hdrlen + 3] >> 6;
1065
1066 if (test_bit(keyix, common->keymap))
1067 rxs->flag |= RX_FLAG_DECRYPTED;
1068 }
1069 if (ah->sw_mgmt_crypto &&
1070 (rxs->flag & RX_FLAG_DECRYPTED) &&
1071 ieee80211_is_mgmt(fc))
1072 /* Use software decrypt for management frames. */
1073 rxs->flag &= ~RX_FLAG_DECRYPTED;
1074}
824 1075
825int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1076int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
826{ 1077{
@@ -842,6 +1093,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
842 enum ath9k_rx_qtype qtype; 1093 enum ath9k_rx_qtype qtype;
843 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1094 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
844 int dma_type; 1095 int dma_type;
1096 u8 rx_status_len = ah->caps.rx_status_len;
1097 u64 tsf = 0;
1098 u32 tsf_lower = 0;
845 1099
846 if (edma) 1100 if (edma)
847 dma_type = DMA_BIDIRECTIONAL; 1101 dma_type = DMA_BIDIRECTIONAL;
@@ -851,6 +1105,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
851 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1105 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
852 spin_lock_bh(&sc->rx.rxbuflock); 1106 spin_lock_bh(&sc->rx.rxbuflock);
853 1107
1108 tsf = ath9k_hw_gettsf64(ah);
1109 tsf_lower = tsf & 0xffffffff;
1110
854 do { 1111 do {
855 /* If handling rx interrupt and flush is in progress => exit */ 1112 /* If handling rx interrupt and flush is in progress => exit */
856 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 1113 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
@@ -869,7 +1126,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
869 if (!skb) 1126 if (!skb)
870 continue; 1127 continue;
871 1128
872 hdr = (struct ieee80211_hdr *) skb->data; 1129 hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
873 rxs = IEEE80211_SKB_RXCB(skb); 1130 rxs = IEEE80211_SKB_RXCB(skb);
874 1131
875 hw = ath_get_virt_hw(sc, hdr); 1132 hw = ath_get_virt_hw(sc, hdr);
@@ -883,8 +1140,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
883 if (flush) 1140 if (flush)
884 goto requeue; 1141 goto requeue;
885 1142
886 retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs, 1143 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
887 rxs, &decrypt_error); 1144 if (rs.rs_tstamp > tsf_lower &&
1145 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1146 rxs->mactime -= 0x100000000ULL;
1147
1148 if (rs.rs_tstamp < tsf_lower &&
1149 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1150 rxs->mactime += 0x100000000ULL;
1151
1152 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1153 rxs, &decrypt_error);
888 if (retval) 1154 if (retval)
889 goto requeue; 1155 goto requeue;
890 1156
@@ -908,8 +1174,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
908 if (ah->caps.rx_status_len) 1174 if (ah->caps.rx_status_len)
909 skb_pull(skb, ah->caps.rx_status_len); 1175 skb_pull(skb, ah->caps.rx_status_len);
910 1176
911 ath9k_cmn_rx_skb_postprocess(common, skb, &rs, 1177 ath9k_rx_skb_postprocess(common, skb, &rs,
912 rxs, decrypt_error); 1178 rxs, decrypt_error);
913 1179
914 /* We will now give hardware our shiny new allocated skb */ 1180 /* We will now give hardware our shiny new allocated skb */
915 bf->bf_mpdu = requeue_skb; 1181 bf->bf_mpdu = requeue_skb;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index d4371a43bdaa..633e3d949ec0 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -222,6 +222,7 @@
222 222
223#define AR_ISR_S2 0x008c 223#define AR_ISR_S2 0x008c
224#define AR_ISR_S2_QCU_TXURN 0x000003FF 224#define AR_ISR_S2_QCU_TXURN 0x000003FF
225#define AR_ISR_S2_BB_WATCHDOG 0x00010000
225#define AR_ISR_S2_CST 0x00400000 226#define AR_ISR_S2_CST 0x00400000
226#define AR_ISR_S2_GTT 0x00800000 227#define AR_ISR_S2_GTT 0x00800000
227#define AR_ISR_S2_TIM 0x01000000 228#define AR_ISR_S2_TIM 0x01000000
@@ -699,7 +700,15 @@
699#define AR_RC_HOSTIF 0x00000100 700#define AR_RC_HOSTIF 0x00000100
700 701
701#define AR_WA 0x4004 702#define AR_WA 0x4004
703#define AR_WA_BIT6 (1 << 6)
704#define AR_WA_BIT7 (1 << 7)
705#define AR_WA_BIT23 (1 << 23)
702#define AR_WA_D3_L1_DISABLE (1 << 14) 706#define AR_WA_D3_L1_DISABLE (1 << 14)
707#define AR_WA_D3_TO_L1_DISABLE_REAL (1 << 16)
708#define AR_WA_ASPM_TIMER_BASED_DISABLE (1 << 17)
709#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
710#define AR_WA_ANALOG_SHIFT (1 << 20)
711#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
703#define AR9285_WA_DEFAULT 0x004a050b 712#define AR9285_WA_DEFAULT 0x004a050b
704#define AR9280_WA_DEFAULT 0x0040073b 713#define AR9280_WA_DEFAULT 0x0040073b
705#define AR_WA_DEFAULT 0x0000073f 714#define AR_WA_DEFAULT 0x0000073f
@@ -756,32 +765,33 @@
756#define AR_SREV_REVISION2 0x00000F00 765#define AR_SREV_REVISION2 0x00000F00
757#define AR_SREV_REVISION2_S 8 766#define AR_SREV_REVISION2_S 8
758 767
759#define AR_SREV_VERSION_5416_PCI 0xD 768#define AR_SREV_VERSION_5416_PCI 0xD
760#define AR_SREV_VERSION_5416_PCIE 0xC 769#define AR_SREV_VERSION_5416_PCIE 0xC
761#define AR_SREV_REVISION_5416_10 0 770#define AR_SREV_REVISION_5416_10 0
762#define AR_SREV_REVISION_5416_20 1 771#define AR_SREV_REVISION_5416_20 1
763#define AR_SREV_REVISION_5416_22 2 772#define AR_SREV_REVISION_5416_22 2
764#define AR_SREV_VERSION_9100 0x14 773#define AR_SREV_VERSION_9100 0x14
765#define AR_SREV_VERSION_9160 0x40 774#define AR_SREV_VERSION_9160 0x40
766#define AR_SREV_REVISION_9160_10 0 775#define AR_SREV_REVISION_9160_10 0
767#define AR_SREV_REVISION_9160_11 1 776#define AR_SREV_REVISION_9160_11 1
768#define AR_SREV_VERSION_9280 0x80 777#define AR_SREV_VERSION_9280 0x80
769#define AR_SREV_REVISION_9280_10 0 778#define AR_SREV_REVISION_9280_10 0
770#define AR_SREV_REVISION_9280_20 1 779#define AR_SREV_REVISION_9280_20 1
771#define AR_SREV_REVISION_9280_21 2 780#define AR_SREV_REVISION_9280_21 2
772#define AR_SREV_VERSION_9285 0xC0 781#define AR_SREV_VERSION_9285 0xC0
773#define AR_SREV_REVISION_9285_10 0 782#define AR_SREV_REVISION_9285_10 0
774#define AR_SREV_REVISION_9285_11 1 783#define AR_SREV_REVISION_9285_11 1
775#define AR_SREV_REVISION_9285_12 2 784#define AR_SREV_REVISION_9285_12 2
776#define AR_SREV_VERSION_9287 0x180 785#define AR_SREV_VERSION_9287 0x180
777#define AR_SREV_REVISION_9287_10 0 786#define AR_SREV_REVISION_9287_10 0
778#define AR_SREV_REVISION_9287_11 1 787#define AR_SREV_REVISION_9287_11 1
779#define AR_SREV_REVISION_9287_12 2 788#define AR_SREV_REVISION_9287_12 2
780#define AR_SREV_VERSION_9271 0x140 789#define AR_SREV_REVISION_9287_13 3
781#define AR_SREV_REVISION_9271_10 0 790#define AR_SREV_VERSION_9271 0x140
782#define AR_SREV_REVISION_9271_11 1 791#define AR_SREV_REVISION_9271_10 0
783#define AR_SREV_VERSION_9300 0x1c0 792#define AR_SREV_REVISION_9271_11 1
784#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ 793#define AR_SREV_VERSION_9300 0x1c0
794#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
785 795
786#define AR_SREV_5416(_ah) \ 796#define AR_SREV_5416(_ah) \
787 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 797 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -859,6 +869,11 @@
859 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \ 869 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
860 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ 870 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
861 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_12))) 871 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_12)))
872#define AR_SREV_9287_13_OR_LATER(_ah) \
873 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
874 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
875 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_13)))
876
862#define AR_SREV_9271(_ah) \ 877#define AR_SREV_9271(_ah) \
863 (((_ah))->hw_version.macVersion == AR_SREV_VERSION_9271) 878 (((_ah))->hw_version.macVersion == AR_SREV_VERSION_9271)
864#define AR_SREV_9271_10(_ah) \ 879#define AR_SREV_9271_10(_ah) \
@@ -867,6 +882,7 @@
867#define AR_SREV_9271_11(_ah) \ 882#define AR_SREV_9271_11(_ah) \
868 (AR_SREV_9271(_ah) && \ 883 (AR_SREV_9271(_ah) && \
869 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11)) 884 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
885
870#define AR_SREV_9300(_ah) \ 886#define AR_SREV_9300(_ah) \
871 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300)) 887 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
872#define AR_SREV_9300_20(_ah) \ 888#define AR_SREV_9300_20(_ah) \
@@ -881,6 +897,10 @@
881 (AR_SREV_9285_12_OR_LATER(_ah) && \ 897 (AR_SREV_9285_12_OR_LATER(_ah) && \
882 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) 898 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
883 899
900#define AR_DEVID_7010(_ah) \
901 (((_ah)->hw_version.devid == 0x7010) || \
902 ((_ah)->hw_version.devid == 0x9018))
903
884#define AR_RADIO_SREV_MAJOR 0xf0 904#define AR_RADIO_SREV_MAJOR 0xf0
885#define AR_RAD5133_SREV_MAJOR 0xc0 905#define AR_RAD5133_SREV_MAJOR 0xc0
886#define AR_RAD2133_SREV_MAJOR 0xd0 906#define AR_RAD2133_SREV_MAJOR 0xd0
@@ -978,6 +998,7 @@ enum {
978#define AR9287_NUM_GPIO 11 998#define AR9287_NUM_GPIO 11
979#define AR9271_NUM_GPIO 16 999#define AR9271_NUM_GPIO 16
980#define AR9300_NUM_GPIO 17 1000#define AR9300_NUM_GPIO 17
1001#define AR7010_NUM_GPIO 16
981 1002
982#define AR_GPIO_IN_OUT 0x4048 1003#define AR_GPIO_IN_OUT 0x4048
983#define AR_GPIO_IN_VAL 0x0FFFC000 1004#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -992,6 +1013,8 @@ enum {
992#define AR9271_GPIO_IN_VAL_S 16 1013#define AR9271_GPIO_IN_VAL_S 16
993#define AR9300_GPIO_IN_VAL 0x0001FFFF 1014#define AR9300_GPIO_IN_VAL 0x0001FFFF
994#define AR9300_GPIO_IN_VAL_S 0 1015#define AR9300_GPIO_IN_VAL_S 0
1016#define AR7010_GPIO_IN_VAL 0x0000FFFF
1017#define AR7010_GPIO_IN_VAL_S 0
995 1018
996#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c) 1019#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
997#define AR_GPIO_OE_OUT_DRV 0x3 1020#define AR_GPIO_OE_OUT_DRV 0x3
@@ -1000,6 +1023,21 @@ enum {
1000#define AR_GPIO_OE_OUT_DRV_HI 0x2 1023#define AR_GPIO_OE_OUT_DRV_HI 0x2
1001#define AR_GPIO_OE_OUT_DRV_ALL 0x3 1024#define AR_GPIO_OE_OUT_DRV_ALL 0x3
1002 1025
1026#define AR7010_GPIO_OE 0x52000
1027#define AR7010_GPIO_OE_MASK 0x1
1028#define AR7010_GPIO_OE_AS_OUTPUT 0x0
1029#define AR7010_GPIO_OE_AS_INPUT 0x1
1030#define AR7010_GPIO_IN 0x52004
1031#define AR7010_GPIO_OUT 0x52008
1032#define AR7010_GPIO_SET 0x5200C
1033#define AR7010_GPIO_CLEAR 0x52010
1034#define AR7010_GPIO_INT 0x52014
1035#define AR7010_GPIO_INT_TYPE 0x52018
1036#define AR7010_GPIO_INT_POLARITY 0x5201C
1037#define AR7010_GPIO_PENDING 0x52020
1038#define AR7010_GPIO_INT_MASK 0x52024
1039#define AR7010_GPIO_FUNCTION 0x52028
1040
1003#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050) 1041#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
1004#define AR_GPIO_INTR_POL_VAL 0x0001FFFF 1042#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
1005#define AR_GPIO_INTR_POL_VAL_S 0 1043#define AR_GPIO_INTR_POL_VAL_S 0
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 105ad40968f6..fd20241f57d8 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -219,7 +219,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
219 info->control.rates[1].idx = -1; 219 info->control.rates[1].idx = -1;
220 220
221 memset(&txctl, 0, sizeof(struct ath_tx_control)); 221 memset(&txctl, 0, sizeof(struct ath_tx_control));
222 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]]; 222 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
223 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE; 223 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
224 224
225 if (ath_tx_start(aphy->hw, skb, &txctl) != 0) 225 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
@@ -695,16 +695,18 @@ void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
695 idle ? "idle" : "not-idle"); 695 idle ? "idle" : "not-idle");
696} 696}
697/* Only bother starting a queue on an active virtual wiphy */ 697/* Only bother starting a queue on an active virtual wiphy */
698void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) 698bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
699{ 699{
700 struct ieee80211_hw *hw = sc->pri_wiphy->hw; 700 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
701 unsigned int i; 701 unsigned int i;
702 bool txq_started = false;
702 703
703 spin_lock_bh(&sc->wiphy_lock); 704 spin_lock_bh(&sc->wiphy_lock);
704 705
705 /* Start the primary wiphy */ 706 /* Start the primary wiphy */
706 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) { 707 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
707 ieee80211_wake_queue(hw, skb_queue); 708 ieee80211_wake_queue(hw, skb_queue);
709 txq_started = true;
708 goto unlock; 710 goto unlock;
709 } 711 }
710 712
@@ -718,11 +720,13 @@ void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
718 720
719 hw = aphy->hw; 721 hw = aphy->hw;
720 ieee80211_wake_queue(hw, skb_queue); 722 ieee80211_wake_queue(hw, skb_queue);
723 txq_started = true;
721 break; 724 break;
722 } 725 }
723 726
724unlock: 727unlock:
725 spin_unlock_bh(&sc->wiphy_lock); 728 spin_unlock_bh(&sc->wiphy_lock);
729 return txq_started;
726} 730}
727 731
728/* Go ahead and propagate information to all virtual wiphys, it won't hurt */ 732/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index e23172c9caaf..6260faa658a2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -279,9 +279,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
279 if (wmi->drv_priv->op_flags & OP_UNPLUGGED) 279 if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
280 return 0; 280 return 0;
281 281
282 if (!wmi)
283 return -EINVAL;
284
285 skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC); 282 skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
286 if (!skb) 283 if (!skb)
287 return -ENOMEM; 284 return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 859aa4ab0769..501b72821b4d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -328,6 +328,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
328 u32 ba[WME_BA_BMP_SIZE >> 5]; 328 u32 ba[WME_BA_BMP_SIZE >> 5];
329 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 329 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
330 bool rc_update = true; 330 bool rc_update = true;
331 struct ieee80211_tx_rate rates[4];
331 332
332 skb = bf->bf_mpdu; 333 skb = bf->bf_mpdu;
333 hdr = (struct ieee80211_hdr *)skb->data; 334 hdr = (struct ieee80211_hdr *)skb->data;
@@ -335,18 +336,44 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
335 tx_info = IEEE80211_SKB_CB(skb); 336 tx_info = IEEE80211_SKB_CB(skb);
336 hw = bf->aphy->hw; 337 hw = bf->aphy->hw;
337 338
339 memcpy(rates, tx_info->control.rates, sizeof(rates));
340
338 rcu_read_lock(); 341 rcu_read_lock();
339 342
340 /* XXX: use ieee80211_find_sta! */ 343 /* XXX: use ieee80211_find_sta! */
341 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1); 344 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
342 if (!sta) { 345 if (!sta) {
343 rcu_read_unlock(); 346 rcu_read_unlock();
347
348 INIT_LIST_HEAD(&bf_head);
349 while (bf) {
350 bf_next = bf->bf_next;
351
352 bf->bf_state.bf_type |= BUF_XRETRY;
353 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
354 !bf->bf_stale || bf_next != NULL)
355 list_move_tail(&bf->list, &bf_head);
356
357 ath_tx_rc_status(bf, ts, 0, 0, false);
358 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
359 0, 0);
360
361 bf = bf_next;
362 }
344 return; 363 return;
345 } 364 }
346 365
347 an = (struct ath_node *)sta->drv_priv; 366 an = (struct ath_node *)sta->drv_priv;
348 tid = ATH_AN_2_TID(an, bf->bf_tidno); 367 tid = ATH_AN_2_TID(an, bf->bf_tidno);
349 368
369 /*
370 * The hardware occasionally sends a tx status for the wrong TID.
371 * In this case, the BA status cannot be considered valid and all
372 * subframes need to be retransmitted
373 */
374 if (bf->bf_tidno != ts->tid)
375 txok = false;
376
350 isaggr = bf_isaggr(bf); 377 isaggr = bf_isaggr(bf);
351 memset(ba, 0, WME_BA_BMP_SIZE >> 3); 378 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
352 379
@@ -375,6 +402,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
375 txfail = txpending = 0; 402 txfail = txpending = 0;
376 bf_next = bf->bf_next; 403 bf_next = bf->bf_next;
377 404
405 skb = bf->bf_mpdu;
406 tx_info = IEEE80211_SKB_CB(skb);
407
378 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { 408 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
379 /* transmit completion, subframe is 409 /* transmit completion, subframe is
380 * acked by block ack */ 410 * acked by block ack */
@@ -428,6 +458,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
428 spin_unlock_bh(&txq->axq_lock); 458 spin_unlock_bh(&txq->axq_lock);
429 459
430 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 460 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
461 memcpy(tx_info->control.rates, rates, sizeof(rates));
431 ath_tx_rc_status(bf, ts, nbad, txok, true); 462 ath_tx_rc_status(bf, ts, nbad, txok, true);
432 rc_update = false; 463 rc_update = false;
433 } else { 464 } else {
@@ -487,6 +518,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
487 bf = bf_next; 518 bf = bf_next;
488 } 519 }
489 520
521 /* prepend un-acked frames to the beginning of the pending frame queue */
522 if (!list_empty(&bf_pending)) {
523 spin_lock_bh(&txq->axq_lock);
524 list_splice(&bf_pending, &tid->buf_q);
525 ath_tx_queue_tid(txq, tid);
526 spin_unlock_bh(&txq->axq_lock);
527 }
528
490 if (tid->state & AGGR_CLEANUP) { 529 if (tid->state & AGGR_CLEANUP) {
491 if (tid->baw_head == tid->baw_tail) { 530 if (tid->baw_head == tid->baw_tail) {
492 tid->state &= ~AGGR_ADDBA_COMPLETE; 531 tid->state &= ~AGGR_ADDBA_COMPLETE;
@@ -499,14 +538,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
499 return; 538 return;
500 } 539 }
501 540
502 /* prepend un-acked frames to the beginning of the pending frame queue */
503 if (!list_empty(&bf_pending)) {
504 spin_lock_bh(&txq->axq_lock);
505 list_splice(&bf_pending, &tid->buf_q);
506 ath_tx_queue_tid(txq, tid);
507 spin_unlock_bh(&txq->axq_lock);
508 }
509
510 rcu_read_unlock(); 541 rcu_read_unlock();
511 542
512 if (needreset) 543 if (needreset)
@@ -941,6 +972,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
941 if (!ATH_TXQ_SETUP(sc, qnum)) { 972 if (!ATH_TXQ_SETUP(sc, qnum)) {
942 struct ath_txq *txq = &sc->tx.txq[qnum]; 973 struct ath_txq *txq = &sc->tx.txq[qnum];
943 974
975 txq->axq_class = subtype;
944 txq->axq_qnum = qnum; 976 txq->axq_qnum = qnum;
945 txq->axq_link = NULL; 977 txq->axq_link = NULL;
946 INIT_LIST_HEAD(&txq->axq_q); 978 INIT_LIST_HEAD(&txq->axq_q);
@@ -958,58 +990,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
958 return &sc->tx.txq[qnum]; 990 return &sc->tx.txq[qnum];
959} 991}
960 992
961int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
962{
963 int qnum;
964
965 switch (qtype) {
966 case ATH9K_TX_QUEUE_DATA:
967 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
968 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
969 "HAL AC %u out of range, max %zu!\n",
970 haltype, ARRAY_SIZE(sc->tx.hwq_map));
971 return -1;
972 }
973 qnum = sc->tx.hwq_map[haltype];
974 break;
975 case ATH9K_TX_QUEUE_BEACON:
976 qnum = sc->beacon.beaconq;
977 break;
978 case ATH9K_TX_QUEUE_CAB:
979 qnum = sc->beacon.cabq->axq_qnum;
980 break;
981 default:
982 qnum = -1;
983 }
984 return qnum;
985}
986
987struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
988{
989 struct ath_txq *txq = NULL;
990 u16 skb_queue = skb_get_queue_mapping(skb);
991 int qnum;
992
993 qnum = ath_get_hal_qnum(skb_queue, sc);
994 txq = &sc->tx.txq[qnum];
995
996 spin_lock_bh(&txq->axq_lock);
997
998 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
999 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
1000 "TX queue: %d is full, depth: %d\n",
1001 qnum, txq->axq_depth);
1002 ath_mac80211_stop_queue(sc, skb_queue);
1003 txq->stopped = 1;
1004 spin_unlock_bh(&txq->axq_lock);
1005 return NULL;
1006 }
1007
1008 spin_unlock_bh(&txq->axq_lock);
1009
1010 return txq;
1011}
1012
1013int ath_txq_update(struct ath_softc *sc, int qnum, 993int ath_txq_update(struct ath_softc *sc, int qnum,
1014 struct ath9k_tx_queue_info *qinfo) 994 struct ath9k_tx_queue_info *qinfo)
1015{ 995{
@@ -1688,12 +1668,15 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1688 bf->bf_frmlen -= padsize; 1668 bf->bf_frmlen -= padsize;
1689 } 1669 }
1690 1670
1691 if (conf_is_ht(&hw->conf)) { 1671 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
1692 bf->bf_state.bf_type |= BUF_HT; 1672 bf->bf_state.bf_type |= BUF_HT;
1693 if (tx_info->flags & IEEE80211_TX_CTL_LDPC) 1673 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1694 use_ldpc = true; 1674 use_ldpc = true;
1695 } 1675 }
1696 1676
1677 bf->bf_state.bfs_paprd = txctl->paprd;
1678 if (txctl->paprd)
1679 bf->bf_state.bfs_paprd_timestamp = jiffies;
1697 bf->bf_flags = setup_tx_flags(skb, use_ldpc); 1680 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1698 1681
1699 bf->bf_keytype = get_hw_crypto_keytype(skb); 1682 bf->bf_keytype = get_hw_crypto_keytype(skb);
@@ -1768,6 +1751,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1768 bf->bf_buf_addr, 1751 bf->bf_buf_addr,
1769 txctl->txq->axq_qnum); 1752 txctl->txq->axq_qnum);
1770 1753
1754 if (bf->bf_state.bfs_paprd)
1755 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1756
1771 spin_lock_bh(&txctl->txq->axq_lock); 1757 spin_lock_bh(&txctl->txq->axq_lock);
1772 1758
1773 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && 1759 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
@@ -1809,8 +1795,9 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1809 struct ath_wiphy *aphy = hw->priv; 1795 struct ath_wiphy *aphy = hw->priv;
1810 struct ath_softc *sc = aphy->sc; 1796 struct ath_softc *sc = aphy->sc;
1811 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1797 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1798 struct ath_txq *txq = txctl->txq;
1812 struct ath_buf *bf; 1799 struct ath_buf *bf;
1813 int r; 1800 int q, r;
1814 1801
1815 bf = ath_tx_get_buffer(sc); 1802 bf = ath_tx_get_buffer(sc);
1816 if (!bf) { 1803 if (!bf) {
@@ -1820,8 +1807,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1820 1807
1821 r = ath_tx_setup_buffer(hw, bf, skb, txctl); 1808 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
1822 if (unlikely(r)) { 1809 if (unlikely(r)) {
1823 struct ath_txq *txq = txctl->txq;
1824
1825 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n"); 1810 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
1826 1811
1827 /* upon ath_tx_processq() this TX queue will be resumed, we 1812 /* upon ath_tx_processq() this TX queue will be resumed, we
@@ -1829,7 +1814,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1829 * we will at least have to run TX completionon one buffer 1814 * we will at least have to run TX completionon one buffer
1830 * on the queue */ 1815 * on the queue */
1831 spin_lock_bh(&txq->axq_lock); 1816 spin_lock_bh(&txq->axq_lock);
1832 if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) { 1817 if (!txq->stopped && txq->axq_depth > 1) {
1833 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb)); 1818 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1834 txq->stopped = 1; 1819 txq->stopped = 1;
1835 } 1820 }
@@ -1840,6 +1825,17 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1840 return r; 1825 return r;
1841 } 1826 }
1842 1827
1828 q = skb_get_queue_mapping(skb);
1829 if (q >= 4)
1830 q = 0;
1831
1832 spin_lock_bh(&txq->axq_lock);
1833 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1834 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1835 txq->stopped = 1;
1836 }
1837 spin_unlock_bh(&txq->axq_lock);
1838
1843 ath_tx_start_dma(sc, bf, txctl); 1839 ath_tx_start_dma(sc, bf, txctl);
1844 1840
1845 return 0; 1841 return 0;
@@ -1909,7 +1905,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1909 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1905 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1910 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1906 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1911 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 1907 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1912 int padpos, padsize; 1908 int q, padpos, padsize;
1913 1909
1914 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1910 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1915 1911
@@ -1948,8 +1944,16 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1948 1944
1949 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1945 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
1950 ath9k_tx_status(hw, skb); 1946 ath9k_tx_status(hw, skb);
1951 else 1947 else {
1948 q = skb_get_queue_mapping(skb);
1949 if (q >= 4)
1950 q = 0;
1951
1952 if (--sc->tx.pending_frames[q] < 0)
1953 sc->tx.pending_frames[q] = 0;
1954
1952 ieee80211_tx_status(hw, skb); 1955 ieee80211_tx_status(hw, skb);
1956 }
1953} 1957}
1954 1958
1955static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1959static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1971,8 +1975,18 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1971 } 1975 }
1972 1976
1973 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1977 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1974 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1978
1975 ath_debug_stat_tx(sc, txq, bf, ts); 1979 if (bf->bf_state.bfs_paprd) {
1980 if (time_after(jiffies,
1981 bf->bf_state.bfs_paprd_timestamp +
1982 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1983 dev_kfree_skb_any(skb);
1984 else
1985 complete(&sc->paprd_complete);
1986 } else {
1987 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1988 ath_debug_stat_tx(sc, txq, bf, ts);
1989 }
1976 1990
1977 /* 1991 /*
1978 * Return the list of ath_buf of this mpdu to free queue 1992 * Return the list of ath_buf of this mpdu to free queue
@@ -2050,21 +2064,21 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2050 tx_info->status.rates[i].idx = -1; 2064 tx_info->status.rates[i].idx = -1;
2051 } 2065 }
2052 2066
2053 tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1; 2067 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2054} 2068}
2055 2069
2056static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) 2070static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2057{ 2071{
2058 int qnum; 2072 int qnum;
2059 2073
2074 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2075 if (qnum == -1)
2076 return;
2077
2060 spin_lock_bh(&txq->axq_lock); 2078 spin_lock_bh(&txq->axq_lock);
2061 if (txq->stopped && 2079 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2062 sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) { 2080 if (ath_mac80211_start_queue(sc, qnum))
2063 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
2064 if (qnum != -1) {
2065 ath_mac80211_start_queue(sc, qnum);
2066 txq->stopped = 0; 2081 txq->stopped = 0;
2067 }
2068 } 2082 }
2069 spin_unlock_bh(&txq->axq_lock); 2083 spin_unlock_bh(&txq->axq_lock);
2070} 2084}
@@ -2161,7 +2175,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2161 * This frame is sent out as a single frame. 2175 * This frame is sent out as a single frame.
2162 * Use hardware retry status for this frame. 2176 * Use hardware retry status for this frame.
2163 */ 2177 */
2164 bf->bf_retries = ts.ts_longretry;
2165 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2178 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2166 bf->bf_state.bf_type |= BUF_XRETRY; 2179 bf->bf_state.bf_type |= BUF_XRETRY;
2167 ath_tx_rc_status(bf, &ts, 0, txok, true); 2180 ath_tx_rc_status(bf, &ts, 0, txok, true);
@@ -2279,8 +2292,18 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2279 2292
2280 txok = !(txs.ts_status & ATH9K_TXERR_MASK); 2293 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2281 2294
2295 /*
2296 * Make sure null func frame is acked before configuring
2297 * hw into ps mode.
2298 */
2299 if (bf->bf_isnullfunc && txok) {
2300 if ((sc->ps_flags & PS_ENABLED))
2301 ath9k_enable_ps(sc);
2302 else
2303 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2304 }
2305
2282 if (!bf_isampdu(bf)) { 2306 if (!bf_isampdu(bf)) {
2283 bf->bf_retries = txs.ts_longretry;
2284 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2307 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2285 bf->bf_state.bf_type |= BUF_XRETRY; 2308 bf->bf_state.bf_type |= BUF_XRETRY;
2286 ath_tx_rc_status(bf, &txs, 0, txok, true); 2309 ath_tx_rc_status(bf, &txs, 0, txok, true);
@@ -2424,62 +2447,44 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2424 for (acno = 0, ac = &an->ac[acno]; 2447 for (acno = 0, ac = &an->ac[acno];
2425 acno < WME_NUM_AC; acno++, ac++) { 2448 acno < WME_NUM_AC; acno++, ac++) {
2426 ac->sched = false; 2449 ac->sched = false;
2450 ac->qnum = sc->tx.hwq_map[acno];
2427 INIT_LIST_HEAD(&ac->tid_q); 2451 INIT_LIST_HEAD(&ac->tid_q);
2428
2429 switch (acno) {
2430 case WME_AC_BE:
2431 ac->qnum = ath_tx_get_qnum(sc,
2432 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2433 break;
2434 case WME_AC_BK:
2435 ac->qnum = ath_tx_get_qnum(sc,
2436 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2437 break;
2438 case WME_AC_VI:
2439 ac->qnum = ath_tx_get_qnum(sc,
2440 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2441 break;
2442 case WME_AC_VO:
2443 ac->qnum = ath_tx_get_qnum(sc,
2444 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2445 break;
2446 }
2447 } 2452 }
2448} 2453}
2449 2454
2450void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) 2455void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2451{ 2456{
2452 int i; 2457 struct ath_atx_ac *ac;
2453 struct ath_atx_ac *ac, *ac_tmp; 2458 struct ath_atx_tid *tid;
2454 struct ath_atx_tid *tid, *tid_tmp;
2455 struct ath_txq *txq; 2459 struct ath_txq *txq;
2460 int i, tidno;
2456 2461
2457 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2462 for (tidno = 0, tid = &an->tid[tidno];
2458 if (ATH_TXQ_SETUP(sc, i)) { 2463 tidno < WME_NUM_TID; tidno++, tid++) {
2459 txq = &sc->tx.txq[i]; 2464 i = tid->ac->qnum;
2460 2465
2461 spin_lock_bh(&txq->axq_lock); 2466 if (!ATH_TXQ_SETUP(sc, i))
2467 continue;
2462 2468
2463 list_for_each_entry_safe(ac, 2469 txq = &sc->tx.txq[i];
2464 ac_tmp, &txq->axq_acq, list) { 2470 ac = tid->ac;
2465 tid = list_first_entry(&ac->tid_q,
2466 struct ath_atx_tid, list);
2467 if (tid && tid->an != an)
2468 continue;
2469 list_del(&ac->list);
2470 ac->sched = false;
2471
2472 list_for_each_entry_safe(tid,
2473 tid_tmp, &ac->tid_q, list) {
2474 list_del(&tid->list);
2475 tid->sched = false;
2476 ath_tid_drain(sc, txq, tid);
2477 tid->state &= ~AGGR_ADDBA_COMPLETE;
2478 tid->state &= ~AGGR_CLEANUP;
2479 }
2480 }
2481 2471
2482 spin_unlock_bh(&txq->axq_lock); 2472 spin_lock_bh(&txq->axq_lock);
2473
2474 if (tid->sched) {
2475 list_del(&tid->list);
2476 tid->sched = false;
2483 } 2477 }
2478
2479 if (ac->sched) {
2480 list_del(&ac->list);
2481 tid->ac->sched = false;
2482 }
2483
2484 ath_tid_drain(sc, txq, tid);
2485 tid->state &= ~AGGR_ADDBA_COMPLETE;
2486 tid->state &= ~AGGR_CLEANUP;
2487
2488 spin_unlock_bh(&txq->axq_lock);
2484 } 2489 }
2485} 2490}
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 3a003e6803a5..8674a99356af 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -530,7 +530,7 @@ struct b43_fw_header {
530 /* Size of the data. For ucode and PCM this is in bytes. 530 /* Size of the data. For ucode and PCM this is in bytes.
531 * For IV this is number-of-ivs. */ 531 * For IV this is number-of-ivs. */
532 __be32 size; 532 __be32 size;
533} __attribute__((__packed__)); 533} __packed;
534 534
535/* Initial Value file format */ 535/* Initial Value file format */
536#define B43_IV_OFFSET_MASK 0x7FFF 536#define B43_IV_OFFSET_MASK 0x7FFF
@@ -540,8 +540,8 @@ struct b43_iv {
540 union { 540 union {
541 __be16 d16; 541 __be16 d16;
542 __be32 d32; 542 __be32 d32;
543 } data __attribute__((__packed__)); 543 } data __packed;
544} __attribute__((__packed__)); 544} __packed;
545 545
546 546
547/* Data structures for DMA transmission, per 80211 core. */ 547/* Data structures for DMA transmission, per 80211 core. */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index fa40fdfea719..10d0aaf754c5 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,11 +333,11 @@ static inline
333 dma_addr_t dmaaddr; 333 dma_addr_t dmaaddr;
334 334
335 if (tx) { 335 if (tx) {
336 dmaaddr = ssb_dma_map_single(ring->dev->dev, 336 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
337 buf, len, DMA_TO_DEVICE); 337 buf, len, DMA_TO_DEVICE);
338 } else { 338 } else {
339 dmaaddr = ssb_dma_map_single(ring->dev->dev, 339 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
340 buf, len, DMA_FROM_DEVICE); 340 buf, len, DMA_FROM_DEVICE);
341 } 341 }
342 342
343 return dmaaddr; 343 return dmaaddr;
@@ -348,11 +348,11 @@ static inline
348 dma_addr_t addr, size_t len, int tx) 348 dma_addr_t addr, size_t len, int tx)
349{ 349{
350 if (tx) { 350 if (tx) {
351 ssb_dma_unmap_single(ring->dev->dev, 351 dma_unmap_single(ring->dev->dev->dma_dev,
352 addr, len, DMA_TO_DEVICE); 352 addr, len, DMA_TO_DEVICE);
353 } else { 353 } else {
354 ssb_dma_unmap_single(ring->dev->dev, 354 dma_unmap_single(ring->dev->dev->dma_dev,
355 addr, len, DMA_FROM_DEVICE); 355 addr, len, DMA_FROM_DEVICE);
356 } 356 }
357} 357}
358 358
@@ -361,7 +361,7 @@ static inline
361 dma_addr_t addr, size_t len) 361 dma_addr_t addr, size_t len)
362{ 362{
363 B43_WARN_ON(ring->tx); 363 B43_WARN_ON(ring->tx);
364 ssb_dma_sync_single_for_cpu(ring->dev->dev, 364 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
365 addr, len, DMA_FROM_DEVICE); 365 addr, len, DMA_FROM_DEVICE);
366} 366}
367 367
@@ -370,8 +370,8 @@ static inline
370 dma_addr_t addr, size_t len) 370 dma_addr_t addr, size_t len)
371{ 371{
372 B43_WARN_ON(ring->tx); 372 B43_WARN_ON(ring->tx);
373 ssb_dma_sync_single_for_device(ring->dev->dev, 373 dma_sync_single_for_device(ring->dev->dev->dma_dev,
374 addr, len, DMA_FROM_DEVICE); 374 addr, len, DMA_FROM_DEVICE);
375} 375}
376 376
377static inline 377static inline
@@ -401,9 +401,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
401 */ 401 */
402 if (ring->type == B43_DMA_64BIT) 402 if (ring->type == B43_DMA_64BIT)
403 flags |= GFP_DMA; 403 flags |= GFP_DMA;
404 ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, 404 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
405 B43_DMA_RINGMEMSIZE, 405 B43_DMA_RINGMEMSIZE,
406 &(ring->dmabase), flags); 406 &(ring->dmabase), flags);
407 if (!ring->descbase) { 407 if (!ring->descbase) {
408 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); 408 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
409 return -ENOMEM; 409 return -ENOMEM;
@@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
420 if (ring->type == B43_DMA_64BIT) 420 if (ring->type == B43_DMA_64BIT)
421 flags |= GFP_DMA; 421 flags |= GFP_DMA;
422 422
423 ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE, 423 dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
424 ring->descbase, ring->dmabase, flags); 424 ring->descbase, ring->dmabase);
425} 425}
426 426
427/* Reset the RX DMA channel */ 427/* Reset the RX DMA channel */
@@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
528 dma_addr_t addr, 528 dma_addr_t addr,
529 size_t buffersize, bool dma_to_device) 529 size_t buffersize, bool dma_to_device)
530{ 530{
531 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) 531 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
532 return 1; 532 return 1;
533 533
534 switch (ring->type) { 534 switch (ring->type) {
@@ -874,10 +874,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
874 goto err_kfree_meta; 874 goto err_kfree_meta;
875 875
876 /* test for ability to dma to txhdr_cache */ 876 /* test for ability to dma to txhdr_cache */
877 dma_test = ssb_dma_map_single(dev->dev, 877 dma_test = dma_map_single(dev->dev->dma_dev,
878 ring->txhdr_cache, 878 ring->txhdr_cache,
879 b43_txhdr_size(dev), 879 b43_txhdr_size(dev),
880 DMA_TO_DEVICE); 880 DMA_TO_DEVICE);
881 881
882 if (b43_dma_mapping_error(ring, dma_test, 882 if (b43_dma_mapping_error(ring, dma_test,
883 b43_txhdr_size(dev), 1)) { 883 b43_txhdr_size(dev), 1)) {
@@ -889,10 +889,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
889 if (!ring->txhdr_cache) 889 if (!ring->txhdr_cache)
890 goto err_kfree_meta; 890 goto err_kfree_meta;
891 891
892 dma_test = ssb_dma_map_single(dev->dev, 892 dma_test = dma_map_single(dev->dev->dma_dev,
893 ring->txhdr_cache, 893 ring->txhdr_cache,
894 b43_txhdr_size(dev), 894 b43_txhdr_size(dev),
895 DMA_TO_DEVICE); 895 DMA_TO_DEVICE);
896 896
897 if (b43_dma_mapping_error(ring, dma_test, 897 if (b43_dma_mapping_error(ring, dma_test,
898 b43_txhdr_size(dev), 1)) { 898 b43_txhdr_size(dev), 1)) {
@@ -903,9 +903,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
903 } 903 }
904 } 904 }
905 905
906 ssb_dma_unmap_single(dev->dev, 906 dma_unmap_single(dev->dev->dma_dev,
907 dma_test, b43_txhdr_size(dev), 907 dma_test, b43_txhdr_size(dev),
908 DMA_TO_DEVICE); 908 DMA_TO_DEVICE);
909 } 909 }
910 910
911 err = alloc_ringmemory(ring); 911 err = alloc_ringmemory(ring);
@@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
1018 /* Try to set the DMA mask. If it fails, try falling back to a 1018 /* Try to set the DMA mask. If it fails, try falling back to a
1019 * lower mask, as we can always also support a lower one. */ 1019 * lower mask, as we can always also support a lower one. */
1020 while (1) { 1020 while (1) {
1021 err = ssb_dma_set_mask(dev->dev, mask); 1021 err = dma_set_mask(dev->dev->dma_dev, mask);
1022 if (!err) 1022 if (!err) {
1023 break; 1023 err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
1024 if (!err)
1025 break;
1026 }
1024 if (mask == DMA_BIT_MASK(64)) { 1027 if (mask == DMA_BIT_MASK(64)) {
1025 mask = DMA_BIT_MASK(32); 1028 mask = DMA_BIT_MASK(32);
1026 fallback = 1; 1029 fallback = 1;
@@ -1221,14 +1224,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1221 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1224 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1222 /* create a bounce buffer in zone_dma on mapping failure. */ 1225 /* create a bounce buffer in zone_dma on mapping failure. */
1223 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1226 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1224 priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA); 1227 priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
1228 GFP_ATOMIC | GFP_DMA);
1225 if (!priv_info->bouncebuffer) { 1229 if (!priv_info->bouncebuffer) {
1226 ring->current_slot = old_top_slot; 1230 ring->current_slot = old_top_slot;
1227 ring->used_slots = old_used_slots; 1231 ring->used_slots = old_used_slots;
1228 err = -ENOMEM; 1232 err = -ENOMEM;
1229 goto out_unmap_hdr; 1233 goto out_unmap_hdr;
1230 } 1234 }
1231 memcpy(priv_info->bouncebuffer, skb->data, skb->len);
1232 1235
1233 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1); 1236 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
1234 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1237 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index dc91944d6022..a01c2100f166 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -67,7 +67,7 @@
67struct b43_dmadesc32 { 67struct b43_dmadesc32 {
68 __le32 control; 68 __le32 control;
69 __le32 address; 69 __le32 address;
70} __attribute__ ((__packed__)); 70} __packed;
71#define B43_DMA32_DCTL_BYTECNT 0x00001FFF 71#define B43_DMA32_DCTL_BYTECNT 0x00001FFF
72#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 72#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000
73#define B43_DMA32_DCTL_ADDREXT_SHIFT 16 73#define B43_DMA32_DCTL_ADDREXT_SHIFT 16
@@ -140,7 +140,7 @@ struct b43_dmadesc64 {
140 __le32 control1; 140 __le32 control1;
141 __le32 address_low; 141 __le32 address_low;
142 __le32 address_high; 142 __le32 address_high;
143} __attribute__ ((__packed__)); 143} __packed;
144#define B43_DMA64_DCTL0_DTABLEEND 0x10000000 144#define B43_DMA64_DCTL0_DTABLEEND 0x10000000
145#define B43_DMA64_DCTL0_IRQ 0x20000000 145#define B43_DMA64_DCTL0_IRQ 0x20000000
146#define B43_DMA64_DCTL0_FRAMEEND 0x40000000 146#define B43_DMA64_DCTL0_FRAMEEND 0x40000000
@@ -153,8 +153,8 @@ struct b43_dmadesc_generic {
153 union { 153 union {
154 struct b43_dmadesc32 dma32; 154 struct b43_dmadesc32 dma32;
155 struct b43_dmadesc64 dma64; 155 struct b43_dmadesc64 dma64;
156 } __attribute__ ((__packed__)); 156 } __packed;
157} __attribute__ ((__packed__)); 157} __packed;
158 158
159/* Misc DMA constants */ 159/* Misc DMA constants */
160#define B43_DMA_RINGMEMSIZE PAGE_SIZE 160#define B43_DMA_RINGMEMSIZE PAGE_SIZE
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7965b70efbab..20631ae2ddd7 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -108,7 +108,7 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
108module_param_named(verbose, b43_modparam_verbose, int, 0644); 108module_param_named(verbose, b43_modparam_verbose, int, 0644);
109MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); 109MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
110 110
111int b43_modparam_pio = B43_PIO_DEFAULT; 111static int b43_modparam_pio = B43_PIO_DEFAULT;
112module_param_named(pio, b43_modparam_pio, int, 0644); 112module_param_named(pio, b43_modparam_pio, int, 0644);
113MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO"); 113MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
114 114
@@ -1804,7 +1804,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1804 dma_reason[2], dma_reason[3], 1804 dma_reason[2], dma_reason[3],
1805 dma_reason[4], dma_reason[5]); 1805 dma_reason[4], dma_reason[5]);
1806 b43err(dev->wl, "This device does not support DMA " 1806 b43err(dev->wl, "This device does not support DMA "
1807 "on your system. Please use PIO instead.\n"); 1807 "on your system. It will now be switched to PIO.\n");
1808 /* Fall back to PIO transfers if we get fatal DMA errors! */ 1808 /* Fall back to PIO transfers if we get fatal DMA errors! */
1809 dev->use_pio = 1; 1809 dev->use_pio = 1;
1810 b43_controller_restart(dev, "DMA error"); 1810 b43_controller_restart(dev, "DMA error");
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 29bf34ced865..0dc33b65e86b 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -972,7 +972,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
972 b43_phy_maskset(dev, 0x04A2, 0xFFF0, 0x000B); 972 b43_phy_maskset(dev, 0x04A2, 0xFFF0, 0x000B);
973 973
974 if (phy->rev >= 3) { 974 if (phy->rev >= 3) {
975 b43_phy_mask(dev, 0x048A, (u16)~0x8000); 975 b43_phy_mask(dev, 0x048A, 0x7FFF);
976 b43_phy_maskset(dev, 0x0415, 0x8000, 0x36D8); 976 b43_phy_maskset(dev, 0x0415, 0x8000, 0x36D8);
977 b43_phy_maskset(dev, 0x0416, 0x8000, 0x36D8); 977 b43_phy_maskset(dev, 0x0416, 0x8000, 0x36D8);
978 b43_phy_maskset(dev, 0x0417, 0xFE00, 0x016D); 978 b43_phy_maskset(dev, 0x0417, 0xFE00, 0x016D);
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index c6afe9d94590..fd50eb116243 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -1145,7 +1145,7 @@ static void lpphy_write_tx_pctl_mode_to_hardware(struct b43_wldev *dev)
1145 B43_WARN_ON(1); 1145 B43_WARN_ON(1);
1146 } 1146 }
1147 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, 1147 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
1148 (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, ctl); 1148 ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, ctl);
1149} 1149}
1150 1150
1151static void lpphy_set_tx_power_control(struct b43_wldev *dev, 1151static void lpphy_set_tx_power_control(struct b43_wldev *dev,
@@ -1522,11 +1522,11 @@ static void lpphy_tx_pctl_init_hw(struct b43_wldev *dev)
1522 b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xFF); 1522 b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xFF);
1523 b43_phy_write(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xA); 1523 b43_phy_write(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xA);
1524 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, 1524 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
1525 (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, 1525 ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF,
1526 B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF); 1526 B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF);
1527 b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xF8FF); 1527 b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xF8FF);
1528 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, 1528 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
1529 (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, 1529 ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF,
1530 B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW); 1530 B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW);
1531 1531
1532 if (dev->phy.rev < 2) { 1532 if (dev->phy.rev < 2) {
@@ -2698,7 +2698,7 @@ static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
2698 return B43_TXPWR_RES_DONE; 2698 return B43_TXPWR_RES_DONE;
2699} 2699}
2700 2700
2701void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on) 2701static void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
2702{ 2702{
2703 if (on) { 2703 if (on) {
2704 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8); 2704 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 3d6b33775964..5a725703770c 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -509,7 +509,8 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
509 b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); 509 b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
510 b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); 510 b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
511 511
512 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS, 512 b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
513 ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
513 ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); 514 ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
514 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, 515 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
515 ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); 516 ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
@@ -762,7 +763,7 @@ static void b43_nphy_stop_playback(struct b43_wldev *dev)
762 if (tmp & 0x1) 763 if (tmp & 0x1)
763 b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP); 764 b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
764 else if (tmp & 0x2) 765 else if (tmp & 0x2)
765 b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000); 766 b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
766 767
767 b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004); 768 b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
768 769
@@ -1009,7 +1010,7 @@ static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
1009 b43_nphy_set_rf_sequence(dev, 5, 1010 b43_nphy_set_rf_sequence(dev, 5,
1010 rfseq_events, rfseq_delays, 3); 1011 rfseq_events, rfseq_delays, 3);
1011 b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, 1012 b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
1012 (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV, 1013 ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
1013 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); 1014 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
1014 1015
1015 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 1016 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -1116,7 +1117,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
1116 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); 1117 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
1117 1118
1118 b43_phy_mask(dev, B43_NPHY_PIL_DW1, 1119 b43_phy_mask(dev, B43_NPHY_PIL_DW1,
1119 (u16)~B43_NPHY_PIL_DW_64QAM); 1120 ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
1120 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); 1121 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
1121 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); 1122 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
1122 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); 1123 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
@@ -2455,7 +2456,8 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
2455 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600); 2456 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);
2456 2457
2457 regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG); 2458 regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
2458 b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX); 2459 b43_phy_mask(dev, B43_NPHY_BBCFG,
2460 ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
2459 2461
2460 tmp = b43_ntab_read(dev, B43_NTAB16(8, 3)); 2462 tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
2461 regs[5] = tmp; 2463 regs[5] = tmp;
@@ -2930,7 +2932,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
2930 tmp[5] = b43_phy_read(dev, rfctl[1]); 2932 tmp[5] = b43_phy_read(dev, rfctl[1]);
2931 2933
2932 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, 2934 b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
2933 (u16)~B43_NPHY_RFSEQCA_RXDIS, 2935 ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
2934 ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); 2936 ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
2935 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, 2937 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
2936 (1 - i)); 2938 (1 - i));
@@ -3291,7 +3293,7 @@ static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
3291 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); 3293 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
3292 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); 3294 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
3293 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); 3295 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
3294 b43_phy_mask(dev, B43_PHY_B_BBCFG, (u16)~0xC000); 3296 b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
3295 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); 3297 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
3296 } 3298 }
3297 3299
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 4e56b7bbcebd..45933cf8e8c2 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -182,6 +182,7 @@ static void b43_sdio_remove(struct sdio_func *func)
182 182
183static const struct sdio_device_id b43_sdio_ids[] = { 183static const struct sdio_device_id b43_sdio_ids[] = {
184 { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */ 184 { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
185 { SDIO_DEVICE(0x0092, 0x0004) }, /* C-guys, Inc. EW-CG1102GC */
185 { }, 186 { },
186}; 187};
187 188
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 97c79161c208..9a335da65b42 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -382,7 +382,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
382 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 3, 25); 382 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 3, 25);
383 } 383 }
384 384
385 b43_phy_maskset(dev, B43_PHY_CCKSHIFTBITS_WA, (u16)~0xFF00, 0x5700); 385 b43_phy_maskset(dev, B43_PHY_CCKSHIFTBITS_WA, 0x00FF, 0x5700);
386 b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x007F, 0x000F); 386 b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x007F, 0x000F);
387 b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x3F80, 0x2B80); 387 b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x3F80, 0x2B80);
388 b43_phy_maskset(dev, B43_PHY_ANTWRSETT, 0xF0FF, 0x0300); 388 b43_phy_maskset(dev, B43_PHY_ANTWRSETT, 0xF0FF, 0x0300);
@@ -400,9 +400,9 @@ static void b43_wa_altagc(struct b43_wldev *dev)
400 b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x00FF, 0x0020); 400 b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x00FF, 0x0020);
401 b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x3F00, 0x0200); 401 b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x3F00, 0x0200);
402 b43_phy_maskset(dev, B43_PHY_OFDM(0x82), ~0x00FF, 0x002E); 402 b43_phy_maskset(dev, B43_PHY_OFDM(0x82), ~0x00FF, 0x002E);
403 b43_phy_maskset(dev, B43_PHY_OFDM(0x96), (u16)~0xFF00, 0x1A00); 403 b43_phy_maskset(dev, B43_PHY_OFDM(0x96), 0x00FF, 0x1A00);
404 b43_phy_maskset(dev, B43_PHY_OFDM(0x81), ~0x00FF, 0x0028); 404 b43_phy_maskset(dev, B43_PHY_OFDM(0x81), ~0x00FF, 0x0028);
405 b43_phy_maskset(dev, B43_PHY_OFDM(0x81), (u16)~0xFF00, 0x2C00); 405 b43_phy_maskset(dev, B43_PHY_OFDM(0x81), 0x00FF, 0x2C00);
406 if (phy->rev == 1) { 406 if (phy->rev == 1) {
407 b43_phy_write(dev, B43_PHY_PEAK_COUNT, 0x092B); 407 b43_phy_write(dev, B43_PHY_PEAK_COUNT, 0x092B);
408 b43_phy_maskset(dev, B43_PHY_OFDM(0x1B), ~0x001E, 0x0002); 408 b43_phy_maskset(dev, B43_PHY_OFDM(0x1B), ~0x001E, 0x0002);
@@ -412,7 +412,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
412 b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, ~0x000F, 0x0004); 412 b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, ~0x000F, 0x0004);
413 if (phy->rev >= 6) { 413 if (phy->rev >= 6) {
414 b43_phy_write(dev, B43_PHY_OFDM(0x22), 0x287A); 414 b43_phy_write(dev, B43_PHY_OFDM(0x22), 0x287A);
415 b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, (u16)~0xF000, 0x3000); 415 b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, 0x0FFF, 0x3000);
416 } 416 }
417 } 417 }
418 b43_phy_maskset(dev, B43_PHY_DIVSRCHIDX, 0x8080, 0x7874); 418 b43_phy_maskset(dev, B43_PHY_DIVSRCHIDX, 0x8080, 0x7874);
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d23ff9fe0c9e..d4cf9b390af3 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -10,8 +10,8 @@
10 union { \ 10 union { \
11 __le32 data; \ 11 __le32 data; \
12 __u8 raw[size]; \ 12 __u8 raw[size]; \
13 } __attribute__((__packed__)); \ 13 } __packed; \
14 } __attribute__((__packed__)) 14 } __packed
15 15
16/* struct b43_plcp_hdr4 */ 16/* struct b43_plcp_hdr4 */
17_b43_declare_plcp_hdr(4); 17_b43_declare_plcp_hdr(4);
@@ -57,7 +57,7 @@ struct b43_txhdr {
57 __u8 rts_frame[16]; /* The RTS frame (if used) */ 57 __u8 rts_frame[16]; /* The RTS frame (if used) */
58 PAD_BYTES(2); 58 PAD_BYTES(2);
59 struct b43_plcp_hdr6 plcp; /* Main PLCP header */ 59 struct b43_plcp_hdr6 plcp; /* Main PLCP header */
60 } new_format __attribute__ ((__packed__)); 60 } new_format __packed;
61 61
62 /* The old r351 format. */ 62 /* The old r351 format. */
63 struct { 63 struct {
@@ -68,10 +68,10 @@ struct b43_txhdr {
68 __u8 rts_frame[16]; /* The RTS frame (if used) */ 68 __u8 rts_frame[16]; /* The RTS frame (if used) */
69 PAD_BYTES(2); 69 PAD_BYTES(2);
70 struct b43_plcp_hdr6 plcp; /* Main PLCP header */ 70 struct b43_plcp_hdr6 plcp; /* Main PLCP header */
71 } old_format __attribute__ ((__packed__)); 71 } old_format __packed;
72 72
73 } __attribute__ ((__packed__)); 73 } __packed;
74} __attribute__ ((__packed__)); 74} __packed;
75 75
76/* MAC TX control */ 76/* MAC TX control */
77#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ 77#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */
@@ -218,20 +218,20 @@ struct b43_rxhdr_fw4 {
218 struct { 218 struct {
219 __u8 jssi; /* PHY RX Status 1: JSSI */ 219 __u8 jssi; /* PHY RX Status 1: JSSI */
220 __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ 220 __u8 sig_qual; /* PHY RX Status 1: Signal Quality */
221 } __attribute__ ((__packed__)); 221 } __packed;
222 222
223 /* RSSI for N-PHYs */ 223 /* RSSI for N-PHYs */
224 struct { 224 struct {
225 __s8 power0; /* PHY RX Status 1: Power 0 */ 225 __s8 power0; /* PHY RX Status 1: Power 0 */
226 __s8 power1; /* PHY RX Status 1: Power 1 */ 226 __s8 power1; /* PHY RX Status 1: Power 1 */
227 } __attribute__ ((__packed__)); 227 } __packed;
228 } __attribute__ ((__packed__)); 228 } __packed;
229 __le16 phy_status2; /* PHY RX Status 2 */ 229 __le16 phy_status2; /* PHY RX Status 2 */
230 __le16 phy_status3; /* PHY RX Status 3 */ 230 __le16 phy_status3; /* PHY RX Status 3 */
231 __le32 mac_status; /* MAC RX status */ 231 __le32 mac_status; /* MAC RX status */
232 __le16 mac_time; 232 __le16 mac_time;
233 __le16 channel; 233 __le16 channel;
234} __attribute__ ((__packed__)); 234} __packed;
235 235
236/* PHY RX Status 0 */ 236/* PHY RX Status 0 */
237#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ 237#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 89fe2f972c72..c81b2f53b0c5 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -372,7 +372,7 @@ struct b43legacy_fw_header {
372 /* Size of the data. For ucode and PCM this is in bytes. 372 /* Size of the data. For ucode and PCM this is in bytes.
373 * For IV this is number-of-ivs. */ 373 * For IV this is number-of-ivs. */
374 __be32 size; 374 __be32 size;
375} __attribute__((__packed__)); 375} __packed;
376 376
377/* Initial Value file format */ 377/* Initial Value file format */
378#define B43legacy_IV_OFFSET_MASK 0x7FFF 378#define B43legacy_IV_OFFSET_MASK 0x7FFF
@@ -382,8 +382,8 @@ struct b43legacy_iv {
382 union { 382 union {
383 __be16 d16; 383 __be16 d16;
384 __be32 d32; 384 __be32 d32;
385 } data __attribute__((__packed__)); 385 } data __packed;
386} __attribute__((__packed__)); 386} __packed;
387 387
388#define B43legacy_PHYMODE(phytype) (1 << (phytype)) 388#define B43legacy_PHYMODE(phytype) (1 << (phytype))
389#define B43legacy_PHYMODE_B B43legacy_PHYMODE \ 389#define B43legacy_PHYMODE_B B43legacy_PHYMODE \
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index e91520d0312e..e03e01d0bc35 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -394,11 +394,11 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
394 dma_addr_t dmaaddr; 394 dma_addr_t dmaaddr;
395 395
396 if (tx) 396 if (tx)
397 dmaaddr = ssb_dma_map_single(ring->dev->dev, 397 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
398 buf, len, 398 buf, len,
399 DMA_TO_DEVICE); 399 DMA_TO_DEVICE);
400 else 400 else
401 dmaaddr = ssb_dma_map_single(ring->dev->dev, 401 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
402 buf, len, 402 buf, len,
403 DMA_FROM_DEVICE); 403 DMA_FROM_DEVICE);
404 404
@@ -412,11 +412,11 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring,
412 int tx) 412 int tx)
413{ 413{
414 if (tx) 414 if (tx)
415 ssb_dma_unmap_single(ring->dev->dev, 415 dma_unmap_single(ring->dev->dev->dma_dev,
416 addr, len, 416 addr, len,
417 DMA_TO_DEVICE); 417 DMA_TO_DEVICE);
418 else 418 else
419 ssb_dma_unmap_single(ring->dev->dev, 419 dma_unmap_single(ring->dev->dev->dma_dev,
420 addr, len, 420 addr, len,
421 DMA_FROM_DEVICE); 421 DMA_FROM_DEVICE);
422} 422}
@@ -428,8 +428,8 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
428{ 428{
429 B43legacy_WARN_ON(ring->tx); 429 B43legacy_WARN_ON(ring->tx);
430 430
431 ssb_dma_sync_single_for_cpu(ring->dev->dev, 431 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
432 addr, len, DMA_FROM_DEVICE); 432 addr, len, DMA_FROM_DEVICE);
433} 433}
434 434
435static inline 435static inline
@@ -439,8 +439,8 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
439{ 439{
440 B43legacy_WARN_ON(ring->tx); 440 B43legacy_WARN_ON(ring->tx);
441 441
442 ssb_dma_sync_single_for_device(ring->dev->dev, 442 dma_sync_single_for_device(ring->dev->dev->dma_dev,
443 addr, len, DMA_FROM_DEVICE); 443 addr, len, DMA_FROM_DEVICE);
444} 444}
445 445
446static inline 446static inline
@@ -460,10 +460,10 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
460static int alloc_ringmemory(struct b43legacy_dmaring *ring) 460static int alloc_ringmemory(struct b43legacy_dmaring *ring)
461{ 461{
462 /* GFP flags must match the flags in free_ringmemory()! */ 462 /* GFP flags must match the flags in free_ringmemory()! */
463 ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, 463 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
464 B43legacy_DMA_RINGMEMSIZE, 464 B43legacy_DMA_RINGMEMSIZE,
465 &(ring->dmabase), 465 &(ring->dmabase),
466 GFP_KERNEL); 466 GFP_KERNEL);
467 if (!ring->descbase) { 467 if (!ring->descbase) {
468 b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" 468 b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
469 " failed\n"); 469 " failed\n");
@@ -476,8 +476,8 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
476 476
477static void free_ringmemory(struct b43legacy_dmaring *ring) 477static void free_ringmemory(struct b43legacy_dmaring *ring)
478{ 478{
479 ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE, 479 dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
480 ring->descbase, ring->dmabase, GFP_KERNEL); 480 ring->descbase, ring->dmabase);
481} 481}
482 482
483/* Reset the RX DMA channel */ 483/* Reset the RX DMA channel */
@@ -589,7 +589,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
589 size_t buffersize, 589 size_t buffersize,
590 bool dma_to_device) 590 bool dma_to_device)
591{ 591{
592 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) 592 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
593 return 1; 593 return 1;
594 594
595 switch (ring->type) { 595 switch (ring->type) {
@@ -906,7 +906,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
906 goto err_kfree_meta; 906 goto err_kfree_meta;
907 907
908 /* test for ability to dma to txhdr_cache */ 908 /* test for ability to dma to txhdr_cache */
909 dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache, 909 dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
910 sizeof(struct b43legacy_txhdr_fw3), 910 sizeof(struct b43legacy_txhdr_fw3),
911 DMA_TO_DEVICE); 911 DMA_TO_DEVICE);
912 912
@@ -920,7 +920,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
920 if (!ring->txhdr_cache) 920 if (!ring->txhdr_cache)
921 goto err_kfree_meta; 921 goto err_kfree_meta;
922 922
923 dma_test = ssb_dma_map_single(dev->dev, 923 dma_test = dma_map_single(dev->dev->dma_dev,
924 ring->txhdr_cache, 924 ring->txhdr_cache,
925 sizeof(struct b43legacy_txhdr_fw3), 925 sizeof(struct b43legacy_txhdr_fw3),
926 DMA_TO_DEVICE); 926 DMA_TO_DEVICE);
@@ -930,9 +930,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
930 goto err_kfree_txhdr_cache; 930 goto err_kfree_txhdr_cache;
931 } 931 }
932 932
933 ssb_dma_unmap_single(dev->dev, dma_test, 933 dma_unmap_single(dev->dev->dma_dev, dma_test,
934 sizeof(struct b43legacy_txhdr_fw3), 934 sizeof(struct b43legacy_txhdr_fw3),
935 DMA_TO_DEVICE); 935 DMA_TO_DEVICE);
936 } 936 }
937 937
938 ring->nr_slots = nr_slots; 938 ring->nr_slots = nr_slots;
@@ -1040,9 +1040,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
1040 /* Try to set the DMA mask. If it fails, try falling back to a 1040 /* Try to set the DMA mask. If it fails, try falling back to a
1041 * lower mask, as we can always also support a lower one. */ 1041 * lower mask, as we can always also support a lower one. */
1042 while (1) { 1042 while (1) {
1043 err = ssb_dma_set_mask(dev->dev, mask); 1043 err = dma_set_mask(dev->dev->dma_dev, mask);
1044 if (!err) 1044 if (!err) {
1045 break; 1045 err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
1046 if (!err)
1047 break;
1048 }
1046 if (mask == DMA_BIT_MASK(64)) { 1049 if (mask == DMA_BIT_MASK(64)) {
1047 mask = DMA_BIT_MASK(32); 1050 mask = DMA_BIT_MASK(32);
1048 fallback = 1; 1051 fallback = 1;
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index f9681041c2d8..f89c34226288 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -72,7 +72,7 @@
72struct b43legacy_dmadesc32 { 72struct b43legacy_dmadesc32 {
73 __le32 control; 73 __le32 control;
74 __le32 address; 74 __le32 address;
75} __attribute__((__packed__)); 75} __packed;
76#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF 76#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF
77#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 77#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000
78#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 78#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16
@@ -147,7 +147,7 @@ struct b43legacy_dmadesc64 {
147 __le32 control1; 147 __le32 control1;
148 __le32 address_low; 148 __le32 address_low;
149 __le32 address_high; 149 __le32 address_high;
150} __attribute__((__packed__)); 150} __packed;
151#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 151#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000
152#define B43legacy_DMA64_DCTL0_IRQ 0x20000000 152#define B43legacy_DMA64_DCTL0_IRQ 0x20000000
153#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 153#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000
@@ -162,8 +162,8 @@ struct b43legacy_dmadesc_generic {
162 union { 162 union {
163 struct b43legacy_dmadesc32 dma32; 163 struct b43legacy_dmadesc32 dma32;
164 struct b43legacy_dmadesc64 dma64; 164 struct b43legacy_dmadesc64 dma64;
165 } __attribute__((__packed__)); 165 } __packed;
166} __attribute__((__packed__)); 166} __packed;
167 167
168 168
169/* Misc DMA constants */ 169/* Misc DMA constants */
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index 91633087a20b..289db00a4a7b 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -9,8 +9,8 @@
9 union { \ 9 union { \
10 __le32 data; \ 10 __le32 data; \
11 __u8 raw[size]; \ 11 __u8 raw[size]; \
12 } __attribute__((__packed__)); \ 12 } __packed; \
13 } __attribute__((__packed__)) 13 } __packed
14 14
15/* struct b43legacy_plcp_hdr4 */ 15/* struct b43legacy_plcp_hdr4 */
16_b43legacy_declare_plcp_hdr(4); 16_b43legacy_declare_plcp_hdr(4);
@@ -39,7 +39,7 @@ struct b43legacy_txhdr_fw3 {
39 struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ 39 struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */
40 __u8 rts_frame[18]; /* The RTS frame (if used) */ 40 __u8 rts_frame[18]; /* The RTS frame (if used) */
41 struct b43legacy_plcp_hdr6 plcp; 41 struct b43legacy_plcp_hdr6 plcp;
42} __attribute__((__packed__)); 42} __packed;
43 43
44/* MAC TX control */ 44/* MAC TX control */
45#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ 45#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */
@@ -123,7 +123,7 @@ struct b43legacy_hwtxstatus {
123 __le16 seq; 123 __le16 seq;
124 u8 phy_stat; 124 u8 phy_stat;
125 PAD_BYTES(1); 125 PAD_BYTES(1);
126} __attribute__((__packed__)); 126} __packed;
127 127
128 128
129/* Receive header for v3 firmware. */ 129/* Receive header for v3 firmware. */
@@ -138,7 +138,7 @@ struct b43legacy_rxhdr_fw3 {
138 __le16 mac_status; /* MAC RX status */ 138 __le16 mac_status; /* MAC RX status */
139 __le16 mac_time; 139 __le16 mac_time;
140 __le16 channel; 140 __le16 channel;
141} __attribute__((__packed__)); 141} __packed;
142 142
143 143
144/* PHY RX Status 0 */ 144/* PHY RX Status 0 */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index 7f9d8d976aa8..ed98ce7c8f65 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -19,35 +19,35 @@ struct hostap_ieee80211_mgmt {
19 __le16 status_code; 19 __le16 status_code;
20 /* possibly followed by Challenge text */ 20 /* possibly followed by Challenge text */
21 u8 variable[0]; 21 u8 variable[0];
22 } __attribute__ ((packed)) auth; 22 } __packed auth;
23 struct { 23 struct {
24 __le16 reason_code; 24 __le16 reason_code;
25 } __attribute__ ((packed)) deauth; 25 } __packed deauth;
26 struct { 26 struct {
27 __le16 capab_info; 27 __le16 capab_info;
28 __le16 listen_interval; 28 __le16 listen_interval;
29 /* followed by SSID and Supported rates */ 29 /* followed by SSID and Supported rates */
30 u8 variable[0]; 30 u8 variable[0];
31 } __attribute__ ((packed)) assoc_req; 31 } __packed assoc_req;
32 struct { 32 struct {
33 __le16 capab_info; 33 __le16 capab_info;
34 __le16 status_code; 34 __le16 status_code;
35 __le16 aid; 35 __le16 aid;
36 /* followed by Supported rates */ 36 /* followed by Supported rates */
37 u8 variable[0]; 37 u8 variable[0];
38 } __attribute__ ((packed)) assoc_resp, reassoc_resp; 38 } __packed assoc_resp, reassoc_resp;
39 struct { 39 struct {
40 __le16 capab_info; 40 __le16 capab_info;
41 __le16 listen_interval; 41 __le16 listen_interval;
42 u8 current_ap[6]; 42 u8 current_ap[6];
43 /* followed by SSID and Supported rates */ 43 /* followed by SSID and Supported rates */
44 u8 variable[0]; 44 u8 variable[0];
45 } __attribute__ ((packed)) reassoc_req; 45 } __packed reassoc_req;
46 struct { 46 struct {
47 __le16 reason_code; 47 __le16 reason_code;
48 } __attribute__ ((packed)) disassoc; 48 } __packed disassoc;
49 struct { 49 struct {
50 } __attribute__ ((packed)) probe_req; 50 } __packed probe_req;
51 struct { 51 struct {
52 u8 timestamp[8]; 52 u8 timestamp[8];
53 __le16 beacon_int; 53 __le16 beacon_int;
@@ -55,9 +55,9 @@ struct hostap_ieee80211_mgmt {
55 /* followed by some of SSID, Supported rates, 55 /* followed by some of SSID, Supported rates,
56 * FH Params, DS Params, CF Params, IBSS Params, TIM */ 56 * FH Params, DS Params, CF Params, IBSS Params, TIM */
57 u8 variable[0]; 57 u8 variable[0];
58 } __attribute__ ((packed)) beacon, probe_resp; 58 } __packed beacon, probe_resp;
59 } u; 59 } u;
60} __attribute__ ((packed)); 60} __packed;
61 61
62 62
63#define IEEE80211_MGMT_HDR_LEN 24 63#define IEEE80211_MGMT_HDR_LEN 24
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 231dbd77f5f5..dbb986946e1a 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -688,7 +688,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
688 struct ap_data *ap = data; 688 struct ap_data *ap = data;
689 struct net_device *dev = ap->local->dev; 689 struct net_device *dev = ap->local->dev;
690 struct ieee80211_hdr *hdr; 690 struct ieee80211_hdr *hdr;
691 u16 fc, status; 691 u16 status;
692 __le16 *pos; 692 __le16 *pos;
693 struct sta_info *sta = NULL; 693 struct sta_info *sta = NULL;
694 char *txt = NULL; 694 char *txt = NULL;
@@ -699,7 +699,6 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
699 } 699 }
700 700
701 hdr = (struct ieee80211_hdr *) skb->data; 701 hdr = (struct ieee80211_hdr *) skb->data;
702 fc = le16_to_cpu(hdr->frame_control);
703 if ((!ieee80211_is_assoc_resp(hdr->frame_control) && 702 if ((!ieee80211_is_assoc_resp(hdr->frame_control) &&
704 !ieee80211_is_reassoc_resp(hdr->frame_control)) || 703 !ieee80211_is_reassoc_resp(hdr->frame_control)) ||
705 skb->len < IEEE80211_MGMT_HDR_LEN + 4) { 704 skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
@@ -1225,7 +1224,7 @@ static void ap_crypt_init(struct ap_data *ap)
1225 1224
1226 1225
1227/* Generate challenge data for shared key authentication. IEEE 802.11 specifies 1226/* Generate challenge data for shared key authentication. IEEE 802.11 specifies
1228 * that WEP algorithm is used for generating challange. This should be unique, 1227 * that WEP algorithm is used for generating challenge. This should be unique,
1229 * but otherwise there is not really need for randomness etc. Initialize WEP 1228 * but otherwise there is not really need for randomness etc. Initialize WEP
1230 * with pseudo random key and then use increasing IV to get unique challenge 1229 * with pseudo random key and then use increasing IV to get unique challenge
1231 * streams. 1230 * streams.
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 90b64b092007..4230102ac9e4 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -179,7 +179,7 @@ struct hfa384x_comp_ident
179 __le16 variant; 179 __le16 variant;
180 __le16 major; 180 __le16 major;
181 __le16 minor; 181 __le16 minor;
182} __attribute__ ((packed)); 182} __packed;
183 183
184#define HFA384X_COMP_ID_PRI 0x15 184#define HFA384X_COMP_ID_PRI 0x15
185#define HFA384X_COMP_ID_STA 0x1f 185#define HFA384X_COMP_ID_STA 0x1f
@@ -192,14 +192,14 @@ struct hfa384x_sup_range
192 __le16 variant; 192 __le16 variant;
193 __le16 bottom; 193 __le16 bottom;
194 __le16 top; 194 __le16 top;
195} __attribute__ ((packed)); 195} __packed;
196 196
197 197
198struct hfa384x_build_id 198struct hfa384x_build_id
199{ 199{
200 __le16 pri_seq; 200 __le16 pri_seq;
201 __le16 sec_seq; 201 __le16 sec_seq;
202} __attribute__ ((packed)); 202} __packed;
203 203
204/* FD01 - Download Buffer */ 204/* FD01 - Download Buffer */
205struct hfa384x_rid_download_buffer 205struct hfa384x_rid_download_buffer
@@ -207,14 +207,14 @@ struct hfa384x_rid_download_buffer
207 __le16 page; 207 __le16 page;
208 __le16 offset; 208 __le16 offset;
209 __le16 length; 209 __le16 length;
210} __attribute__ ((packed)); 210} __packed;
211 211
212/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */ 212/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */
213struct hfa384x_comms_quality { 213struct hfa384x_comms_quality {
214 __le16 comm_qual; /* 0 .. 92 */ 214 __le16 comm_qual; /* 0 .. 92 */
215 __le16 signal_level; /* 27 .. 154 */ 215 __le16 signal_level; /* 27 .. 154 */
216 __le16 noise_level; /* 27 .. 154 */ 216 __le16 noise_level; /* 27 .. 154 */
217} __attribute__ ((packed)); 217} __packed;
218 218
219 219
220/* netdevice private ioctls (used, e.g., with iwpriv from user space) */ 220/* netdevice private ioctls (used, e.g., with iwpriv from user space) */
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 2f999fc94f60..e9d9d622a9b0 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1896,7 +1896,7 @@ fail:
1896/* Some SMP systems have reported number of odd errors with hostap_pci. fid 1896/* Some SMP systems have reported number of odd errors with hostap_pci. fid
1897 * register has changed values between consecutive reads for an unknown reason. 1897 * register has changed values between consecutive reads for an unknown reason.
1898 * This should really not happen, so more debugging is needed. This test 1898 * This should really not happen, so more debugging is needed. This test
1899 * version is a big slower, but it will detect most of such register changes 1899 * version is a bit slower, but it will detect most of such register changes
1900 * and will try to get the correct fid eventually. */ 1900 * and will try to get the correct fid eventually. */
1901#define EXTRA_FID_READ_TESTS 1901#define EXTRA_FID_READ_TESTS
1902 1902
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index eb57d1ea361f..25a2722c8a98 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -186,7 +186,7 @@ int prism2_wds_add(local_info_t *local, u8 *remote_addr,
186 return -ENOBUFS; 186 return -ENOBUFS;
187 187
188 /* verify that there is room for wds# postfix in the interface name */ 188 /* verify that there is room for wds# postfix in the interface name */
189 if (strlen(local->dev->name) > IFNAMSIZ - 5) { 189 if (strlen(local->dev->name) >= IFNAMSIZ - 5) {
190 printk(KERN_DEBUG "'%s' too long base device name\n", 190 printk(KERN_DEBUG "'%s' too long base device name\n",
191 local->dev->name); 191 local->dev->name);
192 return -EINVAL; 192 return -EINVAL;
@@ -741,9 +741,7 @@ void hostap_set_multicast_list_queue(struct work_struct *work)
741 local_info_t *local = 741 local_info_t *local =
742 container_of(work, local_info_t, set_multicast_list_queue); 742 container_of(work, local_info_t, set_multicast_list_queue);
743 struct net_device *dev = local->dev; 743 struct net_device *dev = local->dev;
744 struct hostap_interface *iface;
745 744
746 iface = netdev_priv(dev);
747 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, 745 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
748 local->is_promisc)) { 746 local->is_promisc)) {
749 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", 747 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 1ba33be98b25..1c66b3c1030d 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -31,14 +31,14 @@ struct linux_wlan_ng_val {
31 u32 did; 31 u32 did;
32 u16 status, len; 32 u16 status, len;
33 u32 data; 33 u32 data;
34} __attribute__ ((packed)); 34} __packed;
35 35
36struct linux_wlan_ng_prism_hdr { 36struct linux_wlan_ng_prism_hdr {
37 u32 msgcode, msglen; 37 u32 msgcode, msglen;
38 char devname[16]; 38 char devname[16];
39 struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal, 39 struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
40 noise, rate, istx, frmlen; 40 noise, rate, istx, frmlen;
41} __attribute__ ((packed)); 41} __packed;
42 42
43struct linux_wlan_ng_cap_hdr { 43struct linux_wlan_ng_cap_hdr {
44 __be32 version; 44 __be32 version;
@@ -55,7 +55,7 @@ struct linux_wlan_ng_cap_hdr {
55 __be32 ssi_noise; 55 __be32 ssi_noise;
56 __be32 preamble; 56 __be32 preamble;
57 __be32 encoding; 57 __be32 encoding;
58} __attribute__ ((packed)); 58} __packed;
59 59
60struct hostap_radiotap_rx { 60struct hostap_radiotap_rx {
61 struct ieee80211_radiotap_header hdr; 61 struct ieee80211_radiotap_header hdr;
@@ -66,7 +66,7 @@ struct hostap_radiotap_rx {
66 __le16 chan_flags; 66 __le16 chan_flags;
67 s8 dbm_antsignal; 67 s8 dbm_antsignal;
68 s8 dbm_antnoise; 68 s8 dbm_antnoise;
69} __attribute__ ((packed)); 69} __packed;
70 70
71#define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */ 71#define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */
72#define LWNG_CAPHDR_VERSION 0x80211001 72#define LWNG_CAPHDR_VERSION 0x80211001
@@ -97,7 +97,7 @@ struct hfa384x_rx_frame {
97 __be16 len; 97 __be16 len;
98 98
99 /* followed by frame data; max 2304 bytes */ 99 /* followed by frame data; max 2304 bytes */
100} __attribute__ ((packed)); 100} __packed;
101 101
102 102
103struct hfa384x_tx_frame { 103struct hfa384x_tx_frame {
@@ -126,14 +126,14 @@ struct hfa384x_tx_frame {
126 __be16 len; 126 __be16 len;
127 127
128 /* followed by frame data; max 2304 bytes */ 128 /* followed by frame data; max 2304 bytes */
129} __attribute__ ((packed)); 129} __packed;
130 130
131 131
132struct hfa384x_rid_hdr 132struct hfa384x_rid_hdr
133{ 133{
134 __le16 len; 134 __le16 len;
135 __le16 rid; 135 __le16 rid;
136} __attribute__ ((packed)); 136} __packed;
137 137
138 138
139/* Macro for converting signal levels (range 27 .. 154) to wireless ext 139/* Macro for converting signal levels (range 27 .. 154) to wireless ext
@@ -145,24 +145,24 @@ struct hfa384x_rid_hdr
145struct hfa384x_scan_request { 145struct hfa384x_scan_request {
146 __le16 channel_list; 146 __le16 channel_list;
147 __le16 txrate; /* HFA384X_RATES_* */ 147 __le16 txrate; /* HFA384X_RATES_* */
148} __attribute__ ((packed)); 148} __packed;
149 149
150struct hfa384x_hostscan_request { 150struct hfa384x_hostscan_request {
151 __le16 channel_list; 151 __le16 channel_list;
152 __le16 txrate; 152 __le16 txrate;
153 __le16 target_ssid_len; 153 __le16 target_ssid_len;
154 u8 target_ssid[32]; 154 u8 target_ssid[32];
155} __attribute__ ((packed)); 155} __packed;
156 156
157struct hfa384x_join_request { 157struct hfa384x_join_request {
158 u8 bssid[6]; 158 u8 bssid[6];
159 __le16 channel; 159 __le16 channel;
160} __attribute__ ((packed)); 160} __packed;
161 161
162struct hfa384x_info_frame { 162struct hfa384x_info_frame {
163 __le16 len; 163 __le16 len;
164 __le16 type; 164 __le16 type;
165} __attribute__ ((packed)); 165} __packed;
166 166
167struct hfa384x_comm_tallies { 167struct hfa384x_comm_tallies {
168 __le16 tx_unicast_frames; 168 __le16 tx_unicast_frames;
@@ -186,7 +186,7 @@ struct hfa384x_comm_tallies {
186 __le16 rx_discards_wep_undecryptable; 186 __le16 rx_discards_wep_undecryptable;
187 __le16 rx_message_in_msg_fragments; 187 __le16 rx_message_in_msg_fragments;
188 __le16 rx_message_in_bad_msg_fragments; 188 __le16 rx_message_in_bad_msg_fragments;
189} __attribute__ ((packed)); 189} __packed;
190 190
191struct hfa384x_comm_tallies32 { 191struct hfa384x_comm_tallies32 {
192 __le32 tx_unicast_frames; 192 __le32 tx_unicast_frames;
@@ -210,7 +210,7 @@ struct hfa384x_comm_tallies32 {
210 __le32 rx_discards_wep_undecryptable; 210 __le32 rx_discards_wep_undecryptable;
211 __le32 rx_message_in_msg_fragments; 211 __le32 rx_message_in_msg_fragments;
212 __le32 rx_message_in_bad_msg_fragments; 212 __le32 rx_message_in_bad_msg_fragments;
213} __attribute__ ((packed)); 213} __packed;
214 214
215struct hfa384x_scan_result_hdr { 215struct hfa384x_scan_result_hdr {
216 __le16 reserved; 216 __le16 reserved;
@@ -219,7 +219,7 @@ struct hfa384x_scan_result_hdr {
219#define HFA384X_SCAN_HOST_INITIATED 1 219#define HFA384X_SCAN_HOST_INITIATED 1
220#define HFA384X_SCAN_FIRMWARE_INITIATED 2 220#define HFA384X_SCAN_FIRMWARE_INITIATED 2
221#define HFA384X_SCAN_INQUIRY_FROM_HOST 3 221#define HFA384X_SCAN_INQUIRY_FROM_HOST 3
222} __attribute__ ((packed)); 222} __packed;
223 223
224#define HFA384X_SCAN_MAX_RESULTS 32 224#define HFA384X_SCAN_MAX_RESULTS 32
225 225
@@ -234,7 +234,7 @@ struct hfa384x_scan_result {
234 u8 ssid[32]; 234 u8 ssid[32];
235 u8 sup_rates[10]; 235 u8 sup_rates[10];
236 __le16 rate; 236 __le16 rate;
237} __attribute__ ((packed)); 237} __packed;
238 238
239struct hfa384x_hostscan_result { 239struct hfa384x_hostscan_result {
240 __le16 chid; 240 __le16 chid;
@@ -248,7 +248,7 @@ struct hfa384x_hostscan_result {
248 u8 sup_rates[10]; 248 u8 sup_rates[10];
249 __le16 rate; 249 __le16 rate;
250 __le16 atim; 250 __le16 atim;
251} __attribute__ ((packed)); 251} __packed;
252 252
253struct comm_tallies_sums { 253struct comm_tallies_sums {
254 unsigned int tx_unicast_frames; 254 unsigned int tx_unicast_frames;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 0bd4dfa59a8a..c24c5efeae1f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -174,7 +174,7 @@ that only one external action is invoked at a time.
174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
176 176
177struct pm_qos_request_list *ipw2100_pm_qos_req; 177static struct pm_qos_request_list ipw2100_pm_qos_req;
178 178
179/* Debugging stuff */ 179/* Debugging stuff */
180#ifdef CONFIG_IPW2100_DEBUG 180#ifdef CONFIG_IPW2100_DEBUG
@@ -1741,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1741 /* the ipw2100 hardware really doesn't want power management delays 1741 /* the ipw2100 hardware really doesn't want power management delays
1742 * longer than 175usec 1742 * longer than 175usec
1743 */ 1743 */
1744 pm_qos_update_request(ipw2100_pm_qos_req, 175); 1744 pm_qos_update_request(&ipw2100_pm_qos_req, 175);
1745 1745
1746 /* If the interrupt is enabled, turn it off... */ 1746 /* If the interrupt is enabled, turn it off... */
1747 spin_lock_irqsave(&priv->low_lock, flags); 1747 spin_lock_irqsave(&priv->low_lock, flags);
@@ -1889,7 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1889 ipw2100_disable_interrupts(priv); 1889 ipw2100_disable_interrupts(priv);
1890 spin_unlock_irqrestore(&priv->low_lock, flags); 1890 spin_unlock_irqrestore(&priv->low_lock, flags);
1891 1891
1892 pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1892 pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
1893 1893
1894 /* We have to signal any supplicant if we are disassociating */ 1894 /* We have to signal any supplicant if we are disassociating */
1895 if (associated) 1895 if (associated)
@@ -3467,10 +3467,8 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
3467 dma_addr_t p; 3467 dma_addr_t p;
3468 3468
3469 priv->msg_buffers = 3469 priv->msg_buffers =
3470 (struct ipw2100_tx_packet *)kmalloc(IPW_COMMAND_POOL_SIZE * 3470 kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
3471 sizeof(struct 3471 GFP_KERNEL);
3472 ipw2100_tx_packet),
3473 GFP_KERNEL);
3474 if (!priv->msg_buffers) { 3472 if (!priv->msg_buffers) {
3475 printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg " 3473 printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg "
3476 "buffers.\n", priv->net_dev->name); 3474 "buffers.\n", priv->net_dev->name);
@@ -4499,10 +4497,8 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
4499 } 4497 }
4500 4498
4501 priv->tx_buffers = 4499 priv->tx_buffers =
4502 (struct ipw2100_tx_packet *)kmalloc(TX_PENDED_QUEUE_LENGTH * 4500 kmalloc(TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
4503 sizeof(struct 4501 GFP_ATOMIC);
4504 ipw2100_tx_packet),
4505 GFP_ATOMIC);
4506 if (!priv->tx_buffers) { 4502 if (!priv->tx_buffers) {
4507 printk(KERN_ERR DRV_NAME 4503 printk(KERN_ERR DRV_NAME
4508 ": %s: alloc failed form tx buffers.\n", 4504 ": %s: alloc failed form tx buffers.\n",
@@ -4651,9 +4647,9 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
4651 /* 4647 /*
4652 * allocate packets 4648 * allocate packets
4653 */ 4649 */
4654 priv->rx_buffers = (struct ipw2100_rx_packet *) 4650 priv->rx_buffers = kmalloc(RX_QUEUE_LENGTH *
4655 kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet), 4651 sizeof(struct ipw2100_rx_packet),
4656 GFP_KERNEL); 4652 GFP_KERNEL);
4657 if (!priv->rx_buffers) { 4653 if (!priv->rx_buffers) {
4658 IPW_DEBUG_INFO("can't allocate rx packet buffer table\n"); 4654 IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
4659 4655
@@ -5233,7 +5229,7 @@ struct security_info_params {
5233 u8 auth_mode; 5229 u8 auth_mode;
5234 u8 replay_counters_number; 5230 u8 replay_counters_number;
5235 u8 unicast_using_group; 5231 u8 unicast_using_group;
5236} __attribute__ ((packed)); 5232} __packed;
5237 5233
5238static int ipw2100_set_security_information(struct ipw2100_priv *priv, 5234static int ipw2100_set_security_information(struct ipw2100_priv *priv,
5239 int auth_mode, 5235 int auth_mode,
@@ -6669,8 +6665,8 @@ static int __init ipw2100_init(void)
6669 if (ret) 6665 if (ret)
6670 goto out; 6666 goto out;
6671 6667
6672 ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 6668 pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
6673 PM_QOS_DEFAULT_VALUE); 6669 PM_QOS_DEFAULT_VALUE);
6674#ifdef CONFIG_IPW2100_DEBUG 6670#ifdef CONFIG_IPW2100_DEBUG
6675 ipw2100_debug_level = debug; 6671 ipw2100_debug_level = debug;
6676 ret = driver_create_file(&ipw2100_pci_driver.driver, 6672 ret = driver_create_file(&ipw2100_pci_driver.driver,
@@ -6692,7 +6688,7 @@ static void __exit ipw2100_exit(void)
6692 &driver_attr_debug_level); 6688 &driver_attr_debug_level);
6693#endif 6689#endif
6694 pci_unregister_driver(&ipw2100_pci_driver); 6690 pci_unregister_driver(&ipw2100_pci_driver);
6695 pm_qos_remove_request(ipw2100_pm_qos_req); 6691 pm_qos_remove_request(&ipw2100_pm_qos_req);
6696} 6692}
6697 6693
6698module_init(ipw2100_init); 6694module_init(ipw2100_init);
@@ -8475,7 +8471,7 @@ struct ipw2100_fw_header {
8475 short mode; 8471 short mode;
8476 unsigned int fw_size; 8472 unsigned int fw_size;
8477 unsigned int uc_size; 8473 unsigned int uc_size;
8478} __attribute__ ((packed)); 8474} __packed;
8479 8475
8480static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) 8476static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
8481{ 8477{
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 1eab0d698f4d..838002b4881e 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -164,7 +164,7 @@ struct bd_status {
164 } fields; 164 } fields;
165 u8 field; 165 u8 field;
166 } info; 166 } info;
167} __attribute__ ((packed)); 167} __packed;
168 168
169struct ipw2100_bd { 169struct ipw2100_bd {
170 u32 host_addr; 170 u32 host_addr;
@@ -174,7 +174,7 @@ struct ipw2100_bd {
174 * 1st TBD) */ 174 * 1st TBD) */
175 u8 num_fragments; 175 u8 num_fragments;
176 u8 reserved[6]; 176 u8 reserved[6];
177} __attribute__ ((packed)); 177} __packed;
178 178
179#define IPW_BD_QUEUE_LENGTH(n) (1<<n) 179#define IPW_BD_QUEUE_LENGTH(n) (1<<n)
180#define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd)) 180#define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd))
@@ -232,7 +232,7 @@ struct ipw2100_status {
232#define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1) 232#define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1)
233#define IPW_STATUS_FLAG_CRC_ERROR (1<<2) 233#define IPW_STATUS_FLAG_CRC_ERROR (1<<2)
234 u8 rssi; 234 u8 rssi;
235} __attribute__ ((packed)); 235} __packed;
236 236
237struct ipw2100_status_queue { 237struct ipw2100_status_queue {
238 /* driver (virtual) pointer to queue */ 238 /* driver (virtual) pointer to queue */
@@ -293,7 +293,7 @@ struct ipw2100_cmd_header {
293 u32 reserved1[3]; 293 u32 reserved1[3];
294 u32 *ordinal1_ptr; 294 u32 *ordinal1_ptr;
295 u32 *ordinal2_ptr; 295 u32 *ordinal2_ptr;
296} __attribute__ ((packed)); 296} __packed;
297 297
298struct ipw2100_data_header { 298struct ipw2100_data_header {
299 u32 host_command_reg; 299 u32 host_command_reg;
@@ -307,7 +307,7 @@ struct ipw2100_data_header {
307 u8 src_addr[ETH_ALEN]; 307 u8 src_addr[ETH_ALEN];
308 u8 dst_addr[ETH_ALEN]; 308 u8 dst_addr[ETH_ALEN];
309 u16 fragment_size; 309 u16 fragment_size;
310} __attribute__ ((packed)); 310} __packed;
311 311
312/* Host command data structure */ 312/* Host command data structure */
313struct host_command { 313struct host_command {
@@ -316,7 +316,7 @@ struct host_command {
316 u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID) 316 u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID)
317 u32 host_command_length; // LENGTH 317 u32 host_command_length; // LENGTH
318 u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS 318 u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS
319} __attribute__ ((packed)); 319} __packed;
320 320
321typedef enum { 321typedef enum {
322 POWER_ON_RESET, 322 POWER_ON_RESET,
@@ -382,7 +382,7 @@ struct ipw2100_notification {
382 u32 hnhdr_size; /* size in bytes of data 382 u32 hnhdr_size; /* size in bytes of data
383 or number of entries, if table. 383 or number of entries, if table.
384 Does NOT include header */ 384 Does NOT include header */
385} __attribute__ ((packed)); 385} __packed;
386 386
387#define MAX_KEY_SIZE 16 387#define MAX_KEY_SIZE 16
388#define MAX_KEYS 8 388#define MAX_KEYS 8
@@ -814,7 +814,7 @@ struct ipw2100_rx {
814 struct ipw2100_notification notification; 814 struct ipw2100_notification notification;
815 struct ipw2100_cmd_header command; 815 struct ipw2100_cmd_header command;
816 } rx_data; 816 } rx_data;
817} __attribute__ ((packed)); 817} __packed;
818 818
819/* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */ 819/* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */
820#define TX_RATE_1_MBIT 0x0001 820#define TX_RATE_1_MBIT 0x0001
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 3aa3bb18f615..cb2552a6777c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -96,7 +96,7 @@ static int network_mode = 0;
96static u32 ipw_debug_level; 96static u32 ipw_debug_level;
97static int associate; 97static int associate;
98static int auto_create = 1; 98static int auto_create = 1;
99static int led_support = 0; 99static int led_support = 1;
100static int disable = 0; 100static int disable = 0;
101static int bt_coexist = 0; 101static int bt_coexist = 0;
102static int hwcrypto = 0; 102static int hwcrypto = 0;
@@ -6624,13 +6624,12 @@ static int ipw_wx_set_genie(struct net_device *dev,
6624 return -EINVAL; 6624 return -EINVAL;
6625 6625
6626 if (wrqu->data.length) { 6626 if (wrqu->data.length) {
6627 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 6627 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6628 if (buf == NULL) { 6628 if (buf == NULL) {
6629 err = -ENOMEM; 6629 err = -ENOMEM;
6630 goto out; 6630 goto out;
6631 } 6631 }
6632 6632
6633 memcpy(buf, extra, wrqu->data.length);
6634 kfree(ieee->wpa_ie); 6633 kfree(ieee->wpa_ie);
6635 ieee->wpa_ie = buf; 6634 ieee->wpa_ie = buf;
6636 ieee->wpa_ie_len = wrqu->data.length; 6635 ieee->wpa_ie_len = wrqu->data.length;
@@ -12083,7 +12082,7 @@ module_param(auto_create, int, 0444);
12083MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); 12082MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12084 12083
12085module_param_named(led, led_support, int, 0444); 12084module_param_named(led, led_support, int, 0444);
12086MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)"); 12085MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12087 12086
12088module_param(debug, int, 0444); 12087module_param(debug, int, 0444);
12089MODULE_PARM_DESC(debug, "debug output mask"); 12088MODULE_PARM_DESC(debug, "debug output mask");
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index bf0eeb2e873a..d7d049c7a4fa 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -388,7 +388,7 @@ struct clx2_queue {
388 dma_addr_t dma_addr; /**< physical addr for BD's */ 388 dma_addr_t dma_addr; /**< physical addr for BD's */
389 int low_mark; /**< low watermark, resume queue if free space more than this */ 389 int low_mark; /**< low watermark, resume queue if free space more than this */
390 int high_mark; /**< high watermark, stop queue if free space less than this */ 390 int high_mark; /**< high watermark, stop queue if free space less than this */
391} __attribute__ ((packed)); /* XXX */ 391} __packed; /* XXX */
392 392
393struct machdr32 { 393struct machdr32 {
394 __le16 frame_ctl; 394 __le16 frame_ctl;
@@ -399,7 +399,7 @@ struct machdr32 {
399 __le16 seq_ctrl; // more endians! 399 __le16 seq_ctrl; // more endians!
400 u8 addr4[MACADRR_BYTE_LEN]; 400 u8 addr4[MACADRR_BYTE_LEN];
401 __le16 qos_ctrl; 401 __le16 qos_ctrl;
402} __attribute__ ((packed)); 402} __packed;
403 403
404struct machdr30 { 404struct machdr30 {
405 __le16 frame_ctl; 405 __le16 frame_ctl;
@@ -409,7 +409,7 @@ struct machdr30 {
409 u8 addr3[MACADRR_BYTE_LEN]; 409 u8 addr3[MACADRR_BYTE_LEN];
410 __le16 seq_ctrl; // more endians! 410 __le16 seq_ctrl; // more endians!
411 u8 addr4[MACADRR_BYTE_LEN]; 411 u8 addr4[MACADRR_BYTE_LEN];
412} __attribute__ ((packed)); 412} __packed;
413 413
414struct machdr26 { 414struct machdr26 {
415 __le16 frame_ctl; 415 __le16 frame_ctl;
@@ -419,7 +419,7 @@ struct machdr26 {
419 u8 addr3[MACADRR_BYTE_LEN]; 419 u8 addr3[MACADRR_BYTE_LEN];
420 __le16 seq_ctrl; // more endians! 420 __le16 seq_ctrl; // more endians!
421 __le16 qos_ctrl; 421 __le16 qos_ctrl;
422} __attribute__ ((packed)); 422} __packed;
423 423
424struct machdr24 { 424struct machdr24 {
425 __le16 frame_ctl; 425 __le16 frame_ctl;
@@ -428,20 +428,20 @@ struct machdr24 {
428 u8 addr2[MACADRR_BYTE_LEN]; 428 u8 addr2[MACADRR_BYTE_LEN];
429 u8 addr3[MACADRR_BYTE_LEN]; 429 u8 addr3[MACADRR_BYTE_LEN];
430 __le16 seq_ctrl; // more endians! 430 __le16 seq_ctrl; // more endians!
431} __attribute__ ((packed)); 431} __packed;
432 432
433// TX TFD with 32 byte MAC Header 433// TX TFD with 32 byte MAC Header
434struct tx_tfd_32 { 434struct tx_tfd_32 {
435 struct machdr32 mchdr; // 32 435 struct machdr32 mchdr; // 32
436 __le32 uivplaceholder[2]; // 8 436 __le32 uivplaceholder[2]; // 8
437} __attribute__ ((packed)); 437} __packed;
438 438
439// TX TFD with 30 byte MAC Header 439// TX TFD with 30 byte MAC Header
440struct tx_tfd_30 { 440struct tx_tfd_30 {
441 struct machdr30 mchdr; // 30 441 struct machdr30 mchdr; // 30
442 u8 reserved[2]; // 2 442 u8 reserved[2]; // 2
443 __le32 uivplaceholder[2]; // 8 443 __le32 uivplaceholder[2]; // 8
444} __attribute__ ((packed)); 444} __packed;
445 445
446// tx tfd with 26 byte mac header 446// tx tfd with 26 byte mac header
447struct tx_tfd_26 { 447struct tx_tfd_26 {
@@ -449,14 +449,14 @@ struct tx_tfd_26 {
449 u8 reserved1[2]; // 2 449 u8 reserved1[2]; // 2
450 __le32 uivplaceholder[2]; // 8 450 __le32 uivplaceholder[2]; // 8
451 u8 reserved2[4]; // 4 451 u8 reserved2[4]; // 4
452} __attribute__ ((packed)); 452} __packed;
453 453
454// tx tfd with 24 byte mac header 454// tx tfd with 24 byte mac header
455struct tx_tfd_24 { 455struct tx_tfd_24 {
456 struct machdr24 mchdr; // 24 456 struct machdr24 mchdr; // 24
457 __le32 uivplaceholder[2]; // 8 457 __le32 uivplaceholder[2]; // 8
458 u8 reserved[8]; // 8 458 u8 reserved[8]; // 8
459} __attribute__ ((packed)); 459} __packed;
460 460
461#define DCT_WEP_KEY_FIELD_LENGTH 16 461#define DCT_WEP_KEY_FIELD_LENGTH 16
462 462
@@ -465,7 +465,7 @@ struct tfd_command {
465 u8 length; 465 u8 length;
466 __le16 reserved; 466 __le16 reserved;
467 u8 payload[0]; 467 u8 payload[0];
468} __attribute__ ((packed)); 468} __packed;
469 469
470struct tfd_data { 470struct tfd_data {
471 /* Header */ 471 /* Header */
@@ -504,14 +504,14 @@ struct tfd_data {
504 __le32 num_chunks; 504 __le32 num_chunks;
505 __le32 chunk_ptr[NUM_TFD_CHUNKS]; 505 __le32 chunk_ptr[NUM_TFD_CHUNKS];
506 __le16 chunk_len[NUM_TFD_CHUNKS]; 506 __le16 chunk_len[NUM_TFD_CHUNKS];
507} __attribute__ ((packed)); 507} __packed;
508 508
509struct txrx_control_flags { 509struct txrx_control_flags {
510 u8 message_type; 510 u8 message_type;
511 u8 rx_seq_num; 511 u8 rx_seq_num;
512 u8 control_bits; 512 u8 control_bits;
513 u8 reserved; 513 u8 reserved;
514} __attribute__ ((packed)); 514} __packed;
515 515
516#define TFD_SIZE 128 516#define TFD_SIZE 128
517#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags)) 517#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags))
@@ -523,7 +523,7 @@ struct tfd_frame {
523 struct tfd_command cmd; 523 struct tfd_command cmd;
524 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; 524 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
525 } u; 525 } u;
526} __attribute__ ((packed)); 526} __packed;
527 527
528typedef void destructor_func(const void *); 528typedef void destructor_func(const void *);
529 529
@@ -559,7 +559,7 @@ struct rate_histogram {
559 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; 559 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
560 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; 560 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
561 } failed; 561 } failed;
562} __attribute__ ((packed)); 562} __packed;
563 563
564/* statistics command response */ 564/* statistics command response */
565struct ipw_cmd_stats { 565struct ipw_cmd_stats {
@@ -586,13 +586,13 @@ struct ipw_cmd_stats {
586 __le16 rx_autodetec_no_ofdm; 586 __le16 rx_autodetec_no_ofdm;
587 __le16 rx_autodetec_no_barker; 587 __le16 rx_autodetec_no_barker;
588 __le16 reserved; 588 __le16 reserved;
589} __attribute__ ((packed)); 589} __packed;
590 590
591struct notif_channel_result { 591struct notif_channel_result {
592 u8 channel_num; 592 u8 channel_num;
593 struct ipw_cmd_stats stats; 593 struct ipw_cmd_stats stats;
594 u8 uReserved; 594 u8 uReserved;
595} __attribute__ ((packed)); 595} __packed;
596 596
597#define SCAN_COMPLETED_STATUS_COMPLETE 1 597#define SCAN_COMPLETED_STATUS_COMPLETE 1
598#define SCAN_COMPLETED_STATUS_ABORTED 2 598#define SCAN_COMPLETED_STATUS_ABORTED 2
@@ -602,24 +602,24 @@ struct notif_scan_complete {
602 u8 num_channels; 602 u8 num_channels;
603 u8 status; 603 u8 status;
604 u8 reserved; 604 u8 reserved;
605} __attribute__ ((packed)); 605} __packed;
606 606
607struct notif_frag_length { 607struct notif_frag_length {
608 __le16 frag_length; 608 __le16 frag_length;
609 __le16 reserved; 609 __le16 reserved;
610} __attribute__ ((packed)); 610} __packed;
611 611
612struct notif_beacon_state { 612struct notif_beacon_state {
613 __le32 state; 613 __le32 state;
614 __le32 number; 614 __le32 number;
615} __attribute__ ((packed)); 615} __packed;
616 616
617struct notif_tgi_tx_key { 617struct notif_tgi_tx_key {
618 u8 key_state; 618 u8 key_state;
619 u8 security_type; 619 u8 security_type;
620 u8 station_index; 620 u8 station_index;
621 u8 reserved; 621 u8 reserved;
622} __attribute__ ((packed)); 622} __packed;
623 623
624#define SILENCE_OVER_THRESH (1) 624#define SILENCE_OVER_THRESH (1)
625#define SILENCE_UNDER_THRESH (2) 625#define SILENCE_UNDER_THRESH (2)
@@ -631,25 +631,25 @@ struct notif_link_deterioration {
631 struct rate_histogram histogram; 631 struct rate_histogram histogram;
632 u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ 632 u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */
633 __le16 silence_count; 633 __le16 silence_count;
634} __attribute__ ((packed)); 634} __packed;
635 635
636struct notif_association { 636struct notif_association {
637 u8 state; 637 u8 state;
638} __attribute__ ((packed)); 638} __packed;
639 639
640struct notif_authenticate { 640struct notif_authenticate {
641 u8 state; 641 u8 state;
642 struct machdr24 addr; 642 struct machdr24 addr;
643 __le16 status; 643 __le16 status;
644} __attribute__ ((packed)); 644} __packed;
645 645
646struct notif_calibration { 646struct notif_calibration {
647 u8 data[104]; 647 u8 data[104];
648} __attribute__ ((packed)); 648} __packed;
649 649
650struct notif_noise { 650struct notif_noise {
651 __le32 value; 651 __le32 value;
652} __attribute__ ((packed)); 652} __packed;
653 653
654struct ipw_rx_notification { 654struct ipw_rx_notification {
655 u8 reserved[8]; 655 u8 reserved[8];
@@ -669,7 +669,7 @@ struct ipw_rx_notification {
669 struct notif_noise noise; 669 struct notif_noise noise;
670 u8 raw[0]; 670 u8 raw[0];
671 } u; 671 } u;
672} __attribute__ ((packed)); 672} __packed;
673 673
674struct ipw_rx_frame { 674struct ipw_rx_frame {
675 __le32 reserved1; 675 __le32 reserved1;
@@ -692,14 +692,14 @@ struct ipw_rx_frame {
692 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen 692 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
693 __le16 length; 693 __le16 length;
694 u8 data[0]; 694 u8 data[0];
695} __attribute__ ((packed)); 695} __packed;
696 696
697struct ipw_rx_header { 697struct ipw_rx_header {
698 u8 message_type; 698 u8 message_type;
699 u8 rx_seq_num; 699 u8 rx_seq_num;
700 u8 control_bits; 700 u8 control_bits;
701 u8 reserved; 701 u8 reserved;
702} __attribute__ ((packed)); 702} __packed;
703 703
704struct ipw_rx_packet { 704struct ipw_rx_packet {
705 struct ipw_rx_header header; 705 struct ipw_rx_header header;
@@ -707,7 +707,7 @@ struct ipw_rx_packet {
707 struct ipw_rx_frame frame; 707 struct ipw_rx_frame frame;
708 struct ipw_rx_notification notification; 708 struct ipw_rx_notification notification;
709 } u; 709 } u;
710} __attribute__ ((packed)); 710} __packed;
711 711
712#define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12 712#define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12
713#define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \ 713#define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \
@@ -717,7 +717,7 @@ struct ipw_rx_mem_buffer {
717 dma_addr_t dma_addr; 717 dma_addr_t dma_addr;
718 struct sk_buff *skb; 718 struct sk_buff *skb;
719 struct list_head list; 719 struct list_head list;
720}; /* Not transferred over network, so not __attribute__ ((packed)) */ 720}; /* Not transferred over network, so not __packed */
721 721
722struct ipw_rx_queue { 722struct ipw_rx_queue {
723 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 723 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -730,7 +730,7 @@ struct ipw_rx_queue {
730 struct list_head rx_free; /* Own an SKBs */ 730 struct list_head rx_free; /* Own an SKBs */
731 struct list_head rx_used; /* No SKB allocated */ 731 struct list_head rx_used; /* No SKB allocated */
732 spinlock_t lock; 732 spinlock_t lock;
733}; /* Not transferred over network, so not __attribute__ ((packed)) */ 733}; /* Not transferred over network, so not __packed */
734 734
735struct alive_command_responce { 735struct alive_command_responce {
736 u8 alive_command; 736 u8 alive_command;
@@ -745,21 +745,21 @@ struct alive_command_responce {
745 __le16 reserved4; 745 __le16 reserved4;
746 u8 time_stamp[5]; /* month, day, year, hours, minutes */ 746 u8 time_stamp[5]; /* month, day, year, hours, minutes */
747 u8 ucode_valid; 747 u8 ucode_valid;
748} __attribute__ ((packed)); 748} __packed;
749 749
750#define IPW_MAX_RATES 12 750#define IPW_MAX_RATES 12
751 751
752struct ipw_rates { 752struct ipw_rates {
753 u8 num_rates; 753 u8 num_rates;
754 u8 rates[IPW_MAX_RATES]; 754 u8 rates[IPW_MAX_RATES];
755} __attribute__ ((packed)); 755} __packed;
756 756
757struct command_block { 757struct command_block {
758 unsigned int control; 758 unsigned int control;
759 u32 source_addr; 759 u32 source_addr;
760 u32 dest_addr; 760 u32 dest_addr;
761 unsigned int status; 761 unsigned int status;
762} __attribute__ ((packed)); 762} __packed;
763 763
764#define CB_NUMBER_OF_ELEMENTS_SMALL 64 764#define CB_NUMBER_OF_ELEMENTS_SMALL 64
765struct fw_image_desc { 765struct fw_image_desc {
@@ -792,7 +792,7 @@ struct ipw_sys_config {
792 u8 accept_all_mgmt_frames; 792 u8 accept_all_mgmt_frames;
793 u8 pass_noise_stats_to_host; 793 u8 pass_noise_stats_to_host;
794 u8 reserved3; 794 u8 reserved3;
795} __attribute__ ((packed)); 795} __packed;
796 796
797struct ipw_multicast_addr { 797struct ipw_multicast_addr {
798 u8 num_of_multicast_addresses; 798 u8 num_of_multicast_addresses;
@@ -801,7 +801,7 @@ struct ipw_multicast_addr {
801 u8 mac2[6]; 801 u8 mac2[6];
802 u8 mac3[6]; 802 u8 mac3[6];
803 u8 mac4[6]; 803 u8 mac4[6];
804} __attribute__ ((packed)); 804} __packed;
805 805
806#define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */ 806#define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */
807#define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */ 807#define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */
@@ -822,7 +822,7 @@ struct ipw_wep_key {
822 u8 key_index; 822 u8 key_index;
823 u8 key_size; 823 u8 key_size;
824 u8 key[16]; 824 u8 key[16];
825} __attribute__ ((packed)); 825} __packed;
826 826
827struct ipw_tgi_tx_key { 827struct ipw_tgi_tx_key {
828 u8 key_id; 828 u8 key_id;
@@ -831,7 +831,7 @@ struct ipw_tgi_tx_key {
831 u8 flags; 831 u8 flags;
832 u8 key[16]; 832 u8 key[16];
833 __le32 tx_counter[2]; 833 __le32 tx_counter[2];
834} __attribute__ ((packed)); 834} __packed;
835 835
836#define IPW_SCAN_CHANNELS 54 836#define IPW_SCAN_CHANNELS 54
837 837
@@ -840,7 +840,7 @@ struct ipw_scan_request {
840 __le16 dwell_time; 840 __le16 dwell_time;
841 u8 channels_list[IPW_SCAN_CHANNELS]; 841 u8 channels_list[IPW_SCAN_CHANNELS];
842 u8 channels_reserved[3]; 842 u8 channels_reserved[3];
843} __attribute__ ((packed)); 843} __packed;
844 844
845enum { 845enum {
846 IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0, 846 IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
@@ -857,7 +857,7 @@ struct ipw_scan_request_ext {
857 u8 scan_type[IPW_SCAN_CHANNELS / 2]; 857 u8 scan_type[IPW_SCAN_CHANNELS / 2];
858 u8 reserved; 858 u8 reserved;
859 __le16 dwell_time[IPW_SCAN_TYPES]; 859 __le16 dwell_time[IPW_SCAN_TYPES];
860} __attribute__ ((packed)); 860} __packed;
861 861
862static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) 862static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
863{ 863{
@@ -902,7 +902,7 @@ struct ipw_associate {
902 u8 smr; 902 u8 smr;
903 u8 reserved1; 903 u8 reserved1;
904 __le16 reserved2; 904 __le16 reserved2;
905} __attribute__ ((packed)); 905} __packed;
906 906
907struct ipw_supported_rates { 907struct ipw_supported_rates {
908 u8 ieee_mode; 908 u8 ieee_mode;
@@ -910,36 +910,36 @@ struct ipw_supported_rates {
910 u8 purpose; 910 u8 purpose;
911 u8 reserved; 911 u8 reserved;
912 u8 supported_rates[IPW_MAX_RATES]; 912 u8 supported_rates[IPW_MAX_RATES];
913} __attribute__ ((packed)); 913} __packed;
914 914
915struct ipw_rts_threshold { 915struct ipw_rts_threshold {
916 __le16 rts_threshold; 916 __le16 rts_threshold;
917 __le16 reserved; 917 __le16 reserved;
918} __attribute__ ((packed)); 918} __packed;
919 919
920struct ipw_frag_threshold { 920struct ipw_frag_threshold {
921 __le16 frag_threshold; 921 __le16 frag_threshold;
922 __le16 reserved; 922 __le16 reserved;
923} __attribute__ ((packed)); 923} __packed;
924 924
925struct ipw_retry_limit { 925struct ipw_retry_limit {
926 u8 short_retry_limit; 926 u8 short_retry_limit;
927 u8 long_retry_limit; 927 u8 long_retry_limit;
928 __le16 reserved; 928 __le16 reserved;
929} __attribute__ ((packed)); 929} __packed;
930 930
931struct ipw_dino_config { 931struct ipw_dino_config {
932 __le32 dino_config_addr; 932 __le32 dino_config_addr;
933 __le16 dino_config_size; 933 __le16 dino_config_size;
934 u8 dino_response; 934 u8 dino_response;
935 u8 reserved; 935 u8 reserved;
936} __attribute__ ((packed)); 936} __packed;
937 937
938struct ipw_aironet_info { 938struct ipw_aironet_info {
939 u8 id; 939 u8 id;
940 u8 length; 940 u8 length;
941 __le16 reserved; 941 __le16 reserved;
942} __attribute__ ((packed)); 942} __packed;
943 943
944struct ipw_rx_key { 944struct ipw_rx_key {
945 u8 station_index; 945 u8 station_index;
@@ -950,25 +950,25 @@ struct ipw_rx_key {
950 u8 station_address[6]; 950 u8 station_address[6];
951 u8 key_index; 951 u8 key_index;
952 u8 reserved; 952 u8 reserved;
953} __attribute__ ((packed)); 953} __packed;
954 954
955struct ipw_country_channel_info { 955struct ipw_country_channel_info {
956 u8 first_channel; 956 u8 first_channel;
957 u8 no_channels; 957 u8 no_channels;
958 s8 max_tx_power; 958 s8 max_tx_power;
959} __attribute__ ((packed)); 959} __packed;
960 960
961struct ipw_country_info { 961struct ipw_country_info {
962 u8 id; 962 u8 id;
963 u8 length; 963 u8 length;
964 u8 country_str[3]; 964 u8 country_str[3];
965 struct ipw_country_channel_info groups[7]; 965 struct ipw_country_channel_info groups[7];
966} __attribute__ ((packed)); 966} __packed;
967 967
968struct ipw_channel_tx_power { 968struct ipw_channel_tx_power {
969 u8 channel_number; 969 u8 channel_number;
970 s8 tx_power; 970 s8 tx_power;
971} __attribute__ ((packed)); 971} __packed;
972 972
973#define SCAN_ASSOCIATED_INTERVAL (HZ) 973#define SCAN_ASSOCIATED_INTERVAL (HZ)
974#define SCAN_INTERVAL (HZ / 10) 974#define SCAN_INTERVAL (HZ / 10)
@@ -979,18 +979,18 @@ struct ipw_tx_power {
979 u8 num_channels; 979 u8 num_channels;
980 u8 ieee_mode; 980 u8 ieee_mode;
981 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS]; 981 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
982} __attribute__ ((packed)); 982} __packed;
983 983
984struct ipw_rsn_capabilities { 984struct ipw_rsn_capabilities {
985 u8 id; 985 u8 id;
986 u8 length; 986 u8 length;
987 __le16 version; 987 __le16 version;
988} __attribute__ ((packed)); 988} __packed;
989 989
990struct ipw_sensitivity_calib { 990struct ipw_sensitivity_calib {
991 __le16 beacon_rssi_raw; 991 __le16 beacon_rssi_raw;
992 __le16 reserved; 992 __le16 reserved;
993} __attribute__ ((packed)); 993} __packed;
994 994
995/** 995/**
996 * Host command structure. 996 * Host command structure.
@@ -1019,7 +1019,7 @@ struct ipw_cmd { /* XXX */
1019 * nParams=(len+3)/4+status_len 1019 * nParams=(len+3)/4+status_len
1020 */ 1020 */
1021 u32 param[0]; 1021 u32 param[0];
1022} __attribute__ ((packed)); 1022} __packed;
1023 1023
1024#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ 1024#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
1025 1025
@@ -1114,7 +1114,7 @@ struct ipw_event { /* XXX */
1114 u32 event; 1114 u32 event;
1115 u32 time; 1115 u32 time;
1116 u32 data; 1116 u32 data;
1117} __attribute__ ((packed)); 1117} __packed;
1118 1118
1119struct ipw_fw_error { /* XXX */ 1119struct ipw_fw_error { /* XXX */
1120 unsigned long jiffies; 1120 unsigned long jiffies;
@@ -1125,7 +1125,7 @@ struct ipw_fw_error { /* XXX */
1125 struct ipw_error_elem *elem; 1125 struct ipw_error_elem *elem;
1126 struct ipw_event *log; 1126 struct ipw_event *log;
1127 u8 payload[0]; 1127 u8 payload[0];
1128} __attribute__ ((packed)); 1128} __packed;
1129 1129
1130#ifdef CONFIG_IPW2200_PROMISCUOUS 1130#ifdef CONFIG_IPW2200_PROMISCUOUS
1131 1131
@@ -1170,7 +1170,7 @@ struct ipw_rt_hdr {
1170 s8 rt_dbmnoise; 1170 s8 rt_dbmnoise;
1171 u8 rt_antenna; /* antenna number */ 1171 u8 rt_antenna; /* antenna number */
1172 u8 payload[0]; /* payload... */ 1172 u8 payload[0]; /* payload... */
1173} __attribute__ ((packed)); 1173} __packed;
1174#endif 1174#endif
1175 1175
1176struct ipw_priv { 1176struct ipw_priv {
@@ -1957,7 +1957,7 @@ enum {
1957struct ipw_fixed_rate { 1957struct ipw_fixed_rate {
1958 __le16 tx_rates; 1958 __le16 tx_rates;
1959 __le16 reserved; 1959 __le16 reserved;
1960} __attribute__ ((packed)); 1960} __packed;
1961 1961
1962#define IPW_INDIRECT_ADDR_MASK (~0x3ul) 1962#define IPW_INDIRECT_ADDR_MASK (~0x3ul)
1963 1963
@@ -1966,14 +1966,14 @@ struct host_cmd {
1966 u8 len; 1966 u8 len;
1967 u16 reserved; 1967 u16 reserved;
1968 u32 *param; 1968 u32 *param;
1969} __attribute__ ((packed)); /* XXX */ 1969} __packed; /* XXX */
1970 1970
1971struct cmdlog_host_cmd { 1971struct cmdlog_host_cmd {
1972 u8 cmd; 1972 u8 cmd;
1973 u8 len; 1973 u8 len;
1974 __le16 reserved; 1974 __le16 reserved;
1975 char param[124]; 1975 char param[124];
1976} __attribute__ ((packed)); 1976} __packed;
1977 1977
1978struct ipw_cmd_log { 1978struct ipw_cmd_log {
1979 unsigned long jiffies; 1979 unsigned long jiffies;
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 284b0e4cb815..70f5586d96bd 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -154,7 +154,7 @@ struct libipw_snap_hdr {
154 u8 ctrl; /* always 0x03 */ 154 u8 ctrl; /* always 0x03 */
155 u8 oui[P80211_OUI_LEN]; /* organizational universal id */ 155 u8 oui[P80211_OUI_LEN]; /* organizational universal id */
156 156
157} __attribute__ ((packed)); 157} __packed;
158 158
159#define SNAP_SIZE sizeof(struct libipw_snap_hdr) 159#define SNAP_SIZE sizeof(struct libipw_snap_hdr)
160 160
@@ -323,7 +323,7 @@ struct libipw_security {
323 u8 keys[WEP_KEYS][SCM_KEY_LEN]; 323 u8 keys[WEP_KEYS][SCM_KEY_LEN];
324 u8 level; 324 u8 level;
325 u16 flags; 325 u16 flags;
326} __attribute__ ((packed)); 326} __packed;
327 327
328/* 328/*
329 329
@@ -347,7 +347,7 @@ struct libipw_hdr_1addr {
347 __le16 duration_id; 347 __le16 duration_id;
348 u8 addr1[ETH_ALEN]; 348 u8 addr1[ETH_ALEN];
349 u8 payload[0]; 349 u8 payload[0];
350} __attribute__ ((packed)); 350} __packed;
351 351
352struct libipw_hdr_2addr { 352struct libipw_hdr_2addr {
353 __le16 frame_ctl; 353 __le16 frame_ctl;
@@ -355,7 +355,7 @@ struct libipw_hdr_2addr {
355 u8 addr1[ETH_ALEN]; 355 u8 addr1[ETH_ALEN];
356 u8 addr2[ETH_ALEN]; 356 u8 addr2[ETH_ALEN];
357 u8 payload[0]; 357 u8 payload[0];
358} __attribute__ ((packed)); 358} __packed;
359 359
360struct libipw_hdr_3addr { 360struct libipw_hdr_3addr {
361 __le16 frame_ctl; 361 __le16 frame_ctl;
@@ -365,7 +365,7 @@ struct libipw_hdr_3addr {
365 u8 addr3[ETH_ALEN]; 365 u8 addr3[ETH_ALEN];
366 __le16 seq_ctl; 366 __le16 seq_ctl;
367 u8 payload[0]; 367 u8 payload[0];
368} __attribute__ ((packed)); 368} __packed;
369 369
370struct libipw_hdr_4addr { 370struct libipw_hdr_4addr {
371 __le16 frame_ctl; 371 __le16 frame_ctl;
@@ -376,7 +376,7 @@ struct libipw_hdr_4addr {
376 __le16 seq_ctl; 376 __le16 seq_ctl;
377 u8 addr4[ETH_ALEN]; 377 u8 addr4[ETH_ALEN];
378 u8 payload[0]; 378 u8 payload[0];
379} __attribute__ ((packed)); 379} __packed;
380 380
381struct libipw_hdr_3addrqos { 381struct libipw_hdr_3addrqos {
382 __le16 frame_ctl; 382 __le16 frame_ctl;
@@ -387,13 +387,13 @@ struct libipw_hdr_3addrqos {
387 __le16 seq_ctl; 387 __le16 seq_ctl;
388 u8 payload[0]; 388 u8 payload[0];
389 __le16 qos_ctl; 389 __le16 qos_ctl;
390} __attribute__ ((packed)); 390} __packed;
391 391
392struct libipw_info_element { 392struct libipw_info_element {
393 u8 id; 393 u8 id;
394 u8 len; 394 u8 len;
395 u8 data[0]; 395 u8 data[0];
396} __attribute__ ((packed)); 396} __packed;
397 397
398/* 398/*
399 * These are the data types that can make up management packets 399 * These are the data types that can make up management packets
@@ -406,7 +406,7 @@ struct libipw_info_element {
406 u16 listen_interval; 406 u16 listen_interval;
407 struct { 407 struct {
408 u16 association_id:14, reserved:2; 408 u16 association_id:14, reserved:2;
409 } __attribute__ ((packed)); 409 } __packed;
410 u32 time_stamp[2]; 410 u32 time_stamp[2];
411 u16 reason; 411 u16 reason;
412 u16 status; 412 u16 status;
@@ -419,7 +419,7 @@ struct libipw_auth {
419 __le16 status; 419 __le16 status;
420 /* challenge */ 420 /* challenge */
421 struct libipw_info_element info_element[0]; 421 struct libipw_info_element info_element[0];
422} __attribute__ ((packed)); 422} __packed;
423 423
424struct libipw_channel_switch { 424struct libipw_channel_switch {
425 u8 id; 425 u8 id;
@@ -427,7 +427,7 @@ struct libipw_channel_switch {
427 u8 mode; 427 u8 mode;
428 u8 channel; 428 u8 channel;
429 u8 count; 429 u8 count;
430} __attribute__ ((packed)); 430} __packed;
431 431
432struct libipw_action { 432struct libipw_action {
433 struct libipw_hdr_3addr header; 433 struct libipw_hdr_3addr header;
@@ -441,12 +441,12 @@ struct libipw_action {
441 struct libipw_channel_switch channel_switch; 441 struct libipw_channel_switch channel_switch;
442 442
443 } format; 443 } format;
444} __attribute__ ((packed)); 444} __packed;
445 445
446struct libipw_disassoc { 446struct libipw_disassoc {
447 struct libipw_hdr_3addr header; 447 struct libipw_hdr_3addr header;
448 __le16 reason; 448 __le16 reason;
449} __attribute__ ((packed)); 449} __packed;
450 450
451/* Alias deauth for disassoc */ 451/* Alias deauth for disassoc */
452#define libipw_deauth libipw_disassoc 452#define libipw_deauth libipw_disassoc
@@ -455,7 +455,7 @@ struct libipw_probe_request {
455 struct libipw_hdr_3addr header; 455 struct libipw_hdr_3addr header;
456 /* SSID, supported rates */ 456 /* SSID, supported rates */
457 struct libipw_info_element info_element[0]; 457 struct libipw_info_element info_element[0];
458} __attribute__ ((packed)); 458} __packed;
459 459
460struct libipw_probe_response { 460struct libipw_probe_response {
461 struct libipw_hdr_3addr header; 461 struct libipw_hdr_3addr header;
@@ -465,7 +465,7 @@ struct libipw_probe_response {
465 /* SSID, supported rates, FH params, DS params, 465 /* SSID, supported rates, FH params, DS params,
466 * CF params, IBSS params, TIM (if beacon), RSN */ 466 * CF params, IBSS params, TIM (if beacon), RSN */
467 struct libipw_info_element info_element[0]; 467 struct libipw_info_element info_element[0];
468} __attribute__ ((packed)); 468} __packed;
469 469
470/* Alias beacon for probe_response */ 470/* Alias beacon for probe_response */
471#define libipw_beacon libipw_probe_response 471#define libipw_beacon libipw_probe_response
@@ -476,7 +476,7 @@ struct libipw_assoc_request {
476 __le16 listen_interval; 476 __le16 listen_interval;
477 /* SSID, supported rates, RSN */ 477 /* SSID, supported rates, RSN */
478 struct libipw_info_element info_element[0]; 478 struct libipw_info_element info_element[0];
479} __attribute__ ((packed)); 479} __packed;
480 480
481struct libipw_reassoc_request { 481struct libipw_reassoc_request {
482 struct libipw_hdr_3addr header; 482 struct libipw_hdr_3addr header;
@@ -484,7 +484,7 @@ struct libipw_reassoc_request {
484 __le16 listen_interval; 484 __le16 listen_interval;
485 u8 current_ap[ETH_ALEN]; 485 u8 current_ap[ETH_ALEN];
486 struct libipw_info_element info_element[0]; 486 struct libipw_info_element info_element[0];
487} __attribute__ ((packed)); 487} __packed;
488 488
489struct libipw_assoc_response { 489struct libipw_assoc_response {
490 struct libipw_hdr_3addr header; 490 struct libipw_hdr_3addr header;
@@ -493,7 +493,7 @@ struct libipw_assoc_response {
493 __le16 aid; 493 __le16 aid;
494 /* supported rates */ 494 /* supported rates */
495 struct libipw_info_element info_element[0]; 495 struct libipw_info_element info_element[0];
496} __attribute__ ((packed)); 496} __packed;
497 497
498struct libipw_txb { 498struct libipw_txb {
499 u8 nr_frags; 499 u8 nr_frags;
@@ -555,19 +555,19 @@ struct libipw_qos_information_element {
555 u8 qui_subtype; 555 u8 qui_subtype;
556 u8 version; 556 u8 version;
557 u8 ac_info; 557 u8 ac_info;
558} __attribute__ ((packed)); 558} __packed;
559 559
560struct libipw_qos_ac_parameter { 560struct libipw_qos_ac_parameter {
561 u8 aci_aifsn; 561 u8 aci_aifsn;
562 u8 ecw_min_max; 562 u8 ecw_min_max;
563 __le16 tx_op_limit; 563 __le16 tx_op_limit;
564} __attribute__ ((packed)); 564} __packed;
565 565
566struct libipw_qos_parameter_info { 566struct libipw_qos_parameter_info {
567 struct libipw_qos_information_element info_element; 567 struct libipw_qos_information_element info_element;
568 u8 reserved; 568 u8 reserved;
569 struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; 569 struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
570} __attribute__ ((packed)); 570} __packed;
571 571
572struct libipw_qos_parameters { 572struct libipw_qos_parameters {
573 __le16 cw_min[QOS_QUEUE_NUM]; 573 __le16 cw_min[QOS_QUEUE_NUM];
@@ -575,7 +575,7 @@ struct libipw_qos_parameters {
575 u8 aifs[QOS_QUEUE_NUM]; 575 u8 aifs[QOS_QUEUE_NUM];
576 u8 flag[QOS_QUEUE_NUM]; 576 u8 flag[QOS_QUEUE_NUM];
577 __le16 tx_op_limit[QOS_QUEUE_NUM]; 577 __le16 tx_op_limit[QOS_QUEUE_NUM];
578} __attribute__ ((packed)); 578} __packed;
579 579
580struct libipw_qos_data { 580struct libipw_qos_data {
581 struct libipw_qos_parameters parameters; 581 struct libipw_qos_parameters parameters;
@@ -588,7 +588,7 @@ struct libipw_qos_data {
588struct libipw_tim_parameters { 588struct libipw_tim_parameters {
589 u8 tim_count; 589 u8 tim_count;
590 u8 tim_period; 590 u8 tim_period;
591} __attribute__ ((packed)); 591} __packed;
592 592
593/*******************************************************/ 593/*******************************************************/
594 594
@@ -606,7 +606,7 @@ struct libipw_basic_report {
606 __le64 start_time; 606 __le64 start_time;
607 __le16 duration; 607 __le16 duration;
608 u8 map; 608 u8 map;
609} __attribute__ ((packed)); 609} __packed;
610 610
611enum { /* libipw_measurement_request.mode */ 611enum { /* libipw_measurement_request.mode */
612 /* Bit 0 is reserved */ 612 /* Bit 0 is reserved */
@@ -627,7 +627,7 @@ struct libipw_measurement_params {
627 u8 channel; 627 u8 channel;
628 __le64 start_time; 628 __le64 start_time;
629 __le16 duration; 629 __le16 duration;
630} __attribute__ ((packed)); 630} __packed;
631 631
632struct libipw_measurement_request { 632struct libipw_measurement_request {
633 struct libipw_info_element ie; 633 struct libipw_info_element ie;
@@ -635,7 +635,7 @@ struct libipw_measurement_request {
635 u8 mode; 635 u8 mode;
636 u8 type; 636 u8 type;
637 struct libipw_measurement_params params[0]; 637 struct libipw_measurement_params params[0];
638} __attribute__ ((packed)); 638} __packed;
639 639
640struct libipw_measurement_report { 640struct libipw_measurement_report {
641 struct libipw_info_element ie; 641 struct libipw_info_element ie;
@@ -645,17 +645,17 @@ struct libipw_measurement_report {
645 union { 645 union {
646 struct libipw_basic_report basic[0]; 646 struct libipw_basic_report basic[0];
647 } u; 647 } u;
648} __attribute__ ((packed)); 648} __packed;
649 649
650struct libipw_tpc_report { 650struct libipw_tpc_report {
651 u8 transmit_power; 651 u8 transmit_power;
652 u8 link_margin; 652 u8 link_margin;
653} __attribute__ ((packed)); 653} __packed;
654 654
655struct libipw_channel_map { 655struct libipw_channel_map {
656 u8 channel; 656 u8 channel;
657 u8 map; 657 u8 map;
658} __attribute__ ((packed)); 658} __packed;
659 659
660struct libipw_ibss_dfs { 660struct libipw_ibss_dfs {
661 struct libipw_info_element ie; 661 struct libipw_info_element ie;
@@ -668,14 +668,14 @@ struct libipw_csa {
668 u8 mode; 668 u8 mode;
669 u8 channel; 669 u8 channel;
670 u8 count; 670 u8 count;
671} __attribute__ ((packed)); 671} __packed;
672 672
673struct libipw_quiet { 673struct libipw_quiet {
674 u8 count; 674 u8 count;
675 u8 period; 675 u8 period;
676 u8 duration; 676 u8 duration;
677 u8 offset; 677 u8 offset;
678} __attribute__ ((packed)); 678} __packed;
679 679
680struct libipw_network { 680struct libipw_network {
681 /* These entries are used to identify a unique network */ 681 /* These entries are used to identify a unique network */
@@ -828,7 +828,6 @@ struct libipw_device {
828 int host_strip_iv_icv; 828 int host_strip_iv_icv;
829 829
830 int host_open_frag; 830 int host_open_frag;
831 int host_build_iv;
832 int ieee802_1x; /* is IEEE 802.1X used */ 831 int ieee802_1x; /* is IEEE 802.1X used */
833 832
834 /* WPA data */ 833 /* WPA data */
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 55965408ff3f..32dee2ce5d31 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -62,8 +62,8 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
62MODULE_AUTHOR(DRV_COPYRIGHT); 62MODULE_AUTHOR(DRV_COPYRIGHT);
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64 64
65struct cfg80211_ops libipw_config_ops = { }; 65static struct cfg80211_ops libipw_config_ops = { };
66void *libipw_wiphy_privid = &libipw_wiphy_privid; 66static void *libipw_wiphy_privid = &libipw_wiphy_privid;
67 67
68static int libipw_networks_allocate(struct libipw_device *ieee) 68static int libipw_networks_allocate(struct libipw_device *ieee)
69{ 69{
diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c
index da8beac7fcf3..01c88a71abe1 100644
--- a/drivers/net/wireless/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_tx.c
@@ -260,7 +260,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
260 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, 260 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
261 rts_required; 261 rts_required;
262 unsigned long flags; 262 unsigned long flags;
263 int encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; 263 int encrypt, host_encrypt, host_encrypt_msdu;
264 __be16 ether_type; 264 __be16 ether_type;
265 int bytes, fc, hdr_len; 265 int bytes, fc, hdr_len;
266 struct sk_buff *skb_frag; 266 struct sk_buff *skb_frag;
@@ -301,7 +301,6 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
301 301
302 host_encrypt = ieee->host_encrypt && encrypt && crypt; 302 host_encrypt = ieee->host_encrypt && encrypt && crypt;
303 host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; 303 host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
304 host_build_iv = ieee->host_build_iv && encrypt && crypt;
305 304
306 if (!encrypt && ieee->ieee802_1x && 305 if (!encrypt && ieee->ieee802_1x &&
307 ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) { 306 ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
@@ -313,7 +312,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
313 skb_copy_from_linear_data(skb, dest, ETH_ALEN); 312 skb_copy_from_linear_data(skb, dest, ETH_ALEN);
314 skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN); 313 skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);
315 314
316 if (host_encrypt || host_build_iv) 315 if (host_encrypt)
317 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | 316 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
318 IEEE80211_FCTL_PROTECTED; 317 IEEE80211_FCTL_PROTECTED;
319 else 318 else
@@ -467,7 +466,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
467 for (; i < nr_frags; i++) { 466 for (; i < nr_frags; i++) {
468 skb_frag = txb->fragments[i]; 467 skb_frag = txb->fragments[i];
469 468
470 if (host_encrypt || host_build_iv) 469 if (host_encrypt)
471 skb_reserve(skb_frag, 470 skb_reserve(skb_frag,
472 crypt->ops->extra_mpdu_prefix_len); 471 crypt->ops->extra_mpdu_prefix_len);
473 472
@@ -502,15 +501,6 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
502 * to insert the IV between the header and the payload */ 501 * to insert the IV between the header and the payload */
503 if (host_encrypt) 502 if (host_encrypt)
504 libipw_encrypt_fragment(ieee, skb_frag, hdr_len); 503 libipw_encrypt_fragment(ieee, skb_frag, hdr_len);
505 else if (host_build_iv) {
506 atomic_inc(&crypt->refcnt);
507 if (crypt->ops->build_iv)
508 crypt->ops->build_iv(skb_frag, hdr_len,
509 ieee->sec.keys[ieee->sec.active_key],
510 ieee->sec.key_sizes[ieee->sec.active_key],
511 crypt->priv);
512 atomic_dec(&crypt->refcnt);
513 }
514 504
515 if (ieee->config & 505 if (ieee->config &
516 (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) 506 (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 3633c6682e49..d7bd6cf00a81 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -320,7 +320,7 @@ int libipw_wx_set_encode(struct libipw_device *ieee,
320 }; 320 };
321 int i, key, key_provided, len; 321 int i, key, key_provided, len;
322 struct lib80211_crypt_data **crypt; 322 struct lib80211_crypt_data **crypt;
323 int host_crypto = ieee->host_encrypt || ieee->host_decrypt || ieee->host_build_iv; 323 int host_crypto = ieee->host_encrypt || ieee->host_decrypt;
324 DECLARE_SSID_BUF(ssid); 324 DECLARE_SSID_BUF(ssid);
325 325
326 LIBIPW_DEBUG_WX("SET_ENCODE\n"); 326 LIBIPW_DEBUG_WX("SET_ENCODE\n");
@@ -411,10 +411,6 @@ int libipw_wx_set_encode(struct libipw_device *ieee,
411 411
412 /* If a new key was provided, set it up */ 412 /* If a new key was provided, set it up */
413 if (erq->length > 0) { 413 if (erq->length > 0) {
414#ifdef CONFIG_LIBIPW_DEBUG
415 DECLARE_SSID_BUF(ssid);
416#endif
417
418 len = erq->length <= 5 ? 5 : 13; 414 len = erq->length <= 5 ? 5 : 13;
419 memcpy(sec.keys[key], keybuf, erq->length); 415 memcpy(sec.keys[key], keybuf, erq->length);
420 if (len > erq->length) 416 if (len > erq->length)
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index dc8ed1527666..a51e4da1bdfc 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -30,9 +30,11 @@ config IWLWIFI_DEBUG
30 30
31config IWLWIFI_DEBUGFS 31config IWLWIFI_DEBUGFS
32 bool "iwlagn debugfs support" 32 bool "iwlagn debugfs support"
33 depends on IWLWIFI && IWLWIFI_DEBUG && MAC80211_DEBUGFS 33 depends on IWLWIFI && MAC80211_DEBUGFS
34 ---help--- 34 ---help---
35 Enable creation of debugfs files for the iwlwifi drivers. 35 Enable creation of debugfs files for the iwlwifi drivers. This
36 is a low-impact option that allows getting insight into the
37 driver's state at runtime.
36 38
37config IWLWIFI_DEVICE_TRACING 39config IWLWIFI_DEVICE_TRACING
38 bool "iwlwifi device access tracing" 40 bool "iwlwifi device access tracing"
@@ -85,10 +87,15 @@ config IWL4965
85 This option enables support for Intel Wireless WiFi Link 4965AGN 87 This option enables support for Intel Wireless WiFi Link 4965AGN
86 88
87config IWL5000 89config IWL5000
88 bool "Intel Wireless WiFi 5000AGN; Intel WiFi Link 1000, 6000, and 6050 Series" 90 bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
89 depends on IWLAGN 91 depends on IWLAGN
90 ---help--- 92 ---help---
91 This option enables support for Intel Wireless WiFi Link 5000AGN Family 93 This option enables support for use with the following hardware:
94 Intel Wireless WiFi Link 6250AGN Adapter
95 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
96 Intel WiFi Link 1000BGN
97 Intel Wireless WiFi 5150AGN
98 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
92 99
93config IWL3945 100config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 101 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7c7235385513..728bb858ba97 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_IWLWIFI) += iwlcore.o 1obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o 3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwlcore-objs += iwl-scan.o iwl-led.o 4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 6iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
@@ -11,7 +11,7 @@ CFLAGS_iwl-devtrace.o := -I$(src)
11obj-$(CONFIG_IWLAGN) += iwlagn.o 11obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o 12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o 13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o 14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
15iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o 15iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
16 16
17iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 17iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 6be2992f8f21..8848333bc3a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -129,8 +129,8 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
129 priv->cfg->num_of_queues * 129 priv->cfg->num_of_queues *
130 sizeof(struct iwlagn_scd_bc_tbl); 130 sizeof(struct iwlagn_scd_bc_tbl);
131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
132 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
133 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 133 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
134 134
135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -157,6 +157,10 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
157 BIT(IWL_CALIB_TX_IQ) | 157 BIT(IWL_CALIB_TX_IQ) |
158 BIT(IWL_CALIB_TX_IQ_PERD) | 158 BIT(IWL_CALIB_TX_IQ_PERD) |
159 BIT(IWL_CALIB_BASE_BAND); 159 BIT(IWL_CALIB_BASE_BAND);
160 if (priv->cfg->need_dc_calib)
161 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
162
163 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
160 164
161 return 0; 165 return 0;
162} 166}
@@ -213,14 +217,18 @@ static struct iwl_lib_ops iwl1000_lib = {
213 .set_ct_kill = iwl1000_set_ct_threshold, 217 .set_ct_kill = iwl1000_set_ct_threshold,
214 }, 218 },
215 .manage_ibss_station = iwlagn_manage_ibss_station, 219 .manage_ibss_station = iwlagn_manage_ibss_station,
220 .update_bcast_station = iwl_update_bcast_station,
216 .debugfs_ops = { 221 .debugfs_ops = {
217 .rx_stats_read = iwl_ucode_rx_stats_read, 222 .rx_stats_read = iwl_ucode_rx_stats_read,
218 .tx_stats_read = iwl_ucode_tx_stats_read, 223 .tx_stats_read = iwl_ucode_tx_stats_read,
219 .general_stats_read = iwl_ucode_general_stats_read, 224 .general_stats_read = iwl_ucode_general_stats_read,
225 .bt_stats_read = iwl_ucode_bt_stats_read,
220 }, 226 },
221 .recover_from_tx_stall = iwl_bg_monitor_recover, 227 .recover_from_tx_stall = iwl_bg_monitor_recover,
222 .check_plcp_health = iwl_good_plcp_health, 228 .check_plcp_health = iwl_good_plcp_health,
223 .check_ack_health = iwl_good_ack_health, 229 .check_ack_health = iwl_good_ack_health,
230 .txfifo_flush = iwlagn_txfifo_flush,
231 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
224}; 232};
225 233
226static const struct iwl_ops iwl1000_ops = { 234static const struct iwl_ops iwl1000_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
index 6a9c64a50e36..ef0835b01b6b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -28,6 +28,28 @@
28 28
29#include "iwl-3945-debugfs.h" 29#include "iwl-3945-debugfs.h"
30 30
31
32static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
33{
34 int p = 0;
35
36 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
37 le32_to_cpu(priv->_3945.statistics.flag));
38 if (le32_to_cpu(priv->_3945.statistics.flag) &
39 UCODE_STATISTICS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(priv->_3945.statistics.flag) &
44 UCODE_STATISTICS_FREQUENCY_MSK)
45 ? "2.4 GHz" : "5.2 GHz");
46 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
47 (le32_to_cpu(priv->_3945.statistics.flag) &
48 UCODE_STATISTICS_NARROW_BAND_MSK)
49 ? "enabled" : "disabled");
50 return p;
51}
52
31ssize_t iwl3945_ucode_rx_stats_read(struct file *file, 53ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
32 char __user *user_buf, 54 char __user *user_buf,
33 size_t count, loff_t *ppos) 55 size_t count, loff_t *ppos)
@@ -70,7 +92,7 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
70 max_cck = &priv->_3945.max_delta.rx.cck; 92 max_cck = &priv->_3945.max_delta.rx.cck;
71 max_general = &priv->_3945.max_delta.rx.general; 93 max_general = &priv->_3945.max_delta.rx.general;
72 94
73 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 95 pos += iwl3945_statistics_flag(priv, buf, bufsz);
74 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" 96 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
75 "acumulative delta max\n", 97 "acumulative delta max\n",
76 "Statistics_Rx - OFDM:"); 98 "Statistics_Rx - OFDM:");
@@ -331,7 +353,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
331 accum_tx = &priv->_3945.accum_statistics.tx; 353 accum_tx = &priv->_3945.accum_statistics.tx;
332 delta_tx = &priv->_3945.delta_statistics.tx; 354 delta_tx = &priv->_3945.delta_statistics.tx;
333 max_tx = &priv->_3945.max_delta.tx; 355 max_tx = &priv->_3945.max_delta.tx;
334 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 356 pos += iwl3945_statistics_flag(priv, buf, bufsz);
335 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" 357 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
336 "acumulative delta max\n", 358 "acumulative delta max\n",
337 "Statistics_Tx:"); 359 "Statistics_Tx:");
@@ -438,7 +460,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
438 accum_div = &priv->_3945.accum_statistics.general.div; 460 accum_div = &priv->_3945.accum_statistics.general.div;
439 delta_div = &priv->_3945.delta_statistics.general.div; 461 delta_div = &priv->_3945.delta_statistics.general.div;
440 max_div = &priv->_3945.max_delta.general.div; 462 max_div = &priv->_3945.max_delta.general.div;
441 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 463 pos += iwl3945_statistics_flag(priv, buf, bufsz);
442 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" 464 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
443 "acumulative delta max\n", 465 "acumulative delta max\n",
444 "Statistics_General:"); 466 "Statistics_General:");
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 042f6bc0df13..2c9ed2b502a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -175,13 +175,13 @@
175struct iwl3945_tfd_tb { 175struct iwl3945_tfd_tb {
176 __le32 addr; 176 __le32 addr;
177 __le32 len; 177 __le32 len;
178} __attribute__ ((packed)); 178} __packed;
179 179
180struct iwl3945_tfd { 180struct iwl3945_tfd {
181 __le32 control_flags; 181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4]; 182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28]; 183 u8 __pad[28];
184} __attribute__ ((packed)); 184} __packed;
185 185
186 186
187#endif /* __iwl_3945_fh_h__ */ 187#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 91bcb4e3cdfb..7c731a793632 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -96,7 +96,7 @@ struct iwl3945_eeprom_txpower_sample {
96 u8 gain_index; /* index into power (gain) setup table ... */ 96 u8 gain_index; /* index into power (gain) setup table ... */
97 s8 power; /* ... for this pwr level for this chnl group */ 97 s8 power; /* ... for this pwr level for this chnl group */
98 u16 v_det; /* PA output voltage */ 98 u16 v_det; /* PA output voltage */
99} __attribute__ ((packed)); 99} __packed;
100 100
101/* 101/*
102 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. 102 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
@@ -117,7 +117,7 @@ struct iwl3945_eeprom_txpower_group {
117 u8 group_channel; /* "representative" channel # in this band */ 117 u8 group_channel; /* "representative" channel # in this band */
118 s16 temperature; /* h/w temperature at factory calib this band 118 s16 temperature; /* h/w temperature at factory calib this band
119 * (signed) */ 119 * (signed) */
120} __attribute__ ((packed)); 120} __packed;
121 121
122/* 122/*
123 * Temperature-based Tx-power compensation data, not band-specific. 123 * Temperature-based Tx-power compensation data, not band-specific.
@@ -131,7 +131,7 @@ struct iwl3945_eeprom_temperature_corr {
131 u32 Tc; 131 u32 Tc;
132 u32 Td; 132 u32 Td;
133 u32 Te; 133 u32 Te;
134} __attribute__ ((packed)); 134} __packed;
135 135
136/* 136/*
137 * EEPROM map 137 * EEPROM map
@@ -215,7 +215,7 @@ struct iwl3945_eeprom {
215/* abs.ofs: 512 */ 215/* abs.ofs: 512 */
216 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ 216 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
217 u8 reserved16[172]; /* fill out to full 1024 byte block */ 217 u8 reserved16[172]; /* fill out to full 1024 byte block */
218} __attribute__ ((packed)); 218} __packed;
219 219
220#define IWL3945_EEPROM_IMG_SIZE 1024 220#define IWL3945_EEPROM_IMG_SIZE 1024
221 221
@@ -274,7 +274,7 @@ static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
274 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ 274 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
275struct iwl3945_shared { 275struct iwl3945_shared {
276 __le32 tx_base_ptr[8]; 276 __le32 tx_base_ptr[8];
277} __attribute__ ((packed)); 277} __packed;
278 278
279static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) 279static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
280{ 280{
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index c44a303e62ed..a07310fefcf2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -279,8 +279,8 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
279 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 279 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
280 280
281 tx_info = &txq->txb[txq->q.read_ptr]; 281 tx_info = &txq->txb[txq->q.read_ptr];
282 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); 282 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
283 tx_info->skb[0] = NULL; 283 tx_info->skb = NULL;
284 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 284 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
285 } 285 }
286 286
@@ -315,7 +315,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
315 return; 315 return;
316 } 316 }
317 317
318 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); 318 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
319 ieee80211_tx_info_clear_status(info); 319 ieee80211_tx_info_clear_status(info);
320 320
321 /* Fill the MRR chain with some info about on-chip retransmissions */ 321 /* Fill the MRR chain with some info about on-chip retransmissions */
@@ -352,7 +352,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
352 * RX handler implementations 352 * RX handler implementations
353 * 353 *
354 *****************************************************************************/ 354 *****************************************************************************/
355#ifdef CONFIG_IWLWIFI_DEBUG 355#ifdef CONFIG_IWLWIFI_DEBUGFS
356/* 356/*
357 * based on the assumption of all statistics counter are in DWORD 357 * based on the assumption of all statistics counter are in DWORD
358 * FIXME: This function is for debugging, do not deal with 358 * FIXME: This function is for debugging, do not deal with
@@ -406,6 +406,11 @@ static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
406 unsigned int plcp_msec; 406 unsigned int plcp_msec;
407 unsigned long plcp_received_jiffies; 407 unsigned long plcp_received_jiffies;
408 408
409 if (priv->cfg->plcp_delta_threshold ==
410 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
411 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
412 return rc;
413 }
409 memcpy(&current_stat, pkt->u.raw, sizeof(struct 414 memcpy(&current_stat, pkt->u.raw, sizeof(struct
410 iwl3945_notif_statistics)); 415 iwl3945_notif_statistics));
411 /* 416 /*
@@ -460,7 +465,7 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
460 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 465 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
461 (int)sizeof(struct iwl3945_notif_statistics), 466 (int)sizeof(struct iwl3945_notif_statistics),
462 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 467 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
463#ifdef CONFIG_IWLWIFI_DEBUG 468#ifdef CONFIG_IWLWIFI_DEBUGFS
464 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); 469 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
465#endif 470#endif
466 iwl_recover_from_statistics(priv, pkt); 471 iwl_recover_from_statistics(priv, pkt);
@@ -475,7 +480,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
475 __le32 *flag = (__le32 *)&pkt->u.raw; 480 __le32 *flag = (__le32 *)&pkt->u.raw;
476 481
477 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { 482 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
478#ifdef CONFIG_IWLWIFI_DEBUG 483#ifdef CONFIG_IWLWIFI_DEBUGFS
479 memset(&priv->_3945.accum_statistics, 0, 484 memset(&priv->_3945.accum_statistics, 0,
480 sizeof(struct iwl3945_notif_statistics)); 485 sizeof(struct iwl3945_notif_statistics));
481 memset(&priv->_3945.delta_statistics, 0, 486 memset(&priv->_3945.delta_statistics, 0,
@@ -494,158 +499,6 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
494 * Misc. internal state and helper functions 499 * Misc. internal state and helper functions
495 * 500 *
496 ******************************************************************************/ 501 ******************************************************************************/
497#ifdef CONFIG_IWLWIFI_DEBUG
498
499/**
500 * iwl3945_report_frame - dump frame to syslog during debug sessions
501 *
502 * You may hack this function to show different aspects of received frames,
503 * including selective frame dumps.
504 * group100 parameter selects whether to show 1 out of 100 good frames.
505 */
506static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
507 struct iwl_rx_packet *pkt,
508 struct ieee80211_hdr *header, int group100)
509{
510 u32 to_us;
511 u32 print_summary = 0;
512 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
513 u32 hundred = 0;
514 u32 dataframe = 0;
515 __le16 fc;
516 u16 seq_ctl;
517 u16 channel;
518 u16 phy_flags;
519 u16 length;
520 u16 status;
521 u16 bcn_tmr;
522 u32 tsf_low;
523 u64 tsf;
524 u8 rssi;
525 u8 agc;
526 u16 sig_avg;
527 u16 noise_diff;
528 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
529 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
530 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
531 u8 *data = IWL_RX_DATA(pkt);
532
533 /* MAC header */
534 fc = header->frame_control;
535 seq_ctl = le16_to_cpu(header->seq_ctrl);
536
537 /* metadata */
538 channel = le16_to_cpu(rx_hdr->channel);
539 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
540 length = le16_to_cpu(rx_hdr->len);
541
542 /* end-of-frame status and timestamp */
543 status = le32_to_cpu(rx_end->status);
544 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
545 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
546 tsf = le64_to_cpu(rx_end->timestamp);
547
548 /* signal statistics */
549 rssi = rx_stats->rssi;
550 agc = rx_stats->agc;
551 sig_avg = le16_to_cpu(rx_stats->sig_avg);
552 noise_diff = le16_to_cpu(rx_stats->noise_diff);
553
554 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
555
556 /* if data frame is to us and all is good,
557 * (optionally) print summary for only 1 out of every 100 */
558 if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
559 cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
560 dataframe = 1;
561 if (!group100)
562 print_summary = 1; /* print each frame */
563 else if (priv->framecnt_to_us < 100) {
564 priv->framecnt_to_us++;
565 print_summary = 0;
566 } else {
567 priv->framecnt_to_us = 0;
568 print_summary = 1;
569 hundred = 1;
570 }
571 } else {
572 /* print summary for all other frames */
573 print_summary = 1;
574 }
575
576 if (print_summary) {
577 char *title;
578 int rate;
579
580 if (hundred)
581 title = "100Frames";
582 else if (ieee80211_has_retry(fc))
583 title = "Retry";
584 else if (ieee80211_is_assoc_resp(fc))
585 title = "AscRsp";
586 else if (ieee80211_is_reassoc_resp(fc))
587 title = "RasRsp";
588 else if (ieee80211_is_probe_resp(fc)) {
589 title = "PrbRsp";
590 print_dump = 1; /* dump frame contents */
591 } else if (ieee80211_is_beacon(fc)) {
592 title = "Beacon";
593 print_dump = 1; /* dump frame contents */
594 } else if (ieee80211_is_atim(fc))
595 title = "ATIM";
596 else if (ieee80211_is_auth(fc))
597 title = "Auth";
598 else if (ieee80211_is_deauth(fc))
599 title = "DeAuth";
600 else if (ieee80211_is_disassoc(fc))
601 title = "DisAssoc";
602 else
603 title = "Frame";
604
605 rate = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
606 if (rate == -1)
607 rate = 0;
608 else
609 rate = iwl3945_rates[rate].ieee / 2;
610
611 /* print frame summary.
612 * MAC addresses show just the last byte (for brevity),
613 * but you can hack it to show more, if you'd like to. */
614 if (dataframe)
615 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
616 "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
617 title, le16_to_cpu(fc), header->addr1[5],
618 length, rssi, channel, rate);
619 else {
620 /* src/dst addresses assume managed mode */
621 IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, "
622 "src=0x%02x, rssi=%u, tim=%lu usec, "
623 "phy=0x%02x, chnl=%d\n",
624 title, le16_to_cpu(fc), header->addr1[5],
625 header->addr3[5], rssi,
626 tsf_low - priv->scan_start_tsf,
627 phy_flags, channel);
628 }
629 }
630 if (print_dump)
631 iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
632}
633
634static void iwl3945_dbg_report_frame(struct iwl_priv *priv,
635 struct iwl_rx_packet *pkt,
636 struct ieee80211_hdr *header, int group100)
637{
638 if (iwl_get_debug_level(priv) & IWL_DL_RX)
639 _iwl3945_dbg_report_frame(priv, pkt, header, group100);
640}
641
642#else
643static inline void iwl3945_dbg_report_frame(struct iwl_priv *priv,
644 struct iwl_rx_packet *pkt,
645 struct ieee80211_hdr *header, int group100)
646{
647}
648#endif
649 502
650/* This is necessary only for a number of statistics, see the caller. */ 503/* This is necessary only for a number of statistics, see the caller. */
651static int iwl3945_is_network_packet(struct iwl_priv *priv, 504static int iwl3945_is_network_packet(struct iwl_priv *priv,
@@ -777,8 +630,6 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
777 rx_status.signal, rx_status.signal, 630 rx_status.signal, rx_status.signal,
778 rx_status.rate_idx); 631 rx_status.rate_idx);
779 632
780 /* Set "1" to report good data frames in groups of 100 */
781 iwl3945_dbg_report_frame(priv, pkt, header, 1);
782 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); 633 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
783 634
784 if (network_packet) { 635 if (network_packet) {
@@ -850,25 +701,28 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
850 /* Unmap tx_cmd */ 701 /* Unmap tx_cmd */
851 if (counter) 702 if (counter)
852 pci_unmap_single(dev, 703 pci_unmap_single(dev,
853 pci_unmap_addr(&txq->meta[index], mapping), 704 dma_unmap_addr(&txq->meta[index], mapping),
854 pci_unmap_len(&txq->meta[index], len), 705 dma_unmap_len(&txq->meta[index], len),
855 PCI_DMA_TODEVICE); 706 PCI_DMA_TODEVICE);
856 707
857 /* unmap chunks if any */ 708 /* unmap chunks if any */
858 709
859 for (i = 1; i < counter; i++) { 710 for (i = 1; i < counter; i++)
860 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), 711 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
861 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE); 712 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
862 if (txq->txb[txq->q.read_ptr].skb[0]) { 713
863 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0]; 714 /* free SKB */
864 if (txq->txb[txq->q.read_ptr].skb[0]) { 715 if (txq->txb) {
865 /* Can be called from interrupt context */ 716 struct sk_buff *skb;
866 dev_kfree_skb_any(skb); 717
867 txq->txb[txq->q.read_ptr].skb[0] = NULL; 718 skb = txq->txb[txq->q.read_ptr].skb;
868 } 719
720 /* can be called from irqs-disabled context */
721 if (skb) {
722 dev_kfree_skb_any(skb);
723 txq->txb[txq->q.read_ptr].skb = NULL;
869 } 724 }
870 } 725 }
871 return ;
872} 726}
873 727
874/** 728/**
@@ -947,8 +801,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
947 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); 801 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
948} 802}
949 803
950static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, 804static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
951 u16 tx_rate, u8 flags)
952{ 805{
953 unsigned long flags_spin; 806 unsigned long flags_spin;
954 struct iwl_station_entry *station; 807 struct iwl_station_entry *station;
@@ -962,10 +815,9 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
962 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; 815 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
963 station->sta.rate_n_flags = cpu_to_le16(tx_rate); 816 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
964 station->sta.mode = STA_CONTROL_MODIFY_MSK; 817 station->sta.mode = STA_CONTROL_MODIFY_MSK;
965 818 iwl_send_add_sta(priv, &station->sta, CMD_ASYNC);
966 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 819 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
967 820
968 iwl_send_add_sta(priv, &station->sta, flags);
969 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", 821 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
970 sta_id, tx_rate); 822 sta_id, tx_rate);
971 return sta_id; 823 return sta_id;
@@ -997,7 +849,7 @@ static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
997 849
998static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 850static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
999{ 851{
1000 iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr); 852 iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
1001 iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); 853 iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
1002 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0); 854 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
1003 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 855 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
@@ -2473,8 +2325,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2473 2325
2474 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id, 2326 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
2475 (priv->band == IEEE80211_BAND_5GHZ) ? 2327 (priv->band == IEEE80211_BAND_5GHZ) ?
2476 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, 2328 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
2477 CMD_ASYNC);
2478 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id); 2329 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
2479 2330
2480 return 0; 2331 return 0;
@@ -2590,6 +2441,7 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2590 2441
2591 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; 2442 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2592 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; 2443 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
2444 priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
2593 2445
2594 return 0; 2446 return 0;
2595} 2447}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index cd4b61ae25b7..9166794eda0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -787,6 +787,6 @@ enum {
787struct iwl4965_scd_bc_tbl { 787struct iwl4965_scd_bc_tbl {
788 __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; 788 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; 789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __attribute__ ((packed)); 790} __packed;
791 791
792#endif /* !__iwl_4965_hw_h__ */ 792#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d3afddae8d9f..d6531ad3906a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -346,9 +346,19 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
346{ 346{
347 struct iwl_chain_noise_data *data = &(priv->chain_noise_data); 347 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
348 348
349 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 349 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
350 iwl_is_associated(priv)) {
350 struct iwl_calib_diff_gain_cmd cmd; 351 struct iwl_calib_diff_gain_cmd cmd;
351 352
353 /* clear data for chain noise calibration algorithm */
354 data->chain_noise_a = 0;
355 data->chain_noise_b = 0;
356 data->chain_noise_c = 0;
357 data->chain_signal_a = 0;
358 data->chain_signal_b = 0;
359 data->chain_signal_c = 0;
360 data->beacon_count = 0;
361
352 memset(&cmd, 0, sizeof(cmd)); 362 memset(&cmd, 0, sizeof(cmd));
353 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; 363 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
354 cmd.diff_gain_a = 0; 364 cmd.diff_gain_a = 0;
@@ -419,13 +429,6 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
419 /* Mark so we run this algo only once! */ 429 /* Mark so we run this algo only once! */
420 data->state = IWL_CHAIN_NOISE_CALIBRATED; 430 data->state = IWL_CHAIN_NOISE_CALIBRATED;
421 } 431 }
422 data->chain_noise_a = 0;
423 data->chain_noise_b = 0;
424 data->chain_noise_c = 0;
425 data->chain_signal_a = 0;
426 data->chain_signal_b = 0;
427 data->chain_signal_c = 0;
428 data->beacon_count = 0;
429} 432}
430 433
431static void iwl4965_bg_txpower_work(struct work_struct *work) 434static void iwl4965_bg_txpower_work(struct work_struct *work)
@@ -669,6 +672,7 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
669 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); 672 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
670 673
671 priv->hw_params.sens = &iwl4965_sensitivity; 674 priv->hw_params.sens = &iwl4965_sensitivity;
675 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
672 676
673 return 0; 677 return 0;
674} 678}
@@ -1441,7 +1445,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1441 return ret; 1445 return ret;
1442} 1446}
1443 1447
1444static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) 1448static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1449 struct ieee80211_channel_switch *ch_switch)
1445{ 1450{
1446 int rc; 1451 int rc;
1447 u8 band = 0; 1452 u8 band = 0;
@@ -1449,11 +1454,14 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1449 u8 ctrl_chan_high = 0; 1454 u8 ctrl_chan_high = 0;
1450 struct iwl4965_channel_switch_cmd cmd; 1455 struct iwl4965_channel_switch_cmd cmd;
1451 const struct iwl_channel_info *ch_info; 1456 const struct iwl_channel_info *ch_info;
1452 1457 u32 switch_time_in_usec, ucode_switch_time;
1458 u16 ch;
1459 u32 tsf_low;
1460 u8 switch_count;
1461 u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
1462 struct ieee80211_vif *vif = priv->vif;
1453 band = priv->band == IEEE80211_BAND_2GHZ; 1463 band = priv->band == IEEE80211_BAND_2GHZ;
1454 1464
1455 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1456
1457 is_ht40 = is_ht40_channel(priv->staging_rxon.flags); 1465 is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
1458 1466
1459 if (is_ht40 && 1467 if (is_ht40 &&
@@ -1462,26 +1470,56 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1462 1470
1463 cmd.band = band; 1471 cmd.band = band;
1464 cmd.expect_beacon = 0; 1472 cmd.expect_beacon = 0;
1465 cmd.channel = cpu_to_le16(channel); 1473 ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
1474 cmd.channel = cpu_to_le16(ch);
1466 cmd.rxon_flags = priv->staging_rxon.flags; 1475 cmd.rxon_flags = priv->staging_rxon.flags;
1467 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; 1476 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
1468 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 1477 switch_count = ch_switch->count;
1478 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1479 /*
1480 * calculate the ucode channel switch time
1481 * adding TSF as one of the factor for when to switch
1482 */
1483 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
1484 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
1485 beacon_interval)) {
1486 switch_count -= (priv->ucode_beacon_time -
1487 tsf_low) / beacon_interval;
1488 } else
1489 switch_count = 0;
1490 }
1491 if (switch_count <= 1)
1492 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1493 else {
1494 switch_time_in_usec =
1495 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1496 ucode_switch_time = iwl_usecs_to_beacons(priv,
1497 switch_time_in_usec,
1498 beacon_interval);
1499 cmd.switch_time = iwl_add_beacon_time(priv,
1500 priv->ucode_beacon_time,
1501 ucode_switch_time,
1502 beacon_interval);
1503 }
1504 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1505 cmd.switch_time);
1506 ch_info = iwl_get_channel_info(priv, priv->band, ch);
1469 if (ch_info) 1507 if (ch_info)
1470 cmd.expect_beacon = is_channel_radar(ch_info); 1508 cmd.expect_beacon = is_channel_radar(ch_info);
1471 else { 1509 else {
1472 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 1510 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1473 priv->active_rxon.channel, channel); 1511 priv->active_rxon.channel, ch);
1474 return -EFAULT; 1512 return -EFAULT;
1475 } 1513 }
1476 1514
1477 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40, 1515 rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
1478 ctrl_chan_high, &cmd.tx_power); 1516 ctrl_chan_high, &cmd.tx_power);
1479 if (rc) { 1517 if (rc) {
1480 IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc); 1518 IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
1481 return rc; 1519 return rc;
1482 } 1520 }
1483 1521
1484 priv->switch_rxon.channel = cpu_to_le16(channel); 1522 priv->switch_rxon.channel = cmd.channel;
1485 priv->switch_rxon.switch_in_progress = true; 1523 priv->switch_rxon.switch_in_progress = true;
1486 1524
1487 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1525 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
@@ -1542,7 +1580,8 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1542 u32 R4; 1580 u32 R4;
1543 1581
1544 if (test_bit(STATUS_TEMPERATURE, &priv->status) && 1582 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1545 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { 1583 (priv->_agn.statistics.flag &
1584 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1546 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); 1585 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1547 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); 1586 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1548 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); 1587 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
@@ -1566,8 +1605,8 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1566 if (!test_bit(STATUS_TEMPERATURE, &priv->status)) 1605 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1567 vt = sign_extend(R4, 23); 1606 vt = sign_extend(R4, 23);
1568 else 1607 else
1569 vt = sign_extend( 1608 vt = sign_extend(le32_to_cpu(priv->_agn.statistics.
1570 le32_to_cpu(priv->statistics.general.temperature), 23); 1609 general.common.temperature), 23);
1571 1610
1572 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); 1611 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1573 1612
@@ -1747,6 +1786,7 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1747{ 1786{
1748 unsigned long flags; 1787 unsigned long flags;
1749 u16 ra_tid; 1788 u16 ra_tid;
1789 int ret;
1750 1790
1751 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1791 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1752 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 1792 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
@@ -1762,7 +1802,9 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1762 ra_tid = BUILD_RAxTID(sta_id, tid); 1802 ra_tid = BUILD_RAxTID(sta_id, tid);
1763 1803
1764 /* Modify device's station table to Tx this TID */ 1804 /* Modify device's station table to Tx this TID */
1765 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); 1805 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1806 if (ret)
1807 return ret;
1766 1808
1767 spin_lock_irqsave(&priv->lock, flags); 1809 spin_lock_irqsave(&priv->lock, flags);
1768 1810
@@ -1870,7 +1912,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1870 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 1912 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
1871 agg->frame_count, agg->start_idx, idx); 1913 agg->frame_count, agg->start_idx, idx);
1872 1914
1873 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 1915 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
1874 info->status.rates[0].count = tx_resp->failure_frame + 1; 1916 info->status.rates[0].count = tx_resp->failure_frame + 1;
1875 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1917 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1876 info->flags |= iwl_tx_status_to_mac80211(status); 1918 info->flags |= iwl_tx_status_to_mac80211(status);
@@ -2026,6 +2068,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2026 int sta_id; 2068 int sta_id;
2027 int freed; 2069 int freed;
2028 u8 *qc = NULL; 2070 u8 *qc = NULL;
2071 unsigned long flags;
2029 2072
2030 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 2073 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
2031 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 2074 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
@@ -2035,7 +2078,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2035 return; 2078 return;
2036 } 2079 }
2037 2080
2038 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); 2081 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
2039 memset(&info->status, 0, sizeof(info->status)); 2082 memset(&info->status, 0, sizeof(info->status));
2040 2083
2041 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); 2084 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
@@ -2050,10 +2093,10 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2050 return; 2093 return;
2051 } 2094 }
2052 2095
2096 spin_lock_irqsave(&priv->sta_lock, flags);
2053 if (txq->sched_retry) { 2097 if (txq->sched_retry) {
2054 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); 2098 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2055 struct iwl_ht_agg *agg = NULL; 2099 struct iwl_ht_agg *agg = NULL;
2056
2057 WARN_ON(!qc); 2100 WARN_ON(!qc);
2058 2101
2059 agg = &priv->stations[sta_id].tid[tid].agg; 2102 agg = &priv->stations[sta_id].tid[tid].agg;
@@ -2110,6 +2153,8 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2110 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 2153 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
2111 2154
2112 iwl_check_abort_status(priv, tx_resp->frame_count, status); 2155 iwl_check_abort_status(priv, tx_resp->frame_count, status);
2156
2157 spin_unlock_irqrestore(&priv->sta_lock, flags);
2113} 2158}
2114 2159
2115static int iwl4965_calc_rssi(struct iwl_priv *priv, 2160static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2235,11 +2280,14 @@ static struct iwl_lib_ops iwl4965_lib = {
2235 .set_ct_kill = iwl4965_set_ct_threshold, 2280 .set_ct_kill = iwl4965_set_ct_threshold,
2236 }, 2281 },
2237 .manage_ibss_station = iwlagn_manage_ibss_station, 2282 .manage_ibss_station = iwlagn_manage_ibss_station,
2283 .update_bcast_station = iwl_update_bcast_station,
2238 .debugfs_ops = { 2284 .debugfs_ops = {
2239 .rx_stats_read = iwl_ucode_rx_stats_read, 2285 .rx_stats_read = iwl_ucode_rx_stats_read,
2240 .tx_stats_read = iwl_ucode_tx_stats_read, 2286 .tx_stats_read = iwl_ucode_tx_stats_read,
2241 .general_stats_read = iwl_ucode_general_stats_read, 2287 .general_stats_read = iwl_ucode_general_stats_read,
2288 .bt_stats_read = iwl_ucode_bt_stats_read,
2242 }, 2289 },
2290 .recover_from_tx_stall = iwl_bg_monitor_recover,
2243 .check_plcp_health = iwl_good_plcp_health, 2291 .check_plcp_health = iwl_good_plcp_health,
2244}; 2292};
2245 2293
@@ -2285,7 +2333,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2285 * Force use of chains B and C for scan RX on 5 GHz band 2333 * Force use of chains B and C for scan RX on 5 GHz band
2286 * because the device has off-channel reception on chain A. 2334 * because the device has off-channel reception on chain A.
2287 */ 2335 */
2288 .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, 2336 .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
2289}; 2337};
2290 2338
2291/* Module firmware */ 2339/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a28af7eb67eb..8093ce2804fb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -179,8 +179,8 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
179 priv->cfg->num_of_queues * 179 priv->cfg->num_of_queues *
180 sizeof(struct iwlagn_scd_bc_tbl); 180 sizeof(struct iwlagn_scd_bc_tbl);
181 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 181 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
182 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 182 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
183 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 183 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
184 184
185 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 185 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
186 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 186 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -208,6 +208,8 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
208 BIT(IWL_CALIB_TX_IQ_PERD) | 208 BIT(IWL_CALIB_TX_IQ_PERD) |
209 BIT(IWL_CALIB_BASE_BAND); 209 BIT(IWL_CALIB_BASE_BAND);
210 210
211 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
212
211 return 0; 213 return 0;
212} 214}
213 215
@@ -224,8 +226,8 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
224 priv->cfg->num_of_queues * 226 priv->cfg->num_of_queues *
225 sizeof(struct iwlagn_scd_bc_tbl); 227 sizeof(struct iwlagn_scd_bc_tbl);
226 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 228 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
227 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 229 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
228 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 230 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
229 231
230 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 232 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
231 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 233 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -247,10 +249,13 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
247 /* Set initial calibration set */ 249 /* Set initial calibration set */
248 priv->hw_params.sens = &iwl5150_sensitivity; 250 priv->hw_params.sens = &iwl5150_sensitivity;
249 priv->hw_params.calib_init_cfg = 251 priv->hw_params.calib_init_cfg =
250 BIT(IWL_CALIB_DC) |
251 BIT(IWL_CALIB_LO) | 252 BIT(IWL_CALIB_LO) |
252 BIT(IWL_CALIB_TX_IQ) | 253 BIT(IWL_CALIB_TX_IQ) |
253 BIT(IWL_CALIB_BASE_BAND); 254 BIT(IWL_CALIB_BASE_BAND);
255 if (priv->cfg->need_dc_calib)
256 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
257
258 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
254 259
255 return 0; 260 return 0;
256} 261}
@@ -260,40 +265,76 @@ static void iwl5150_temperature(struct iwl_priv *priv)
260 u32 vt = 0; 265 u32 vt = 0;
261 s32 offset = iwl_temp_calib_to_offset(priv); 266 s32 offset = iwl_temp_calib_to_offset(priv);
262 267
263 vt = le32_to_cpu(priv->statistics.general.temperature); 268 vt = le32_to_cpu(priv->_agn.statistics.general.common.temperature);
264 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; 269 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
265 /* now vt hold the temperature in Kelvin */ 270 /* now vt hold the temperature in Kelvin */
266 priv->temperature = KELVIN_TO_CELSIUS(vt); 271 priv->temperature = KELVIN_TO_CELSIUS(vt);
267 iwl_tt_handler(priv); 272 iwl_tt_handler(priv);
268} 273}
269 274
270static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) 275static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
276 struct ieee80211_channel_switch *ch_switch)
271{ 277{
272 struct iwl5000_channel_switch_cmd cmd; 278 struct iwl5000_channel_switch_cmd cmd;
273 const struct iwl_channel_info *ch_info; 279 const struct iwl_channel_info *ch_info;
280 u32 switch_time_in_usec, ucode_switch_time;
281 u16 ch;
282 u32 tsf_low;
283 u8 switch_count;
284 u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
285 struct ieee80211_vif *vif = priv->vif;
274 struct iwl_host_cmd hcmd = { 286 struct iwl_host_cmd hcmd = {
275 .id = REPLY_CHANNEL_SWITCH, 287 .id = REPLY_CHANNEL_SWITCH,
276 .len = sizeof(cmd), 288 .len = sizeof(cmd),
277 .flags = CMD_SIZE_HUGE, 289 .flags = CMD_SYNC,
278 .data = &cmd, 290 .data = &cmd,
279 }; 291 };
280 292
281 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
282 priv->active_rxon.channel, channel);
283 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 293 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
284 cmd.channel = cpu_to_le16(channel); 294 ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
295 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
296 priv->active_rxon.channel, ch);
297 cmd.channel = cpu_to_le16(ch);
285 cmd.rxon_flags = priv->staging_rxon.flags; 298 cmd.rxon_flags = priv->staging_rxon.flags;
286 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; 299 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
287 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 300 switch_count = ch_switch->count;
288 ch_info = iwl_get_channel_info(priv, priv->band, channel); 301 tsf_low = ch_switch->timestamp & 0x0ffffffff;
302 /*
303 * calculate the ucode channel switch time
304 * adding TSF as one of the factor for when to switch
305 */
306 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
307 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
308 beacon_interval)) {
309 switch_count -= (priv->ucode_beacon_time -
310 tsf_low) / beacon_interval;
311 } else
312 switch_count = 0;
313 }
314 if (switch_count <= 1)
315 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
316 else {
317 switch_time_in_usec =
318 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
319 ucode_switch_time = iwl_usecs_to_beacons(priv,
320 switch_time_in_usec,
321 beacon_interval);
322 cmd.switch_time = iwl_add_beacon_time(priv,
323 priv->ucode_beacon_time,
324 ucode_switch_time,
325 beacon_interval);
326 }
327 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
328 cmd.switch_time);
329 ch_info = iwl_get_channel_info(priv, priv->band, ch);
289 if (ch_info) 330 if (ch_info)
290 cmd.expect_beacon = is_channel_radar(ch_info); 331 cmd.expect_beacon = is_channel_radar(ch_info);
291 else { 332 else {
292 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 333 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
293 priv->active_rxon.channel, channel); 334 priv->active_rxon.channel, ch);
294 return -EFAULT; 335 return -EFAULT;
295 } 336 }
296 priv->switch_rxon.channel = cpu_to_le16(channel); 337 priv->switch_rxon.channel = cmd.channel;
297 priv->switch_rxon.switch_in_progress = true; 338 priv->switch_rxon.switch_in_progress = true;
298 339
299 return iwl_send_cmd_sync(priv, &hcmd); 340 return iwl_send_cmd_sync(priv, &hcmd);
@@ -352,14 +393,18 @@ static struct iwl_lib_ops iwl5000_lib = {
352 .set_ct_kill = iwl5000_set_ct_threshold, 393 .set_ct_kill = iwl5000_set_ct_threshold,
353 }, 394 },
354 .manage_ibss_station = iwlagn_manage_ibss_station, 395 .manage_ibss_station = iwlagn_manage_ibss_station,
396 .update_bcast_station = iwl_update_bcast_station,
355 .debugfs_ops = { 397 .debugfs_ops = {
356 .rx_stats_read = iwl_ucode_rx_stats_read, 398 .rx_stats_read = iwl_ucode_rx_stats_read,
357 .tx_stats_read = iwl_ucode_tx_stats_read, 399 .tx_stats_read = iwl_ucode_tx_stats_read,
358 .general_stats_read = iwl_ucode_general_stats_read, 400 .general_stats_read = iwl_ucode_general_stats_read,
401 .bt_stats_read = iwl_ucode_bt_stats_read,
359 }, 402 },
360 .recover_from_tx_stall = iwl_bg_monitor_recover, 403 .recover_from_tx_stall = iwl_bg_monitor_recover,
361 .check_plcp_health = iwl_good_plcp_health, 404 .check_plcp_health = iwl_good_plcp_health,
362 .check_ack_health = iwl_good_ack_health, 405 .check_ack_health = iwl_good_ack_health,
406 .txfifo_flush = iwlagn_txfifo_flush,
407 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
363}; 408};
364 409
365static struct iwl_lib_ops iwl5150_lib = { 410static struct iwl_lib_ops iwl5150_lib = {
@@ -414,6 +459,7 @@ static struct iwl_lib_ops iwl5150_lib = {
414 .set_ct_kill = iwl5150_set_ct_threshold, 459 .set_ct_kill = iwl5150_set_ct_threshold,
415 }, 460 },
416 .manage_ibss_station = iwlagn_manage_ibss_station, 461 .manage_ibss_station = iwlagn_manage_ibss_station,
462 .update_bcast_station = iwl_update_bcast_station,
417 .debugfs_ops = { 463 .debugfs_ops = {
418 .rx_stats_read = iwl_ucode_rx_stats_read, 464 .rx_stats_read = iwl_ucode_rx_stats_read,
419 .tx_stats_read = iwl_ucode_tx_stats_read, 465 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -422,6 +468,8 @@ static struct iwl_lib_ops iwl5150_lib = {
422 .recover_from_tx_stall = iwl_bg_monitor_recover, 468 .recover_from_tx_stall = iwl_bg_monitor_recover,
423 .check_plcp_health = iwl_good_plcp_health, 469 .check_plcp_health = iwl_good_plcp_health,
424 .check_ack_health = iwl_good_ack_health, 470 .check_ack_health = iwl_good_ack_health,
471 .txfifo_flush = iwlagn_txfifo_flush,
472 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
425}; 473};
426 474
427static const struct iwl_ops iwl5000_ops = { 475static const struct iwl_ops iwl5000_ops = {
@@ -620,6 +668,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
620 .ucode_tracing = true, 668 .ucode_tracing = true,
621 .sensitivity_calib_by_driver = true, 669 .sensitivity_calib_by_driver = true,
622 .chain_noise_calib_by_driver = true, 670 .chain_noise_calib_by_driver = true,
671 .need_dc_calib = true,
623}; 672};
624 673
625struct iwl_cfg iwl5150_abg_cfg = { 674struct iwl_cfg iwl5150_abg_cfg = {
@@ -649,6 +698,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
649 .ucode_tracing = true, 698 .ucode_tracing = true,
650 .sensitivity_calib_by_driver = true, 699 .sensitivity_calib_by_driver = true,
651 .chain_noise_calib_by_driver = true, 700 .chain_noise_calib_by_driver = true,
701 .need_dc_calib = true,
652}; 702};
653 703
654MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 704MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 9fbf54cd3e1a..58270529a0e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -71,6 +71,10 @@
71#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode" 71#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
72#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api) 72#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
73 73
74#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
75#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
76#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
77
74 78
75static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 79static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
76{ 80{
@@ -80,9 +84,10 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
80} 84}
81 85
82/* Indicate calibration version to uCode. */ 86/* Indicate calibration version to uCode. */
83static void iwl6050_set_calib_version(struct iwl_priv *priv) 87static void iwl6000_set_calib_version(struct iwl_priv *priv)
84{ 88{
85 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6) 89 if (priv->cfg->need_dc_calib &&
90 (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6))
86 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 91 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
87 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 92 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
88} 93}
@@ -155,8 +160,8 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
155 priv->cfg->num_of_queues * 160 priv->cfg->num_of_queues *
156 sizeof(struct iwlagn_scd_bc_tbl); 161 sizeof(struct iwlagn_scd_bc_tbl);
157 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 162 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
158 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 163 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
159 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 164 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
160 165
161 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; 166 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
162 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; 167 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
@@ -182,83 +187,77 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
182 BIT(IWL_CALIB_LO) | 187 BIT(IWL_CALIB_LO) |
183 BIT(IWL_CALIB_TX_IQ) | 188 BIT(IWL_CALIB_TX_IQ) |
184 BIT(IWL_CALIB_BASE_BAND); 189 BIT(IWL_CALIB_BASE_BAND);
190 if (priv->cfg->need_dc_calib)
191 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
185 192
186 return 0; 193 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
187}
188
189static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
190{
191 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
192 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
193 priv->cfg->num_of_queues =
194 priv->cfg->mod_params->num_of_queues;
195
196 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
197 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
198 priv->hw_params.scd_bc_tbls_size =
199 priv->cfg->num_of_queues *
200 sizeof(struct iwlagn_scd_bc_tbl);
201 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
202 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
203 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
204
205 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
206 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
207
208 priv->hw_params.max_bsm_size = 0;
209 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
210 BIT(IEEE80211_BAND_5GHZ);
211 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
212
213 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
214 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
215 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
216 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
217
218 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
219 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
220
221 /* Set initial sensitivity parameters */
222 /* Set initial calibration set */
223 priv->hw_params.sens = &iwl6000_sensitivity;
224 priv->hw_params.calib_init_cfg =
225 BIT(IWL_CALIB_XTAL) |
226 BIT(IWL_CALIB_DC) |
227 BIT(IWL_CALIB_LO) |
228 BIT(IWL_CALIB_TX_IQ) |
229 BIT(IWL_CALIB_BASE_BAND);
230 194
231 return 0; 195 return 0;
232} 196}
233 197
234static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel) 198static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
199 struct ieee80211_channel_switch *ch_switch)
235{ 200{
236 struct iwl6000_channel_switch_cmd cmd; 201 struct iwl6000_channel_switch_cmd cmd;
237 const struct iwl_channel_info *ch_info; 202 const struct iwl_channel_info *ch_info;
203 u32 switch_time_in_usec, ucode_switch_time;
204 u16 ch;
205 u32 tsf_low;
206 u8 switch_count;
207 u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
208 struct ieee80211_vif *vif = priv->vif;
238 struct iwl_host_cmd hcmd = { 209 struct iwl_host_cmd hcmd = {
239 .id = REPLY_CHANNEL_SWITCH, 210 .id = REPLY_CHANNEL_SWITCH,
240 .len = sizeof(cmd), 211 .len = sizeof(cmd),
241 .flags = CMD_SIZE_HUGE, 212 .flags = CMD_SYNC,
242 .data = &cmd, 213 .data = &cmd,
243 }; 214 };
244 215
245 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
246 priv->active_rxon.channel, channel);
247
248 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 216 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
249 cmd.channel = cpu_to_le16(channel); 217 ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
218 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
219 priv->active_rxon.channel, ch);
220 cmd.channel = cpu_to_le16(ch);
250 cmd.rxon_flags = priv->staging_rxon.flags; 221 cmd.rxon_flags = priv->staging_rxon.flags;
251 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; 222 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
252 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 223 switch_count = ch_switch->count;
253 ch_info = iwl_get_channel_info(priv, priv->band, channel); 224 tsf_low = ch_switch->timestamp & 0x0ffffffff;
225 /*
226 * calculate the ucode channel switch time
227 * adding TSF as one of the factor for when to switch
228 */
229 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
230 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
231 beacon_interval)) {
232 switch_count -= (priv->ucode_beacon_time -
233 tsf_low) / beacon_interval;
234 } else
235 switch_count = 0;
236 }
237 if (switch_count <= 1)
238 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
239 else {
240 switch_time_in_usec =
241 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
242 ucode_switch_time = iwl_usecs_to_beacons(priv,
243 switch_time_in_usec,
244 beacon_interval);
245 cmd.switch_time = iwl_add_beacon_time(priv,
246 priv->ucode_beacon_time,
247 ucode_switch_time,
248 beacon_interval);
249 }
250 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
251 cmd.switch_time);
252 ch_info = iwl_get_channel_info(priv, priv->band, ch);
254 if (ch_info) 253 if (ch_info)
255 cmd.expect_beacon = is_channel_radar(ch_info); 254 cmd.expect_beacon = is_channel_radar(ch_info);
256 else { 255 else {
257 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 256 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
258 priv->active_rxon.channel, channel); 257 priv->active_rxon.channel, ch);
259 return -EFAULT; 258 return -EFAULT;
260 } 259 }
261 priv->switch_rxon.channel = cpu_to_le16(channel); 260 priv->switch_rxon.channel = cmd.channel;
262 priv->switch_rxon.switch_in_progress = true; 261 priv->switch_rxon.switch_in_progress = true;
263 262
264 return iwl_send_cmd_sync(priv, &hcmd); 263 return iwl_send_cmd_sync(priv, &hcmd);
@@ -316,16 +315,21 @@ static struct iwl_lib_ops iwl6000_lib = {
316 .temp_ops = { 315 .temp_ops = {
317 .temperature = iwlagn_temperature, 316 .temperature = iwlagn_temperature,
318 .set_ct_kill = iwl6000_set_ct_threshold, 317 .set_ct_kill = iwl6000_set_ct_threshold,
318 .set_calib_version = iwl6000_set_calib_version,
319 }, 319 },
320 .manage_ibss_station = iwlagn_manage_ibss_station, 320 .manage_ibss_station = iwlagn_manage_ibss_station,
321 .update_bcast_station = iwl_update_bcast_station,
321 .debugfs_ops = { 322 .debugfs_ops = {
322 .rx_stats_read = iwl_ucode_rx_stats_read, 323 .rx_stats_read = iwl_ucode_rx_stats_read,
323 .tx_stats_read = iwl_ucode_tx_stats_read, 324 .tx_stats_read = iwl_ucode_tx_stats_read,
324 .general_stats_read = iwl_ucode_general_stats_read, 325 .general_stats_read = iwl_ucode_general_stats_read,
326 .bt_stats_read = iwl_ucode_bt_stats_read,
325 }, 327 },
326 .recover_from_tx_stall = iwl_bg_monitor_recover, 328 .recover_from_tx_stall = iwl_bg_monitor_recover,
327 .check_plcp_health = iwl_good_plcp_health, 329 .check_plcp_health = iwl_good_plcp_health,
328 .check_ack_health = iwl_good_ack_health, 330 .check_ack_health = iwl_good_ack_health,
331 .txfifo_flush = iwlagn_txfifo_flush,
332 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
329}; 333};
330 334
331static const struct iwl_ops iwl6000_ops = { 335static const struct iwl_ops iwl6000_ops = {
@@ -335,79 +339,25 @@ static const struct iwl_ops iwl6000_ops = {
335 .led = &iwlagn_led_ops, 339 .led = &iwlagn_led_ops,
336}; 340};
337 341
338static struct iwl_lib_ops iwl6050_lib = { 342static void do_not_send_bt_config(struct iwl_priv *priv)
339 .set_hw_params = iwl6050_hw_set_hw_params, 343{
340 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, 344}
341 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, 345
342 .txq_set_sched = iwlagn_txq_set_sched, 346static struct iwl_hcmd_ops iwl6000g2b_hcmd = {
343 .txq_agg_enable = iwlagn_txq_agg_enable, 347 .rxon_assoc = iwlagn_send_rxon_assoc,
344 .txq_agg_disable = iwlagn_txq_agg_disable, 348 .commit_rxon = iwl_commit_rxon,
345 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 349 .set_rxon_chain = iwl_set_rxon_chain,
346 .txq_free_tfd = iwl_hw_txq_free_tfd, 350 .set_tx_ant = iwlagn_send_tx_ant_config,
347 .txq_init = iwl_hw_tx_queue_init, 351 .send_bt_config = do_not_send_bt_config,
348 .rx_handler_setup = iwlagn_rx_handler_setup,
349 .setup_deferred_work = iwlagn_setup_deferred_work,
350 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
351 .load_ucode = iwlagn_load_ucode,
352 .dump_nic_event_log = iwl_dump_nic_event_log,
353 .dump_nic_error_log = iwl_dump_nic_error_log,
354 .dump_csr = iwl_dump_csr,
355 .dump_fh = iwl_dump_fh,
356 .init_alive_start = iwlagn_init_alive_start,
357 .alive_notify = iwlagn_alive_notify,
358 .send_tx_power = iwlagn_send_tx_power,
359 .update_chain_flags = iwl_update_chain_flags,
360 .set_channel_switch = iwl6000_hw_channel_switch,
361 .apm_ops = {
362 .init = iwl_apm_init,
363 .stop = iwl_apm_stop,
364 .config = iwl6000_nic_config,
365 .set_pwr_src = iwl_set_pwr_src,
366 },
367 .eeprom_ops = {
368 .regulatory_bands = {
369 EEPROM_REG_BAND_1_CHANNELS,
370 EEPROM_REG_BAND_2_CHANNELS,
371 EEPROM_REG_BAND_3_CHANNELS,
372 EEPROM_REG_BAND_4_CHANNELS,
373 EEPROM_REG_BAND_5_CHANNELS,
374 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
375 EEPROM_REG_BAND_52_HT40_CHANNELS
376 },
377 .verify_signature = iwlcore_eeprom_verify_signature,
378 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
379 .release_semaphore = iwlcore_eeprom_release_semaphore,
380 .calib_version = iwlagn_eeprom_calib_version,
381 .query_addr = iwlagn_eeprom_query_addr,
382 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
383 },
384 .post_associate = iwl_post_associate,
385 .isr = iwl_isr_ict,
386 .config_ap = iwl_config_ap,
387 .temp_ops = {
388 .temperature = iwlagn_temperature,
389 .set_ct_kill = iwl6000_set_ct_threshold,
390 .set_calib_version = iwl6050_set_calib_version,
391 },
392 .manage_ibss_station = iwlagn_manage_ibss_station,
393 .debugfs_ops = {
394 .rx_stats_read = iwl_ucode_rx_stats_read,
395 .tx_stats_read = iwl_ucode_tx_stats_read,
396 .general_stats_read = iwl_ucode_general_stats_read,
397 },
398 .recover_from_tx_stall = iwl_bg_monitor_recover,
399 .check_plcp_health = iwl_good_plcp_health,
400 .check_ack_health = iwl_good_ack_health,
401}; 352};
402 353
403static const struct iwl_ops iwl6050_ops = { 354static const struct iwl_ops iwl6000g2b_ops = {
404 .lib = &iwl6050_lib, 355 .lib = &iwl6000_lib,
405 .hcmd = &iwlagn_hcmd, 356 .hcmd = &iwl6000g2b_hcmd,
406 .utils = &iwlagn_hcmd_utils, 357 .utils = &iwlagn_hcmd_utils,
407 .led = &iwlagn_led_ops, 358 .led = &iwlagn_led_ops,
408}; 359};
409 360
410
411struct iwl_cfg iwl6000g2a_2agn_cfg = { 361struct iwl_cfg iwl6000g2a_2agn_cfg = {
412 .name = "6000 Series 2x2 AGN Gen2a", 362 .name = "6000 Series 2x2 AGN Gen2a",
413 .fw_name_pre = IWL6000G2A_FW_PRE, 363 .fw_name_pre = IWL6000G2A_FW_PRE,
@@ -443,6 +393,299 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
443 .ucode_tracing = true, 393 .ucode_tracing = true,
444 .sensitivity_calib_by_driver = true, 394 .sensitivity_calib_by_driver = true,
445 .chain_noise_calib_by_driver = true, 395 .chain_noise_calib_by_driver = true,
396 .need_dc_calib = true,
397};
398
399struct iwl_cfg iwl6000g2a_2abg_cfg = {
400 .name = "6000 Series 2x2 ABG Gen2a",
401 .fw_name_pre = IWL6000G2A_FW_PRE,
402 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
403 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
404 .sku = IWL_SKU_A|IWL_SKU_G,
405 .ops = &iwl6000_ops,
406 .eeprom_size = OTP_LOW_IMAGE_SIZE,
407 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
408 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
409 .num_of_queues = IWLAGN_NUM_QUEUES,
410 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
411 .mod_params = &iwlagn_mod_params,
412 .valid_tx_ant = ANT_AB,
413 .valid_rx_ant = ANT_AB,
414 .pll_cfg_val = 0,
415 .set_l0s = true,
416 .use_bsm = false,
417 .pa_type = IWL_PA_SYSTEM,
418 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
419 .shadow_ram_support = true,
420 .led_compensation = 51,
421 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
422 .supports_idle = true,
423 .adv_thermal_throttle = true,
424 .support_ct_kill_exit = true,
425 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
426 .chain_noise_scale = 1000,
427 .monitor_recover_period = IWL_MONITORING_PERIOD,
428 .max_event_log_size = 512,
429 .sensitivity_calib_by_driver = true,
430 .chain_noise_calib_by_driver = true,
431 .need_dc_calib = true,
432};
433
434struct iwl_cfg iwl6000g2a_2bg_cfg = {
435 .name = "6000 Series 2x2 BG Gen2a",
436 .fw_name_pre = IWL6000G2A_FW_PRE,
437 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
438 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
439 .sku = IWL_SKU_G,
440 .ops = &iwl6000_ops,
441 .eeprom_size = OTP_LOW_IMAGE_SIZE,
442 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
443 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
444 .num_of_queues = IWLAGN_NUM_QUEUES,
445 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
446 .mod_params = &iwlagn_mod_params,
447 .valid_tx_ant = ANT_AB,
448 .valid_rx_ant = ANT_AB,
449 .pll_cfg_val = 0,
450 .set_l0s = true,
451 .use_bsm = false,
452 .pa_type = IWL_PA_SYSTEM,
453 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
454 .shadow_ram_support = true,
455 .led_compensation = 51,
456 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
457 .supports_idle = true,
458 .adv_thermal_throttle = true,
459 .support_ct_kill_exit = true,
460 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
461 .chain_noise_scale = 1000,
462 .monitor_recover_period = IWL_MONITORING_PERIOD,
463 .max_event_log_size = 512,
464 .sensitivity_calib_by_driver = true,
465 .chain_noise_calib_by_driver = true,
466 .need_dc_calib = true,
467};
468
469struct iwl_cfg iwl6000g2b_2agn_cfg = {
470 .name = "6000 Series 2x2 AGN Gen2b",
471 .fw_name_pre = IWL6000G2B_FW_PRE,
472 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
473 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
474 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
475 .ops = &iwl6000g2b_ops,
476 .eeprom_size = OTP_LOW_IMAGE_SIZE,
477 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
478 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
479 .num_of_queues = IWLAGN_NUM_QUEUES,
480 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
481 .mod_params = &iwlagn_mod_params,
482 .valid_tx_ant = ANT_AB,
483 .valid_rx_ant = ANT_AB,
484 .pll_cfg_val = 0,
485 .set_l0s = true,
486 .use_bsm = false,
487 .pa_type = IWL_PA_SYSTEM,
488 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
489 .shadow_ram_support = true,
490 .ht_greenfield_support = true,
491 .led_compensation = 51,
492 .use_rts_for_ht = true, /* use rts/cts protection */
493 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
494 .supports_idle = true,
495 .adv_thermal_throttle = true,
496 .support_ct_kill_exit = true,
497 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
498 .chain_noise_scale = 1000,
499 .monitor_recover_period = IWL_MONITORING_PERIOD,
500 .max_event_log_size = 512,
501 .sensitivity_calib_by_driver = true,
502 .chain_noise_calib_by_driver = true,
503 .need_dc_calib = true,
504 .bt_statistics = true,
505};
506
507struct iwl_cfg iwl6000g2b_2abg_cfg = {
508 .name = "6000 Series 2x2 ABG Gen2b",
509 .fw_name_pre = IWL6000G2B_FW_PRE,
510 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
511 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
512 .sku = IWL_SKU_A|IWL_SKU_G,
513 .ops = &iwl6000g2b_ops,
514 .eeprom_size = OTP_LOW_IMAGE_SIZE,
515 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
516 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
517 .num_of_queues = IWLAGN_NUM_QUEUES,
518 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
519 .mod_params = &iwlagn_mod_params,
520 .valid_tx_ant = ANT_AB,
521 .valid_rx_ant = ANT_AB,
522 .pll_cfg_val = 0,
523 .set_l0s = true,
524 .use_bsm = false,
525 .pa_type = IWL_PA_SYSTEM,
526 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
527 .shadow_ram_support = true,
528 .led_compensation = 51,
529 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
530 .supports_idle = true,
531 .adv_thermal_throttle = true,
532 .support_ct_kill_exit = true,
533 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
534 .chain_noise_scale = 1000,
535 .monitor_recover_period = IWL_MONITORING_PERIOD,
536 .max_event_log_size = 512,
537 .sensitivity_calib_by_driver = true,
538 .chain_noise_calib_by_driver = true,
539 .need_dc_calib = true,
540 .bt_statistics = true,
541};
542
543struct iwl_cfg iwl6000g2b_2bgn_cfg = {
544 .name = "6000 Series 2x2 BGN Gen2b",
545 .fw_name_pre = IWL6000G2B_FW_PRE,
546 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
547 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
548 .sku = IWL_SKU_G|IWL_SKU_N,
549 .ops = &iwl6000g2b_ops,
550 .eeprom_size = OTP_LOW_IMAGE_SIZE,
551 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
552 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
553 .num_of_queues = IWLAGN_NUM_QUEUES,
554 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
555 .mod_params = &iwlagn_mod_params,
556 .valid_tx_ant = ANT_AB,
557 .valid_rx_ant = ANT_AB,
558 .pll_cfg_val = 0,
559 .set_l0s = true,
560 .use_bsm = false,
561 .pa_type = IWL_PA_SYSTEM,
562 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
563 .shadow_ram_support = true,
564 .ht_greenfield_support = true,
565 .led_compensation = 51,
566 .use_rts_for_ht = true, /* use rts/cts protection */
567 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
568 .supports_idle = true,
569 .adv_thermal_throttle = true,
570 .support_ct_kill_exit = true,
571 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
572 .chain_noise_scale = 1000,
573 .monitor_recover_period = IWL_MONITORING_PERIOD,
574 .max_event_log_size = 512,
575 .sensitivity_calib_by_driver = true,
576 .chain_noise_calib_by_driver = true,
577 .need_dc_calib = true,
578 .bt_statistics = true,
579};
580
581struct iwl_cfg iwl6000g2b_2bg_cfg = {
582 .name = "6000 Series 2x2 BG Gen2b",
583 .fw_name_pre = IWL6000G2B_FW_PRE,
584 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
585 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
586 .sku = IWL_SKU_G,
587 .ops = &iwl6000g2b_ops,
588 .eeprom_size = OTP_LOW_IMAGE_SIZE,
589 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
590 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
591 .num_of_queues = IWLAGN_NUM_QUEUES,
592 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
593 .mod_params = &iwlagn_mod_params,
594 .valid_tx_ant = ANT_AB,
595 .valid_rx_ant = ANT_AB,
596 .pll_cfg_val = 0,
597 .set_l0s = true,
598 .use_bsm = false,
599 .pa_type = IWL_PA_SYSTEM,
600 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
601 .shadow_ram_support = true,
602 .led_compensation = 51,
603 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
604 .supports_idle = true,
605 .adv_thermal_throttle = true,
606 .support_ct_kill_exit = true,
607 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
608 .chain_noise_scale = 1000,
609 .monitor_recover_period = IWL_MONITORING_PERIOD,
610 .max_event_log_size = 512,
611 .sensitivity_calib_by_driver = true,
612 .chain_noise_calib_by_driver = true,
613 .need_dc_calib = true,
614 .bt_statistics = true,
615};
616
617struct iwl_cfg iwl6000g2b_bgn_cfg = {
618 .name = "6000 Series 1x2 BGN Gen2b",
619 .fw_name_pre = IWL6000G2B_FW_PRE,
620 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
621 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
622 .sku = IWL_SKU_G|IWL_SKU_N,
623 .ops = &iwl6000g2b_ops,
624 .eeprom_size = OTP_LOW_IMAGE_SIZE,
625 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
626 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
627 .num_of_queues = IWLAGN_NUM_QUEUES,
628 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
629 .mod_params = &iwlagn_mod_params,
630 .valid_tx_ant = ANT_A,
631 .valid_rx_ant = ANT_AB,
632 .pll_cfg_val = 0,
633 .set_l0s = true,
634 .use_bsm = false,
635 .pa_type = IWL_PA_SYSTEM,
636 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
637 .shadow_ram_support = true,
638 .ht_greenfield_support = true,
639 .led_compensation = 51,
640 .use_rts_for_ht = true, /* use rts/cts protection */
641 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
642 .supports_idle = true,
643 .adv_thermal_throttle = true,
644 .support_ct_kill_exit = true,
645 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
646 .chain_noise_scale = 1000,
647 .monitor_recover_period = IWL_MONITORING_PERIOD,
648 .max_event_log_size = 512,
649 .sensitivity_calib_by_driver = true,
650 .chain_noise_calib_by_driver = true,
651 .need_dc_calib = true,
652 .bt_statistics = true,
653};
654
655struct iwl_cfg iwl6000g2b_bg_cfg = {
656 .name = "6000 Series 1x2 BG Gen2b",
657 .fw_name_pre = IWL6000G2B_FW_PRE,
658 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
659 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
660 .sku = IWL_SKU_G,
661 .ops = &iwl6000g2b_ops,
662 .eeprom_size = OTP_LOW_IMAGE_SIZE,
663 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
664 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
665 .num_of_queues = IWLAGN_NUM_QUEUES,
666 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
667 .mod_params = &iwlagn_mod_params,
668 .valid_tx_ant = ANT_A,
669 .valid_rx_ant = ANT_AB,
670 .pll_cfg_val = 0,
671 .set_l0s = true,
672 .use_bsm = false,
673 .pa_type = IWL_PA_SYSTEM,
674 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
675 .shadow_ram_support = true,
676 .led_compensation = 51,
677 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
678 .supports_idle = true,
679 .adv_thermal_throttle = true,
680 .support_ct_kill_exit = true,
681 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
682 .chain_noise_scale = 1000,
683 .monitor_recover_period = IWL_MONITORING_PERIOD,
684 .max_event_log_size = 512,
685 .sensitivity_calib_by_driver = true,
686 .chain_noise_calib_by_driver = true,
687 .need_dc_calib = true,
688 .bt_statistics = true,
446}; 689};
447 690
448/* 691/*
@@ -561,7 +804,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
561 .ucode_api_max = IWL6050_UCODE_API_MAX, 804 .ucode_api_max = IWL6050_UCODE_API_MAX,
562 .ucode_api_min = IWL6050_UCODE_API_MIN, 805 .ucode_api_min = IWL6050_UCODE_API_MIN,
563 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 806 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
564 .ops = &iwl6050_ops, 807 .ops = &iwl6000_ops,
565 .eeprom_size = OTP_LOW_IMAGE_SIZE, 808 .eeprom_size = OTP_LOW_IMAGE_SIZE,
566 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 809 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
567 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, 810 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
@@ -590,6 +833,45 @@ struct iwl_cfg iwl6050_2agn_cfg = {
590 .ucode_tracing = true, 833 .ucode_tracing = true,
591 .sensitivity_calib_by_driver = true, 834 .sensitivity_calib_by_driver = true,
592 .chain_noise_calib_by_driver = true, 835 .chain_noise_calib_by_driver = true,
836 .need_dc_calib = true,
837};
838
839struct iwl_cfg iwl6050g2_bgn_cfg = {
840 .name = "6050 Series 1x2 BGN Gen2",
841 .fw_name_pre = IWL6050_FW_PRE,
842 .ucode_api_max = IWL6050_UCODE_API_MAX,
843 .ucode_api_min = IWL6050_UCODE_API_MIN,
844 .sku = IWL_SKU_G|IWL_SKU_N,
845 .ops = &iwl6000_ops,
846 .eeprom_size = OTP_LOW_IMAGE_SIZE,
847 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
848 .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
849 .num_of_queues = IWLAGN_NUM_QUEUES,
850 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
851 .mod_params = &iwlagn_mod_params,
852 .valid_tx_ant = ANT_A,
853 .valid_rx_ant = ANT_AB,
854 .pll_cfg_val = 0,
855 .set_l0s = true,
856 .use_bsm = false,
857 .pa_type = IWL_PA_SYSTEM,
858 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
859 .shadow_ram_support = true,
860 .ht_greenfield_support = true,
861 .led_compensation = 51,
862 .use_rts_for_ht = true, /* use rts/cts protection */
863 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
864 .supports_idle = true,
865 .adv_thermal_throttle = true,
866 .support_ct_kill_exit = true,
867 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
868 .chain_noise_scale = 1500,
869 .monitor_recover_period = IWL_MONITORING_PERIOD,
870 .max_event_log_size = 1024,
871 .ucode_tracing = true,
872 .sensitivity_calib_by_driver = true,
873 .chain_noise_calib_by_driver = true,
874 .need_dc_calib = true,
593}; 875};
594 876
595struct iwl_cfg iwl6050_2abg_cfg = { 877struct iwl_cfg iwl6050_2abg_cfg = {
@@ -598,7 +880,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
598 .ucode_api_max = IWL6050_UCODE_API_MAX, 880 .ucode_api_max = IWL6050_UCODE_API_MAX,
599 .ucode_api_min = IWL6050_UCODE_API_MIN, 881 .ucode_api_min = IWL6050_UCODE_API_MIN,
600 .sku = IWL_SKU_A|IWL_SKU_G, 882 .sku = IWL_SKU_A|IWL_SKU_G,
601 .ops = &iwl6050_ops, 883 .ops = &iwl6000_ops,
602 .eeprom_size = OTP_LOW_IMAGE_SIZE, 884 .eeprom_size = OTP_LOW_IMAGE_SIZE,
603 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 885 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
604 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, 886 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
@@ -625,6 +907,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
625 .ucode_tracing = true, 907 .ucode_tracing = true,
626 .sensitivity_calib_by_driver = true, 908 .sensitivity_calib_by_driver = true,
627 .chain_noise_calib_by_driver = true, 909 .chain_noise_calib_by_driver = true,
910 .need_dc_calib = true,
628}; 911};
629 912
630struct iwl_cfg iwl6000_3agn_cfg = { 913struct iwl_cfg iwl6000_3agn_cfg = {
@@ -667,3 +950,4 @@ struct iwl_cfg iwl6000_3agn_cfg = {
667MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 950MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
668MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 951MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
669MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 952MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
953MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 7e8227773213..c4c5691032a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -96,17 +96,16 @@ int iwl_send_calib_results(struct iwl_priv *priv)
96 hcmd.len = priv->calib_results[i].buf_len; 96 hcmd.len = priv->calib_results[i].buf_len;
97 hcmd.data = priv->calib_results[i].buf; 97 hcmd.data = priv->calib_results[i].buf;
98 ret = iwl_send_cmd_sync(priv, &hcmd); 98 ret = iwl_send_cmd_sync(priv, &hcmd);
99 if (ret) 99 if (ret) {
100 goto err; 100 IWL_ERR(priv, "Error %d iteration %d\n",
101 ret, i);
102 break;
103 }
101 } 104 }
102 } 105 }
103 106
104 return 0;
105err:
106 IWL_ERR(priv, "Error %d iteration %d\n", ret, i);
107 return ret; 107 return ret;
108} 108}
109EXPORT_SYMBOL(iwl_send_calib_results);
110 109
111int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) 110int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
112{ 111{
@@ -121,7 +120,6 @@ int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
121 memcpy(res->buf, buf, len); 120 memcpy(res->buf, buf, len);
122 return 0; 121 return 0;
123} 122}
124EXPORT_SYMBOL(iwl_calib_set);
125 123
126void iwl_calib_free_results(struct iwl_priv *priv) 124void iwl_calib_free_results(struct iwl_priv *priv)
127{ 125{
@@ -133,7 +131,6 @@ void iwl_calib_free_results(struct iwl_priv *priv)
133 priv->calib_results[i].buf_len = 0; 131 priv->calib_results[i].buf_len = 0;
134 } 132 }
135} 133}
136EXPORT_SYMBOL(iwl_calib_free_results);
137 134
138/***************************************************************************** 135/*****************************************************************************
139 * RUNTIME calibrations framework 136 * RUNTIME calibrations framework
@@ -412,46 +409,34 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
412 return 0; 409 return 0;
413} 410}
414 411
415/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 412static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
416static int iwl_sensitivity_write(struct iwl_priv *priv) 413 struct iwl_sensitivity_data *data,
414 __le16 *tbl)
417{ 415{
418 struct iwl_sensitivity_cmd cmd ; 416 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
419 struct iwl_sensitivity_data *data = NULL;
420 struct iwl_host_cmd cmd_out = {
421 .id = SENSITIVITY_CMD,
422 .len = sizeof(struct iwl_sensitivity_cmd),
423 .flags = CMD_ASYNC,
424 .data = &cmd,
425 };
426
427 data = &(priv->sensitivity_data);
428
429 memset(&cmd, 0, sizeof(cmd));
430
431 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
432 cpu_to_le16((u16)data->auto_corr_ofdm); 417 cpu_to_le16((u16)data->auto_corr_ofdm);
433 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = 418 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
434 cpu_to_le16((u16)data->auto_corr_ofdm_mrc); 419 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
435 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = 420 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
436 cpu_to_le16((u16)data->auto_corr_ofdm_x1); 421 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
437 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = 422 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
438 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); 423 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
439 424
440 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = 425 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
441 cpu_to_le16((u16)data->auto_corr_cck); 426 cpu_to_le16((u16)data->auto_corr_cck);
442 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = 427 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
443 cpu_to_le16((u16)data->auto_corr_cck_mrc); 428 cpu_to_le16((u16)data->auto_corr_cck_mrc);
444 429
445 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] = 430 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
446 cpu_to_le16((u16)data->nrg_th_cck); 431 cpu_to_le16((u16)data->nrg_th_cck);
447 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] = 432 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
448 cpu_to_le16((u16)data->nrg_th_ofdm); 433 cpu_to_le16((u16)data->nrg_th_ofdm);
449 434
450 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 435 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
451 cpu_to_le16(data->barker_corr_th_min); 436 cpu_to_le16(data->barker_corr_th_min);
452 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 437 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
453 cpu_to_le16(data->barker_corr_th_min_mrc); 438 cpu_to_le16(data->barker_corr_th_min_mrc);
454 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = 439 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
455 cpu_to_le16(data->nrg_th_cca); 440 cpu_to_le16(data->nrg_th_cca);
456 441
457 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 442 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
@@ -462,6 +447,25 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
462 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", 447 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
463 data->auto_corr_cck, data->auto_corr_cck_mrc, 448 data->auto_corr_cck, data->auto_corr_cck_mrc,
464 data->nrg_th_cck); 449 data->nrg_th_cck);
450}
451
452/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
453static int iwl_sensitivity_write(struct iwl_priv *priv)
454{
455 struct iwl_sensitivity_cmd cmd;
456 struct iwl_sensitivity_data *data = NULL;
457 struct iwl_host_cmd cmd_out = {
458 .id = SENSITIVITY_CMD,
459 .len = sizeof(struct iwl_sensitivity_cmd),
460 .flags = CMD_ASYNC,
461 .data = &cmd,
462 };
463
464 data = &(priv->sensitivity_data);
465
466 memset(&cmd, 0, sizeof(cmd));
467
468 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
465 469
466 /* Update uCode's "work" table, and copy it to DSP */ 470 /* Update uCode's "work" table, and copy it to DSP */
467 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; 471 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
@@ -480,6 +484,70 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
480 return iwl_send_cmd(priv, &cmd_out); 484 return iwl_send_cmd(priv, &cmd_out);
481} 485}
482 486
487/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
488static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
489{
490 struct iwl_enhance_sensitivity_cmd cmd;
491 struct iwl_sensitivity_data *data = NULL;
492 struct iwl_host_cmd cmd_out = {
493 .id = SENSITIVITY_CMD,
494 .len = sizeof(struct iwl_enhance_sensitivity_cmd),
495 .flags = CMD_ASYNC,
496 .data = &cmd,
497 };
498
499 data = &(priv->sensitivity_data);
500
501 memset(&cmd, 0, sizeof(cmd));
502
503 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
504
505 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
506 HD_INA_NON_SQUARE_DET_OFDM_DATA;
507 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
508 HD_INA_NON_SQUARE_DET_CCK_DATA;
509 cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
510 HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA;
511 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
512 HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA;
513 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
514 HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
515 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
516 HD_OFDM_NON_SQUARE_DET_SLOPE_DATA;
517 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
518 HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA;
519 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
520 HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA;
521 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
522 HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
523 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
524 HD_CCK_NON_SQUARE_DET_SLOPE_DATA;
525 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
526 HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA;
527
528 /* Update uCode's "work" table, and copy it to DSP */
529 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
530
531 /* Don't send command to uCode if nothing has changed */
532 if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]),
533 sizeof(u16)*HD_TABLE_SIZE) &&
534 !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX],
535 &(priv->enhance_sensitivity_tbl[0]),
536 sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) {
537 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
538 return 0;
539 }
540
541 /* Copy table for comparison next time */
542 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]),
543 sizeof(u16)*HD_TABLE_SIZE);
544 memcpy(&(priv->enhance_sensitivity_tbl[0]),
545 &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
546 sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
547
548 return iwl_send_cmd(priv, &cmd_out);
549}
550
483void iwl_init_sensitivity(struct iwl_priv *priv) 551void iwl_init_sensitivity(struct iwl_priv *priv)
484{ 552{
485 int ret = 0; 553 int ret = 0;
@@ -530,13 +598,14 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
530 data->last_bad_plcp_cnt_cck = 0; 598 data->last_bad_plcp_cnt_cck = 0;
531 data->last_fa_cnt_cck = 0; 599 data->last_fa_cnt_cck = 0;
532 600
533 ret |= iwl_sensitivity_write(priv); 601 if (priv->enhance_sensitivity_table)
602 ret |= iwl_enhance_sensitivity_write(priv);
603 else
604 ret |= iwl_sensitivity_write(priv);
534 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); 605 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
535} 606}
536EXPORT_SYMBOL(iwl_init_sensitivity);
537 607
538void iwl_sensitivity_calibration(struct iwl_priv *priv, 608void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
539 struct iwl_notif_statistics *resp)
540{ 609{
541 u32 rx_enable_time; 610 u32 rx_enable_time;
542 u32 fa_cck; 611 u32 fa_cck;
@@ -546,8 +615,8 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
546 u32 norm_fa_ofdm; 615 u32 norm_fa_ofdm;
547 u32 norm_fa_cck; 616 u32 norm_fa_cck;
548 struct iwl_sensitivity_data *data = NULL; 617 struct iwl_sensitivity_data *data = NULL;
549 struct statistics_rx_non_phy *rx_info = &(resp->rx.general); 618 struct statistics_rx_non_phy *rx_info;
550 struct statistics_rx *statistics = &(resp->rx); 619 struct statistics_rx_phy *ofdm, *cck;
551 unsigned long flags; 620 unsigned long flags;
552 struct statistics_general_data statis; 621 struct statistics_general_data statis;
553 622
@@ -562,6 +631,16 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
562 } 631 }
563 632
564 spin_lock_irqsave(&priv->lock, flags); 633 spin_lock_irqsave(&priv->lock, flags);
634 if (priv->cfg->bt_statistics) {
635 rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
636 rx.general.common);
637 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
638 cck = &(((struct iwl_bt_notif_statistics *)resp)->rx.cck);
639 } else {
640 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
641 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
642 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
643 }
565 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 644 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
566 IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); 645 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
567 spin_unlock_irqrestore(&priv->lock, flags); 646 spin_unlock_irqrestore(&priv->lock, flags);
@@ -570,23 +649,23 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
570 649
571 /* Extract Statistics: */ 650 /* Extract Statistics: */
572 rx_enable_time = le32_to_cpu(rx_info->channel_load); 651 rx_enable_time = le32_to_cpu(rx_info->channel_load);
573 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt); 652 fa_cck = le32_to_cpu(cck->false_alarm_cnt);
574 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt); 653 fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
575 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err); 654 bad_plcp_cck = le32_to_cpu(cck->plcp_err);
576 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err); 655 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
577 656
578 statis.beacon_silence_rssi_a = 657 statis.beacon_silence_rssi_a =
579 le32_to_cpu(statistics->general.beacon_silence_rssi_a); 658 le32_to_cpu(rx_info->beacon_silence_rssi_a);
580 statis.beacon_silence_rssi_b = 659 statis.beacon_silence_rssi_b =
581 le32_to_cpu(statistics->general.beacon_silence_rssi_b); 660 le32_to_cpu(rx_info->beacon_silence_rssi_b);
582 statis.beacon_silence_rssi_c = 661 statis.beacon_silence_rssi_c =
583 le32_to_cpu(statistics->general.beacon_silence_rssi_c); 662 le32_to_cpu(rx_info->beacon_silence_rssi_c);
584 statis.beacon_energy_a = 663 statis.beacon_energy_a =
585 le32_to_cpu(statistics->general.beacon_energy_a); 664 le32_to_cpu(rx_info->beacon_energy_a);
586 statis.beacon_energy_b = 665 statis.beacon_energy_b =
587 le32_to_cpu(statistics->general.beacon_energy_b); 666 le32_to_cpu(rx_info->beacon_energy_b);
588 statis.beacon_energy_c = 667 statis.beacon_energy_c =
589 le32_to_cpu(statistics->general.beacon_energy_c); 668 le32_to_cpu(rx_info->beacon_energy_c);
590 669
591 spin_unlock_irqrestore(&priv->lock, flags); 670 spin_unlock_irqrestore(&priv->lock, flags);
592 671
@@ -637,9 +716,11 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
637 716
638 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); 717 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
639 iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); 718 iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
640 iwl_sensitivity_write(priv); 719 if (priv->enhance_sensitivity_table)
720 iwl_enhance_sensitivity_write(priv);
721 else
722 iwl_sensitivity_write(priv);
641} 723}
642EXPORT_SYMBOL(iwl_sensitivity_calibration);
643 724
644static inline u8 find_first_chain(u8 mask) 725static inline u8 find_first_chain(u8 mask)
645{ 726{
@@ -656,8 +737,7 @@ static inline u8 find_first_chain(u8 mask)
656 * 1) Which antennas are connected. 737 * 1) Which antennas are connected.
657 * 2) Differential rx gain settings to balance the 3 receivers. 738 * 2) Differential rx gain settings to balance the 3 receivers.
658 */ 739 */
659void iwl_chain_noise_calibration(struct iwl_priv *priv, 740void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
660 struct iwl_notif_statistics *stat_resp)
661{ 741{
662 struct iwl_chain_noise_data *data = NULL; 742 struct iwl_chain_noise_data *data = NULL;
663 743
@@ -681,7 +761,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
681 u32 active_chains = 0; 761 u32 active_chains = 0;
682 u8 num_tx_chains; 762 u8 num_tx_chains;
683 unsigned long flags; 763 unsigned long flags;
684 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); 764 struct statistics_rx_non_phy *rx_info;
685 u8 first_chain; 765 u8 first_chain;
686 766
687 if (priv->disable_chain_noise_cal) 767 if (priv->disable_chain_noise_cal)
@@ -700,6 +780,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
700 } 780 }
701 781
702 spin_lock_irqsave(&priv->lock, flags); 782 spin_lock_irqsave(&priv->lock, flags);
783 if (priv->cfg->bt_statistics) {
784 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
785 rx.general.common);
786 } else {
787 rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
788 rx.general);
789 }
703 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 790 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
704 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); 791 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
705 spin_unlock_irqrestore(&priv->lock, flags); 792 spin_unlock_irqrestore(&priv->lock, flags);
@@ -708,8 +795,19 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
708 795
709 rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK); 796 rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK);
710 rxon_chnum = le16_to_cpu(priv->staging_rxon.channel); 797 rxon_chnum = le16_to_cpu(priv->staging_rxon.channel);
711 stat_band24 = !!(stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK); 798 if (priv->cfg->bt_statistics) {
712 stat_chnum = le32_to_cpu(stat_resp->flag) >> 16; 799 stat_band24 = !!(((struct iwl_bt_notif_statistics *)
800 stat_resp)->flag &
801 STATISTICS_REPLY_FLG_BAND_24G_MSK);
802 stat_chnum = le32_to_cpu(((struct iwl_bt_notif_statistics *)
803 stat_resp)->flag) >> 16;
804 } else {
805 stat_band24 = !!(((struct iwl_notif_statistics *)
806 stat_resp)->flag &
807 STATISTICS_REPLY_FLG_BAND_24G_MSK);
808 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
809 stat_resp)->flag) >> 16;
810 }
713 811
714 /* Make sure we accumulate data for just the associated channel 812 /* Make sure we accumulate data for just the associated channel
715 * (even if scanning). */ 813 * (even if scanning). */
@@ -846,6 +944,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
846 } 944 }
847 } 945 }
848 946
947 if (active_chains != priv->hw_params.valid_rx_ant &&
948 active_chains != priv->chain_noise_data.active_chains)
949 IWL_DEBUG_CALIB(priv,
950 "Detected that not all antennas are connected! "
951 "Connected: %#x, valid: %#x.\n",
952 active_chains, priv->hw_params.valid_rx_ant);
953
849 /* Save for use within RXON, TX, SCAN commands, etc. */ 954 /* Save for use within RXON, TX, SCAN commands, etc. */
850 priv->chain_noise_data.active_chains = active_chains; 955 priv->chain_noise_data.active_chains = active_chains;
851 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", 956 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
@@ -890,8 +995,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
890 data->state = IWL_CHAIN_NOISE_DONE; 995 data->state = IWL_CHAIN_NOISE_DONE;
891 iwl_power_update_mode(priv, false); 996 iwl_power_update_mode(priv, false);
892} 997}
893EXPORT_SYMBOL(iwl_chain_noise_calibration);
894
895 998
896void iwl_reset_run_time_calib(struct iwl_priv *priv) 999void iwl_reset_run_time_calib(struct iwl_priv *priv)
897{ 1000{
@@ -908,5 +1011,3 @@ void iwl_reset_run_time_calib(struct iwl_priv *priv)
908 * periodically after association */ 1011 * periodically after association */
909 iwl_send_statistics_request(priv, CMD_ASYNC, true); 1012 iwl_send_statistics_request(priv, CMD_ASYNC, true);
910} 1013}
911EXPORT_SYMBOL(iwl_reset_run_time_calib);
912
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index 48c023b4ca36..f052c6d09b37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -28,6 +28,30 @@
28 28
29#include "iwl-agn-debugfs.h" 29#include "iwl-agn-debugfs.h"
30 30
31static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
32{
33 int p = 0;
34 u32 flag;
35
36 if (priv->cfg->bt_statistics)
37 flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
38 else
39 flag = le32_to_cpu(priv->_agn.statistics.flag);
40
41 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
42 if (flag & UCODE_STATISTICS_CLEAR_MSK)
43 p += scnprintf(buf + p, bufsz - p,
44 "\tStatistics have been cleared\n");
45 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
46 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
47 ? "2.4 GHz" : "5.2 GHz");
48 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
49 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
50 ? "enabled" : "disabled");
51
52 return p;
53}
54
31ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf, 55ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
32 size_t count, loff_t *ppos) 56 size_t count, loff_t *ppos)
33 { 57 {
@@ -58,24 +82,45 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 * the last statistics notification from uCode 82 * the last statistics notification from uCode
59 * might not reflect the current uCode activity 83 * might not reflect the current uCode activity
60 */ 84 */
61 ofdm = &priv->statistics.rx.ofdm; 85 if (priv->cfg->bt_statistics) {
62 cck = &priv->statistics.rx.cck; 86 ofdm = &priv->_agn.statistics_bt.rx.ofdm;
63 general = &priv->statistics.rx.general; 87 cck = &priv->_agn.statistics_bt.rx.cck;
64 ht = &priv->statistics.rx.ofdm_ht; 88 general = &priv->_agn.statistics_bt.rx.general.common;
65 accum_ofdm = &priv->accum_statistics.rx.ofdm; 89 ht = &priv->_agn.statistics_bt.rx.ofdm_ht;
66 accum_cck = &priv->accum_statistics.rx.cck; 90 accum_ofdm = &priv->_agn.accum_statistics_bt.rx.ofdm;
67 accum_general = &priv->accum_statistics.rx.general; 91 accum_cck = &priv->_agn.accum_statistics_bt.rx.cck;
68 accum_ht = &priv->accum_statistics.rx.ofdm_ht; 92 accum_general =
69 delta_ofdm = &priv->delta_statistics.rx.ofdm; 93 &priv->_agn.accum_statistics_bt.rx.general.common;
70 delta_cck = &priv->delta_statistics.rx.cck; 94 accum_ht = &priv->_agn.accum_statistics_bt.rx.ofdm_ht;
71 delta_general = &priv->delta_statistics.rx.general; 95 delta_ofdm = &priv->_agn.delta_statistics_bt.rx.ofdm;
72 delta_ht = &priv->delta_statistics.rx.ofdm_ht; 96 delta_cck = &priv->_agn.delta_statistics_bt.rx.cck;
73 max_ofdm = &priv->max_delta.rx.ofdm; 97 delta_general =
74 max_cck = &priv->max_delta.rx.cck; 98 &priv->_agn.delta_statistics_bt.rx.general.common;
75 max_general = &priv->max_delta.rx.general; 99 delta_ht = &priv->_agn.delta_statistics_bt.rx.ofdm_ht;
76 max_ht = &priv->max_delta.rx.ofdm_ht; 100 max_ofdm = &priv->_agn.max_delta_bt.rx.ofdm;
77 101 max_cck = &priv->_agn.max_delta_bt.rx.cck;
78 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 102 max_general = &priv->_agn.max_delta_bt.rx.general.common;
103 max_ht = &priv->_agn.max_delta_bt.rx.ofdm_ht;
104 } else {
105 ofdm = &priv->_agn.statistics.rx.ofdm;
106 cck = &priv->_agn.statistics.rx.cck;
107 general = &priv->_agn.statistics.rx.general;
108 ht = &priv->_agn.statistics.rx.ofdm_ht;
109 accum_ofdm = &priv->_agn.accum_statistics.rx.ofdm;
110 accum_cck = &priv->_agn.accum_statistics.rx.cck;
111 accum_general = &priv->_agn.accum_statistics.rx.general;
112 accum_ht = &priv->_agn.accum_statistics.rx.ofdm_ht;
113 delta_ofdm = &priv->_agn.delta_statistics.rx.ofdm;
114 delta_cck = &priv->_agn.delta_statistics.rx.cck;
115 delta_general = &priv->_agn.delta_statistics.rx.general;
116 delta_ht = &priv->_agn.delta_statistics.rx.ofdm_ht;
117 max_ofdm = &priv->_agn.max_delta.rx.ofdm;
118 max_cck = &priv->_agn.max_delta.rx.cck;
119 max_general = &priv->_agn.max_delta.rx.general;
120 max_ht = &priv->_agn.max_delta.rx.ofdm_ht;
121 }
122
123 pos += iwl_statistics_flag(priv, buf, bufsz);
79 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" 124 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
80 "acumulative delta max\n", 125 "acumulative delta max\n",
81 "Statistics_Rx - OFDM:"); 126 "Statistics_Rx - OFDM:");
@@ -539,11 +584,19 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
539 * the last statistics notification from uCode 584 * the last statistics notification from uCode
540 * might not reflect the current uCode activity 585 * might not reflect the current uCode activity
541 */ 586 */
542 tx = &priv->statistics.tx; 587 if (priv->cfg->bt_statistics) {
543 accum_tx = &priv->accum_statistics.tx; 588 tx = &priv->_agn.statistics_bt.tx;
544 delta_tx = &priv->delta_statistics.tx; 589 accum_tx = &priv->_agn.accum_statistics_bt.tx;
545 max_tx = &priv->max_delta.tx; 590 delta_tx = &priv->_agn.delta_statistics_bt.tx;
546 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 591 max_tx = &priv->_agn.max_delta_bt.tx;
592 } else {
593 tx = &priv->_agn.statistics.tx;
594 accum_tx = &priv->_agn.accum_statistics.tx;
595 delta_tx = &priv->_agn.delta_statistics.tx;
596 max_tx = &priv->_agn.max_delta.tx;
597 }
598
599 pos += iwl_statistics_flag(priv, buf, bufsz);
547 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" 600 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
548 "acumulative delta max\n", 601 "acumulative delta max\n",
549 "Statistics_Tx:"); 602 "Statistics_Tx:");
@@ -738,8 +791,8 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
738 char *buf; 791 char *buf;
739 int bufsz = sizeof(struct statistics_general) * 10 + 300; 792 int bufsz = sizeof(struct statistics_general) * 10 + 300;
740 ssize_t ret; 793 ssize_t ret;
741 struct statistics_general *general, *accum_general; 794 struct statistics_general_common *general, *accum_general;
742 struct statistics_general *delta_general, *max_general; 795 struct statistics_general_common *delta_general, *max_general;
743 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; 796 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
744 struct statistics_div *div, *accum_div, *delta_div, *max_div; 797 struct statistics_div *div, *accum_div, *delta_div, *max_div;
745 798
@@ -756,19 +809,35 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
756 * the last statistics notification from uCode 809 * the last statistics notification from uCode
757 * might not reflect the current uCode activity 810 * might not reflect the current uCode activity
758 */ 811 */
759 general = &priv->statistics.general; 812 if (priv->cfg->bt_statistics) {
760 dbg = &priv->statistics.general.dbg; 813 general = &priv->_agn.statistics_bt.general.common;
761 div = &priv->statistics.general.div; 814 dbg = &priv->_agn.statistics_bt.general.common.dbg;
762 accum_general = &priv->accum_statistics.general; 815 div = &priv->_agn.statistics_bt.general.common.div;
763 delta_general = &priv->delta_statistics.general; 816 accum_general = &priv->_agn.accum_statistics_bt.general.common;
764 max_general = &priv->max_delta.general; 817 accum_dbg = &priv->_agn.accum_statistics_bt.general.common.dbg;
765 accum_dbg = &priv->accum_statistics.general.dbg; 818 accum_div = &priv->_agn.accum_statistics_bt.general.common.div;
766 delta_dbg = &priv->delta_statistics.general.dbg; 819 delta_general = &priv->_agn.delta_statistics_bt.general.common;
767 max_dbg = &priv->max_delta.general.dbg; 820 max_general = &priv->_agn.max_delta_bt.general.common;
768 accum_div = &priv->accum_statistics.general.div; 821 delta_dbg = &priv->_agn.delta_statistics_bt.general.common.dbg;
769 delta_div = &priv->delta_statistics.general.div; 822 max_dbg = &priv->_agn.max_delta_bt.general.common.dbg;
770 max_div = &priv->max_delta.general.div; 823 delta_div = &priv->_agn.delta_statistics_bt.general.common.div;
771 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 824 max_div = &priv->_agn.max_delta_bt.general.common.div;
825 } else {
826 general = &priv->_agn.statistics.general.common;
827 dbg = &priv->_agn.statistics.general.common.dbg;
828 div = &priv->_agn.statistics.general.common.div;
829 accum_general = &priv->_agn.accum_statistics.general.common;
830 accum_dbg = &priv->_agn.accum_statistics.general.common.dbg;
831 accum_div = &priv->_agn.accum_statistics.general.common.div;
832 delta_general = &priv->_agn.delta_statistics.general.common;
833 max_general = &priv->_agn.max_delta.general.common;
834 delta_dbg = &priv->_agn.delta_statistics.general.common.dbg;
835 max_dbg = &priv->_agn.max_delta.general.common.dbg;
836 delta_div = &priv->_agn.delta_statistics.general.common.div;
837 max_div = &priv->_agn.max_delta.general.common.div;
838 }
839
840 pos += iwl_statistics_flag(priv, buf, bufsz);
772 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" 841 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
773 "acumulative delta max\n", 842 "acumulative delta max\n",
774 "Statistics_General:"); 843 "Statistics_General:");
@@ -792,6 +861,13 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
792 delta_dbg->burst_count, max_dbg->burst_count); 861 delta_dbg->burst_count, max_dbg->burst_count);
793 pos += scnprintf(buf + pos, bufsz - pos, 862 pos += scnprintf(buf + pos, bufsz - pos,
794 " %-30s %10u %10u %10u %10u\n", 863 " %-30s %10u %10u %10u %10u\n",
864 "wait_for_silence_timeout_count:",
865 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
866 accum_dbg->wait_for_silence_timeout_cnt,
867 delta_dbg->wait_for_silence_timeout_cnt,
868 max_dbg->wait_for_silence_timeout_cnt);
869 pos += scnprintf(buf + pos, bufsz - pos,
870 " %-30s %10u %10u %10u %10u\n",
795 "sleep_time:", 871 "sleep_time:",
796 le32_to_cpu(general->sleep_time), 872 le32_to_cpu(general->sleep_time),
797 accum_general->sleep_time, 873 accum_general->sleep_time,
@@ -848,3 +924,90 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
848 kfree(buf); 924 kfree(buf);
849 return ret; 925 return ret;
850} 926}
927
928ssize_t iwl_ucode_bt_stats_read(struct file *file,
929 char __user *user_buf,
930 size_t count, loff_t *ppos)
931{
932 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
933 int pos = 0;
934 char *buf;
935 int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200;
936 ssize_t ret;
937 struct statistics_bt_activity *bt, *accum_bt;
938
939 if (!iwl_is_alive(priv))
940 return -EAGAIN;
941
942 /* make request to uCode to retrieve statistics information */
943 mutex_lock(&priv->mutex);
944 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
945 mutex_unlock(&priv->mutex);
946
947 if (ret) {
948 IWL_ERR(priv,
949 "Error sending statistics request: %zd\n", ret);
950 return -EAGAIN;
951 }
952 buf = kzalloc(bufsz, GFP_KERNEL);
953 if (!buf) {
954 IWL_ERR(priv, "Can not allocate Buffer\n");
955 return -ENOMEM;
956 }
957
958 /*
959 * the statistic information display here is based on
960 * the last statistics notification from uCode
961 * might not reflect the current uCode activity
962 */
963 bt = &priv->_agn.statistics_bt.general.activity;
964 accum_bt = &priv->_agn.accum_statistics_bt.general.activity;
965
966 pos += iwl_statistics_flag(priv, buf, bufsz);
967 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n");
968 pos += scnprintf(buf + pos, bufsz - pos,
969 "\t\t\tcurrent\t\t\taccumulative\n");
970 pos += scnprintf(buf + pos, bufsz - pos,
971 "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
972 le32_to_cpu(bt->hi_priority_tx_req_cnt),
973 accum_bt->hi_priority_tx_req_cnt);
974 pos += scnprintf(buf + pos, bufsz - pos,
975 "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
976 le32_to_cpu(bt->hi_priority_tx_denied_cnt),
977 accum_bt->hi_priority_tx_denied_cnt);
978 pos += scnprintf(buf + pos, bufsz - pos,
979 "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
980 le32_to_cpu(bt->lo_priority_tx_req_cnt),
981 accum_bt->lo_priority_tx_req_cnt);
982 pos += scnprintf(buf + pos, bufsz - pos,
983 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
984 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
985 accum_bt->lo_priority_tx_denied_cnt);
986 pos += scnprintf(buf + pos, bufsz - pos,
987 "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
988 le32_to_cpu(bt->hi_priority_rx_req_cnt),
989 accum_bt->hi_priority_rx_req_cnt);
990 pos += scnprintf(buf + pos, bufsz - pos,
991 "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
992 le32_to_cpu(bt->hi_priority_rx_denied_cnt),
993 accum_bt->hi_priority_rx_denied_cnt);
994 pos += scnprintf(buf + pos, bufsz - pos,
995 "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
996 le32_to_cpu(bt->lo_priority_rx_req_cnt),
997 accum_bt->lo_priority_rx_req_cnt);
998 pos += scnprintf(buf + pos, bufsz - pos,
999 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
1000 le32_to_cpu(bt->lo_priority_rx_denied_cnt),
1001 accum_bt->lo_priority_rx_denied_cnt);
1002
1003 pos += scnprintf(buf + pos, bufsz - pos,
1004 "(rx)num_bt_kills:\t\t%u\t\t\t%u\n",
1005 le32_to_cpu(priv->_agn.statistics_bt.rx.
1006 general.num_bt_kills),
1007 priv->_agn.accum_statistics_bt.rx.
1008 general.num_bt_kills);
1009
1010 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1011 kfree(buf);
1012 return ret;
1013}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
index 59b1f25f0d85..bbdce5913ac7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -37,6 +37,8 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos); 37 size_t count, loff_t *ppos);
38ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf, 38ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
39 size_t count, loff_t *ppos); 39 size_t count, loff_t *ppos);
40ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
41 size_t count, loff_t *ppos);
40#else 42#else
41static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf, 43static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
42 size_t count, loff_t *ppos) 44 size_t count, loff_t *ppos)
@@ -53,4 +55,9 @@ static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user
53{ 55{
54 return 0; 56 return 0;
55} 57}
58static ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
59 size_t count, loff_t *ppos)
60{
61 return 0;
62}
56#endif 63#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 01658cf82d39..a7216dda9786 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -37,7 +37,7 @@
37#include "iwl-io.h" 37#include "iwl-io.h"
38#include "iwl-agn.h" 38#include "iwl-agn.h"
39 39
40static int iwlagn_send_rxon_assoc(struct iwl_priv *priv) 40int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
41{ 41{
42 int ret = 0; 42 int ret = 0;
43 struct iwl5000_rxon_assoc_cmd rxon_assoc; 43 struct iwl5000_rxon_assoc_cmd rxon_assoc;
@@ -84,7 +84,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
84 return ret; 84 return ret;
85} 85}
86 86
87static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) 87int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
88{ 88{
89 struct iwl_tx_ant_config_cmd tx_ant_cmd = { 89 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
90 .valid = cpu_to_le32(valid_tx_ant), 90 .valid = cpu_to_le32(valid_tx_ant),
@@ -164,7 +164,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
164 164
165 memset(&cmd, 0, sizeof(cmd)); 165 memset(&cmd, 0, sizeof(cmd));
166 166
167 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD; 167 cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_gain_cmd;
168 cmd.hdr.first_group = 0; 168 cmd.hdr.first_group = 0;
169 cmd.hdr.groups_num = 1; 169 cmd.hdr.groups_num = 1;
170 cmd.hdr.data_valid = 1; 170 cmd.hdr.data_valid = 1;
@@ -176,14 +176,6 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
176 data->radio_write = 1; 176 data->radio_write = 1;
177 data->state = IWL_CHAIN_NOISE_CALIBRATED; 177 data->state = IWL_CHAIN_NOISE_CALIBRATED;
178 } 178 }
179
180 data->chain_noise_a = 0;
181 data->chain_noise_b = 0;
182 data->chain_noise_c = 0;
183 data->chain_signal_a = 0;
184 data->chain_signal_b = 0;
185 data->chain_signal_c = 0;
186 data->beacon_count = 0;
187} 179}
188 180
189static void iwlagn_chain_noise_reset(struct iwl_priv *priv) 181static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
@@ -191,11 +183,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
191 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 183 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
192 int ret; 184 int ret;
193 185
194 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 186 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
187 iwl_is_associated(priv)) {
195 struct iwl_calib_chain_noise_reset_cmd cmd; 188 struct iwl_calib_chain_noise_reset_cmd cmd;
196 memset(&cmd, 0, sizeof(cmd));
197 189
198 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD; 190 /* clear data for chain noise calibration algorithm */
191 data->chain_noise_a = 0;
192 data->chain_noise_b = 0;
193 data->chain_noise_c = 0;
194 data->chain_signal_a = 0;
195 data->chain_signal_b = 0;
196 data->chain_signal_c = 0;
197 data->beacon_count = 0;
198
199 memset(&cmd, 0, sizeof(cmd));
200 cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_reset_cmd;
199 cmd.hdr.first_group = 0; 201 cmd.hdr.first_group = 0;
200 cmd.hdr.groups_num = 1; 202 cmd.hdr.groups_num = 1;
201 cmd.hdr.data_valid = 1; 203 cmd.hdr.data_valid = 1;
@@ -212,7 +214,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
212static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 214static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
213 __le32 *tx_flags) 215 __le32 *tx_flags)
214{ 216{
215 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; 217 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
216} 218}
217 219
218/* Calc max signal level (dBm) among 3 possible receivers */ 220/* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index f9a3fbb6338f..a52b82c8e7a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -112,7 +112,7 @@
112 */ 112 */
113struct iwlagn_scd_bc_tbl { 113struct iwlagn_scd_bc_tbl {
114 __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; 114 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
115} __attribute__ ((packed)); 115} __packed;
116 116
117 117
118#endif /* __iwl_agn_hw_h__ */ 118#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 0f292a210ed9..a1b6d202d57c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -77,7 +77,7 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
77 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 77 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
78 agg->frame_count, agg->start_idx, idx); 78 agg->frame_count, agg->start_idx, idx);
79 79
80 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 80 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
81 info->status.rates[0].count = tx_resp->failure_frame + 1; 81 info->status.rates[0].count = tx_resp->failure_frame + 1;
82 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 82 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
83 info->flags |= iwl_tx_status_to_mac80211(status); 83 info->flags |= iwl_tx_status_to_mac80211(status);
@@ -93,6 +93,12 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
93 } else { 93 } else {
94 /* Two or more frames were attempted; expect block-ack */ 94 /* Two or more frames were attempted; expect block-ack */
95 u64 bitmap = 0; 95 u64 bitmap = 0;
96
97 /*
98 * Start is the lowest frame sent. It may not be the first
99 * frame in the batch; we figure this out dynamically during
100 * the following loop.
101 */
96 int start = agg->start_idx; 102 int start = agg->start_idx;
97 103
98 /* Construct bit-map of pending frames within Tx window */ 104 /* Construct bit-map of pending frames within Tx window */
@@ -131,25 +137,58 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
131 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", 137 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
132 i, idx, SEQ_TO_SN(sc)); 138 i, idx, SEQ_TO_SN(sc));
133 139
140 /*
141 * sh -> how many frames ahead of the starting frame is
142 * the current one?
143 *
144 * Note that all frames sent in the batch must be in a
145 * 64-frame window, so this number should be in [0,63].
146 * If outside of this window, then we've found a new
147 * "first" frame in the batch and need to change start.
148 */
134 sh = idx - start; 149 sh = idx - start;
135 if (sh > 64) { 150
136 sh = (start - idx) + 0xff; 151 /*
152 * If >= 64, out of window. start must be at the front
153 * of the circular buffer, idx must be near the end of
154 * the buffer, and idx is the new "first" frame. Shift
155 * the indices around.
156 */
157 if (sh >= 64) {
158 /* Shift bitmap by start - idx, wrapped */
159 sh = 0x100 - idx + start;
137 bitmap = bitmap << sh; 160 bitmap = bitmap << sh;
161 /* Now idx is the new start so sh = 0 */
138 sh = 0; 162 sh = 0;
139 start = idx; 163 start = idx;
140 } else if (sh < -64) 164 /*
141 sh = 0xff - (start - idx); 165 * If <= -64 then wraps the 256-pkt circular buffer
142 else if (sh < 0) { 166 * (e.g., start = 255 and idx = 0, sh should be 1)
167 */
168 } else if (sh <= -64) {
169 sh = 0x100 - start + idx;
170 /*
171 * If < 0 but > -64, out of window. idx is before start
172 * but not wrapped. Shift the indices around.
173 */
174 } else if (sh < 0) {
175 /* Shift by how far start is ahead of idx */
143 sh = start - idx; 176 sh = start - idx;
144 start = idx;
145 bitmap = bitmap << sh; 177 bitmap = bitmap << sh;
178 /* Now idx is the new start so sh = 0 */
179 start = idx;
146 sh = 0; 180 sh = 0;
147 } 181 }
182 /* Sequence number start + sh was sent in this batch */
148 bitmap |= 1ULL << sh; 183 bitmap |= 1ULL << sh;
149 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", 184 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
150 start, (unsigned long long)bitmap); 185 start, (unsigned long long)bitmap);
151 } 186 }
152 187
188 /*
189 * Store the bitmap and possibly the new start, if we wrapped
190 * the buffer above
191 */
153 agg->bitmap = bitmap; 192 agg->bitmap = bitmap;
154 agg->start_idx = start; 193 agg->start_idx = start;
155 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", 194 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
@@ -166,7 +205,9 @@ void iwl_check_abort_status(struct iwl_priv *priv,
166 u8 frame_count, u32 status) 205 u8 frame_count, u32 status)
167{ 206{
168 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { 207 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
169 IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n"); 208 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
209 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
210 queue_work(priv->workqueue, &priv->tx_flush);
170 } 211 }
171} 212}
172 213
@@ -184,6 +225,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
184 int tid; 225 int tid;
185 int sta_id; 226 int sta_id;
186 int freed; 227 int freed;
228 unsigned long flags;
187 229
188 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 230 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
189 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 231 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
@@ -193,15 +235,16 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
193 return; 235 return;
194 } 236 }
195 237
196 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); 238 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
197 memset(&info->status, 0, sizeof(info->status)); 239 memset(&info->status, 0, sizeof(info->status));
198 240
199 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; 241 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
200 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; 242 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
201 243
244 spin_lock_irqsave(&priv->sta_lock, flags);
202 if (txq->sched_retry) { 245 if (txq->sched_retry) {
203 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp); 246 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
204 struct iwl_ht_agg *agg = NULL; 247 struct iwl_ht_agg *agg;
205 248
206 agg = &priv->stations[sta_id].tid[tid].agg; 249 agg = &priv->stations[sta_id].tid[tid].agg;
207 250
@@ -256,6 +299,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
256 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 299 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
257 300
258 iwl_check_abort_status(priv, tx_resp->frame_count, status); 301 iwl_check_abort_status(priv, tx_resp->frame_count, status);
302 spin_unlock_irqrestore(&priv->sta_lock, flags);
259} 303}
260 304
261void iwlagn_rx_handler_setup(struct iwl_priv *priv) 305void iwlagn_rx_handler_setup(struct iwl_priv *priv)
@@ -319,7 +363,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
319void iwlagn_temperature(struct iwl_priv *priv) 363void iwlagn_temperature(struct iwl_priv *priv)
320{ 364{
321 /* store temperature from statistics (in Celsius) */ 365 /* store temperature from statistics (in Celsius) */
322 priv->temperature = le32_to_cpu(priv->statistics.general.temperature); 366 priv->temperature =
367 le32_to_cpu(priv->_agn.statistics.general.common.temperature);
323 iwl_tt_handler(priv); 368 iwl_tt_handler(priv);
324} 369}
325 370
@@ -444,7 +489,7 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
444 489
445 /* Tell device where to find RBD circular buffer in DRAM */ 490 /* Tell device where to find RBD circular buffer in DRAM */
446 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 491 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
447 (u32)(rxq->dma_addr >> 8)); 492 (u32)(rxq->bd_dma >> 8));
448 493
449 /* Tell device where in DRAM to update its Rx status */ 494 /* Tell device where in DRAM to update its Rx status */
450 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, 495 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
@@ -709,7 +754,7 @@ void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
709 } 754 }
710 755
711 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 756 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
712 rxq->dma_addr); 757 rxq->bd_dma);
713 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), 758 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
714 rxq->rb_stts, rxq->rb_stts_dma); 759 rxq->rb_stts, rxq->rb_stts_dma);
715 rxq->bd = NULL; 760 rxq->bd = NULL;
@@ -755,132 +800,6 @@ static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
755 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); 800 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
756} 801}
757 802
758#ifdef CONFIG_IWLWIFI_DEBUG
759/**
760 * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
761 *
762 * You may hack this function to show different aspects of received frames,
763 * including selective frame dumps.
764 * group100 parameter selects whether to show 1 out of 100 good data frames.
765 * All beacon and probe response frames are printed.
766 */
767static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
768 struct iwl_rx_phy_res *phy_res, u16 length,
769 struct ieee80211_hdr *header, int group100)
770{
771 u32 to_us;
772 u32 print_summary = 0;
773 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
774 u32 hundred = 0;
775 u32 dataframe = 0;
776 __le16 fc;
777 u16 seq_ctl;
778 u16 channel;
779 u16 phy_flags;
780 u32 rate_n_flags;
781 u32 tsf_low;
782 int rssi;
783
784 if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
785 return;
786
787 /* MAC header */
788 fc = header->frame_control;
789 seq_ctl = le16_to_cpu(header->seq_ctrl);
790
791 /* metadata */
792 channel = le16_to_cpu(phy_res->channel);
793 phy_flags = le16_to_cpu(phy_res->phy_flags);
794 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
795
796 /* signal statistics */
797 rssi = iwlagn_calc_rssi(priv, phy_res);
798 tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
799
800 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
801
802 /* if data frame is to us and all is good,
803 * (optionally) print summary for only 1 out of every 100 */
804 if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
805 cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
806 dataframe = 1;
807 if (!group100)
808 print_summary = 1; /* print each frame */
809 else if (priv->framecnt_to_us < 100) {
810 priv->framecnt_to_us++;
811 print_summary = 0;
812 } else {
813 priv->framecnt_to_us = 0;
814 print_summary = 1;
815 hundred = 1;
816 }
817 } else {
818 /* print summary for all other frames */
819 print_summary = 1;
820 }
821
822 if (print_summary) {
823 char *title;
824 int rate_idx;
825 u32 bitrate;
826
827 if (hundred)
828 title = "100Frames";
829 else if (ieee80211_has_retry(fc))
830 title = "Retry";
831 else if (ieee80211_is_assoc_resp(fc))
832 title = "AscRsp";
833 else if (ieee80211_is_reassoc_resp(fc))
834 title = "RasRsp";
835 else if (ieee80211_is_probe_resp(fc)) {
836 title = "PrbRsp";
837 print_dump = 1; /* dump frame contents */
838 } else if (ieee80211_is_beacon(fc)) {
839 title = "Beacon";
840 print_dump = 1; /* dump frame contents */
841 } else if (ieee80211_is_atim(fc))
842 title = "ATIM";
843 else if (ieee80211_is_auth(fc))
844 title = "Auth";
845 else if (ieee80211_is_deauth(fc))
846 title = "DeAuth";
847 else if (ieee80211_is_disassoc(fc))
848 title = "DisAssoc";
849 else
850 title = "Frame";
851
852 rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
853 if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
854 bitrate = 0;
855 WARN_ON_ONCE(1);
856 } else {
857 bitrate = iwl_rates[rate_idx].ieee / 2;
858 }
859
860 /* print frame summary.
861 * MAC addresses show just the last byte (for brevity),
862 * but you can hack it to show more, if you'd like to. */
863 if (dataframe)
864 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
865 "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
866 title, le16_to_cpu(fc), header->addr1[5],
867 length, rssi, channel, bitrate);
868 else {
869 /* src/dst addresses assume managed mode */
870 IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
871 "len=%u, rssi=%d, tim=%lu usec, "
872 "phy=0x%02x, chnl=%d\n",
873 title, le16_to_cpu(fc), header->addr1[5],
874 header->addr3[5], length, rssi,
875 tsf_low - priv->scan_start_tsf,
876 phy_flags, channel);
877 }
878 }
879 if (print_dump)
880 iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
881}
882#endif
883
884static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) 803static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
885{ 804{
886 u32 decrypt_out = 0; 805 u32 decrypt_out = 0;
@@ -988,7 +907,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
988 struct iwl_rx_packet *pkt = rxb_addr(rxb); 907 struct iwl_rx_packet *pkt = rxb_addr(rxb);
989 struct iwl_rx_phy_res *phy_res; 908 struct iwl_rx_phy_res *phy_res;
990 __le32 rx_pkt_status; 909 __le32 rx_pkt_status;
991 struct iwl4965_rx_mpdu_res_start *amsdu; 910 struct iwl_rx_mpdu_res_start *amsdu;
992 u32 len; 911 u32 len;
993 u32 ampdu_status; 912 u32 ampdu_status;
994 u32 rate_n_flags; 913 u32 rate_n_flags;
@@ -1017,7 +936,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
1017 return; 936 return;
1018 } 937 }
1019 phy_res = &priv->_agn.last_phy_res; 938 phy_res = &priv->_agn.last_phy_res;
1020 amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; 939 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
1021 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); 940 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1022 len = le16_to_cpu(amsdu->byte_count); 941 len = le16_to_cpu(amsdu->byte_count);
1023 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); 942 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
@@ -1060,11 +979,6 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
1060 /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 979 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1061 rx_status.signal = iwlagn_calc_rssi(priv, phy_res); 980 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
1062 981
1063#ifdef CONFIG_IWLWIFI_DEBUG
1064 /* Set "1" to report good data frames in groups of 100 */
1065 if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
1066 iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
1067#endif
1068 iwl_dbg_log_rx_data_frame(priv, len, header); 982 iwl_dbg_log_rx_data_frame(priv, len, header);
1069 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", 983 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1070 rx_status.signal, (unsigned long long)rx_status.mactime); 984 rx_status.signal, (unsigned long long)rx_status.mactime);
@@ -1252,6 +1166,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1252 bool is_active = false; 1166 bool is_active = false;
1253 int chan_mod; 1167 int chan_mod;
1254 u8 active_chains; 1168 u8 active_chains;
1169 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
1255 1170
1256 conf = ieee80211_get_hw_conf(priv->hw); 1171 conf = ieee80211_get_hw_conf(priv->hw);
1257 1172
@@ -1319,7 +1234,10 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1319 1234
1320 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 1235 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
1321 spin_lock_irqsave(&priv->lock, flags); 1236 spin_lock_irqsave(&priv->lock, flags);
1322 interval = vif ? vif->bss_conf.beacon_int : 0; 1237 if (priv->is_internal_short_scan)
1238 interval = 0;
1239 else
1240 interval = vif->bss_conf.beacon_int;
1323 spin_unlock_irqrestore(&priv->lock, flags); 1241 spin_unlock_irqrestore(&priv->lock, flags);
1324 1242
1325 scan->suspend_time = 0; 1243 scan->suspend_time = 0;
@@ -1403,11 +1321,14 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1403 1321
1404 band = priv->scan_band; 1322 band = priv->scan_band;
1405 1323
1406 if (priv->cfg->scan_antennas[band]) 1324 if (priv->cfg->scan_rx_antennas[band])
1407 rx_ant = priv->cfg->scan_antennas[band]; 1325 rx_ant = priv->cfg->scan_rx_antennas[band];
1326
1327 if (priv->cfg->scan_tx_antennas[band])
1328 scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
1408 1329
1409 priv->scan_tx_ant[band] = 1330 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
1410 iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]); 1331 scan_tx_antennas);
1411 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); 1332 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
1412 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); 1333 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
1413 1334
@@ -1433,13 +1354,15 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1433 if (!priv->is_internal_short_scan) { 1354 if (!priv->is_internal_short_scan) {
1434 cmd_len = iwl_fill_probe_req(priv, 1355 cmd_len = iwl_fill_probe_req(priv,
1435 (struct ieee80211_mgmt *)scan->data, 1356 (struct ieee80211_mgmt *)scan->data,
1357 vif->addr,
1436 priv->scan_request->ie, 1358 priv->scan_request->ie,
1437 priv->scan_request->ie_len, 1359 priv->scan_request->ie_len,
1438 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1360 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1439 } else { 1361 } else {
1362 /* use bcast addr, will not be transmitted but must be valid */
1440 cmd_len = iwl_fill_probe_req(priv, 1363 cmd_len = iwl_fill_probe_req(priv,
1441 (struct ieee80211_mgmt *)scan->data, 1364 (struct ieee80211_mgmt *)scan->data,
1442 NULL, 0, 1365 iwl_bcast_addr, NULL, 0,
1443 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1366 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1444 1367
1445 } 1368 }
@@ -1502,3 +1425,96 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1502 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, 1425 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1503 vif->bss_conf.bssid); 1426 vif->bss_conf.bssid);
1504} 1427}
1428
1429void iwl_free_tfds_in_queue(struct iwl_priv *priv,
1430 int sta_id, int tid, int freed)
1431{
1432 WARN_ON(!spin_is_locked(&priv->sta_lock));
1433
1434 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1435 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1436 else {
1437 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1438 priv->stations[sta_id].tid[tid].tfds_in_queue,
1439 freed);
1440 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1441 }
1442}
1443
1444#define IWL_FLUSH_WAIT_MS 2000
1445
1446int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
1447{
1448 struct iwl_tx_queue *txq;
1449 struct iwl_queue *q;
1450 int cnt;
1451 unsigned long now = jiffies;
1452 int ret = 0;
1453
1454 /* waiting for all the tx frames complete might take a while */
1455 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1456 if (cnt == IWL_CMD_QUEUE_NUM)
1457 continue;
1458 txq = &priv->txq[cnt];
1459 q = &txq->q;
1460 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1461 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1462 msleep(1);
1463
1464 if (q->read_ptr != q->write_ptr) {
1465 IWL_ERR(priv, "fail to flush all tx fifo queues\n");
1466 ret = -ETIMEDOUT;
1467 break;
1468 }
1469 }
1470 return ret;
1471}
1472
1473#define IWL_TX_QUEUE_MSK 0xfffff
1474
1475/**
1476 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
1477 *
1478 * pre-requirements:
1479 * 1. acquire mutex before calling
1480 * 2. make sure rf is on and not in exit state
1481 */
1482int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1483{
1484 struct iwl_txfifo_flush_cmd flush_cmd;
1485 struct iwl_host_cmd cmd = {
1486 .id = REPLY_TXFIFO_FLUSH,
1487 .len = sizeof(struct iwl_txfifo_flush_cmd),
1488 .flags = CMD_SYNC,
1489 .data = &flush_cmd,
1490 };
1491
1492 might_sleep();
1493
1494 memset(&flush_cmd, 0, sizeof(flush_cmd));
1495 flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
1496 IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
1497 if (priv->cfg->sku & IWL_SKU_N)
1498 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
1499
1500 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
1501 flush_cmd.fifo_control);
1502 flush_cmd.flush_control = cpu_to_le16(flush_control);
1503
1504 return iwl_send_cmd(priv, &cmd);
1505}
1506
1507void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1508{
1509 mutex_lock(&priv->mutex);
1510 ieee80211_stop_queues(priv->hw);
1511 if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
1512 IWL_ERR(priv, "flush request fail\n");
1513 goto done;
1514 }
1515 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
1516 iwlagn_wait_tx_queue_empty(priv);
1517done:
1518 ieee80211_wake_queues(priv->hw);
1519 mutex_unlock(&priv->mutex);
1520}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index cf4a95bae4ff..35c86d22b14b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -313,8 +313,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
313 */ 313 */
314 IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", 314 IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
315 tid); 315 tid);
316 ieee80211_stop_tx_ba_session(sta, tid, 316 ieee80211_stop_tx_ba_session(sta, tid);
317 WLAN_BACK_INITIATOR);
318 } 317 }
319 } else 318 } else
320 IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid); 319 IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
@@ -325,18 +324,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
325 struct iwl_lq_sta *lq_data, 324 struct iwl_lq_sta *lq_data,
326 struct ieee80211_sta *sta) 325 struct ieee80211_sta *sta)
327{ 326{
328 if ((tid < TID_MAX_LOAD_COUNT) && 327 if (tid < TID_MAX_LOAD_COUNT)
329 !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) { 328 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
330 if (priv->cfg->use_rts_for_ht) { 329 else
331 /* 330 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
332 * switch to RTS/CTS if it is the prefer protection 331 tid, TID_MAX_LOAD_COUNT);
333 * method for HT traffic
334 */
335 IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
336 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
337 iwlcore_commit_rxon(priv);
338 }
339 }
340} 332}
341 333
342static inline int get_num_of_ant_from_rate(u32 rate_n_flags) 334static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
new file mode 100644
index 000000000000..9490eced1198
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -0,0 +1,351 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-calib.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-agn-hw.h"
42#include "iwl-agn.h"
43
44void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb)
46
47{
48 struct iwl_rx_packet *pkt = rxb_addr(rxb);
49 struct iwl_missed_beacon_notif *missed_beacon;
50
51 missed_beacon = &pkt->u.missed_beacon;
52 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
53 priv->missed_beacon_threshold) {
54 IWL_DEBUG_CALIB(priv,
55 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
56 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
57 le32_to_cpu(missed_beacon->total_missed_becons),
58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl_init_sensitivity(priv);
62 }
63}
64
65/* Calculate noise level, based on measurements during network silence just
66 * before arriving beacon. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl_rx_calc_noise(struct iwl_priv *priv)
69{
70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0;
72 int total_silence = 0;
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise;
75
76 if (priv->cfg->bt_statistics)
77 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
78 else
79 rx_info = &(priv->_agn.statistics.rx.general);
80 bcn_silence_a =
81 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
82 bcn_silence_b =
83 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
84 bcn_silence_c =
85 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
86
87 if (bcn_silence_a) {
88 total_silence += bcn_silence_a;
89 num_active_rx++;
90 }
91 if (bcn_silence_b) {
92 total_silence += bcn_silence_b;
93 num_active_rx++;
94 }
95 if (bcn_silence_c) {
96 total_silence += bcn_silence_c;
97 num_active_rx++;
98 }
99
100 /* Average among active antennas */
101 if (num_active_rx)
102 last_rx_noise = (total_silence / num_active_rx) - 107;
103 else
104 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
105
106 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
107 bcn_silence_a, bcn_silence_b, bcn_silence_c,
108 last_rx_noise);
109}
110
111#ifdef CONFIG_IWLWIFI_DEBUGFS
112/*
113 * based on the assumption of all statistics counter are in DWORD
114 * FIXME: This function is for debugging, do not deal with
115 * the case of counters roll-over.
116 */
117static void iwl_accumulative_statistics(struct iwl_priv *priv,
118 __le32 *stats)
119{
120 int i, size;
121 __le32 *prev_stats;
122 u32 *accum_stats;
123 u32 *delta, *max_delta;
124 struct statistics_general_common *general, *accum_general;
125 struct statistics_tx *tx, *accum_tx;
126
127 if (priv->cfg->bt_statistics) {
128 prev_stats = (__le32 *)&priv->_agn.statistics_bt;
129 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
130 size = sizeof(struct iwl_bt_notif_statistics);
131 general = &priv->_agn.statistics_bt.general.common;
132 accum_general = &priv->_agn.accum_statistics_bt.general.common;
133 tx = &priv->_agn.statistics_bt.tx;
134 accum_tx = &priv->_agn.accum_statistics_bt.tx;
135 delta = (u32 *)&priv->_agn.delta_statistics_bt;
136 max_delta = (u32 *)&priv->_agn.max_delta_bt;
137 } else {
138 prev_stats = (__le32 *)&priv->_agn.statistics;
139 accum_stats = (u32 *)&priv->_agn.accum_statistics;
140 size = sizeof(struct iwl_notif_statistics);
141 general = &priv->_agn.statistics.general.common;
142 accum_general = &priv->_agn.accum_statistics.general.common;
143 tx = &priv->_agn.statistics.tx;
144 accum_tx = &priv->_agn.accum_statistics.tx;
145 delta = (u32 *)&priv->_agn.delta_statistics;
146 max_delta = (u32 *)&priv->_agn.max_delta;
147 }
148 for (i = sizeof(__le32); i < size;
149 i += sizeof(__le32), stats++, prev_stats++, delta++,
150 max_delta++, accum_stats++) {
151 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
152 *delta = (le32_to_cpu(*stats) -
153 le32_to_cpu(*prev_stats));
154 *accum_stats += *delta;
155 if (*delta > *max_delta)
156 *max_delta = *delta;
157 }
158 }
159
160 /* reset accumulative statistics for "no-counter" type statistics */
161 accum_general->temperature = general->temperature;
162 accum_general->temperature_m = general->temperature_m;
163 accum_general->ttl_timestamp = general->ttl_timestamp;
164 accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
165 accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
166 accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
167}
168#endif
169
170#define REG_RECALIB_PERIOD (60)
171
172/**
173 * iwl_good_plcp_health - checks for plcp error.
174 *
175 * When the plcp error is exceeding the thresholds, reset the radio
176 * to improve the throughput.
177 */
178bool iwl_good_plcp_health(struct iwl_priv *priv,
179 struct iwl_rx_packet *pkt)
180{
181 bool rc = true;
182 int combined_plcp_delta;
183 unsigned int plcp_msec;
184 unsigned long plcp_received_jiffies;
185
186 if (priv->cfg->plcp_delta_threshold ==
187 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
188 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
189 return rc;
190 }
191
192 /*
193 * check for plcp_err and trigger radio reset if it exceeds
194 * the plcp error threshold plcp_delta.
195 */
196 plcp_received_jiffies = jiffies;
197 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
198 (long) priv->plcp_jiffies);
199 priv->plcp_jiffies = plcp_received_jiffies;
200 /*
201 * check to make sure plcp_msec is not 0 to prevent division
202 * by zero.
203 */
204 if (plcp_msec) {
205 struct statistics_rx_phy *ofdm;
206 struct statistics_rx_ht_phy *ofdm_ht;
207
208 if (priv->cfg->bt_statistics) {
209 ofdm = &pkt->u.stats_bt.rx.ofdm;
210 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
211 combined_plcp_delta =
212 (le32_to_cpu(ofdm->plcp_err) -
213 le32_to_cpu(priv->_agn.statistics_bt.
214 rx.ofdm.plcp_err)) +
215 (le32_to_cpu(ofdm_ht->plcp_err) -
216 le32_to_cpu(priv->_agn.statistics_bt.
217 rx.ofdm_ht.plcp_err));
218 } else {
219 ofdm = &pkt->u.stats.rx.ofdm;
220 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
221 combined_plcp_delta =
222 (le32_to_cpu(ofdm->plcp_err) -
223 le32_to_cpu(priv->_agn.statistics.
224 rx.ofdm.plcp_err)) +
225 (le32_to_cpu(ofdm_ht->plcp_err) -
226 le32_to_cpu(priv->_agn.statistics.
227 rx.ofdm_ht.plcp_err));
228 }
229
230 if ((combined_plcp_delta > 0) &&
231 ((combined_plcp_delta * 100) / plcp_msec) >
232 priv->cfg->plcp_delta_threshold) {
233 /*
234 * if plcp_err exceed the threshold,
235 * the following data is printed in csv format:
236 * Text: plcp_err exceeded %d,
237 * Received ofdm.plcp_err,
238 * Current ofdm.plcp_err,
239 * Received ofdm_ht.plcp_err,
240 * Current ofdm_ht.plcp_err,
241 * combined_plcp_delta,
242 * plcp_msec
243 */
244 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
245 "%u, %u, %u, %u, %d, %u mSecs\n",
246 priv->cfg->plcp_delta_threshold,
247 le32_to_cpu(ofdm->plcp_err),
248 le32_to_cpu(ofdm->plcp_err),
249 le32_to_cpu(ofdm_ht->plcp_err),
250 le32_to_cpu(ofdm_ht->plcp_err),
251 combined_plcp_delta, plcp_msec);
252
253 rc = false;
254 }
255 }
256 return rc;
257}
258
259void iwl_rx_statistics(struct iwl_priv *priv,
260 struct iwl_rx_mem_buffer *rxb)
261{
262 int change;
263 struct iwl_rx_packet *pkt = rxb_addr(rxb);
264
265 if (priv->cfg->bt_statistics) {
266 IWL_DEBUG_RX(priv,
267 "Statistics notification received (%d vs %d).\n",
268 (int)sizeof(struct iwl_bt_notif_statistics),
269 le32_to_cpu(pkt->len_n_flags) &
270 FH_RSCSR_FRAME_SIZE_MSK);
271
272 change = ((priv->_agn.statistics_bt.general.common.temperature !=
273 pkt->u.stats_bt.general.common.temperature) ||
274 ((priv->_agn.statistics_bt.flag &
275 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
276 (pkt->u.stats_bt.flag &
277 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
278#ifdef CONFIG_IWLWIFI_DEBUGFS
279 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
280#endif
281
282 } else {
283 IWL_DEBUG_RX(priv,
284 "Statistics notification received (%d vs %d).\n",
285 (int)sizeof(struct iwl_notif_statistics),
286 le32_to_cpu(pkt->len_n_flags) &
287 FH_RSCSR_FRAME_SIZE_MSK);
288
289 change = ((priv->_agn.statistics.general.common.temperature !=
290 pkt->u.stats.general.common.temperature) ||
291 ((priv->_agn.statistics.flag &
292 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
293 (pkt->u.stats.flag &
294 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
295#ifdef CONFIG_IWLWIFI_DEBUGFS
296 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
297#endif
298
299 }
300
301 iwl_recover_from_statistics(priv, pkt);
302
303 if (priv->cfg->bt_statistics)
304 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
305 sizeof(priv->_agn.statistics_bt));
306 else
307 memcpy(&priv->_agn.statistics, &pkt->u.stats,
308 sizeof(priv->_agn.statistics));
309
310 set_bit(STATUS_STATISTICS, &priv->status);
311
312 /* Reschedule the statistics timer to occur in
313 * REG_RECALIB_PERIOD seconds to ensure we get a
314 * thermal update even if the uCode doesn't give
315 * us one */
316 mod_timer(&priv->statistics_periodic, jiffies +
317 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
318
319 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
320 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
321 iwl_rx_calc_noise(priv);
322 queue_work(priv->workqueue, &priv->run_time_calib_work);
323 }
324 if (priv->cfg->ops->lib->temp_ops.temperature && change)
325 priv->cfg->ops->lib->temp_ops.temperature(priv);
326}
327
328void iwl_reply_statistics(struct iwl_priv *priv,
329 struct iwl_rx_mem_buffer *rxb)
330{
331 struct iwl_rx_packet *pkt = rxb_addr(rxb);
332
333 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
334#ifdef CONFIG_IWLWIFI_DEBUGFS
335 memset(&priv->_agn.accum_statistics, 0,
336 sizeof(struct iwl_notif_statistics));
337 memset(&priv->_agn.delta_statistics, 0,
338 sizeof(struct iwl_notif_statistics));
339 memset(&priv->_agn.max_delta, 0,
340 sizeof(struct iwl_notif_statistics));
341 memset(&priv->_agn.accum_statistics_bt, 0,
342 sizeof(struct iwl_bt_notif_statistics));
343 memset(&priv->_agn.delta_statistics_bt, 0,
344 sizeof(struct iwl_bt_notif_statistics));
345 memset(&priv->_agn.max_delta_bt, 0,
346 sizeof(struct iwl_bt_notif_statistics));
347#endif
348 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
349 }
350 iwl_rx_statistics(priv, rxb);
351}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 7d614c4d3c62..55a1b31fd09a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -233,6 +233,7 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
233{ 233{
234 unsigned long flags; 234 unsigned long flags;
235 u16 ra_tid; 235 u16 ra_tid;
236 int ret;
236 237
237 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 238 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
238 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 239 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
@@ -248,7 +249,9 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
248 ra_tid = BUILD_RAxTID(sta_id, tid); 249 ra_tid = BUILD_RAxTID(sta_id, tid);
249 250
250 /* Modify device's station table to Tx this TID */ 251 /* Modify device's station table to Tx this TID */
251 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); 252 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
253 if (ret)
254 return ret;
252 255
253 spin_lock_irqsave(&priv->lock, flags); 256 spin_lock_irqsave(&priv->lock, flags);
254 257
@@ -469,7 +472,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
469 } 472 }
470 473
471 /* Set up antennas */ 474 /* Set up antennas */
472 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); 475 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
476 priv->hw_params.valid_tx_ant);
473 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 477 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
474 478
475 /* Set the rate in the TX cmd */ 479 /* Set the rate in the TX cmd */
@@ -567,10 +571,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
567 hdr_len = ieee80211_hdrlen(fc); 571 hdr_len = ieee80211_hdrlen(fc);
568 572
569 /* Find index into station table for destination station */ 573 /* Find index into station table for destination station */
570 if (!info->control.sta) 574 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
571 sta_id = priv->hw_params.bcast_sta_id;
572 else
573 sta_id = iwl_sta_id(info->control.sta);
574 if (sta_id == IWL_INVALID_STATION) { 575 if (sta_id == IWL_INVALID_STATION) {
575 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 576 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
576 hdr->addr1); 577 hdr->addr1);
@@ -598,11 +599,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
598 } 599 }
599 600
600 txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); 601 txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
602
603 /* irqs already disabled/saved above when locking priv->lock */
604 spin_lock(&priv->sta_lock);
605
601 if (ieee80211_is_data_qos(fc)) { 606 if (ieee80211_is_data_qos(fc)) {
602 qc = ieee80211_get_qos_ctl(hdr); 607 qc = ieee80211_get_qos_ctl(hdr);
603 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 608 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
604 if (unlikely(tid >= MAX_TID_COUNT)) 609 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
610 spin_unlock(&priv->sta_lock);
605 goto drop_unlock; 611 goto drop_unlock;
612 }
606 seq_number = priv->stations[sta_id].tid[tid].seq_number; 613 seq_number = priv->stations[sta_id].tid[tid].seq_number;
607 seq_number &= IEEE80211_SCTL_SEQ; 614 seq_number &= IEEE80211_SCTL_SEQ;
608 hdr->seq_ctrl = hdr->seq_ctrl & 615 hdr->seq_ctrl = hdr->seq_ctrl &
@@ -620,15 +627,22 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
620 swq_id = txq->swq_id; 627 swq_id = txq->swq_id;
621 q = &txq->q; 628 q = &txq->q;
622 629
623 if (unlikely(iwl_queue_space(q) < q->high_mark)) 630 if (unlikely(iwl_queue_space(q) < q->high_mark)) {
631 spin_unlock(&priv->sta_lock);
624 goto drop_unlock; 632 goto drop_unlock;
633 }
625 634
626 if (ieee80211_is_data_qos(fc)) 635 if (ieee80211_is_data_qos(fc)) {
627 priv->stations[sta_id].tid[tid].tfds_in_queue++; 636 priv->stations[sta_id].tid[tid].tfds_in_queue++;
637 if (!ieee80211_has_morefrags(fc))
638 priv->stations[sta_id].tid[tid].seq_number = seq_number;
639 }
640
641 spin_unlock(&priv->sta_lock);
628 642
629 /* Set up driver data for this TFD */ 643 /* Set up driver data for this TFD */
630 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 644 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
631 txq->txb[q->write_ptr].skb[0] = skb; 645 txq->txb[q->write_ptr].skb = skb;
632 646
633 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 647 /* Set up first empty entry in queue's array of Tx/cmd buffers */
634 out_cmd = txq->cmd[q->write_ptr]; 648 out_cmd = txq->cmd[q->write_ptr];
@@ -694,8 +708,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
694 txcmd_phys = pci_map_single(priv->pci_dev, 708 txcmd_phys = pci_map_single(priv->pci_dev,
695 &out_cmd->hdr, len, 709 &out_cmd->hdr, len,
696 PCI_DMA_BIDIRECTIONAL); 710 PCI_DMA_BIDIRECTIONAL);
697 pci_unmap_addr_set(out_meta, mapping, txcmd_phys); 711 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
698 pci_unmap_len_set(out_meta, len, len); 712 dma_unmap_len_set(out_meta, len, len);
699 /* Add buffer containing Tx command and MAC(!) header to TFD's 713 /* Add buffer containing Tx command and MAC(!) header to TFD's
700 * first entry */ 714 * first entry */
701 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 715 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
@@ -703,8 +717,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
703 717
704 if (!ieee80211_has_morefrags(hdr->frame_control)) { 718 if (!ieee80211_has_morefrags(hdr->frame_control)) {
705 txq->need_update = 1; 719 txq->need_update = 1;
706 if (qc)
707 priv->stations[sta_id].tid[tid].seq_number = seq_number;
708 } else { 720 } else {
709 wait_write_ptr = 1; 721 wait_write_ptr = 1;
710 txq->need_update = 0; 722 txq->need_update = 0;
@@ -938,9 +950,12 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
938 /* Stop each Tx DMA channel, and wait for it to be idle */ 950 /* Stop each Tx DMA channel, and wait for it to be idle */
939 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { 951 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
940 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 952 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
941 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, 953 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
942 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 954 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
943 1000); 955 1000))
956 IWL_ERR(priv, "Failing on timeout while stopping"
957 " DMA channel %d [0x%08x]", ch,
958 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
944 } 959 }
945 spin_unlock_irqrestore(&priv->lock, flags); 960 spin_unlock_irqrestore(&priv->lock, flags);
946} 961}
@@ -1009,6 +1024,8 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
1009 if (ret) 1024 if (ret)
1010 return ret; 1025 return ret;
1011 1026
1027 spin_lock_irqsave(&priv->sta_lock, flags);
1028 tid_data = &priv->stations[sta_id].tid[tid];
1012 if (tid_data->tfds_in_queue == 0) { 1029 if (tid_data->tfds_in_queue == 0) {
1013 IWL_DEBUG_HT(priv, "HW queue is empty\n"); 1030 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1014 tid_data->agg.state = IWL_AGG_ON; 1031 tid_data->agg.state = IWL_AGG_ON;
@@ -1018,6 +1035,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
1018 tid_data->tfds_in_queue); 1035 tid_data->tfds_in_queue);
1019 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; 1036 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1020 } 1037 }
1038 spin_unlock_irqrestore(&priv->sta_lock, flags);
1021 return ret; 1039 return ret;
1022} 1040}
1023 1041
@@ -1040,11 +1058,14 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1040 return -ENXIO; 1058 return -ENXIO;
1041 } 1059 }
1042 1060
1061 spin_lock_irqsave(&priv->sta_lock, flags);
1062
1043 if (priv->stations[sta_id].tid[tid].agg.state == 1063 if (priv->stations[sta_id].tid[tid].agg.state ==
1044 IWL_EMPTYING_HW_QUEUE_ADDBA) { 1064 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1045 IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); 1065 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1046 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1066 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1047 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 1067 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1068 spin_unlock_irqrestore(&priv->sta_lock, flags);
1048 return 0; 1069 return 0;
1049 } 1070 }
1050 1071
@@ -1062,13 +1083,17 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1062 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); 1083 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1063 priv->stations[sta_id].tid[tid].agg.state = 1084 priv->stations[sta_id].tid[tid].agg.state =
1064 IWL_EMPTYING_HW_QUEUE_DELBA; 1085 IWL_EMPTYING_HW_QUEUE_DELBA;
1086 spin_unlock_irqrestore(&priv->sta_lock, flags);
1065 return 0; 1087 return 0;
1066 } 1088 }
1067 1089
1068 IWL_DEBUG_HT(priv, "HW queue is empty\n"); 1090 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1069 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 1091 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1070 1092
1071 spin_lock_irqsave(&priv->lock, flags); 1093 /* do not restore/save irqs */
1094 spin_unlock(&priv->sta_lock);
1095 spin_lock(&priv->lock);
1096
1072 /* 1097 /*
1073 * the only reason this call can fail is queue number out of range, 1098 * the only reason this call can fail is queue number out of range,
1074 * which can happen if uCode is reloaded and all the station 1099 * which can happen if uCode is reloaded and all the station
@@ -1092,6 +1117,8 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1092 u8 *addr = priv->stations[sta_id].sta.sta.addr; 1117 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1093 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; 1118 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1094 1119
1120 WARN_ON(!spin_is_locked(&priv->sta_lock));
1121
1095 switch (priv->stations[sta_id].tid[tid].agg.state) { 1122 switch (priv->stations[sta_id].tid[tid].agg.state) {
1096 case IWL_EMPTYING_HW_QUEUE_DELBA: 1123 case IWL_EMPTYING_HW_QUEUE_DELBA:
1097 /* We are reclaiming the last packet of the */ 1124 /* We are reclaiming the last packet of the */
@@ -1116,6 +1143,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1116 } 1143 }
1117 break; 1144 break;
1118 } 1145 }
1146
1119 return 0; 1147 return 0;
1120} 1148}
1121 1149
@@ -1159,12 +1187,12 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1159 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1187 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1160 1188
1161 tx_info = &txq->txb[txq->q.read_ptr]; 1189 tx_info = &txq->txb[txq->q.read_ptr];
1162 iwlagn_tx_status(priv, tx_info->skb[0]); 1190 iwlagn_tx_status(priv, tx_info->skb);
1163 1191
1164 hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; 1192 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1165 if (hdr && ieee80211_is_data_qos(hdr->frame_control)) 1193 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1166 nfreed++; 1194 nfreed++;
1167 tx_info->skb[0] = NULL; 1195 tx_info->skb = NULL;
1168 1196
1169 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) 1197 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1170 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); 1198 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
@@ -1188,7 +1216,7 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1188 int i, sh, ack; 1216 int i, sh, ack;
1189 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); 1217 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1190 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 1218 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1191 u64 bitmap; 1219 u64 bitmap, sent_bitmap;
1192 int successes = 0; 1220 int successes = 0;
1193 struct ieee80211_tx_info *info; 1221 struct ieee80211_tx_info *info;
1194 1222
@@ -1216,24 +1244,26 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1216 1244
1217 /* check for success or failure according to the 1245 /* check for success or failure according to the
1218 * transmitted bitmap and block-ack bitmap */ 1246 * transmitted bitmap and block-ack bitmap */
1219 bitmap &= agg->bitmap; 1247 sent_bitmap = bitmap & agg->bitmap;
1220 1248
1221 /* For each frame attempted in aggregation, 1249 /* For each frame attempted in aggregation,
1222 * update driver's record of tx frame's status. */ 1250 * update driver's record of tx frame's status. */
1223 for (i = 0; i < agg->frame_count ; i++) { 1251 i = 0;
1224 ack = bitmap & (1ULL << i); 1252 while (sent_bitmap) {
1225 successes += !!ack; 1253 ack = sent_bitmap & 1ULL;
1254 successes += ack;
1226 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", 1255 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1227 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, 1256 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1228 agg->start_idx + i); 1257 agg->start_idx + i);
1258 sent_bitmap >>= 1;
1259 ++i;
1229 } 1260 }
1230 1261
1231 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); 1262 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1232 memset(&info->status, 0, sizeof(info->status)); 1263 memset(&info->status, 0, sizeof(info->status));
1233 info->flags |= IEEE80211_TX_STAT_ACK; 1264 info->flags |= IEEE80211_TX_STAT_ACK;
1234 info->flags |= IEEE80211_TX_STAT_AMPDU; 1265 info->flags |= IEEE80211_TX_STAT_AMPDU;
1235 info->status.ampdu_ack_len = successes; 1266 info->status.ampdu_ack_len = successes;
1236 info->status.ampdu_ack_map = bitmap;
1237 info->status.ampdu_len = agg->frame_count; 1267 info->status.ampdu_len = agg->frame_count;
1238 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); 1268 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1239 1269
@@ -1281,6 +1311,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1281 int index; 1311 int index;
1282 int sta_id; 1312 int sta_id;
1283 int tid; 1313 int tid;
1314 unsigned long flags;
1284 1315
1285 /* "flow" corresponds to Tx queue */ 1316 /* "flow" corresponds to Tx queue */
1286 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 1317 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
@@ -1308,7 +1339,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1308 /* Find index just before block-ack window */ 1339 /* Find index just before block-ack window */
1309 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 1340 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1310 1341
1311 /* TODO: Need to get this copy more safely - now good for debug */ 1342 spin_lock_irqsave(&priv->sta_lock, flags);
1312 1343
1313 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " 1344 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1314 "sta_id = %d\n", 1345 "sta_id = %d\n",
@@ -1344,4 +1375,6 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1344 1375
1345 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow); 1376 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
1346 } 1377 }
1378
1379 spin_unlock_irqrestore(&priv->sta_lock, flags);
1347} 1380}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 637286c396fe..6f77441cb65a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -423,3 +423,126 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
423 423
424 return 0; 424 return 0;
425} 425}
426
427
428/**
429 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
430 * using sample data 100 bytes apart. If these sample points are good,
431 * it's a pretty good bet that everything between them is good, too.
432 */
433static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
434{
435 u32 val;
436 int ret = 0;
437 u32 errcnt = 0;
438 u32 i;
439
440 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
441
442 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
443 /* read data comes through single port, auto-incr addr */
444 /* NOTE: Use the debugless read so we don't flood kernel log
445 * if IWL_DL_IO is set */
446 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
447 i + IWLAGN_RTC_INST_LOWER_BOUND);
448 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
449 if (val != le32_to_cpu(*image)) {
450 ret = -EIO;
451 errcnt++;
452 if (errcnt >= 3)
453 break;
454 }
455 }
456
457 return ret;
458}
459
460/**
461 * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host,
462 * looking at all data.
463 */
464static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
465 u32 len)
466{
467 u32 val;
468 u32 save_len = len;
469 int ret = 0;
470 u32 errcnt;
471
472 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
473
474 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
475 IWLAGN_RTC_INST_LOWER_BOUND);
476
477 errcnt = 0;
478 for (; len > 0; len -= sizeof(u32), image++) {
479 /* read data comes through single port, auto-incr addr */
480 /* NOTE: Use the debugless read so we don't flood kernel log
481 * if IWL_DL_IO is set */
482 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
483 if (val != le32_to_cpu(*image)) {
484 IWL_ERR(priv, "uCode INST section is invalid at "
485 "offset 0x%x, is 0x%x, s/b 0x%x\n",
486 save_len - len, val, le32_to_cpu(*image));
487 ret = -EIO;
488 errcnt++;
489 if (errcnt >= 20)
490 break;
491 }
492 }
493
494 if (!errcnt)
495 IWL_DEBUG_INFO(priv,
496 "ucode image in INSTRUCTION memory is good\n");
497
498 return ret;
499}
500
501/**
502 * iwl_verify_ucode - determine which instruction image is in SRAM,
503 * and verify its contents
504 */
505int iwl_verify_ucode(struct iwl_priv *priv)
506{
507 __le32 *image;
508 u32 len;
509 int ret;
510
511 /* Try bootstrap */
512 image = (__le32 *)priv->ucode_boot.v_addr;
513 len = priv->ucode_boot.len;
514 ret = iwlcore_verify_inst_sparse(priv, image, len);
515 if (!ret) {
516 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
517 return 0;
518 }
519
520 /* Try initialize */
521 image = (__le32 *)priv->ucode_init.v_addr;
522 len = priv->ucode_init.len;
523 ret = iwlcore_verify_inst_sparse(priv, image, len);
524 if (!ret) {
525 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
526 return 0;
527 }
528
529 /* Try runtime/protocol */
530 image = (__le32 *)priv->ucode_code.v_addr;
531 len = priv->ucode_code.len;
532 ret = iwlcore_verify_inst_sparse(priv, image, len);
533 if (!ret) {
534 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
535 return 0;
536 }
537
538 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
539
540 /* Since nothing seems to match, show first several data entries in
541 * instruction SRAM, so maybe visual inspection will give a clue.
542 * Selection of bootstrap image (vs. other images) is arbitrary. */
543 image = (__le32 *)priv->ucode_boot.v_addr;
544 len = priv->ucode_boot.len;
545 ret = iwl_verify_inst_full(priv, image, len);
546
547 return ret;
548}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 24aff654fa9c..35337b1e7cac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -27,6 +27,8 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#include <linux/kernel.h> 32#include <linux/kernel.h>
31#include <linux/module.h> 33#include <linux/module.h>
32#include <linux/init.h> 34#include <linux/init.h>
@@ -120,7 +122,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
120 (priv->switch_rxon.channel != priv->staging_rxon.channel)) { 122 (priv->switch_rxon.channel != priv->staging_rxon.channel)) {
121 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 123 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
122 le16_to_cpu(priv->switch_rxon.channel)); 124 le16_to_cpu(priv->switch_rxon.channel));
123 priv->switch_rxon.switch_in_progress = false; 125 iwl_chswitch_done(priv, false);
124 } 126 }
125 127
126 /* If we don't need to send a full RXON, we can use 128 /* If we don't need to send a full RXON, we can use
@@ -292,9 +294,7 @@ static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
292 struct ieee80211_hdr *hdr, 294 struct ieee80211_hdr *hdr,
293 int left) 295 int left)
294{ 296{
295 if (!iwl_is_associated(priv) || !priv->ibss_beacon || 297 if (!priv->ibss_beacon)
296 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
297 (priv->iw_mode != NL80211_IFTYPE_AP)))
298 return 0; 298 return 0;
299 299
300 if (priv->ibss_beacon->len > left) 300 if (priv->ibss_beacon->len > left)
@@ -367,7 +367,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
367 367
368 /* Set up packet rate and flags */ 368 /* Set up packet rate and flags */
369 rate = iwl_rate_get_lowest_plcp(priv); 369 rate = iwl_rate_get_lowest_plcp(priv);
370 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); 370 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
371 priv->hw_params.valid_tx_ant);
371 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 372 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
372 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) 373 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
373 rate_flags |= RATE_MCS_CCK_MSK; 374 rate_flags |= RATE_MCS_CCK_MSK;
@@ -474,18 +475,25 @@ void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
474 /* Unmap tx_cmd */ 475 /* Unmap tx_cmd */
475 if (num_tbs) 476 if (num_tbs)
476 pci_unmap_single(dev, 477 pci_unmap_single(dev,
477 pci_unmap_addr(&txq->meta[index], mapping), 478 dma_unmap_addr(&txq->meta[index], mapping),
478 pci_unmap_len(&txq->meta[index], len), 479 dma_unmap_len(&txq->meta[index], len),
479 PCI_DMA_BIDIRECTIONAL); 480 PCI_DMA_BIDIRECTIONAL);
480 481
481 /* Unmap chunks, if any. */ 482 /* Unmap chunks, if any. */
482 for (i = 1; i < num_tbs; i++) { 483 for (i = 1; i < num_tbs; i++)
483 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), 484 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
484 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); 485 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
485 486
486 if (txq->txb) { 487 /* free SKB */
487 dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]); 488 if (txq->txb) {
488 txq->txb[txq->q.read_ptr].skb[i - 1] = NULL; 489 struct sk_buff *skb;
490
491 skb = txq->txb[txq->q.read_ptr].skb;
492
493 /* can be called from irqs-disabled context */
494 if (skb) {
495 dev_kfree_skb_any(skb);
496 txq->txb[txq->q.read_ptr].skb = NULL;
489 } 497 }
490 } 498 }
491} 499}
@@ -851,6 +859,24 @@ int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
851 return 0; 859 return 0;
852} 860}
853 861
862static void iwl_bg_tx_flush(struct work_struct *work)
863{
864 struct iwl_priv *priv =
865 container_of(work, struct iwl_priv, tx_flush);
866
867 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
868 return;
869
870 /* do nothing if rf-kill is on */
871 if (!iwl_is_ready_rf(priv))
872 return;
873
874 if (priv->cfg->ops->lib->txfifo_flush) {
875 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
876 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
877 }
878}
879
854/** 880/**
855 * iwl_setup_rx_handlers - Initialize Rx handler callbacks 881 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
856 * 882 *
@@ -933,6 +959,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
933 fill_rx = 1; 959 fill_rx = 1;
934 960
935 while (i != r) { 961 while (i != r) {
962 int len;
963
936 rxb = rxq->queue[i]; 964 rxb = rxq->queue[i];
937 965
938 /* If an RXB doesn't have a Rx queue slot associated with it, 966 /* If an RXB doesn't have a Rx queue slot associated with it,
@@ -947,8 +975,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
947 PCI_DMA_FROMDEVICE); 975 PCI_DMA_FROMDEVICE);
948 pkt = rxb_addr(rxb); 976 pkt = rxb_addr(rxb);
949 977
950 trace_iwlwifi_dev_rx(priv, pkt, 978 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
951 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 979 len += sizeof(u32); /* account for status word */
980 trace_iwlwifi_dev_rx(priv, pkt, len);
952 981
953 /* Reclaim a command buffer only if this packet is a response 982 /* Reclaim a command buffer only if this packet is a response
954 * to a (driver-originated) command. 983 * to a (driver-originated) command.
@@ -1450,13 +1479,13 @@ bool iwl_good_ack_health(struct iwl_priv *priv,
1450 1479
1451 actual_ack_cnt_delta = 1480 actual_ack_cnt_delta =
1452 le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - 1481 le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
1453 le32_to_cpu(priv->statistics.tx.actual_ack_cnt); 1482 le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
1454 expected_ack_cnt_delta = 1483 expected_ack_cnt_delta =
1455 le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - 1484 le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
1456 le32_to_cpu(priv->statistics.tx.expected_ack_cnt); 1485 le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
1457 ba_timeout_delta = 1486 ba_timeout_delta =
1458 le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - 1487 le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
1459 le32_to_cpu(priv->statistics.tx.agg.ba_timeout); 1488 le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
1460 if ((priv->_agn.agg_tids_count > 0) && 1489 if ((priv->_agn.agg_tids_count > 0) &&
1461 (expected_ack_cnt_delta > 0) && 1490 (expected_ack_cnt_delta > 0) &&
1462 (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) 1491 (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
@@ -1466,12 +1495,17 @@ bool iwl_good_ack_health(struct iwl_priv *priv,
1466 " expected_ack_cnt = %d\n", 1495 " expected_ack_cnt = %d\n",
1467 actual_ack_cnt_delta, expected_ack_cnt_delta); 1496 actual_ack_cnt_delta, expected_ack_cnt_delta);
1468 1497
1469#ifdef CONFIG_IWLWIFI_DEBUG 1498#ifdef CONFIG_IWLWIFI_DEBUGFS
1499 /*
1500 * This is ifdef'ed on DEBUGFS because otherwise the
1501 * statistics aren't available. If DEBUGFS is set but
1502 * DEBUG is not, these will just compile out.
1503 */
1470 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", 1504 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
1471 priv->delta_statistics.tx.rx_detected_cnt); 1505 priv->_agn.delta_statistics.tx.rx_detected_cnt);
1472 IWL_DEBUG_RADIO(priv, 1506 IWL_DEBUG_RADIO(priv,
1473 "ack_or_ba_timeout_collision delta = %d\n", 1507 "ack_or_ba_timeout_collision delta = %d\n",
1474 priv->delta_statistics.tx. 1508 priv->_agn.delta_statistics.tx.
1475 ack_or_ba_timeout_collision); 1509 ack_or_ba_timeout_collision);
1476#endif 1510#endif
1477 IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", 1511 IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
@@ -1658,6 +1692,7 @@ static void iwl_nic_start(struct iwl_priv *priv)
1658 1692
1659struct iwlagn_ucode_capabilities { 1693struct iwlagn_ucode_capabilities {
1660 u32 max_probe_length; 1694 u32 max_probe_length;
1695 u32 standard_phy_calibration_size;
1661}; 1696};
1662 1697
1663static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); 1698static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
@@ -1694,6 +1729,9 @@ struct iwlagn_firmware_pieces {
1694 size_t inst_size, data_size, init_size, init_data_size, boot_size; 1729 size_t inst_size, data_size, init_size, init_data_size, boot_size;
1695 1730
1696 u32 build; 1731 u32 build;
1732
1733 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
1734 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
1697}; 1735};
1698 1736
1699static int iwlagn_load_legacy_firmware(struct iwl_priv *priv, 1737static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
@@ -1787,12 +1825,20 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1787 const u8 *data; 1825 const u8 *data;
1788 int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp; 1826 int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
1789 u64 alternatives; 1827 u64 alternatives;
1828 u32 tlv_len;
1829 enum iwl_ucode_tlv_type tlv_type;
1830 const u8 *tlv_data;
1790 1831
1791 if (len < sizeof(*ucode)) 1832 if (len < sizeof(*ucode)) {
1833 IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
1792 return -EINVAL; 1834 return -EINVAL;
1835 }
1793 1836
1794 if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) 1837 if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
1838 IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
1839 le32_to_cpu(ucode->magic));
1795 return -EINVAL; 1840 return -EINVAL;
1841 }
1796 1842
1797 /* 1843 /*
1798 * Check which alternatives are present, and "downgrade" 1844 * Check which alternatives are present, and "downgrade"
@@ -1818,10 +1864,7 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1818 len -= sizeof(*ucode); 1864 len -= sizeof(*ucode);
1819 1865
1820 while (len >= sizeof(*tlv)) { 1866 while (len >= sizeof(*tlv)) {
1821 u32 tlv_len;
1822 enum iwl_ucode_tlv_type tlv_type;
1823 u16 tlv_alt; 1867 u16 tlv_alt;
1824 const u8 *tlv_data;
1825 1868
1826 len -= sizeof(*tlv); 1869 len -= sizeof(*tlv);
1827 tlv = (void *)data; 1870 tlv = (void *)data;
@@ -1831,8 +1874,11 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1831 tlv_alt = le16_to_cpu(tlv->alternative); 1874 tlv_alt = le16_to_cpu(tlv->alternative);
1832 tlv_data = tlv->data; 1875 tlv_data = tlv->data;
1833 1876
1834 if (len < tlv_len) 1877 if (len < tlv_len) {
1878 IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
1879 len, tlv_len);
1835 return -EINVAL; 1880 return -EINVAL;
1881 }
1836 len -= ALIGN(tlv_len, 4); 1882 len -= ALIGN(tlv_len, 4);
1837 data += sizeof(*tlv) + ALIGN(tlv_len, 4); 1883 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
1838 1884
@@ -1866,20 +1912,77 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1866 pieces->boot_size = tlv_len; 1912 pieces->boot_size = tlv_len;
1867 break; 1913 break;
1868 case IWL_UCODE_TLV_PROBE_MAX_LEN: 1914 case IWL_UCODE_TLV_PROBE_MAX_LEN:
1869 if (tlv_len != 4) 1915 if (tlv_len != sizeof(u32))
1870 return -EINVAL; 1916 goto invalid_tlv_len;
1871 capa->max_probe_length = 1917 capa->max_probe_length =
1872 le32_to_cpup((__le32 *)tlv_data); 1918 le32_to_cpup((__le32 *)tlv_data);
1919 break;
1920 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
1921 if (tlv_len != sizeof(u32))
1922 goto invalid_tlv_len;
1923 pieces->init_evtlog_ptr =
1924 le32_to_cpup((__le32 *)tlv_data);
1925 break;
1926 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
1927 if (tlv_len != sizeof(u32))
1928 goto invalid_tlv_len;
1929 pieces->init_evtlog_size =
1930 le32_to_cpup((__le32 *)tlv_data);
1931 break;
1932 case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
1933 if (tlv_len != sizeof(u32))
1934 goto invalid_tlv_len;
1935 pieces->init_errlog_ptr =
1936 le32_to_cpup((__le32 *)tlv_data);
1937 break;
1938 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
1939 if (tlv_len != sizeof(u32))
1940 goto invalid_tlv_len;
1941 pieces->inst_evtlog_ptr =
1942 le32_to_cpup((__le32 *)tlv_data);
1943 break;
1944 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
1945 if (tlv_len != sizeof(u32))
1946 goto invalid_tlv_len;
1947 pieces->inst_evtlog_size =
1948 le32_to_cpup((__le32 *)tlv_data);
1949 break;
1950 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
1951 if (tlv_len != sizeof(u32))
1952 goto invalid_tlv_len;
1953 pieces->inst_errlog_ptr =
1954 le32_to_cpup((__le32 *)tlv_data);
1955 break;
1956 case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
1957 if (tlv_len)
1958 goto invalid_tlv_len;
1959 priv->enhance_sensitivity_table = true;
1960 break;
1961 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
1962 if (tlv_len != sizeof(u32))
1963 goto invalid_tlv_len;
1964 capa->standard_phy_calibration_size =
1965 le32_to_cpup((__le32 *)tlv_data);
1873 break; 1966 break;
1874 default: 1967 default:
1968 IWL_WARN(priv, "unknown TLV: %d\n", tlv_type);
1875 break; 1969 break;
1876 } 1970 }
1877 } 1971 }
1878 1972
1879 if (len) 1973 if (len) {
1974 IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len);
1975 iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len);
1880 return -EINVAL; 1976 return -EINVAL;
1977 }
1881 1978
1882 return 0; 1979 return 0;
1980
1981 invalid_tlv_len:
1982 IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
1983 iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len);
1984
1985 return -EINVAL;
1883} 1986}
1884 1987
1885/** 1988/**
@@ -1901,6 +2004,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1901 u32 build; 2004 u32 build;
1902 struct iwlagn_ucode_capabilities ucode_capa = { 2005 struct iwlagn_ucode_capabilities ucode_capa = {
1903 .max_probe_length = 200, 2006 .max_probe_length = 200,
2007 .standard_phy_calibration_size =
2008 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE,
1904 }; 2009 };
1905 2010
1906 memset(&pieces, 0, sizeof(pieces)); 2011 memset(&pieces, 0, sizeof(pieces));
@@ -2063,6 +2168,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2063 goto err_pci_alloc; 2168 goto err_pci_alloc;
2064 } 2169 }
2065 2170
2171 /* Now that we can no longer fail, copy information */
2172
2173 /*
2174 * The (size - 16) / 12 formula is based on the information recorded
2175 * for each event, which is of mode 1 (including timestamp) for all
2176 * new microcodes that include this information.
2177 */
2178 priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr;
2179 if (pieces.init_evtlog_size)
2180 priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
2181 else
2182 priv->_agn.init_evtlog_size = priv->cfg->max_event_log_size;
2183 priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr;
2184 priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr;
2185 if (pieces.inst_evtlog_size)
2186 priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
2187 else
2188 priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size;
2189 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
2190
2066 /* Copy images into buffers for card's bus-master reads ... */ 2191 /* Copy images into buffers for card's bus-master reads ... */
2067 2192
2068 /* Runtime instructions (first block of data in file) */ 2193 /* Runtime instructions (first block of data in file) */
@@ -2102,6 +2227,20 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2102 pieces.boot_size); 2227 pieces.boot_size);
2103 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); 2228 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
2104 2229
2230 /*
2231 * figure out the offset of chain noise reset and gain commands
2232 * base on the size of standard phy calibration commands table size
2233 */
2234 if (ucode_capa.standard_phy_calibration_size >
2235 IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
2236 ucode_capa.standard_phy_calibration_size =
2237 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
2238
2239 priv->_agn.phy_calib_chain_noise_reset_cmd =
2240 ucode_capa.standard_phy_calibration_size;
2241 priv->_agn.phy_calib_chain_noise_gain_cmd =
2242 ucode_capa.standard_phy_calibration_size + 1;
2243
2105 /************************************************** 2244 /**************************************************
2106 * This is still part of probe() in a sense... 2245 * This is still part of probe() in a sense...
2107 * 2246 *
@@ -2172,17 +2311,41 @@ static const char *desc_lookup_text[] = {
2172 "DEBUG_1", 2311 "DEBUG_1",
2173 "DEBUG_2", 2312 "DEBUG_2",
2174 "DEBUG_3", 2313 "DEBUG_3",
2175 "ADVANCED SYSASSERT"
2176}; 2314};
2177 2315
2178static const char *desc_lookup(int i) 2316static struct { char *name; u8 num; } advanced_lookup[] = {
2317 { "NMI_INTERRUPT_WDG", 0x34 },
2318 { "SYSASSERT", 0x35 },
2319 { "UCODE_VERSION_MISMATCH", 0x37 },
2320 { "BAD_COMMAND", 0x38 },
2321 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
2322 { "FATAL_ERROR", 0x3D },
2323 { "NMI_TRM_HW_ERR", 0x46 },
2324 { "NMI_INTERRUPT_TRM", 0x4C },
2325 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
2326 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
2327 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
2328 { "NMI_INTERRUPT_HOST", 0x66 },
2329 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
2330 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
2331 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
2332 { "ADVANCED_SYSASSERT", 0 },
2333};
2334
2335static const char *desc_lookup(u32 num)
2179{ 2336{
2180 int max = ARRAY_SIZE(desc_lookup_text) - 1; 2337 int i;
2338 int max = ARRAY_SIZE(desc_lookup_text);
2181 2339
2182 if (i < 0 || i > max) 2340 if (num < max)
2183 i = max; 2341 return desc_lookup_text[num];
2184 2342
2185 return desc_lookup_text[i]; 2343 max = ARRAY_SIZE(advanced_lookup) - 1;
2344 for (i = 0; i < max; i++) {
2345 if (advanced_lookup[i].num == num)
2346 break;;
2347 }
2348 return advanced_lookup[i].name;
2186} 2349}
2187 2350
2188#define ERROR_START_OFFSET (1 * sizeof(u32)) 2351#define ERROR_START_OFFSET (1 * sizeof(u32))
@@ -2195,10 +2358,15 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
2195 u32 blink1, blink2, ilink1, ilink2; 2358 u32 blink1, blink2, ilink1, ilink2;
2196 u32 pc, hcmd; 2359 u32 pc, hcmd;
2197 2360
2198 if (priv->ucode_type == UCODE_INIT) 2361 if (priv->ucode_type == UCODE_INIT) {
2199 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); 2362 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
2200 else 2363 if (!base)
2364 base = priv->_agn.init_errlog_ptr;
2365 } else {
2201 base = le32_to_cpu(priv->card_alive.error_event_table_ptr); 2366 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
2367 if (!base)
2368 base = priv->_agn.inst_errlog_ptr;
2369 }
2202 2370
2203 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 2371 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
2204 IWL_ERR(priv, 2372 IWL_ERR(priv,
@@ -2230,9 +2398,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
2230 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, 2398 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
2231 blink1, blink2, ilink1, ilink2); 2399 blink1, blink2, ilink1, ilink2);
2232 2400
2233 IWL_ERR(priv, "Desc Time " 2401 IWL_ERR(priv, "Desc Time "
2234 "data1 data2 line\n"); 2402 "data1 data2 line\n");
2235 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", 2403 IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
2236 desc_lookup(desc), desc, time, data1, data2, line); 2404 desc_lookup(desc), desc, time, data1, data2, line);
2237 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); 2405 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
2238 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", 2406 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
@@ -2258,10 +2426,16 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
2258 2426
2259 if (num_events == 0) 2427 if (num_events == 0)
2260 return pos; 2428 return pos;
2261 if (priv->ucode_type == UCODE_INIT) 2429
2430 if (priv->ucode_type == UCODE_INIT) {
2262 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 2431 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
2263 else 2432 if (!base)
2433 base = priv->_agn.init_evtlog_ptr;
2434 } else {
2264 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 2435 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
2436 if (!base)
2437 base = priv->_agn.inst_evtlog_ptr;
2438 }
2265 2439
2266 if (mode == 0) 2440 if (mode == 0)
2267 event_size = 2 * sizeof(u32); 2441 event_size = 2 * sizeof(u32);
@@ -2363,13 +2537,21 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2363 u32 num_wraps; /* # times uCode wrapped to top of log */ 2537 u32 num_wraps; /* # times uCode wrapped to top of log */
2364 u32 next_entry; /* index of next entry to be written by uCode */ 2538 u32 next_entry; /* index of next entry to be written by uCode */
2365 u32 size; /* # entries that we'll print */ 2539 u32 size; /* # entries that we'll print */
2540 u32 logsize;
2366 int pos = 0; 2541 int pos = 0;
2367 size_t bufsz = 0; 2542 size_t bufsz = 0;
2368 2543
2369 if (priv->ucode_type == UCODE_INIT) 2544 if (priv->ucode_type == UCODE_INIT) {
2370 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 2545 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
2371 else 2546 logsize = priv->_agn.init_evtlog_size;
2547 if (!base)
2548 base = priv->_agn.init_evtlog_ptr;
2549 } else {
2372 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 2550 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
2551 logsize = priv->_agn.inst_evtlog_size;
2552 if (!base)
2553 base = priv->_agn.inst_evtlog_ptr;
2554 }
2373 2555
2374 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 2556 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
2375 IWL_ERR(priv, 2557 IWL_ERR(priv,
@@ -2384,16 +2566,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2384 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 2566 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
2385 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 2567 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
2386 2568
2387 if (capacity > priv->cfg->max_event_log_size) { 2569 if (capacity > logsize) {
2388 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 2570 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
2389 capacity, priv->cfg->max_event_log_size); 2571 capacity, logsize);
2390 capacity = priv->cfg->max_event_log_size; 2572 capacity = logsize;
2391 } 2573 }
2392 2574
2393 if (next_entry > priv->cfg->max_event_log_size) { 2575 if (next_entry > logsize) {
2394 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", 2576 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
2395 next_entry, priv->cfg->max_event_log_size); 2577 next_entry, logsize);
2396 next_entry = priv->cfg->max_event_log_size; 2578 next_entry = logsize;
2397 } 2579 }
2398 2580
2399 size = num_wraps ? capacity : next_entry; 2581 size = num_wraps ? capacity : next_entry;
@@ -2518,8 +2700,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2518 2700
2519 if (priv->cfg->ops->hcmd->set_rxon_chain) 2701 if (priv->cfg->ops->hcmd->set_rxon_chain)
2520 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2702 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2521
2522 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2523 } 2703 }
2524 2704
2525 /* Configure Bluetooth device coexistence support */ 2705 /* Configure Bluetooth device coexistence support */
@@ -2843,9 +3023,17 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
2843 } 3023 }
2844 3024
2845 if (priv->start_calib) { 3025 if (priv->start_calib) {
2846 iwl_chain_noise_calibration(priv, &priv->statistics); 3026 if (priv->cfg->bt_statistics) {
2847 3027 iwl_chain_noise_calibration(priv,
2848 iwl_sensitivity_calibration(priv, &priv->statistics); 3028 (void *)&priv->_agn.statistics_bt);
3029 iwl_sensitivity_calibration(priv,
3030 (void *)&priv->_agn.statistics_bt);
3031 } else {
3032 iwl_chain_noise_calibration(priv,
3033 (void *)&priv->_agn.statistics);
3034 iwl_sensitivity_calibration(priv,
3035 (void *)&priv->_agn.statistics);
3036 }
2849 } 3037 }
2850 3038
2851 mutex_unlock(&priv->mutex); 3039 mutex_unlock(&priv->mutex);
@@ -2934,20 +3122,16 @@ void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
2934 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3122 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2935 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3123 vif->bss_conf.aid, vif->bss_conf.beacon_int);
2936 3124
2937 if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 3125 if (vif->bss_conf.use_short_preamble)
2938 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3126 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2939 else 3127 else
2940 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3128 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2941 3129
2942 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3130 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
2943 if (vif->bss_conf.assoc_capability & 3131 if (vif->bss_conf.use_short_slot)
2944 WLAN_CAPABILITY_SHORT_SLOT_TIME)
2945 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3132 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2946 else 3133 else
2947 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3134 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2948
2949 if (vif->type == NL80211_IFTYPE_ADHOC)
2950 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2951 } 3135 }
2952 3136
2953 iwlcore_commit_rxon(priv); 3137 iwlcore_commit_rxon(priv);
@@ -3173,8 +3357,7 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3173 3357
3174 priv->staging_rxon.assoc_id = 0; 3358 priv->staging_rxon.assoc_id = 0;
3175 3359
3176 if (vif->bss_conf.assoc_capability & 3360 if (vif->bss_conf.use_short_preamble)
3177 WLAN_CAPABILITY_SHORT_PREAMBLE)
3178 priv->staging_rxon.flags |= 3361 priv->staging_rxon.flags |=
3179 RXON_FLG_SHORT_PREAMBLE_MSK; 3362 RXON_FLG_SHORT_PREAMBLE_MSK;
3180 else 3363 else
@@ -3182,17 +3365,12 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3182 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3365 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3183 3366
3184 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3367 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
3185 if (vif->bss_conf.assoc_capability & 3368 if (vif->bss_conf.use_short_slot)
3186 WLAN_CAPABILITY_SHORT_SLOT_TIME)
3187 priv->staging_rxon.flags |= 3369 priv->staging_rxon.flags |=
3188 RXON_FLG_SHORT_SLOT_MSK; 3370 RXON_FLG_SHORT_SLOT_MSK;
3189 else 3371 else
3190 priv->staging_rxon.flags &= 3372 priv->staging_rxon.flags &=
3191 ~RXON_FLG_SHORT_SLOT_MSK; 3373 ~RXON_FLG_SHORT_SLOT_MSK;
3192
3193 if (vif->type == NL80211_IFTYPE_ADHOC)
3194 priv->staging_rxon.flags &=
3195 ~RXON_FLG_SHORT_SLOT_MSK;
3196 } 3374 }
3197 /* restore RXON assoc */ 3375 /* restore RXON assoc */
3198 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3376 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
@@ -3238,17 +3416,9 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3238 return -EOPNOTSUPP; 3416 return -EOPNOTSUPP;
3239 } 3417 }
3240 3418
3241 if (sta) { 3419 sta_id = iwl_sta_id_or_broadcast(priv, sta);
3242 sta_id = iwl_sta_id(sta); 3420 if (sta_id == IWL_INVALID_STATION)
3243 3421 return -EINVAL;
3244 if (sta_id == IWL_INVALID_STATION) {
3245 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
3246 sta->addr);
3247 return -EINVAL;
3248 }
3249 } else {
3250 sta_id = priv->hw_params.bcast_sta_id;
3251 }
3252 3422
3253 mutex_lock(&priv->mutex); 3423 mutex_lock(&priv->mutex);
3254 iwl_scan_cancel_timeout(priv, 100); 3424 iwl_scan_cancel_timeout(priv, 100);
@@ -3294,13 +3464,32 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3294 return ret; 3464 return ret;
3295} 3465}
3296 3466
3467/*
3468 * switch to RTS/CTS for TX
3469 */
3470static void iwl_enable_rts_cts(struct iwl_priv *priv)
3471{
3472
3473 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3474 return;
3475
3476 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
3477 if (!test_bit(STATUS_SCANNING, &priv->status)) {
3478 IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n");
3479 iwlcore_commit_rxon(priv);
3480 } else {
3481 /* scanning, defer the request until scan completed */
3482 IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n");
3483 }
3484}
3485
3297static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, 3486static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3298 struct ieee80211_vif *vif, 3487 struct ieee80211_vif *vif,
3299 enum ieee80211_ampdu_mlme_action action, 3488 enum ieee80211_ampdu_mlme_action action,
3300 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3489 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3301{ 3490{
3302 struct iwl_priv *priv = hw->priv; 3491 struct iwl_priv *priv = hw->priv;
3303 int ret; 3492 int ret = -EINVAL;
3304 3493
3305 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 3494 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
3306 sta->addr, tid); 3495 sta->addr, tid);
@@ -3308,17 +3497,19 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3308 if (!(priv->cfg->sku & IWL_SKU_N)) 3497 if (!(priv->cfg->sku & IWL_SKU_N))
3309 return -EACCES; 3498 return -EACCES;
3310 3499
3500 mutex_lock(&priv->mutex);
3501
3311 switch (action) { 3502 switch (action) {
3312 case IEEE80211_AMPDU_RX_START: 3503 case IEEE80211_AMPDU_RX_START:
3313 IWL_DEBUG_HT(priv, "start Rx\n"); 3504 IWL_DEBUG_HT(priv, "start Rx\n");
3314 return iwl_sta_rx_agg_start(priv, sta, tid, *ssn); 3505 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
3506 break;
3315 case IEEE80211_AMPDU_RX_STOP: 3507 case IEEE80211_AMPDU_RX_STOP:
3316 IWL_DEBUG_HT(priv, "stop Rx\n"); 3508 IWL_DEBUG_HT(priv, "stop Rx\n");
3317 ret = iwl_sta_rx_agg_stop(priv, sta, tid); 3509 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
3318 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3510 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3319 return 0; 3511 ret = 0;
3320 else 3512 break;
3321 return ret;
3322 case IEEE80211_AMPDU_TX_START: 3513 case IEEE80211_AMPDU_TX_START:
3323 IWL_DEBUG_HT(priv, "start Tx\n"); 3514 IWL_DEBUG_HT(priv, "start Tx\n");
3324 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); 3515 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
@@ -3327,7 +3518,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3327 IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", 3518 IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
3328 priv->_agn.agg_tids_count); 3519 priv->_agn.agg_tids_count);
3329 } 3520 }
3330 return ret; 3521 break;
3331 case IEEE80211_AMPDU_TX_STOP: 3522 case IEEE80211_AMPDU_TX_STOP:
3332 IWL_DEBUG_HT(priv, "stop Tx\n"); 3523 IWL_DEBUG_HT(priv, "stop Tx\n");
3333 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); 3524 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
@@ -3337,18 +3528,22 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3337 priv->_agn.agg_tids_count); 3528 priv->_agn.agg_tids_count);
3338 } 3529 }
3339 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3530 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3340 return 0; 3531 ret = 0;
3341 else 3532 break;
3342 return ret;
3343 case IEEE80211_AMPDU_TX_OPERATIONAL: 3533 case IEEE80211_AMPDU_TX_OPERATIONAL:
3344 /* do nothing */ 3534 if (priv->cfg->use_rts_for_ht) {
3345 return -EOPNOTSUPP; 3535 /*
3346 default: 3536 * switch to RTS/CTS if it is the prefer protection
3347 IWL_DEBUG_HT(priv, "unknown\n"); 3537 * method for HT traffic
3348 return -EINVAL; 3538 */
3539 iwl_enable_rts_cts(priv);
3540 }
3541 ret = 0;
3349 break; 3542 break;
3350 } 3543 }
3351 return 0; 3544 mutex_unlock(&priv->mutex);
3545
3546 return ret;
3352} 3547}
3353 3548
3354static void iwl_mac_sta_notify(struct ieee80211_hw *hw, 3549static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
@@ -3423,6 +3618,136 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3423 return 0; 3618 return 0;
3424} 3619}
3425 3620
3621static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3622 struct ieee80211_channel_switch *ch_switch)
3623{
3624 struct iwl_priv *priv = hw->priv;
3625 const struct iwl_channel_info *ch_info;
3626 struct ieee80211_conf *conf = &hw->conf;
3627 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
3628 u16 ch;
3629 unsigned long flags = 0;
3630
3631 IWL_DEBUG_MAC80211(priv, "enter\n");
3632
3633 if (iwl_is_rfkill(priv))
3634 goto out_exit;
3635
3636 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
3637 test_bit(STATUS_SCANNING, &priv->status))
3638 goto out_exit;
3639
3640 if (!iwl_is_associated(priv))
3641 goto out_exit;
3642
3643 /* channel switch in progress */
3644 if (priv->switch_rxon.switch_in_progress == true)
3645 goto out_exit;
3646
3647 mutex_lock(&priv->mutex);
3648 if (priv->cfg->ops->lib->set_channel_switch) {
3649
3650 ch = ieee80211_frequency_to_channel(
3651 ch_switch->channel->center_freq);
3652 if (le16_to_cpu(priv->active_rxon.channel) != ch) {
3653 ch_info = iwl_get_channel_info(priv,
3654 conf->channel->band,
3655 ch);
3656 if (!is_channel_valid(ch_info)) {
3657 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
3658 goto out;
3659 }
3660 spin_lock_irqsave(&priv->lock, flags);
3661
3662 priv->current_ht_config.smps = conf->smps_mode;
3663
3664 /* Configure HT40 channels */
3665 ht_conf->is_ht = conf_is_ht(conf);
3666 if (ht_conf->is_ht) {
3667 if (conf_is_ht40_minus(conf)) {
3668 ht_conf->extension_chan_offset =
3669 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
3670 ht_conf->is_40mhz = true;
3671 } else if (conf_is_ht40_plus(conf)) {
3672 ht_conf->extension_chan_offset =
3673 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
3674 ht_conf->is_40mhz = true;
3675 } else {
3676 ht_conf->extension_chan_offset =
3677 IEEE80211_HT_PARAM_CHA_SEC_NONE;
3678 ht_conf->is_40mhz = false;
3679 }
3680 } else
3681 ht_conf->is_40mhz = false;
3682
3683 /* if we are switching from ht to 2.4 clear flags
3684 * from any ht related info since 2.4 does not
3685 * support ht */
3686 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
3687 priv->staging_rxon.flags = 0;
3688
3689 iwl_set_rxon_channel(priv, conf->channel);
3690 iwl_set_rxon_ht(priv, ht_conf);
3691 iwl_set_flags_for_band(priv, conf->channel->band,
3692 priv->vif);
3693 spin_unlock_irqrestore(&priv->lock, flags);
3694
3695 iwl_set_rate(priv);
3696 /*
3697 * at this point, staging_rxon has the
3698 * configuration for channel switch
3699 */
3700 if (priv->cfg->ops->lib->set_channel_switch(priv,
3701 ch_switch))
3702 priv->switch_rxon.switch_in_progress = false;
3703 }
3704 }
3705out:
3706 mutex_unlock(&priv->mutex);
3707out_exit:
3708 if (!priv->switch_rxon.switch_in_progress)
3709 ieee80211_chswitch_done(priv->vif, false);
3710 IWL_DEBUG_MAC80211(priv, "leave\n");
3711}
3712
3713static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
3714{
3715 struct iwl_priv *priv = hw->priv;
3716
3717 mutex_lock(&priv->mutex);
3718 IWL_DEBUG_MAC80211(priv, "enter\n");
3719
3720 /* do not support "flush" */
3721 if (!priv->cfg->ops->lib->txfifo_flush)
3722 goto done;
3723
3724 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3725 IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
3726 goto done;
3727 }
3728 if (iwl_is_rfkill(priv)) {
3729 IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
3730 goto done;
3731 }
3732
3733 /*
3734 * mac80211 will not push any more frames for transmit
3735 * until the flush is completed
3736 */
3737 if (drop) {
3738 IWL_DEBUG_MAC80211(priv, "send flush command\n");
3739 if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
3740 IWL_ERR(priv, "flush request fail\n");
3741 goto done;
3742 }
3743 }
3744 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
3745 iwlagn_wait_tx_queue_empty(priv);
3746done:
3747 mutex_unlock(&priv->mutex);
3748 IWL_DEBUG_MAC80211(priv, "leave\n");
3749}
3750
3426/***************************************************************************** 3751/*****************************************************************************
3427 * 3752 *
3428 * driver setup and teardown 3753 * driver setup and teardown
@@ -3439,6 +3764,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3439 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); 3764 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
3440 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); 3765 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
3441 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); 3766 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
3767 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
3442 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); 3768 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
3443 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); 3769 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
3444 3770
@@ -3479,6 +3805,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3479 cancel_delayed_work(&priv->scan_check); 3805 cancel_delayed_work(&priv->scan_check);
3480 cancel_work_sync(&priv->start_internal_scan); 3806 cancel_work_sync(&priv->start_internal_scan);
3481 cancel_delayed_work(&priv->alive_start); 3807 cancel_delayed_work(&priv->alive_start);
3808 cancel_work_sync(&priv->run_time_calib_work);
3482 cancel_work_sync(&priv->beacon_update); 3809 cancel_work_sync(&priv->beacon_update);
3483 del_timer_sync(&priv->statistics_periodic); 3810 del_timer_sync(&priv->statistics_periodic);
3484 del_timer_sync(&priv->ucode_trace); 3811 del_timer_sync(&priv->ucode_trace);
@@ -3594,6 +3921,8 @@ static struct ieee80211_ops iwl_hw_ops = {
3594 .sta_notify = iwl_mac_sta_notify, 3921 .sta_notify = iwl_mac_sta_notify,
3595 .sta_add = iwlagn_mac_sta_add, 3922 .sta_add = iwlagn_mac_sta_add,
3596 .sta_remove = iwl_mac_sta_remove, 3923 .sta_remove = iwl_mac_sta_remove,
3924 .channel_switch = iwl_mac_channel_switch,
3925 .flush = iwl_mac_flush,
3597}; 3926};
3598 3927
3599static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3928static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3603,7 +3932,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3603 struct ieee80211_hw *hw; 3932 struct ieee80211_hw *hw;
3604 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 3933 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3605 unsigned long flags; 3934 unsigned long flags;
3606 u16 pci_cmd; 3935 u16 pci_cmd, num_mac;
3607 3936
3608 /************************ 3937 /************************
3609 * 1. Allocating HW data 3938 * 1. Allocating HW data
@@ -3633,9 +3962,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3633 priv->pci_dev = pdev; 3962 priv->pci_dev = pdev;
3634 priv->inta_mask = CSR_INI_SET_MASK; 3963 priv->inta_mask = CSR_INI_SET_MASK;
3635 3964
3636#ifdef CONFIG_IWLWIFI_DEBUG
3637 atomic_set(&priv->restrict_refcnt, 0);
3638#endif
3639 if (iwl_alloc_traffic_mem(priv)) 3965 if (iwl_alloc_traffic_mem(priv))
3640 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 3966 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3641 3967
@@ -3724,9 +4050,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3724 goto out_free_eeprom; 4050 goto out_free_eeprom;
3725 4051
3726 /* extract MAC Address */ 4052 /* extract MAC Address */
3727 iwl_eeprom_get_mac(priv, priv->mac_addr); 4053 iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
3728 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); 4054 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3729 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 4055 priv->hw->wiphy->addresses = priv->addresses;
4056 priv->hw->wiphy->n_addresses = 1;
4057 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
4058 if (num_mac > 1) {
4059 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
4060 ETH_ALEN);
4061 priv->addresses[1].addr[5]++;
4062 priv->hw->wiphy->n_addresses++;
4063 }
3730 4064
3731 /************************ 4065 /************************
3732 * 5. Setup HW constants 4066 * 5. Setup HW constants
@@ -3993,6 +4327,47 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3993 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)}, 4327 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
3994 {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)}, 4328 {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
3995 {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)}, 4329 {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
4330 {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
4331 {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
4332 {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
4333 {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
4334 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
4335 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
4336 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
4337 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)},
4338 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)},
4339 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)},
4340 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},
4341
4342/* 6x00 Series Gen2b */
4343 {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
4344 {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
4345 {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
4346 {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
4347 {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
4348 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
4349 {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
4350 {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
4351 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
4352 {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
4353 {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
4354 {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
4355 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
4356 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
4357 {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
4358 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
4359 {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
4360 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
4361 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
4362 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
4363 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
4364 {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)},
4365 {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)},
4366 {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)},
4367 {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)},
4368 {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)},
4369 {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)},
4370 {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)},
3996 4371
3997/* 6x50 WiFi/WiMax Series */ 4372/* 6x50 WiFi/WiMax Series */
3998 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, 4373 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
@@ -4002,6 +4377,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4002 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, 4377 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
4003 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, 4378 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
4004 4379
4380/* 6x50 WiFi/WiMax Series Gen2 */
4381 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6050g2_bgn_cfg)},
4382 {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6050g2_bgn_cfg)},
4383 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6050g2_bgn_cfg)},
4384 {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6050g2_bgn_cfg)},
4385 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6050g2_bgn_cfg)},
4386 {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6050g2_bgn_cfg)},
4387
4005/* 1000 Series WiFi */ 4388/* 1000 Series WiFi */
4006 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, 4389 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
4007 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, 4390 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
@@ -4036,19 +4419,18 @@ static int __init iwl_init(void)
4036{ 4419{
4037 4420
4038 int ret; 4421 int ret;
4039 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); 4422 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
4040 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); 4423 pr_info(DRV_COPYRIGHT "\n");
4041 4424
4042 ret = iwlagn_rate_control_register(); 4425 ret = iwlagn_rate_control_register();
4043 if (ret) { 4426 if (ret) {
4044 printk(KERN_ERR DRV_NAME 4427 pr_err("Unable to register rate control algorithm: %d\n", ret);
4045 "Unable to register rate control algorithm: %d\n", ret);
4046 return ret; 4428 return ret;
4047 } 4429 }
4048 4430
4049 ret = pci_register_driver(&iwl_driver); 4431 ret = pci_register_driver(&iwl_driver);
4050 if (ret) { 4432 if (ret) {
4051 printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n"); 4433 pr_err("Unable to initialize PCI module\n");
4052 goto error_register; 4434 goto error_register;
4053 } 4435 }
4054 4436
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2d748053358e..cc6464dc72e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,34 @@
65 65
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67 67
68/* configuration for the _agn devices */
69extern struct iwl_cfg iwl4965_agn_cfg;
70extern struct iwl_cfg iwl5300_agn_cfg;
71extern struct iwl_cfg iwl5100_agn_cfg;
72extern struct iwl_cfg iwl5350_agn_cfg;
73extern struct iwl_cfg iwl5100_bgn_cfg;
74extern struct iwl_cfg iwl5100_abg_cfg;
75extern struct iwl_cfg iwl5150_agn_cfg;
76extern struct iwl_cfg iwl5150_abg_cfg;
77extern struct iwl_cfg iwl6000g2a_2agn_cfg;
78extern struct iwl_cfg iwl6000g2a_2abg_cfg;
79extern struct iwl_cfg iwl6000g2a_2bg_cfg;
80extern struct iwl_cfg iwl6000g2b_bgn_cfg;
81extern struct iwl_cfg iwl6000g2b_bg_cfg;
82extern struct iwl_cfg iwl6000g2b_2agn_cfg;
83extern struct iwl_cfg iwl6000g2b_2abg_cfg;
84extern struct iwl_cfg iwl6000g2b_2bgn_cfg;
85extern struct iwl_cfg iwl6000g2b_2bg_cfg;
86extern struct iwl_cfg iwl6000i_2agn_cfg;
87extern struct iwl_cfg iwl6000i_2abg_cfg;
88extern struct iwl_cfg iwl6000i_2bg_cfg;
89extern struct iwl_cfg iwl6000_3agn_cfg;
90extern struct iwl_cfg iwl6050_2agn_cfg;
91extern struct iwl_cfg iwl6050_2abg_cfg;
92extern struct iwl_cfg iwl6050g2_bgn_cfg;
93extern struct iwl_cfg iwl1000_bgn_cfg;
94extern struct iwl_cfg iwl1000_bg_cfg;
95
68extern struct iwl_mod_params iwlagn_mod_params; 96extern struct iwl_mod_params iwlagn_mod_params;
69extern struct iwl_hcmd_ops iwlagn_hcmd; 97extern struct iwl_hcmd_ops iwlagn_hcmd;
70extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; 98extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
@@ -93,6 +121,8 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
93int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, 121int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
94 u16 ssn_idx, u8 tx_fifo); 122 u16 ssn_idx, u8 tx_fifo);
95void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask); 123void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
124void iwl_free_tfds_in_queue(struct iwl_priv *priv,
125 int sta_id, int tid, int freed);
96 126
97/* uCode */ 127/* uCode */
98int iwlagn_load_ucode(struct iwl_priv *priv); 128int iwlagn_load_ucode(struct iwl_priv *priv);
@@ -102,6 +132,7 @@ void iwlagn_rx_calib_complete(struct iwl_priv *priv,
102 struct iwl_rx_mem_buffer *rxb); 132 struct iwl_rx_mem_buffer *rxb);
103void iwlagn_init_alive_start(struct iwl_priv *priv); 133void iwlagn_init_alive_start(struct iwl_priv *priv);
104int iwlagn_alive_notify(struct iwl_priv *priv); 134int iwlagn_alive_notify(struct iwl_priv *priv);
135int iwl_verify_ucode(struct iwl_priv *priv);
105 136
106/* lib */ 137/* lib */
107void iwl_check_abort_status(struct iwl_priv *priv, 138void iwl_check_abort_status(struct iwl_priv *priv,
@@ -117,6 +148,9 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
117void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 148void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
118int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 149int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
119int iwlagn_hw_nic_init(struct iwl_priv *priv); 150int iwlagn_hw_nic_init(struct iwl_priv *priv);
151int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
152int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
153void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
120 154
121/* rx */ 155/* rx */
122void iwlagn_rx_queue_restock(struct iwl_priv *priv); 156void iwlagn_rx_queue_restock(struct iwl_priv *priv);
@@ -171,6 +205,16 @@ static inline bool iwl_is_tx_success(u32 status)
171 (status == TX_STATUS_DIRECT_DONE); 205 (status == TX_STATUS_DIRECT_DONE);
172} 206}
173 207
208/* rx */
209void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
210 struct iwl_rx_mem_buffer *rxb);
211bool iwl_good_plcp_health(struct iwl_priv *priv,
212 struct iwl_rx_packet *pkt);
213void iwl_rx_statistics(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb);
215void iwl_reply_statistics(struct iwl_priv *priv,
216 struct iwl_rx_mem_buffer *rxb);
217
174/* scan */ 218/* scan */
175void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); 219void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
176 220
@@ -178,4 +222,8 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
178int iwlagn_manage_ibss_station(struct iwl_priv *priv, 222int iwlagn_manage_ibss_station(struct iwl_priv *priv,
179 struct ieee80211_vif *vif, bool add); 223 struct ieee80211_vif *vif, bool add);
180 224
225/* hcmd */
226int iwlagn_send_rxon_assoc(struct iwl_priv *priv);
227int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
228
181#endif /* __iwl_agn_h__ */ 229#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index 2b7b1df83ba0..ba9523fbb300 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -66,10 +66,8 @@
66#include "iwl-core.h" 66#include "iwl-core.h"
67#include "iwl-commands.h" 67#include "iwl-commands.h"
68 68
69void iwl_chain_noise_calibration(struct iwl_priv *priv, 69void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
70 struct iwl_notif_statistics *stat_resp); 70void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp);
71void iwl_sensitivity_calibration(struct iwl_priv *priv,
72 struct iwl_notif_statistics *resp);
73 71
74void iwl_init_sensitivity(struct iwl_priv *priv); 72void iwl_init_sensitivity(struct iwl_priv *priv);
75void iwl_reset_run_time_calib(struct iwl_priv *priv); 73void iwl_reset_run_time_calib(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 9aab020c474b..60725a5c1b69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -95,8 +95,9 @@ enum {
95 95
96 /* Multi-Station support */ 96 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18, 97 REPLY_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19, /* not used */ 98 REPLY_REMOVE_STA = 0x19,
99 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */ 99 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */
100 REPLY_TXFIFO_FLUSH = 0x1e,
100 101
101 /* Security */ 102 /* Security */
102 REPLY_WEPKEY = 0x20, 103 REPLY_WEPKEY = 0x20,
@@ -227,7 +228,7 @@ struct iwl_cmd_header {
227 228
228 /* command or response/notification data follows immediately */ 229 /* command or response/notification data follows immediately */
229 u8 data[0]; 230 u8 data[0];
230} __attribute__ ((packed)); 231} __packed;
231 232
232 233
233/** 234/**
@@ -247,7 +248,7 @@ struct iwl_cmd_header {
247struct iwl3945_tx_power { 248struct iwl3945_tx_power {
248 u8 tx_gain; /* gain for analog radio */ 249 u8 tx_gain; /* gain for analog radio */
249 u8 dsp_atten; /* gain for DSP */ 250 u8 dsp_atten; /* gain for DSP */
250} __attribute__ ((packed)); 251} __packed;
251 252
252/** 253/**
253 * struct iwl3945_power_per_rate 254 * struct iwl3945_power_per_rate
@@ -258,7 +259,7 @@ struct iwl3945_power_per_rate {
258 u8 rate; /* plcp */ 259 u8 rate; /* plcp */
259 struct iwl3945_tx_power tpc; 260 struct iwl3945_tx_power tpc;
260 u8 reserved; 261 u8 reserved;
261} __attribute__ ((packed)); 262} __packed;
262 263
263/** 264/**
264 * iwlagn rate_n_flags bit fields 265 * iwlagn rate_n_flags bit fields
@@ -389,7 +390,7 @@ union iwl4965_tx_power_dual_stream {
389 */ 390 */
390struct tx_power_dual_stream { 391struct tx_power_dual_stream {
391 __le32 dw; 392 __le32 dw;
392} __attribute__ ((packed)); 393} __packed;
393 394
394/** 395/**
395 * struct iwl4965_tx_power_db 396 * struct iwl4965_tx_power_db
@@ -398,7 +399,7 @@ struct tx_power_dual_stream {
398 */ 399 */
399struct iwl4965_tx_power_db { 400struct iwl4965_tx_power_db {
400 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; 401 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
401} __attribute__ ((packed)); 402} __packed;
402 403
403/** 404/**
404 * Command REPLY_TX_POWER_DBM_CMD = 0x98 405 * Command REPLY_TX_POWER_DBM_CMD = 0x98
@@ -412,7 +413,7 @@ struct iwl5000_tx_power_dbm_cmd {
412 u8 flags; 413 u8 flags;
413 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ 414 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
414 u8 reserved; 415 u8 reserved;
415} __attribute__ ((packed)); 416} __packed;
416 417
417/** 418/**
418 * Command TX_ANT_CONFIGURATION_CMD = 0x98 419 * Command TX_ANT_CONFIGURATION_CMD = 0x98
@@ -422,7 +423,7 @@ struct iwl5000_tx_power_dbm_cmd {
422 */ 423 */
423struct iwl_tx_ant_config_cmd { 424struct iwl_tx_ant_config_cmd {
424 __le32 valid; 425 __le32 valid;
425} __attribute__ ((packed)); 426} __packed;
426 427
427/****************************************************************************** 428/******************************************************************************
428 * (0a) 429 * (0a)
@@ -478,7 +479,7 @@ struct iwl_init_alive_resp {
478 __le32 therm_r4[2]; /* signed */ 479 __le32 therm_r4[2]; /* signed */
479 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, 480 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
480 * 2 Tx chains */ 481 * 2 Tx chains */
481} __attribute__ ((packed)); 482} __packed;
482 483
483 484
484/** 485/**
@@ -570,7 +571,7 @@ struct iwl_alive_resp {
570 __le32 error_event_table_ptr; /* SRAM address for error log */ 571 __le32 error_event_table_ptr; /* SRAM address for error log */
571 __le32 timestamp; 572 __le32 timestamp;
572 __le32 is_valid; 573 __le32 is_valid;
573} __attribute__ ((packed)); 574} __packed;
574 575
575/* 576/*
576 * REPLY_ERROR = 0x2 (response only, not a command) 577 * REPLY_ERROR = 0x2 (response only, not a command)
@@ -582,7 +583,7 @@ struct iwl_error_resp {
582 __le16 bad_cmd_seq_num; 583 __le16 bad_cmd_seq_num;
583 __le32 error_info; 584 __le32 error_info;
584 __le64 timestamp; 585 __le64 timestamp;
585} __attribute__ ((packed)); 586} __packed;
586 587
587/****************************************************************************** 588/******************************************************************************
588 * (1) 589 * (1)
@@ -718,7 +719,7 @@ struct iwl3945_rxon_cmd {
718 __le32 filter_flags; 719 __le32 filter_flags;
719 __le16 channel; 720 __le16 channel;
720 __le16 reserved5; 721 __le16 reserved5;
721} __attribute__ ((packed)); 722} __packed;
722 723
723struct iwl4965_rxon_cmd { 724struct iwl4965_rxon_cmd {
724 u8 node_addr[6]; 725 u8 node_addr[6];
@@ -738,7 +739,7 @@ struct iwl4965_rxon_cmd {
738 __le16 channel; 739 __le16 channel;
739 u8 ofdm_ht_single_stream_basic_rates; 740 u8 ofdm_ht_single_stream_basic_rates;
740 u8 ofdm_ht_dual_stream_basic_rates; 741 u8 ofdm_ht_dual_stream_basic_rates;
741} __attribute__ ((packed)); 742} __packed;
742 743
743/* 5000 HW just extend this command */ 744/* 5000 HW just extend this command */
744struct iwl_rxon_cmd { 745struct iwl_rxon_cmd {
@@ -763,7 +764,7 @@ struct iwl_rxon_cmd {
763 u8 reserved5; 764 u8 reserved5;
764 __le16 acquisition_data; 765 __le16 acquisition_data;
765 __le16 reserved6; 766 __le16 reserved6;
766} __attribute__ ((packed)); 767} __packed;
767 768
768/* 769/*
769 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 770 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
@@ -774,7 +775,7 @@ struct iwl3945_rxon_assoc_cmd {
774 u8 ofdm_basic_rates; 775 u8 ofdm_basic_rates;
775 u8 cck_basic_rates; 776 u8 cck_basic_rates;
776 __le16 reserved; 777 __le16 reserved;
777} __attribute__ ((packed)); 778} __packed;
778 779
779struct iwl4965_rxon_assoc_cmd { 780struct iwl4965_rxon_assoc_cmd {
780 __le32 flags; 781 __le32 flags;
@@ -785,7 +786,7 @@ struct iwl4965_rxon_assoc_cmd {
785 u8 ofdm_ht_dual_stream_basic_rates; 786 u8 ofdm_ht_dual_stream_basic_rates;
786 __le16 rx_chain_select_flags; 787 __le16 rx_chain_select_flags;
787 __le16 reserved; 788 __le16 reserved;
788} __attribute__ ((packed)); 789} __packed;
789 790
790struct iwl5000_rxon_assoc_cmd { 791struct iwl5000_rxon_assoc_cmd {
791 __le32 flags; 792 __le32 flags;
@@ -800,7 +801,7 @@ struct iwl5000_rxon_assoc_cmd {
800 __le16 rx_chain_select_flags; 801 __le16 rx_chain_select_flags;
801 __le16 acquisition_data; 802 __le16 acquisition_data;
802 __le32 reserved3; 803 __le32 reserved3;
803} __attribute__ ((packed)); 804} __packed;
804 805
805#define IWL_CONN_MAX_LISTEN_INTERVAL 10 806#define IWL_CONN_MAX_LISTEN_INTERVAL 10
806#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ 807#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
@@ -816,7 +817,7 @@ struct iwl_rxon_time_cmd {
816 __le32 beacon_init_val; 817 __le32 beacon_init_val;
817 __le16 listen_interval; 818 __le16 listen_interval;
818 __le16 reserved; 819 __le16 reserved;
819} __attribute__ ((packed)); 820} __packed;
820 821
821/* 822/*
822 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 823 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
@@ -829,7 +830,7 @@ struct iwl3945_channel_switch_cmd {
829 __le32 rxon_filter_flags; 830 __le32 rxon_filter_flags;
830 __le32 switch_time; 831 __le32 switch_time;
831 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 832 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
832} __attribute__ ((packed)); 833} __packed;
833 834
834struct iwl4965_channel_switch_cmd { 835struct iwl4965_channel_switch_cmd {
835 u8 band; 836 u8 band;
@@ -839,7 +840,7 @@ struct iwl4965_channel_switch_cmd {
839 __le32 rxon_filter_flags; 840 __le32 rxon_filter_flags;
840 __le32 switch_time; 841 __le32 switch_time;
841 struct iwl4965_tx_power_db tx_power; 842 struct iwl4965_tx_power_db tx_power;
842} __attribute__ ((packed)); 843} __packed;
843 844
844/** 845/**
845 * struct iwl5000_channel_switch_cmd 846 * struct iwl5000_channel_switch_cmd
@@ -860,7 +861,7 @@ struct iwl5000_channel_switch_cmd {
860 __le32 rxon_filter_flags; 861 __le32 rxon_filter_flags;
861 __le32 switch_time; 862 __le32 switch_time;
862 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; 863 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
863} __attribute__ ((packed)); 864} __packed;
864 865
865/** 866/**
866 * struct iwl6000_channel_switch_cmd 867 * struct iwl6000_channel_switch_cmd
@@ -881,7 +882,7 @@ struct iwl6000_channel_switch_cmd {
881 __le32 rxon_filter_flags; 882 __le32 rxon_filter_flags;
882 __le32 switch_time; 883 __le32 switch_time;
883 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; 884 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
884} __attribute__ ((packed)); 885} __packed;
885 886
886/* 887/*
887 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 888 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
@@ -890,7 +891,7 @@ struct iwl_csa_notification {
890 __le16 band; 891 __le16 band;
891 __le16 channel; 892 __le16 channel;
892 __le32 status; /* 0 - OK, 1 - fail */ 893 __le32 status; /* 0 - OK, 1 - fail */
893} __attribute__ ((packed)); 894} __packed;
894 895
895/****************************************************************************** 896/******************************************************************************
896 * (2) 897 * (2)
@@ -920,7 +921,7 @@ struct iwl_ac_qos {
920 u8 aifsn; 921 u8 aifsn;
921 u8 reserved1; 922 u8 reserved1;
922 __le16 edca_txop; 923 __le16 edca_txop;
923} __attribute__ ((packed)); 924} __packed;
924 925
925/* QoS flags defines */ 926/* QoS flags defines */
926#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) 927#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
@@ -939,7 +940,7 @@ struct iwl_ac_qos {
939struct iwl_qosparam_cmd { 940struct iwl_qosparam_cmd {
940 __le32 qos_flags; 941 __le32 qos_flags;
941 struct iwl_ac_qos ac[AC_NUM]; 942 struct iwl_ac_qos ac[AC_NUM];
942} __attribute__ ((packed)); 943} __packed;
943 944
944/****************************************************************************** 945/******************************************************************************
945 * (3) 946 * (3)
@@ -952,20 +953,19 @@ struct iwl_qosparam_cmd {
952 953
953/* Special, dedicated locations within device's station table */ 954/* Special, dedicated locations within device's station table */
954#define IWL_AP_ID 0 955#define IWL_AP_ID 0
955#define IWL_MULTICAST_ID 1
956#define IWL_STA_ID 2 956#define IWL_STA_ID 2
957#define IWL3945_BROADCAST_ID 24 957#define IWL3945_BROADCAST_ID 24
958#define IWL3945_STATION_COUNT 25 958#define IWL3945_STATION_COUNT 25
959#define IWL4965_BROADCAST_ID 31 959#define IWL4965_BROADCAST_ID 31
960#define IWL4965_STATION_COUNT 32 960#define IWL4965_STATION_COUNT 32
961#define IWL5000_BROADCAST_ID 15 961#define IWLAGN_BROADCAST_ID 15
962#define IWL5000_STATION_COUNT 16 962#define IWLAGN_STATION_COUNT 16
963 963
964#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 964#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
965#define IWL_INVALID_STATION 255 965#define IWL_INVALID_STATION 255
966 966
967#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2); 967#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
968#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8); 968#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
969#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) 969#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
970#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) 970#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
971#define STA_FLG_MAX_AGG_SIZE_POS (19) 971#define STA_FLG_MAX_AGG_SIZE_POS (19)
@@ -1015,7 +1015,7 @@ struct iwl4965_keyinfo {
1015 u8 key_offset; 1015 u8 key_offset;
1016 u8 reserved2; 1016 u8 reserved2;
1017 u8 key[16]; /* 16-byte unicast decryption key */ 1017 u8 key[16]; /* 16-byte unicast decryption key */
1018} __attribute__ ((packed)); 1018} __packed;
1019 1019
1020/* 5000 */ 1020/* 5000 */
1021struct iwl_keyinfo { 1021struct iwl_keyinfo {
@@ -1029,7 +1029,7 @@ struct iwl_keyinfo {
1029 __le64 tx_secur_seq_cnt; 1029 __le64 tx_secur_seq_cnt;
1030 __le64 hw_tkip_mic_rx_key; 1030 __le64 hw_tkip_mic_rx_key;
1031 __le64 hw_tkip_mic_tx_key; 1031 __le64 hw_tkip_mic_tx_key;
1032} __attribute__ ((packed)); 1032} __packed;
1033 1033
1034/** 1034/**
1035 * struct sta_id_modify 1035 * struct sta_id_modify
@@ -1049,7 +1049,7 @@ struct sta_id_modify {
1049 u8 sta_id; 1049 u8 sta_id;
1050 u8 modify_mask; 1050 u8 modify_mask;
1051 __le16 reserved2; 1051 __le16 reserved2;
1052} __attribute__ ((packed)); 1052} __packed;
1053 1053
1054/* 1054/*
1055 * REPLY_ADD_STA = 0x18 (command) 1055 * REPLY_ADD_STA = 0x18 (command)
@@ -1103,7 +1103,7 @@ struct iwl3945_addsta_cmd {
1103 /* Starting Sequence Number for added block-ack support. 1103 /* Starting Sequence Number for added block-ack support.
1104 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1104 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1105 __le16 add_immediate_ba_ssn; 1105 __le16 add_immediate_ba_ssn;
1106} __attribute__ ((packed)); 1106} __packed;
1107 1107
1108struct iwl4965_addsta_cmd { 1108struct iwl4965_addsta_cmd {
1109 u8 mode; /* 1: modify existing, 0: add new station */ 1109 u8 mode; /* 1: modify existing, 0: add new station */
@@ -1140,7 +1140,7 @@ struct iwl4965_addsta_cmd {
1140 __le16 sleep_tx_count; 1140 __le16 sleep_tx_count;
1141 1141
1142 __le16 reserved2; 1142 __le16 reserved2;
1143} __attribute__ ((packed)); 1143} __packed;
1144 1144
1145/* 5000 */ 1145/* 5000 */
1146struct iwl_addsta_cmd { 1146struct iwl_addsta_cmd {
@@ -1178,7 +1178,7 @@ struct iwl_addsta_cmd {
1178 __le16 sleep_tx_count; 1178 __le16 sleep_tx_count;
1179 1179
1180 __le16 reserved2; 1180 __le16 reserved2;
1181} __attribute__ ((packed)); 1181} __packed;
1182 1182
1183 1183
1184#define ADD_STA_SUCCESS_MSK 0x1 1184#define ADD_STA_SUCCESS_MSK 0x1
@@ -1190,7 +1190,7 @@ struct iwl_addsta_cmd {
1190 */ 1190 */
1191struct iwl_add_sta_resp { 1191struct iwl_add_sta_resp {
1192 u8 status; /* ADD_STA_* */ 1192 u8 status; /* ADD_STA_* */
1193} __attribute__ ((packed)); 1193} __packed;
1194 1194
1195#define REM_STA_SUCCESS_MSK 0x1 1195#define REM_STA_SUCCESS_MSK 0x1
1196/* 1196/*
@@ -1198,7 +1198,7 @@ struct iwl_add_sta_resp {
1198 */ 1198 */
1199struct iwl_rem_sta_resp { 1199struct iwl_rem_sta_resp {
1200 u8 status; 1200 u8 status;
1201} __attribute__ ((packed)); 1201} __packed;
1202 1202
1203/* 1203/*
1204 * REPLY_REM_STA = 0x19 (command) 1204 * REPLY_REM_STA = 0x19 (command)
@@ -1208,7 +1208,44 @@ struct iwl_rem_sta_cmd {
1208 u8 reserved[3]; 1208 u8 reserved[3];
1209 u8 addr[ETH_ALEN]; /* MAC addr of the first station */ 1209 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1210 u8 reserved2[2]; 1210 u8 reserved2[2];
1211} __attribute__ ((packed)); 1211} __packed;
1212
1213#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1214#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1215#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1216#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1217#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1218
1219#define IWL_DROP_SINGLE 0
1220#define IWL_DROP_SELECTED 1
1221#define IWL_DROP_ALL 2
1222
1223/*
1224 * REPLY_TXFIFO_FLUSH = 0x1e(command and response)
1225 *
1226 * When using full FIFO flush this command checks the scheduler HW block WR/RD
1227 * pointers to check if all the frames were transferred by DMA into the
1228 * relevant TX FIFO queue. Only when the DMA is finished and the queue is
1229 * empty the command can finish.
1230 * This command is used to flush the TXFIFO from transmit commands, it may
1231 * operate on single or multiple queues, the command queue can't be flushed by
1232 * this command. The command response is returned when all the queue flush
1233 * operations are done. Each TX command flushed return response with the FLUSH
1234 * status set in the TX response status. When FIFO flush operation is used,
1235 * the flush operation ends when both the scheduler DMA done and TXFIFO empty
1236 * are set.
1237 *
1238 * @fifo_control: bit mask for which queues to flush
1239 * @flush_control: flush controls
1240 * 0: Dump single MSDU
1241 * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
1242 * 2: Dump all FIFO
1243 */
1244struct iwl_txfifo_flush_cmd {
1245 __le32 fifo_control;
1246 __le16 flush_control;
1247 __le16 reserved;
1248} __packed;
1212 1249
1213/* 1250/*
1214 * REPLY_WEP_KEY = 0x20 1251 * REPLY_WEP_KEY = 0x20
@@ -1220,7 +1257,7 @@ struct iwl_wep_key {
1220 u8 key_size; 1257 u8 key_size;
1221 u8 reserved2[3]; 1258 u8 reserved2[3];
1222 u8 key[16]; 1259 u8 key[16];
1223} __attribute__ ((packed)); 1260} __packed;
1224 1261
1225struct iwl_wep_cmd { 1262struct iwl_wep_cmd {
1226 u8 num_keys; 1263 u8 num_keys;
@@ -1228,7 +1265,7 @@ struct iwl_wep_cmd {
1228 u8 flags; 1265 u8 flags;
1229 u8 reserved; 1266 u8 reserved;
1230 struct iwl_wep_key key[0]; 1267 struct iwl_wep_key key[0];
1231} __attribute__ ((packed)); 1268} __packed;
1232 1269
1233#define WEP_KEY_WEP_TYPE 1 1270#define WEP_KEY_WEP_TYPE 1
1234#define WEP_KEYS_MAX 4 1271#define WEP_KEYS_MAX 4
@@ -1282,7 +1319,7 @@ struct iwl3945_rx_frame_stats {
1282 __le16 sig_avg; 1319 __le16 sig_avg;
1283 __le16 noise_diff; 1320 __le16 noise_diff;
1284 u8 payload[0]; 1321 u8 payload[0];
1285} __attribute__ ((packed)); 1322} __packed;
1286 1323
1287struct iwl3945_rx_frame_hdr { 1324struct iwl3945_rx_frame_hdr {
1288 __le16 channel; 1325 __le16 channel;
@@ -1291,13 +1328,13 @@ struct iwl3945_rx_frame_hdr {
1291 u8 rate; 1328 u8 rate;
1292 __le16 len; 1329 __le16 len;
1293 u8 payload[0]; 1330 u8 payload[0];
1294} __attribute__ ((packed)); 1331} __packed;
1295 1332
1296struct iwl3945_rx_frame_end { 1333struct iwl3945_rx_frame_end {
1297 __le32 status; 1334 __le32 status;
1298 __le64 timestamp; 1335 __le64 timestamp;
1299 __le32 beacon_timestamp; 1336 __le32 beacon_timestamp;
1300} __attribute__ ((packed)); 1337} __packed;
1301 1338
1302/* 1339/*
1303 * REPLY_3945_RX = 0x1b (response only, not a command) 1340 * REPLY_3945_RX = 0x1b (response only, not a command)
@@ -1311,7 +1348,7 @@ struct iwl3945_rx_frame {
1311 struct iwl3945_rx_frame_stats stats; 1348 struct iwl3945_rx_frame_stats stats;
1312 struct iwl3945_rx_frame_hdr hdr; 1349 struct iwl3945_rx_frame_hdr hdr;
1313 struct iwl3945_rx_frame_end end; 1350 struct iwl3945_rx_frame_end end;
1314} __attribute__ ((packed)); 1351} __packed;
1315 1352
1316#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) 1353#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
1317 1354
@@ -1327,7 +1364,7 @@ struct iwl4965_rx_non_cfg_phy {
1327 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ 1364 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1328 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ 1365 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1329 u8 pad[0]; 1366 u8 pad[0];
1330} __attribute__ ((packed)); 1367} __packed;
1331 1368
1332 1369
1333#define IWL50_RX_RES_PHY_CNT 8 1370#define IWL50_RX_RES_PHY_CNT 8
@@ -1345,7 +1382,7 @@ struct iwl4965_rx_non_cfg_phy {
1345 1382
1346struct iwl5000_non_cfg_phy { 1383struct iwl5000_non_cfg_phy {
1347 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ 1384 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */
1348} __attribute__ ((packed)); 1385} __packed;
1349 1386
1350 1387
1351/* 1388/*
@@ -1365,12 +1402,12 @@ struct iwl_rx_phy_res {
1365 __le32 rate_n_flags; /* RATE_MCS_* */ 1402 __le32 rate_n_flags; /* RATE_MCS_* */
1366 __le16 byte_count; /* frame's byte-count */ 1403 __le16 byte_count; /* frame's byte-count */
1367 __le16 reserved3; 1404 __le16 reserved3;
1368} __attribute__ ((packed)); 1405} __packed;
1369 1406
1370struct iwl4965_rx_mpdu_res_start { 1407struct iwl_rx_mpdu_res_start {
1371 __le16 byte_count; 1408 __le16 byte_count;
1372 __le16 reserved; 1409 __le16 reserved;
1373} __attribute__ ((packed)); 1410} __packed;
1374 1411
1375 1412
1376/****************************************************************************** 1413/******************************************************************************
@@ -1400,18 +1437,27 @@ struct iwl4965_rx_mpdu_res_start {
1400 1437
1401/* REPLY_TX Tx flags field */ 1438/* REPLY_TX Tx flags field */
1402 1439
1403/* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it 1440/*
1441 * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
1404 * before this frame. if CTS-to-self required check 1442 * before this frame. if CTS-to-self required check
1405 * RXON_FLG_SELF_CTS_EN status. */ 1443 * RXON_FLG_SELF_CTS_EN status.
1406#define TX_CMD_FLG_RTS_CTS_MSK cpu_to_le32(1 << 0) 1444 * unused in 3945/4965, used in 5000 series and after
1445 */
1446#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
1407 1447
1408/* 1: Use Request-To-Send protocol before this frame. 1448/*
1409 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */ 1449 * 1: Use Request-To-Send protocol before this frame.
1450 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
1451 * used in 3945/4965, unused in 5000 series and after
1452 */
1410#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1) 1453#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1411 1454
1412/* 1: Transmit Clear-To-Send to self before this frame. 1455/*
1456 * 1: Transmit Clear-To-Send to self before this frame.
1413 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. 1457 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
1414 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */ 1458 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
1459 * used in 3945/4965, unused in 5000 series and after
1460 */
1415#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2) 1461#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1416 1462
1417/* 1: Expect ACK from receiving station 1463/* 1: Expect ACK from receiving station
@@ -1431,8 +1477,11 @@ struct iwl4965_rx_mpdu_res_start {
1431 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ 1477 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1432#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) 1478#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1433 1479
1434/* 1: Frame requires full Tx-Op protection. 1480/*
1435 * Set this if either RTS or CTS Tx Flag gets set. */ 1481 * 1: Frame requires full Tx-Op protection.
1482 * Set this if either RTS or CTS Tx Flag gets set.
1483 * used in 3945/4965, unused in 5000 series and after
1484 */
1436#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) 1485#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1437 1486
1438/* Tx antenna selection field; used only for 3945, reserved (0) for 4965. 1487/* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
@@ -1557,7 +1606,7 @@ struct iwl3945_tx_cmd {
1557 */ 1606 */
1558 u8 payload[0]; 1607 u8 payload[0];
1559 struct ieee80211_hdr hdr[0]; 1608 struct ieee80211_hdr hdr[0];
1560} __attribute__ ((packed)); 1609} __packed;
1561 1610
1562/* 1611/*
1563 * REPLY_TX = 0x1c (response) 1612 * REPLY_TX = 0x1c (response)
@@ -1569,7 +1618,7 @@ struct iwl3945_tx_resp {
1569 u8 rate; 1618 u8 rate;
1570 __le32 wireless_media_time; 1619 __le32 wireless_media_time;
1571 __le32 status; /* TX status */ 1620 __le32 status; /* TX status */
1572} __attribute__ ((packed)); 1621} __packed;
1573 1622
1574 1623
1575/* 1624/*
@@ -1581,7 +1630,7 @@ struct iwl_dram_scratch {
1581 u8 try_cnt; /* Tx attempts */ 1630 u8 try_cnt; /* Tx attempts */
1582 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ 1631 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1583 __le16 reserved; 1632 __le16 reserved;
1584} __attribute__ ((packed)); 1633} __packed;
1585 1634
1586struct iwl_tx_cmd { 1635struct iwl_tx_cmd {
1587 /* 1636 /*
@@ -1660,7 +1709,7 @@ struct iwl_tx_cmd {
1660 */ 1709 */
1661 u8 payload[0]; 1710 u8 payload[0];
1662 struct ieee80211_hdr hdr[0]; 1711 struct ieee80211_hdr hdr[0];
1663} __attribute__ ((packed)); 1712} __packed;
1664 1713
1665/* TX command response is sent after *3945* transmission attempts. 1714/* TX command response is sent after *3945* transmission attempts.
1666 * 1715 *
@@ -1826,7 +1875,7 @@ enum {
1826struct agg_tx_status { 1875struct agg_tx_status {
1827 __le16 status; 1876 __le16 status;
1828 __le16 sequence; 1877 __le16 sequence;
1829} __attribute__ ((packed)); 1878} __packed;
1830 1879
1831struct iwl4965_tx_resp { 1880struct iwl4965_tx_resp {
1832 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1881 u8 frame_count; /* 1 no aggregation, >1 aggregation */
@@ -1863,7 +1912,7 @@ struct iwl4965_tx_resp {
1863 __le32 status; 1912 __le32 status;
1864 struct agg_tx_status agg_status[0]; /* for each agg frame */ 1913 struct agg_tx_status agg_status[0]; /* for each agg frame */
1865 } u; 1914 } u;
1866} __attribute__ ((packed)); 1915} __packed;
1867 1916
1868/* 1917/*
1869 * definitions for initial rate index field 1918 * definitions for initial rate index field
@@ -1927,7 +1976,7 @@ struct iwl5000_tx_resp {
1927 */ 1976 */
1928 struct agg_tx_status status; /* TX status (in aggregation - 1977 struct agg_tx_status status; /* TX status (in aggregation -
1929 * status of 1st frame) */ 1978 * status of 1st frame) */
1930} __attribute__ ((packed)); 1979} __packed;
1931/* 1980/*
1932 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1981 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1933 * 1982 *
@@ -1945,7 +1994,7 @@ struct iwl_compressed_ba_resp {
1945 __le64 bitmap; 1994 __le64 bitmap;
1946 __le16 scd_flow; 1995 __le16 scd_flow;
1947 __le16 scd_ssn; 1996 __le16 scd_ssn;
1948} __attribute__ ((packed)); 1997} __packed;
1949 1998
1950/* 1999/*
1951 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) 2000 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
@@ -1958,14 +2007,14 @@ struct iwl3945_txpowertable_cmd {
1958 u8 reserved; 2007 u8 reserved;
1959 __le16 channel; 2008 __le16 channel;
1960 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 2009 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
1961} __attribute__ ((packed)); 2010} __packed;
1962 2011
1963struct iwl4965_txpowertable_cmd { 2012struct iwl4965_txpowertable_cmd {
1964 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 2013 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1965 u8 reserved; 2014 u8 reserved;
1966 __le16 channel; 2015 __le16 channel;
1967 struct iwl4965_tx_power_db tx_power; 2016 struct iwl4965_tx_power_db tx_power;
1968} __attribute__ ((packed)); 2017} __packed;
1969 2018
1970 2019
1971/** 2020/**
@@ -1987,13 +2036,13 @@ struct iwl3945_rate_scaling_info {
1987 __le16 rate_n_flags; 2036 __le16 rate_n_flags;
1988 u8 try_cnt; 2037 u8 try_cnt;
1989 u8 next_rate_index; 2038 u8 next_rate_index;
1990} __attribute__ ((packed)); 2039} __packed;
1991 2040
1992struct iwl3945_rate_scaling_cmd { 2041struct iwl3945_rate_scaling_cmd {
1993 u8 table_id; 2042 u8 table_id;
1994 u8 reserved[3]; 2043 u8 reserved[3];
1995 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; 2044 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
1996} __attribute__ ((packed)); 2045} __packed;
1997 2046
1998 2047
1999/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ 2048/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
@@ -2040,7 +2089,7 @@ struct iwl_link_qual_general_params {
2040 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 2089 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
2041 */ 2090 */
2042 u8 start_rate_index[LINK_QUAL_AC_NUM]; 2091 u8 start_rate_index[LINK_QUAL_AC_NUM];
2043} __attribute__ ((packed)); 2092} __packed;
2044 2093
2045#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 2094#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
2046#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535) 2095#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535)
@@ -2081,7 +2130,7 @@ struct iwl_link_qual_agg_params {
2081 u8 agg_frame_cnt_limit; 2130 u8 agg_frame_cnt_limit;
2082 2131
2083 __le32 reserved; 2132 __le32 reserved;
2084} __attribute__ ((packed)); 2133} __packed;
2085 2134
2086/* 2135/*
2087 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) 2136 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
@@ -2287,7 +2336,7 @@ struct iwl_link_quality_cmd {
2287 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ 2336 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
2288 } rs_table[LINK_QUAL_MAX_RETRY_NUM]; 2337 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2289 __le32 reserved2; 2338 __le32 reserved2;
2290} __attribute__ ((packed)); 2339} __packed;
2291 2340
2292/* 2341/*
2293 * BT configuration enable flags: 2342 * BT configuration enable flags:
@@ -2328,7 +2377,7 @@ struct iwl_bt_cmd {
2328 u8 reserved; 2377 u8 reserved;
2329 __le32 kill_ack_mask; 2378 __le32 kill_ack_mask;
2330 __le32 kill_cts_mask; 2379 __le32 kill_cts_mask;
2331} __attribute__ ((packed)); 2380} __packed;
2332 2381
2333/****************************************************************************** 2382/******************************************************************************
2334 * (6) 2383 * (6)
@@ -2353,7 +2402,7 @@ struct iwl_measure_channel {
2353 u8 channel; /* channel to measure */ 2402 u8 channel; /* channel to measure */
2354 u8 type; /* see enum iwl_measure_type */ 2403 u8 type; /* see enum iwl_measure_type */
2355 __le16 reserved; 2404 __le16 reserved;
2356} __attribute__ ((packed)); 2405} __packed;
2357 2406
2358/* 2407/*
2359 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) 2408 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
@@ -2372,7 +2421,7 @@ struct iwl_spectrum_cmd {
2372 __le16 channel_count; /* minimum 1, maximum 10 */ 2421 __le16 channel_count; /* minimum 1, maximum 10 */
2373 __le16 reserved3; 2422 __le16 reserved3;
2374 struct iwl_measure_channel channels[10]; 2423 struct iwl_measure_channel channels[10];
2375} __attribute__ ((packed)); 2424} __packed;
2376 2425
2377/* 2426/*
2378 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) 2427 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
@@ -2383,7 +2432,7 @@ struct iwl_spectrum_resp {
2383 __le16 status; /* 0 - command will be handled 2432 __le16 status; /* 0 - command will be handled
2384 * 1 - cannot handle (conflicts with another 2433 * 1 - cannot handle (conflicts with another
2385 * measurement) */ 2434 * measurement) */
2386} __attribute__ ((packed)); 2435} __packed;
2387 2436
2388enum iwl_measurement_state { 2437enum iwl_measurement_state {
2389 IWL_MEASUREMENT_START = 0, 2438 IWL_MEASUREMENT_START = 0,
@@ -2406,13 +2455,13 @@ enum iwl_measurement_status {
2406struct iwl_measurement_histogram { 2455struct iwl_measurement_histogram {
2407 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ 2456 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2408 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ 2457 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2409} __attribute__ ((packed)); 2458} __packed;
2410 2459
2411/* clear channel availability counters */ 2460/* clear channel availability counters */
2412struct iwl_measurement_cca_counters { 2461struct iwl_measurement_cca_counters {
2413 __le32 ofdm; 2462 __le32 ofdm;
2414 __le32 cck; 2463 __le32 cck;
2415} __attribute__ ((packed)); 2464} __packed;
2416 2465
2417enum iwl_measure_type { 2466enum iwl_measure_type {
2418 IWL_MEASURE_BASIC = (1 << 0), 2467 IWL_MEASURE_BASIC = (1 << 0),
@@ -2448,7 +2497,7 @@ struct iwl_spectrum_notification {
2448 struct iwl_measurement_histogram histogram; 2497 struct iwl_measurement_histogram histogram;
2449 __le32 stop_time; /* lower 32-bits of TSF */ 2498 __le32 stop_time; /* lower 32-bits of TSF */
2450 __le32 status; /* see iwl_measurement_status */ 2499 __le32 status; /* see iwl_measurement_status */
2451} __attribute__ ((packed)); 2500} __packed;
2452 2501
2453/****************************************************************************** 2502/******************************************************************************
2454 * (7) 2503 * (7)
@@ -2504,7 +2553,7 @@ struct iwl3945_powertable_cmd {
2504 __le32 rx_data_timeout; 2553 __le32 rx_data_timeout;
2505 __le32 tx_data_timeout; 2554 __le32 tx_data_timeout;
2506 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2555 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2507} __attribute__ ((packed)); 2556} __packed;
2508 2557
2509struct iwl_powertable_cmd { 2558struct iwl_powertable_cmd {
2510 __le16 flags; 2559 __le16 flags;
@@ -2514,7 +2563,7 @@ struct iwl_powertable_cmd {
2514 __le32 tx_data_timeout; 2563 __le32 tx_data_timeout;
2515 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2564 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2516 __le32 keep_alive_beacons; 2565 __le32 keep_alive_beacons;
2517} __attribute__ ((packed)); 2566} __packed;
2518 2567
2519/* 2568/*
2520 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) 2569 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
@@ -2527,7 +2576,7 @@ struct iwl_sleep_notification {
2527 __le32 sleep_time; 2576 __le32 sleep_time;
2528 __le32 tsf_low; 2577 __le32 tsf_low;
2529 __le32 bcon_timer; 2578 __le32 bcon_timer;
2530} __attribute__ ((packed)); 2579} __packed;
2531 2580
2532/* Sleep states. 3945 and 4965 identical. */ 2581/* Sleep states. 3945 and 4965 identical. */
2533enum { 2582enum {
@@ -2552,14 +2601,14 @@ enum {
2552#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ 2601#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
2553struct iwl_card_state_cmd { 2602struct iwl_card_state_cmd {
2554 __le32 status; /* CARD_STATE_CMD_* request new power state */ 2603 __le32 status; /* CARD_STATE_CMD_* request new power state */
2555} __attribute__ ((packed)); 2604} __packed;
2556 2605
2557/* 2606/*
2558 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) 2607 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2559 */ 2608 */
2560struct iwl_card_state_notif { 2609struct iwl_card_state_notif {
2561 __le32 flags; 2610 __le32 flags;
2562} __attribute__ ((packed)); 2611} __packed;
2563 2612
2564#define HW_CARD_DISABLED 0x01 2613#define HW_CARD_DISABLED 0x01
2565#define SW_CARD_DISABLED 0x02 2614#define SW_CARD_DISABLED 0x02
@@ -2570,14 +2619,14 @@ struct iwl_ct_kill_config {
2570 __le32 reserved; 2619 __le32 reserved;
2571 __le32 critical_temperature_M; 2620 __le32 critical_temperature_M;
2572 __le32 critical_temperature_R; 2621 __le32 critical_temperature_R;
2573} __attribute__ ((packed)); 2622} __packed;
2574 2623
2575/* 1000, and 6x00 */ 2624/* 1000, and 6x00 */
2576struct iwl_ct_kill_throttling_config { 2625struct iwl_ct_kill_throttling_config {
2577 __le32 critical_temperature_exit; 2626 __le32 critical_temperature_exit;
2578 __le32 reserved; 2627 __le32 reserved;
2579 __le32 critical_temperature_enter; 2628 __le32 critical_temperature_enter;
2580} __attribute__ ((packed)); 2629} __packed;
2581 2630
2582/****************************************************************************** 2631/******************************************************************************
2583 * (8) 2632 * (8)
@@ -2622,7 +2671,7 @@ struct iwl3945_scan_channel {
2622 struct iwl3945_tx_power tpc; 2671 struct iwl3945_tx_power tpc;
2623 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2672 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2624 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2673 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2625} __attribute__ ((packed)); 2674} __packed;
2626 2675
2627/* set number of direct probes u8 type */ 2676/* set number of direct probes u8 type */
2628#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) 2677#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
@@ -2641,7 +2690,7 @@ struct iwl_scan_channel {
2641 u8 dsp_atten; /* gain for DSP */ 2690 u8 dsp_atten; /* gain for DSP */
2642 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2691 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2643 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2692 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2644} __attribute__ ((packed)); 2693} __packed;
2645 2694
2646/* set number of direct probes __le32 type */ 2695/* set number of direct probes __le32 type */
2647#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) 2696#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
@@ -2658,7 +2707,7 @@ struct iwl_ssid_ie {
2658 u8 id; 2707 u8 id;
2659 u8 len; 2708 u8 len;
2660 u8 ssid[32]; 2709 u8 ssid[32];
2661} __attribute__ ((packed)); 2710} __packed;
2662 2711
2663#define PROBE_OPTION_MAX_3945 4 2712#define PROBE_OPTION_MAX_3945 4
2664#define PROBE_OPTION_MAX 20 2713#define PROBE_OPTION_MAX 20
@@ -2764,7 +2813,7 @@ struct iwl3945_scan_cmd {
2764 * before requesting another scan. 2813 * before requesting another scan.
2765 */ 2814 */
2766 u8 data[0]; 2815 u8 data[0];
2767} __attribute__ ((packed)); 2816} __packed;
2768 2817
2769struct iwl_scan_cmd { 2818struct iwl_scan_cmd {
2770 __le16 len; 2819 __le16 len;
@@ -2808,7 +2857,7 @@ struct iwl_scan_cmd {
2808 * before requesting another scan. 2857 * before requesting another scan.
2809 */ 2858 */
2810 u8 data[0]; 2859 u8 data[0];
2811} __attribute__ ((packed)); 2860} __packed;
2812 2861
2813/* Can abort will notify by complete notification with abort status. */ 2862/* Can abort will notify by complete notification with abort status. */
2814#define CAN_ABORT_STATUS cpu_to_le32(0x1) 2863#define CAN_ABORT_STATUS cpu_to_le32(0x1)
@@ -2820,7 +2869,7 @@ struct iwl_scan_cmd {
2820 */ 2869 */
2821struct iwl_scanreq_notification { 2870struct iwl_scanreq_notification {
2822 __le32 status; /* 1: okay, 2: cannot fulfill request */ 2871 __le32 status; /* 1: okay, 2: cannot fulfill request */
2823} __attribute__ ((packed)); 2872} __packed;
2824 2873
2825/* 2874/*
2826 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) 2875 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
@@ -2833,7 +2882,7 @@ struct iwl_scanstart_notification {
2833 u8 band; 2882 u8 band;
2834 u8 reserved[2]; 2883 u8 reserved[2];
2835 __le32 status; 2884 __le32 status;
2836} __attribute__ ((packed)); 2885} __packed;
2837 2886
2838#define SCAN_OWNER_STATUS 0x1; 2887#define SCAN_OWNER_STATUS 0x1;
2839#define MEASURE_OWNER_STATUS 0x2; 2888#define MEASURE_OWNER_STATUS 0x2;
@@ -2849,7 +2898,7 @@ struct iwl_scanresults_notification {
2849 __le32 tsf_low; 2898 __le32 tsf_low;
2850 __le32 tsf_high; 2899 __le32 tsf_high;
2851 __le32 statistics[NUMBER_OF_STATISTICS]; 2900 __le32 statistics[NUMBER_OF_STATISTICS];
2852} __attribute__ ((packed)); 2901} __packed;
2853 2902
2854/* 2903/*
2855 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) 2904 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
@@ -2861,7 +2910,7 @@ struct iwl_scancomplete_notification {
2861 u8 last_channel; 2910 u8 last_channel;
2862 __le32 tsf_low; 2911 __le32 tsf_low;
2863 __le32 tsf_high; 2912 __le32 tsf_high;
2864} __attribute__ ((packed)); 2913} __packed;
2865 2914
2866 2915
2867/****************************************************************************** 2916/******************************************************************************
@@ -2879,14 +2928,14 @@ struct iwl3945_beacon_notif {
2879 __le32 low_tsf; 2928 __le32 low_tsf;
2880 __le32 high_tsf; 2929 __le32 high_tsf;
2881 __le32 ibss_mgr_status; 2930 __le32 ibss_mgr_status;
2882} __attribute__ ((packed)); 2931} __packed;
2883 2932
2884struct iwl4965_beacon_notif { 2933struct iwl4965_beacon_notif {
2885 struct iwl4965_tx_resp beacon_notify_hdr; 2934 struct iwl4965_tx_resp beacon_notify_hdr;
2886 __le32 low_tsf; 2935 __le32 low_tsf;
2887 __le32 high_tsf; 2936 __le32 high_tsf;
2888 __le32 ibss_mgr_status; 2937 __le32 ibss_mgr_status;
2889} __attribute__ ((packed)); 2938} __packed;
2890 2939
2891/* 2940/*
2892 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2941 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
@@ -2898,7 +2947,7 @@ struct iwl3945_tx_beacon_cmd {
2898 u8 tim_size; 2947 u8 tim_size;
2899 u8 reserved1; 2948 u8 reserved1;
2900 struct ieee80211_hdr frame[0]; /* beacon frame */ 2949 struct ieee80211_hdr frame[0]; /* beacon frame */
2901} __attribute__ ((packed)); 2950} __packed;
2902 2951
2903struct iwl_tx_beacon_cmd { 2952struct iwl_tx_beacon_cmd {
2904 struct iwl_tx_cmd tx; 2953 struct iwl_tx_cmd tx;
@@ -2906,7 +2955,7 @@ struct iwl_tx_beacon_cmd {
2906 u8 tim_size; 2955 u8 tim_size;
2907 u8 reserved1; 2956 u8 reserved1;
2908 struct ieee80211_hdr frame[0]; /* beacon frame */ 2957 struct ieee80211_hdr frame[0]; /* beacon frame */
2909} __attribute__ ((packed)); 2958} __packed;
2910 2959
2911/****************************************************************************** 2960/******************************************************************************
2912 * (10) 2961 * (10)
@@ -2932,7 +2981,7 @@ struct rate_histogram {
2932 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; 2981 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2933 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; 2982 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2934 } failed; 2983 } failed;
2935} __attribute__ ((packed)); 2984} __packed;
2936 2985
2937/* statistics command response */ 2986/* statistics command response */
2938 2987
@@ -2952,7 +3001,7 @@ struct iwl39_statistics_rx_phy {
2952 __le32 rxe_frame_limit_overrun; 3001 __le32 rxe_frame_limit_overrun;
2953 __le32 sent_ack_cnt; 3002 __le32 sent_ack_cnt;
2954 __le32 sent_cts_cnt; 3003 __le32 sent_cts_cnt;
2955} __attribute__ ((packed)); 3004} __packed;
2956 3005
2957struct iwl39_statistics_rx_non_phy { 3006struct iwl39_statistics_rx_non_phy {
2958 __le32 bogus_cts; /* CTS received when not expecting CTS */ 3007 __le32 bogus_cts; /* CTS received when not expecting CTS */
@@ -2963,13 +3012,13 @@ struct iwl39_statistics_rx_non_phy {
2963 * filtering process */ 3012 * filtering process */
2964 __le32 non_channel_beacons; /* beacons with our bss id but not on 3013 __le32 non_channel_beacons; /* beacons with our bss id but not on
2965 * our serving channel */ 3014 * our serving channel */
2966} __attribute__ ((packed)); 3015} __packed;
2967 3016
2968struct iwl39_statistics_rx { 3017struct iwl39_statistics_rx {
2969 struct iwl39_statistics_rx_phy ofdm; 3018 struct iwl39_statistics_rx_phy ofdm;
2970 struct iwl39_statistics_rx_phy cck; 3019 struct iwl39_statistics_rx_phy cck;
2971 struct iwl39_statistics_rx_non_phy general; 3020 struct iwl39_statistics_rx_non_phy general;
2972} __attribute__ ((packed)); 3021} __packed;
2973 3022
2974struct iwl39_statistics_tx { 3023struct iwl39_statistics_tx {
2975 __le32 preamble_cnt; 3024 __le32 preamble_cnt;
@@ -2981,20 +3030,21 @@ struct iwl39_statistics_tx {
2981 __le32 ack_timeout; 3030 __le32 ack_timeout;
2982 __le32 expected_ack_cnt; 3031 __le32 expected_ack_cnt;
2983 __le32 actual_ack_cnt; 3032 __le32 actual_ack_cnt;
2984} __attribute__ ((packed)); 3033} __packed;
2985 3034
2986struct statistics_dbg { 3035struct statistics_dbg {
2987 __le32 burst_check; 3036 __le32 burst_check;
2988 __le32 burst_count; 3037 __le32 burst_count;
2989 __le32 reserved[4]; 3038 __le32 wait_for_silence_timeout_cnt;
2990} __attribute__ ((packed)); 3039 __le32 reserved[3];
3040} __packed;
2991 3041
2992struct iwl39_statistics_div { 3042struct iwl39_statistics_div {
2993 __le32 tx_on_a; 3043 __le32 tx_on_a;
2994 __le32 tx_on_b; 3044 __le32 tx_on_b;
2995 __le32 exec_time; 3045 __le32 exec_time;
2996 __le32 probe_time; 3046 __le32 probe_time;
2997} __attribute__ ((packed)); 3047} __packed;
2998 3048
2999struct iwl39_statistics_general { 3049struct iwl39_statistics_general {
3000 __le32 temperature; 3050 __le32 temperature;
@@ -3004,7 +3054,7 @@ struct iwl39_statistics_general {
3004 __le32 slots_idle; 3054 __le32 slots_idle;
3005 __le32 ttl_timestamp; 3055 __le32 ttl_timestamp;
3006 struct iwl39_statistics_div div; 3056 struct iwl39_statistics_div div;
3007} __attribute__ ((packed)); 3057} __packed;
3008 3058
3009struct statistics_rx_phy { 3059struct statistics_rx_phy {
3010 __le32 ina_cnt; 3060 __le32 ina_cnt;
@@ -3027,7 +3077,7 @@ struct statistics_rx_phy {
3027 __le32 mh_format_err; 3077 __le32 mh_format_err;
3028 __le32 re_acq_main_rssi_sum; 3078 __le32 re_acq_main_rssi_sum;
3029 __le32 reserved3; 3079 __le32 reserved3;
3030} __attribute__ ((packed)); 3080} __packed;
3031 3081
3032struct statistics_rx_ht_phy { 3082struct statistics_rx_ht_phy {
3033 __le32 plcp_err; 3083 __le32 plcp_err;
@@ -3040,7 +3090,7 @@ struct statistics_rx_ht_phy {
3040 __le32 agg_mpdu_cnt; 3090 __le32 agg_mpdu_cnt;
3041 __le32 agg_cnt; 3091 __le32 agg_cnt;
3042 __le32 unsupport_mcs; 3092 __le32 unsupport_mcs;
3043} __attribute__ ((packed)); 3093} __packed;
3044 3094
3045#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 3095#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
3046 3096
@@ -3075,14 +3125,28 @@ struct statistics_rx_non_phy {
3075 __le32 beacon_energy_a; 3125 __le32 beacon_energy_a;
3076 __le32 beacon_energy_b; 3126 __le32 beacon_energy_b;
3077 __le32 beacon_energy_c; 3127 __le32 beacon_energy_c;
3078} __attribute__ ((packed)); 3128} __packed;
3129
3130struct statistics_rx_non_phy_bt {
3131 struct statistics_rx_non_phy common;
3132 /* additional stats for bt */
3133 __le32 num_bt_kills;
3134 __le32 reserved[2];
3135} __packed;
3079 3136
3080struct statistics_rx { 3137struct statistics_rx {
3081 struct statistics_rx_phy ofdm; 3138 struct statistics_rx_phy ofdm;
3082 struct statistics_rx_phy cck; 3139 struct statistics_rx_phy cck;
3083 struct statistics_rx_non_phy general; 3140 struct statistics_rx_non_phy general;
3084 struct statistics_rx_ht_phy ofdm_ht; 3141 struct statistics_rx_ht_phy ofdm_ht;
3085} __attribute__ ((packed)); 3142} __packed;
3143
3144struct statistics_rx_bt {
3145 struct statistics_rx_phy ofdm;
3146 struct statistics_rx_phy cck;
3147 struct statistics_rx_non_phy_bt general;
3148 struct statistics_rx_ht_phy ofdm_ht;
3149} __packed;
3086 3150
3087/** 3151/**
3088 * struct statistics_tx_power - current tx power 3152 * struct statistics_tx_power - current tx power
@@ -3096,7 +3160,7 @@ struct statistics_tx_power {
3096 u8 ant_b; 3160 u8 ant_b;
3097 u8 ant_c; 3161 u8 ant_c;
3098 u8 reserved; 3162 u8 reserved;
3099} __attribute__ ((packed)); 3163} __packed;
3100 3164
3101struct statistics_tx_non_phy_agg { 3165struct statistics_tx_non_phy_agg {
3102 __le32 ba_timeout; 3166 __le32 ba_timeout;
@@ -3109,7 +3173,7 @@ struct statistics_tx_non_phy_agg {
3109 __le32 underrun; 3173 __le32 underrun;
3110 __le32 bt_prio_kill; 3174 __le32 bt_prio_kill;
3111 __le32 rx_ba_rsp_cnt; 3175 __le32 rx_ba_rsp_cnt;
3112} __attribute__ ((packed)); 3176} __packed;
3113 3177
3114struct statistics_tx { 3178struct statistics_tx {
3115 __le32 preamble_cnt; 3179 __le32 preamble_cnt;
@@ -3134,7 +3198,7 @@ struct statistics_tx {
3134 */ 3198 */
3135 struct statistics_tx_power tx_power; 3199 struct statistics_tx_power tx_power;
3136 __le32 reserved1; 3200 __le32 reserved1;
3137} __attribute__ ((packed)); 3201} __packed;
3138 3202
3139 3203
3140struct statistics_div { 3204struct statistics_div {
@@ -3144,9 +3208,9 @@ struct statistics_div {
3144 __le32 probe_time; 3208 __le32 probe_time;
3145 __le32 reserved1; 3209 __le32 reserved1;
3146 __le32 reserved2; 3210 __le32 reserved2;
3147} __attribute__ ((packed)); 3211} __packed;
3148 3212
3149struct statistics_general { 3213struct statistics_general_common {
3150 __le32 temperature; /* radio temperature */ 3214 __le32 temperature; /* radio temperature */
3151 __le32 temperature_m; /* for 5000 and up, this is radio voltage */ 3215 __le32 temperature_m; /* for 5000 and up, this is radio voltage */
3152 struct statistics_dbg dbg; 3216 struct statistics_dbg dbg;
@@ -3162,9 +3226,33 @@ struct statistics_general {
3162 * in order to get out of bad PHY status 3226 * in order to get out of bad PHY status
3163 */ 3227 */
3164 __le32 num_of_sos_states; 3228 __le32 num_of_sos_states;
3229} __packed;
3230
3231struct statistics_bt_activity {
3232 /* Tx statistics */
3233 __le32 hi_priority_tx_req_cnt;
3234 __le32 hi_priority_tx_denied_cnt;
3235 __le32 lo_priority_tx_req_cnt;
3236 __le32 lo_priority_tx_denied_cnt;
3237 /* Rx statistics */
3238 __le32 hi_priority_rx_req_cnt;
3239 __le32 hi_priority_rx_denied_cnt;
3240 __le32 lo_priority_rx_req_cnt;
3241 __le32 lo_priority_rx_denied_cnt;
3242} __packed;
3243
3244struct statistics_general {
3245 struct statistics_general_common common;
3165 __le32 reserved2; 3246 __le32 reserved2;
3166 __le32 reserved3; 3247 __le32 reserved3;
3167} __attribute__ ((packed)); 3248} __packed;
3249
3250struct statistics_general_bt {
3251 struct statistics_general_common common;
3252 struct statistics_bt_activity activity;
3253 __le32 reserved2;
3254 __le32 reserved3;
3255} __packed;
3168 3256
3169#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) 3257#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
3170#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) 3258#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
@@ -3189,7 +3277,7 @@ struct statistics_general {
3189#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ 3277#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
3190struct iwl_statistics_cmd { 3278struct iwl_statistics_cmd {
3191 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 3279 __le32 configuration_flags; /* IWL_STATS_CONF_* */
3192} __attribute__ ((packed)); 3280} __packed;
3193 3281
3194/* 3282/*
3195 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) 3283 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
@@ -3214,15 +3302,21 @@ struct iwl3945_notif_statistics {
3214 struct iwl39_statistics_rx rx; 3302 struct iwl39_statistics_rx rx;
3215 struct iwl39_statistics_tx tx; 3303 struct iwl39_statistics_tx tx;
3216 struct iwl39_statistics_general general; 3304 struct iwl39_statistics_general general;
3217} __attribute__ ((packed)); 3305} __packed;
3218 3306
3219struct iwl_notif_statistics { 3307struct iwl_notif_statistics {
3220 __le32 flag; 3308 __le32 flag;
3221 struct statistics_rx rx; 3309 struct statistics_rx rx;
3222 struct statistics_tx tx; 3310 struct statistics_tx tx;
3223 struct statistics_general general; 3311 struct statistics_general general;
3224} __attribute__ ((packed)); 3312} __packed;
3225 3313
3314struct iwl_bt_notif_statistics {
3315 __le32 flag;
3316 struct statistics_rx_bt rx;
3317 struct statistics_tx tx;
3318 struct statistics_general_bt general;
3319} __packed;
3226 3320
3227/* 3321/*
3228 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) 3322 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
@@ -3253,7 +3347,7 @@ struct iwl_missed_beacon_notif {
3253 __le32 total_missed_becons; 3347 __le32 total_missed_becons;
3254 __le32 num_expected_beacons; 3348 __le32 num_expected_beacons;
3255 __le32 num_recvd_beacons; 3349 __le32 num_recvd_beacons;
3256} __attribute__ ((packed)); 3350} __packed;
3257 3351
3258 3352
3259/****************************************************************************** 3353/******************************************************************************
@@ -3441,6 +3535,41 @@ struct iwl_missed_beacon_notif {
3441#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) 3535#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3442#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 3536#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3443 3537
3538/*
3539 * Additional table entries in enhance SENSITIVITY_CMD
3540 */
3541#define HD_INA_NON_SQUARE_DET_OFDM_INDEX (11)
3542#define HD_INA_NON_SQUARE_DET_CCK_INDEX (12)
3543#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX (13)
3544#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX (14)
3545#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (15)
3546#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX (16)
3547#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX (17)
3548#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX (18)
3549#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (19)
3550#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX (20)
3551#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX (21)
3552#define HD_RESERVED (22)
3553
3554/* number of entries for enhanced tbl */
3555#define ENHANCE_HD_TABLE_SIZE (23)
3556
3557/* number of additional entries for enhanced tbl */
3558#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE)
3559
3560#define HD_INA_NON_SQUARE_DET_OFDM_DATA cpu_to_le16(0)
3561#define HD_INA_NON_SQUARE_DET_CCK_DATA cpu_to_le16(0)
3562#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA cpu_to_le16(0)
3563#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(668)
3564#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
3565#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(486)
3566#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(37)
3567#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(853)
3568#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
3569#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(476)
3570#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(99)
3571
3572
3444/* Control field in struct iwl_sensitivity_cmd */ 3573/* Control field in struct iwl_sensitivity_cmd */
3445#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) 3574#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3446#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) 3575#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
@@ -3455,7 +3584,15 @@ struct iwl_missed_beacon_notif {
3455struct iwl_sensitivity_cmd { 3584struct iwl_sensitivity_cmd {
3456 __le16 control; /* always use "1" */ 3585 __le16 control; /* always use "1" */
3457 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 3586 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
3458} __attribute__ ((packed)); 3587} __packed;
3588
3589/*
3590 *
3591 */
3592struct iwl_enhance_sensitivity_cmd {
3593 __le16 control; /* always use "1" */
3594 __le16 enhance_table[ENHANCE_HD_TABLE_SIZE]; /* use HD_* as index */
3595} __packed;
3459 3596
3460 3597
3461/** 3598/**
@@ -3523,10 +3660,10 @@ enum {
3523 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, 3660 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
3524 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, 3661 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
3525 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, 3662 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
3526 IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18, 3663 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 18,
3527 IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19,
3528}; 3664};
3529 3665
3666#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3530 3667
3531#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff) 3668#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff)
3532 3669
@@ -3536,31 +3673,31 @@ struct iwl_calib_cfg_elmnt_s {
3536 __le32 send_res; 3673 __le32 send_res;
3537 __le32 apply_res; 3674 __le32 apply_res;
3538 __le32 reserved; 3675 __le32 reserved;
3539} __attribute__ ((packed)); 3676} __packed;
3540 3677
3541struct iwl_calib_cfg_status_s { 3678struct iwl_calib_cfg_status_s {
3542 struct iwl_calib_cfg_elmnt_s once; 3679 struct iwl_calib_cfg_elmnt_s once;
3543 struct iwl_calib_cfg_elmnt_s perd; 3680 struct iwl_calib_cfg_elmnt_s perd;
3544 __le32 flags; 3681 __le32 flags;
3545} __attribute__ ((packed)); 3682} __packed;
3546 3683
3547struct iwl_calib_cfg_cmd { 3684struct iwl_calib_cfg_cmd {
3548 struct iwl_calib_cfg_status_s ucd_calib_cfg; 3685 struct iwl_calib_cfg_status_s ucd_calib_cfg;
3549 struct iwl_calib_cfg_status_s drv_calib_cfg; 3686 struct iwl_calib_cfg_status_s drv_calib_cfg;
3550 __le32 reserved1; 3687 __le32 reserved1;
3551} __attribute__ ((packed)); 3688} __packed;
3552 3689
3553struct iwl_calib_hdr { 3690struct iwl_calib_hdr {
3554 u8 op_code; 3691 u8 op_code;
3555 u8 first_group; 3692 u8 first_group;
3556 u8 groups_num; 3693 u8 groups_num;
3557 u8 data_valid; 3694 u8 data_valid;
3558} __attribute__ ((packed)); 3695} __packed;
3559 3696
3560struct iwl_calib_cmd { 3697struct iwl_calib_cmd {
3561 struct iwl_calib_hdr hdr; 3698 struct iwl_calib_hdr hdr;
3562 u8 data[0]; 3699 u8 data[0];
3563} __attribute__ ((packed)); 3700} __packed;
3564 3701
3565/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ 3702/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3566struct iwl_calib_diff_gain_cmd { 3703struct iwl_calib_diff_gain_cmd {
@@ -3569,14 +3706,14 @@ struct iwl_calib_diff_gain_cmd {
3569 s8 diff_gain_b; 3706 s8 diff_gain_b;
3570 s8 diff_gain_c; 3707 s8 diff_gain_c;
3571 u8 reserved1; 3708 u8 reserved1;
3572} __attribute__ ((packed)); 3709} __packed;
3573 3710
3574struct iwl_calib_xtal_freq_cmd { 3711struct iwl_calib_xtal_freq_cmd {
3575 struct iwl_calib_hdr hdr; 3712 struct iwl_calib_hdr hdr;
3576 u8 cap_pin1; 3713 u8 cap_pin1;
3577 u8 cap_pin2; 3714 u8 cap_pin2;
3578 u8 pad[2]; 3715 u8 pad[2];
3579} __attribute__ ((packed)); 3716} __packed;
3580 3717
3581/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ 3718/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
3582struct iwl_calib_chain_noise_reset_cmd { 3719struct iwl_calib_chain_noise_reset_cmd {
@@ -3590,7 +3727,7 @@ struct iwl_calib_chain_noise_gain_cmd {
3590 u8 delta_gain_1; 3727 u8 delta_gain_1;
3591 u8 delta_gain_2; 3728 u8 delta_gain_2;
3592 u8 pad[2]; 3729 u8 pad[2];
3593} __attribute__ ((packed)); 3730} __packed;
3594 3731
3595/****************************************************************************** 3732/******************************************************************************
3596 * (12) 3733 * (12)
@@ -3613,7 +3750,7 @@ struct iwl_led_cmd {
3613 u8 on; /* # intervals on while blinking; 3750 u8 on; /* # intervals on while blinking;
3614 * "0", regardless of "off", turns LED off */ 3751 * "0", regardless of "off", turns LED off */
3615 u8 reserved; 3752 u8 reserved;
3616} __attribute__ ((packed)); 3753} __packed;
3617 3754
3618/* 3755/*
3619 * station priority table entries 3756 * station priority table entries
@@ -3749,7 +3886,7 @@ struct iwl_wimax_coex_event_entry {
3749 u8 win_medium_prio; 3886 u8 win_medium_prio;
3750 u8 reserved; 3887 u8 reserved;
3751 u8 flags; 3888 u8 flags;
3752} __attribute__ ((packed)); 3889} __packed;
3753 3890
3754/* COEX flag masks */ 3891/* COEX flag masks */
3755 3892
@@ -3766,7 +3903,7 @@ struct iwl_wimax_coex_cmd {
3766 u8 flags; 3903 u8 flags;
3767 u8 reserved[3]; 3904 u8 reserved[3];
3768 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; 3905 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
3769} __attribute__ ((packed)); 3906} __packed;
3770 3907
3771/* 3908/*
3772 * Coexistence MEDIUM NOTIFICATION 3909 * Coexistence MEDIUM NOTIFICATION
@@ -3795,7 +3932,7 @@ struct iwl_wimax_coex_cmd {
3795struct iwl_coex_medium_notification { 3932struct iwl_coex_medium_notification {
3796 __le32 status; 3933 __le32 status;
3797 __le32 events; 3934 __le32 events;
3798} __attribute__ ((packed)); 3935} __packed;
3799 3936
3800/* 3937/*
3801 * Coexistence EVENT Command 3938 * Coexistence EVENT Command
@@ -3810,11 +3947,11 @@ struct iwl_coex_event_cmd {
3810 u8 flags; 3947 u8 flags;
3811 u8 event; 3948 u8 event;
3812 __le16 reserved; 3949 __le16 reserved;
3813} __attribute__ ((packed)); 3950} __packed;
3814 3951
3815struct iwl_coex_event_resp { 3952struct iwl_coex_event_resp {
3816 __le32 status; 3953 __le32 status;
3817} __attribute__ ((packed)); 3954} __packed;
3818 3955
3819 3956
3820/****************************************************************************** 3957/******************************************************************************
@@ -3851,6 +3988,7 @@ struct iwl_rx_packet {
3851 struct iwl_sleep_notification sleep_notif; 3988 struct iwl_sleep_notification sleep_notif;
3852 struct iwl_spectrum_resp spectrum; 3989 struct iwl_spectrum_resp spectrum;
3853 struct iwl_notif_statistics stats; 3990 struct iwl_notif_statistics stats;
3991 struct iwl_bt_notif_statistics stats_bt;
3854 struct iwl_compressed_ba_resp compressed_ba; 3992 struct iwl_compressed_ba_resp compressed_ba;
3855 struct iwl_missed_beacon_notif missed_beacon; 3993 struct iwl_missed_beacon_notif missed_beacon;
3856 struct iwl_coex_medium_notification coex_medium_notif; 3994 struct iwl_coex_medium_notification coex_medium_notif;
@@ -3858,7 +3996,7 @@ struct iwl_rx_packet {
3858 __le32 status; 3996 __le32 status;
3859 u8 raw[0]; 3997 u8 raw[0];
3860 } u; 3998 } u;
3861} __attribute__ ((packed)); 3999} __packed;
3862 4000
3863int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); 4001int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
3864 4002
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5bbc5298ef96..8024d44ce4bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -141,13 +141,14 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
141} 141}
142EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); 142EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
143 143
144u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant) 144u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
145{ 145{
146 int i; 146 int i;
147 u8 ind = ant; 147 u8 ind = ant;
148
148 for (i = 0; i < RATE_ANT_NUM - 1; i++) { 149 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
149 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0; 150 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
150 if (priv->hw_params.valid_tx_ant & BIT(ind)) 151 if (valid & BIT(ind))
151 return ind; 152 return ind;
152 } 153 }
153 return ant; 154 return ant;
@@ -169,7 +170,7 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
169 struct ieee80211_hw *hw = 170 struct ieee80211_hw *hw =
170 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops); 171 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
171 if (hw == NULL) { 172 if (hw == NULL) {
172 printk(KERN_ERR "%s: Can not allocate network device\n", 173 pr_err("%s: Can not allocate network device\n",
173 cfg->name); 174 cfg->name);
174 goto out; 175 goto out;
175 } 176 }
@@ -457,7 +458,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
457 if (!sta_ht_inf->ht_supported) 458 if (!sta_ht_inf->ht_supported)
458 return 0; 459 return 0;
459 } 460 }
460#ifdef CONFIG_IWLWIFI_DEBUG 461#ifdef CONFIG_IWLWIFI_DEBUGFS
461 if (priv->disable_ht40) 462 if (priv->disable_ht40)
462 return 0; 463 return 0;
463#endif 464#endif
@@ -506,11 +507,11 @@ void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
506 } 507 }
507 508
508 beacon_int = iwl_adjust_beacon_interval(beacon_int, 509 beacon_int = iwl_adjust_beacon_interval(beacon_int,
509 priv->hw_params.max_beacon_itrvl * 1024); 510 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
510 priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int); 511 priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
511 512
512 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ 513 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
513 interval_tm = beacon_int * 1024; 514 interval_tm = beacon_int * TIME_UNIT;
514 rem = do_div(tsf, interval_tm); 515 rem = do_div(tsf, interval_tm);
515 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem); 516 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
516 517
@@ -932,9 +933,9 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
932} 933}
933EXPORT_SYMBOL(iwl_set_rxon_channel); 934EXPORT_SYMBOL(iwl_set_rxon_channel);
934 935
935static void iwl_set_flags_for_band(struct iwl_priv *priv, 936void iwl_set_flags_for_band(struct iwl_priv *priv,
936 enum ieee80211_band band, 937 enum ieee80211_band band,
937 struct ieee80211_vif *vif) 938 struct ieee80211_vif *vif)
938{ 939{
939 if (band == IEEE80211_BAND_5GHZ) { 940 if (band == IEEE80211_BAND_5GHZ) {
940 priv->staging_rxon.flags &= 941 priv->staging_rxon.flags &=
@@ -943,19 +944,17 @@ static void iwl_set_flags_for_band(struct iwl_priv *priv,
943 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 944 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
944 } else { 945 } else {
945 /* Copied from iwl_post_associate() */ 946 /* Copied from iwl_post_associate() */
946 if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) 947 if (vif && vif->bss_conf.use_short_slot)
947 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 948 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
948 else 949 else
949 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 950 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
950 951
951 if (vif && vif->type == NL80211_IFTYPE_ADHOC)
952 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
953
954 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; 952 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
955 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; 953 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
956 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; 954 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
957 } 955 }
958} 956}
957EXPORT_SYMBOL(iwl_set_flags_for_band);
959 958
960/* 959/*
961 * initialize rxon structure with default values from eeprom 960 * initialize rxon structure with default values from eeprom
@@ -1021,15 +1020,17 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
1021 /* clear both MIX and PURE40 mode flag */ 1020 /* clear both MIX and PURE40 mode flag */
1022 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | 1021 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
1023 RXON_FLG_CHANNEL_MODE_PURE_40); 1022 RXON_FLG_CHANNEL_MODE_PURE_40);
1024 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 1023
1025 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); 1024 if (vif)
1025 memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN);
1026
1026 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; 1027 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1027 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; 1028 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
1028 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff; 1029 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
1029} 1030}
1030EXPORT_SYMBOL(iwl_connection_init_rx_config); 1031EXPORT_SYMBOL(iwl_connection_init_rx_config);
1031 1032
1032static void iwl_set_rate(struct iwl_priv *priv) 1033void iwl_set_rate(struct iwl_priv *priv)
1033{ 1034{
1034 const struct ieee80211_supported_band *hw = NULL; 1035 const struct ieee80211_supported_band *hw = NULL;
1035 struct ieee80211_rate *rate; 1036 struct ieee80211_rate *rate;
@@ -1057,6 +1058,21 @@ static void iwl_set_rate(struct iwl_priv *priv)
1057 priv->staging_rxon.ofdm_basic_rates = 1058 priv->staging_rxon.ofdm_basic_rates =
1058 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 1059 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1059} 1060}
1061EXPORT_SYMBOL(iwl_set_rate);
1062
1063void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
1064{
1065 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1066 return;
1067
1068 if (priv->switch_rxon.switch_in_progress) {
1069 ieee80211_chswitch_done(priv->vif, is_success);
1070 mutex_lock(&priv->mutex);
1071 priv->switch_rxon.switch_in_progress = false;
1072 mutex_unlock(&priv->mutex);
1073 }
1074}
1075EXPORT_SYMBOL(iwl_chswitch_done);
1060 1076
1061void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1077void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1062{ 1078{
@@ -1071,11 +1087,12 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1071 priv->staging_rxon.channel = csa->channel; 1087 priv->staging_rxon.channel = csa->channel;
1072 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", 1088 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
1073 le16_to_cpu(csa->channel)); 1089 le16_to_cpu(csa->channel));
1074 } else 1090 iwl_chswitch_done(priv, true);
1091 } else {
1075 IWL_ERR(priv, "CSA notif (fail) : channel %d\n", 1092 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
1076 le16_to_cpu(csa->channel)); 1093 le16_to_cpu(csa->channel));
1077 1094 iwl_chswitch_done(priv, false);
1078 priv->switch_rxon.switch_in_progress = false; 1095 }
1079 } 1096 }
1080} 1097}
1081EXPORT_SYMBOL(iwl_rx_csa); 1098EXPORT_SYMBOL(iwl_rx_csa);
@@ -1507,130 +1524,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1507} 1524}
1508EXPORT_SYMBOL(iwl_send_statistics_request); 1525EXPORT_SYMBOL(iwl_send_statistics_request);
1509 1526
1510/**
1511 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
1512 * using sample data 100 bytes apart. If these sample points are good,
1513 * it's a pretty good bet that everything between them is good, too.
1514 */
1515static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1516{
1517 u32 val;
1518 int ret = 0;
1519 u32 errcnt = 0;
1520 u32 i;
1521
1522 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1523
1524 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1525 /* read data comes through single port, auto-incr addr */
1526 /* NOTE: Use the debugless read so we don't flood kernel log
1527 * if IWL_DL_IO is set */
1528 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1529 i + IWL49_RTC_INST_LOWER_BOUND);
1530 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1531 if (val != le32_to_cpu(*image)) {
1532 ret = -EIO;
1533 errcnt++;
1534 if (errcnt >= 3)
1535 break;
1536 }
1537 }
1538
1539 return ret;
1540}
1541
1542/**
1543 * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host,
1544 * looking at all data.
1545 */
1546static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1547 u32 len)
1548{
1549 u32 val;
1550 u32 save_len = len;
1551 int ret = 0;
1552 u32 errcnt;
1553
1554 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1555
1556 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1557 IWL49_RTC_INST_LOWER_BOUND);
1558
1559 errcnt = 0;
1560 for (; len > 0; len -= sizeof(u32), image++) {
1561 /* read data comes through single port, auto-incr addr */
1562 /* NOTE: Use the debugless read so we don't flood kernel log
1563 * if IWL_DL_IO is set */
1564 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1565 if (val != le32_to_cpu(*image)) {
1566 IWL_ERR(priv, "uCode INST section is invalid at "
1567 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1568 save_len - len, val, le32_to_cpu(*image));
1569 ret = -EIO;
1570 errcnt++;
1571 if (errcnt >= 20)
1572 break;
1573 }
1574 }
1575
1576 if (!errcnt)
1577 IWL_DEBUG_INFO(priv,
1578 "ucode image in INSTRUCTION memory is good\n");
1579
1580 return ret;
1581}
1582
1583/**
1584 * iwl_verify_ucode - determine which instruction image is in SRAM,
1585 * and verify its contents
1586 */
1587int iwl_verify_ucode(struct iwl_priv *priv)
1588{
1589 __le32 *image;
1590 u32 len;
1591 int ret;
1592
1593 /* Try bootstrap */
1594 image = (__le32 *)priv->ucode_boot.v_addr;
1595 len = priv->ucode_boot.len;
1596 ret = iwlcore_verify_inst_sparse(priv, image, len);
1597 if (!ret) {
1598 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
1599 return 0;
1600 }
1601
1602 /* Try initialize */
1603 image = (__le32 *)priv->ucode_init.v_addr;
1604 len = priv->ucode_init.len;
1605 ret = iwlcore_verify_inst_sparse(priv, image, len);
1606 if (!ret) {
1607 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
1608 return 0;
1609 }
1610
1611 /* Try runtime/protocol */
1612 image = (__le32 *)priv->ucode_code.v_addr;
1613 len = priv->ucode_code.len;
1614 ret = iwlcore_verify_inst_sparse(priv, image, len);
1615 if (!ret) {
1616 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
1617 return 0;
1618 }
1619
1620 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1621
1622 /* Since nothing seems to match, show first several data entries in
1623 * instruction SRAM, so maybe visual inspection will give a clue.
1624 * Selection of bootstrap image (vs. other images) is arbitrary. */
1625 image = (__le32 *)priv->ucode_boot.v_addr;
1626 len = priv->ucode_boot.len;
1627 ret = iwl_verify_inst_full(priv, image, len);
1628
1629 return ret;
1630}
1631EXPORT_SYMBOL(iwl_verify_ucode);
1632
1633
1634void iwl_rf_kill_ct_config(struct iwl_priv *priv) 1527void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1635{ 1528{
1636 struct iwl_ct_kill_config cmd; 1529 struct iwl_ct_kill_config cmd;
@@ -1855,6 +1748,37 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv)
1855 iwlcore_commit_rxon(priv); 1748 iwlcore_commit_rxon(priv);
1856} 1749}
1857 1750
1751static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1752{
1753 struct iwl_priv *priv = hw->priv;
1754 unsigned long flags;
1755 __le64 timestamp;
1756
1757 IWL_DEBUG_MAC80211(priv, "enter\n");
1758
1759 if (!iwl_is_ready_rf(priv)) {
1760 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1761 return -EIO;
1762 }
1763
1764 spin_lock_irqsave(&priv->lock, flags);
1765
1766 if (priv->ibss_beacon)
1767 dev_kfree_skb(priv->ibss_beacon);
1768
1769 priv->ibss_beacon = skb;
1770
1771 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
1772 priv->timestamp = le64_to_cpu(timestamp);
1773
1774 IWL_DEBUG_MAC80211(priv, "leave\n");
1775 spin_unlock_irqrestore(&priv->lock, flags);
1776
1777 priv->cfg->ops->lib->post_associate(priv, priv->vif);
1778
1779 return 0;
1780}
1781
1858void iwl_bss_info_changed(struct ieee80211_hw *hw, 1782void iwl_bss_info_changed(struct ieee80211_hw *hw,
1859 struct ieee80211_vif *vif, 1783 struct ieee80211_vif *vif,
1860 struct ieee80211_bss_conf *bss_conf, 1784 struct ieee80211_bss_conf *bss_conf,
@@ -1870,6 +1794,15 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
1870 1794
1871 mutex_lock(&priv->mutex); 1795 mutex_lock(&priv->mutex);
1872 1796
1797 if (changes & BSS_CHANGED_QOS) {
1798 unsigned long flags;
1799
1800 spin_lock_irqsave(&priv->lock, flags);
1801 priv->qos_data.qos_active = bss_conf->qos;
1802 iwl_update_qos(priv);
1803 spin_unlock_irqrestore(&priv->lock, flags);
1804 }
1805
1873 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) { 1806 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
1874 dev_kfree_skb(priv->ibss_beacon); 1807 dev_kfree_skb(priv->ibss_beacon);
1875 priv->ibss_beacon = ieee80211_beacon_get(hw, vif); 1808 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
@@ -2012,38 +1945,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2012} 1945}
2013EXPORT_SYMBOL(iwl_bss_info_changed); 1946EXPORT_SYMBOL(iwl_bss_info_changed);
2014 1947
2015int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2016{
2017 struct iwl_priv *priv = hw->priv;
2018 unsigned long flags;
2019 __le64 timestamp;
2020
2021 IWL_DEBUG_MAC80211(priv, "enter\n");
2022
2023 if (!iwl_is_ready_rf(priv)) {
2024 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2025 return -EIO;
2026 }
2027
2028 spin_lock_irqsave(&priv->lock, flags);
2029
2030 if (priv->ibss_beacon)
2031 dev_kfree_skb(priv->ibss_beacon);
2032
2033 priv->ibss_beacon = skb;
2034
2035 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2036 priv->timestamp = le64_to_cpu(timestamp);
2037
2038 IWL_DEBUG_MAC80211(priv, "leave\n");
2039 spin_unlock_irqrestore(&priv->lock, flags);
2040
2041 priv->cfg->ops->lib->post_associate(priv, priv->vif);
2042
2043 return 0;
2044}
2045EXPORT_SYMBOL(iwl_mac_beacon_update);
2046
2047static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif) 1948static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
2048{ 1949{
2049 iwl_connection_init_rx_config(priv, vif); 1950 iwl_connection_init_rx_config(priv, vif);
@@ -2051,8 +1952,6 @@ static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
2051 if (priv->cfg->ops->hcmd->set_rxon_chain) 1952 if (priv->cfg->ops->hcmd->set_rxon_chain)
2052 priv->cfg->ops->hcmd->set_rxon_chain(priv); 1953 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2053 1954
2054 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2055
2056 return iwlcore_commit_rxon(priv); 1955 return iwlcore_commit_rxon(priv);
2057} 1956}
2058 1957
@@ -2061,7 +1960,8 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2061 struct iwl_priv *priv = hw->priv; 1960 struct iwl_priv *priv = hw->priv;
2062 int err = 0; 1961 int err = 0;
2063 1962
2064 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type); 1963 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1964 vif->type, vif->addr);
2065 1965
2066 mutex_lock(&priv->mutex); 1966 mutex_lock(&priv->mutex);
2067 1967
@@ -2079,9 +1979,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2079 priv->vif = vif; 1979 priv->vif = vif;
2080 priv->iw_mode = vif->type; 1980 priv->iw_mode = vif->type;
2081 1981
2082 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2083 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
2084
2085 err = iwl_set_mode(priv, vif); 1982 err = iwl_set_mode(priv, vif);
2086 if (err) 1983 if (err)
2087 goto out_err; 1984 goto out_err;
@@ -2115,6 +2012,11 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2115 } 2012 }
2116 if (priv->vif == vif) { 2013 if (priv->vif == vif) {
2117 priv->vif = NULL; 2014 priv->vif = NULL;
2015 if (priv->scan_vif == vif) {
2016 ieee80211_scan_completed(priv->hw, true);
2017 priv->scan_vif = NULL;
2018 priv->scan_request = NULL;
2019 }
2118 memset(priv->bssid, 0, ETH_ALEN); 2020 memset(priv->bssid, 0, ETH_ALEN);
2119 } 2021 }
2120 mutex_unlock(&priv->mutex); 2022 mutex_unlock(&priv->mutex);
@@ -2215,22 +2117,10 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2215 2117
2216 iwl_set_flags_for_band(priv, conf->channel->band, priv->vif); 2118 iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
2217 spin_unlock_irqrestore(&priv->lock, flags); 2119 spin_unlock_irqrestore(&priv->lock, flags);
2218 if (iwl_is_associated(priv) && 2120
2219 (le16_to_cpu(priv->active_rxon.channel) != ch) && 2121 if (priv->cfg->ops->lib->update_bcast_station)
2220 priv->cfg->ops->lib->set_channel_switch) { 2122 ret = priv->cfg->ops->lib->update_bcast_station(priv);
2221 iwl_set_rate(priv); 2123
2222 /*
2223 * at this point, staging_rxon has the
2224 * configuration for channel switch
2225 */
2226 ret = priv->cfg->ops->lib->set_channel_switch(priv,
2227 ch);
2228 if (!ret) {
2229 iwl_print_rx_config_cmd(priv);
2230 goto out;
2231 }
2232 priv->switch_rxon.switch_in_progress = false;
2233 }
2234 set_ch_out: 2124 set_ch_out:
2235 /* The list of supported rates and rate mask can be different 2125 /* The list of supported rates and rate mask can be different
2236 * for each band; since the band may have changed, reset 2126 * for each band; since the band may have changed, reset
@@ -2252,15 +2142,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2252 iwl_set_tx_power(priv, conf->power_level, false); 2142 iwl_set_tx_power(priv, conf->power_level, false);
2253 } 2143 }
2254 2144
2255 if (changed & IEEE80211_CONF_CHANGE_QOS) {
2256 bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
2257
2258 spin_lock_irqsave(&priv->lock, flags);
2259 priv->qos_data.qos_active = qos_active;
2260 iwl_update_qos(priv);
2261 spin_unlock_irqrestore(&priv->lock, flags);
2262 }
2263
2264 if (!iwl_is_ready(priv)) { 2145 if (!iwl_is_ready(priv)) {
2265 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2146 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2266 goto out; 2147 goto out;
@@ -2588,7 +2469,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
2588EXPORT_SYMBOL(iwl_update_stats); 2469EXPORT_SYMBOL(iwl_update_stats);
2589#endif 2470#endif
2590 2471
2591const static char *get_csr_string(int cmd) 2472static const char *get_csr_string(int cmd)
2592{ 2473{
2593 switch (cmd) { 2474 switch (cmd) {
2594 IWL_CMD(CSR_HW_IF_CONFIG_REG); 2475 IWL_CMD(CSR_HW_IF_CONFIG_REG);
@@ -2659,7 +2540,7 @@ void iwl_dump_csr(struct iwl_priv *priv)
2659} 2540}
2660EXPORT_SYMBOL(iwl_dump_csr); 2541EXPORT_SYMBOL(iwl_dump_csr);
2661 2542
2662const static char *get_fh_string(int cmd) 2543static const char *get_fh_string(int cmd)
2663{ 2544{
2664 switch (cmd) { 2545 switch (cmd) {
2665 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); 2546 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
@@ -2745,7 +2626,7 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
2745} 2626}
2746 2627
2747 2628
2748int iwl_force_reset(struct iwl_priv *priv, int mode) 2629int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2749{ 2630{
2750 struct iwl_force_reset *force_reset; 2631 struct iwl_force_reset *force_reset;
2751 2632
@@ -2758,12 +2639,14 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
2758 } 2639 }
2759 force_reset = &priv->force_reset[mode]; 2640 force_reset = &priv->force_reset[mode];
2760 force_reset->reset_request_count++; 2641 force_reset->reset_request_count++;
2761 if (force_reset->last_force_reset_jiffies && 2642 if (!external) {
2762 time_after(force_reset->last_force_reset_jiffies + 2643 if (force_reset->last_force_reset_jiffies &&
2763 force_reset->reset_duration, jiffies)) { 2644 time_after(force_reset->last_force_reset_jiffies +
2764 IWL_DEBUG_INFO(priv, "force reset rejected\n"); 2645 force_reset->reset_duration, jiffies)) {
2765 force_reset->reset_reject_count++; 2646 IWL_DEBUG_INFO(priv, "force reset rejected\n");
2766 return -EAGAIN; 2647 force_reset->reset_reject_count++;
2648 return -EAGAIN;
2649 }
2767 } 2650 }
2768 force_reset->reset_success_count++; 2651 force_reset->reset_success_count++;
2769 force_reset->last_force_reset_jiffies = jiffies; 2652 force_reset->last_force_reset_jiffies = jiffies;
@@ -2773,6 +2656,19 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
2773 iwl_force_rf_reset(priv); 2656 iwl_force_rf_reset(priv);
2774 break; 2657 break;
2775 case IWL_FW_RESET: 2658 case IWL_FW_RESET:
2659 /*
2660 * if the request is from external(ex: debugfs),
2661 * then always perform the request in regardless the module
2662 * parameter setting
2663 * if the request is from internal (uCode error or driver
2664 * detect failure), then fw_restart module parameter
2665 * need to be check before performing firmware reload
2666 */
2667 if (!external && !priv->cfg->mod_params->restart_fw) {
2668 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
2669 "module parameter setting\n");
2670 break;
2671 }
2776 IWL_ERR(priv, "On demand firmware reload\n"); 2672 IWL_ERR(priv, "On demand firmware reload\n");
2777 /* Set the FW error flag -- cleared on iwl_down */ 2673 /* Set the FW error flag -- cleared on iwl_down */
2778 set_bit(STATUS_FW_ERROR, &priv->status); 2674 set_bit(STATUS_FW_ERROR, &priv->status);
@@ -2831,7 +2727,7 @@ static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2831 "queue %d stuck %d time. Fw reload.\n", 2727 "queue %d stuck %d time. Fw reload.\n",
2832 q->id, q->repeat_same_read_ptr); 2728 q->id, q->repeat_same_read_ptr);
2833 q->repeat_same_read_ptr = 0; 2729 q->repeat_same_read_ptr = 0;
2834 iwl_force_reset(priv, IWL_FW_RESET); 2730 iwl_force_reset(priv, IWL_FW_RESET, false);
2835 } else { 2731 } else {
2836 q->repeat_same_read_ptr++; 2732 q->repeat_same_read_ptr++;
2837 IWL_DEBUG_RADIO(priv, 2733 IWL_DEBUG_RADIO(priv,
@@ -2881,6 +2777,61 @@ void iwl_bg_monitor_recover(unsigned long data)
2881} 2777}
2882EXPORT_SYMBOL(iwl_bg_monitor_recover); 2778EXPORT_SYMBOL(iwl_bg_monitor_recover);
2883 2779
2780
2781/*
2782 * extended beacon time format
2783 * time in usec will be changed into a 32-bit value in extended:internal format
2784 * the extended part is the beacon counts
2785 * the internal part is the time in usec within one beacon interval
2786 */
2787u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
2788{
2789 u32 quot;
2790 u32 rem;
2791 u32 interval = beacon_interval * TIME_UNIT;
2792
2793 if (!interval || !usec)
2794 return 0;
2795
2796 quot = (usec / interval) &
2797 (iwl_beacon_time_mask_high(priv,
2798 priv->hw_params.beacon_time_tsf_bits) >>
2799 priv->hw_params.beacon_time_tsf_bits);
2800 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
2801 priv->hw_params.beacon_time_tsf_bits);
2802
2803 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
2804}
2805EXPORT_SYMBOL(iwl_usecs_to_beacons);
2806
2807/* base is usually what we get from ucode with each received frame,
2808 * the same as HW timer counter counting down
2809 */
2810__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
2811 u32 addon, u32 beacon_interval)
2812{
2813 u32 base_low = base & iwl_beacon_time_mask_low(priv,
2814 priv->hw_params.beacon_time_tsf_bits);
2815 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
2816 priv->hw_params.beacon_time_tsf_bits);
2817 u32 interval = beacon_interval * TIME_UNIT;
2818 u32 res = (base & iwl_beacon_time_mask_high(priv,
2819 priv->hw_params.beacon_time_tsf_bits)) +
2820 (addon & iwl_beacon_time_mask_high(priv,
2821 priv->hw_params.beacon_time_tsf_bits));
2822
2823 if (base_low > addon_low)
2824 res += base_low - addon_low;
2825 else if (base_low < addon_low) {
2826 res += interval + base_low - addon_low;
2827 res += (1 << priv->hw_params.beacon_time_tsf_bits);
2828 } else
2829 res += (1 << priv->hw_params.beacon_time_tsf_bits);
2830
2831 return cpu_to_le32(res);
2832}
2833EXPORT_SYMBOL(iwl_add_beacon_time);
2834
2884#ifdef CONFIG_PM 2835#ifdef CONFIG_PM
2885 2836
2886int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) 2837int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -2908,6 +2859,7 @@ int iwl_pci_resume(struct pci_dev *pdev)
2908{ 2859{
2909 struct iwl_priv *priv = pci_get_drvdata(pdev); 2860 struct iwl_priv *priv = pci_get_drvdata(pdev);
2910 int ret; 2861 int ret;
2862 bool hw_rfkill = false;
2911 2863
2912 /* 2864 /*
2913 * We disable the RETRY_TIMEOUT register (0x41) to keep 2865 * We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -2922,6 +2874,17 @@ int iwl_pci_resume(struct pci_dev *pdev)
2922 pci_restore_state(pdev); 2874 pci_restore_state(pdev);
2923 iwl_enable_interrupts(priv); 2875 iwl_enable_interrupts(priv);
2924 2876
2877 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2878 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2879 hw_rfkill = true;
2880
2881 if (hw_rfkill)
2882 set_bit(STATUS_RF_KILL_HW, &priv->status);
2883 else
2884 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2885
2886 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2887
2925 return 0; 2888 return 0;
2926} 2889}
2927EXPORT_SYMBOL(iwl_pci_resume); 2890EXPORT_SYMBOL(iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 31775bd9c361..e9d23f2f869d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -79,6 +79,8 @@ struct iwl_cmd;
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ 79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg) 80 .driver_data = (kernel_ulong_t)&(cfg)
81 81
82#define TIME_UNIT 1024
83
82#define IWL_SKU_G 0x1 84#define IWL_SKU_G 0x1
83#define IWL_SKU_A 0x2 85#define IWL_SKU_A 0x2
84#define IWL_SKU_N 0x8 86#define IWL_SKU_N 0x8
@@ -123,6 +125,8 @@ struct iwl_debugfs_ops {
123 size_t count, loff_t *ppos); 125 size_t count, loff_t *ppos);
124 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf, 126 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
125 size_t count, loff_t *ppos); 127 size_t count, loff_t *ppos);
128 ssize_t (*bt_stats_read)(struct file *file, char __user *user_buf,
129 size_t count, loff_t *ppos);
126}; 130};
127 131
128struct iwl_temp_ops { 132struct iwl_temp_ops {
@@ -173,7 +177,8 @@ struct iwl_lib_ops {
173 void (*dump_nic_error_log)(struct iwl_priv *priv); 177 void (*dump_nic_error_log)(struct iwl_priv *priv);
174 void (*dump_csr)(struct iwl_priv *priv); 178 void (*dump_csr)(struct iwl_priv *priv);
175 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display); 179 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
176 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel); 180 int (*set_channel_switch)(struct iwl_priv *priv,
181 struct ieee80211_channel_switch *ch_switch);
177 /* power management */ 182 /* power management */
178 struct iwl_apm_ops apm_ops; 183 struct iwl_apm_ops apm_ops;
179 184
@@ -193,6 +198,7 @@ struct iwl_lib_ops {
193 /* station management */ 198 /* station management */
194 int (*manage_ibss_station)(struct iwl_priv *priv, 199 int (*manage_ibss_station)(struct iwl_priv *priv,
195 struct ieee80211_vif *vif, bool add); 200 struct ieee80211_vif *vif, bool add);
201 int (*update_bcast_station)(struct iwl_priv *priv);
196 /* recover from tx queue stall */ 202 /* recover from tx queue stall */
197 void (*recover_from_tx_stall)(unsigned long data); 203 void (*recover_from_tx_stall)(unsigned long data);
198 /* check for plcp health */ 204 /* check for plcp health */
@@ -201,6 +207,9 @@ struct iwl_lib_ops {
201 /* check for ack health */ 207 /* check for ack health */
202 bool (*check_ack_health)(struct iwl_priv *priv, 208 bool (*check_ack_health)(struct iwl_priv *priv,
203 struct iwl_rx_packet *pkt); 209 struct iwl_rx_packet *pkt);
210 int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
211 void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
212
204 struct iwl_debugfs_ops debugfs_ops; 213 struct iwl_debugfs_ops debugfs_ops;
205}; 214};
206 215
@@ -325,7 +334,10 @@ struct iwl_cfg {
325 const bool ucode_tracing; 334 const bool ucode_tracing;
326 const bool sensitivity_calib_by_driver; 335 const bool sensitivity_calib_by_driver;
327 const bool chain_noise_calib_by_driver; 336 const bool chain_noise_calib_by_driver;
328 u8 scan_antennas[IEEE80211_NUM_BANDS]; 337 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
338 u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
339 const bool need_dc_calib;
340 const bool bt_statistics;
329}; 341};
330 342
331/*************************** 343/***************************
@@ -343,6 +355,9 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv);
343int iwl_full_rxon_required(struct iwl_priv *priv); 355int iwl_full_rxon_required(struct iwl_priv *priv);
344void iwl_set_rxon_chain(struct iwl_priv *priv); 356void iwl_set_rxon_chain(struct iwl_priv *priv);
345int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch); 357int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
358void iwl_set_flags_for_band(struct iwl_priv *priv,
359 enum ieee80211_band band,
360 struct ieee80211_vif *vif);
346u8 iwl_get_single_channel_number(struct iwl_priv *priv, 361u8 iwl_get_single_channel_number(struct iwl_priv *priv,
347 enum ieee80211_band band); 362 enum ieee80211_band band);
348void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); 363void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
@@ -350,6 +365,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
350 struct ieee80211_sta_ht_cap *sta_ht_inf); 365 struct ieee80211_sta_ht_cap *sta_ht_inf);
351void iwl_connection_init_rx_config(struct iwl_priv *priv, 366void iwl_connection_init_rx_config(struct iwl_priv *priv,
352 struct ieee80211_vif *vif); 367 struct ieee80211_vif *vif);
368void iwl_set_rate(struct iwl_priv *priv);
353int iwl_set_decrypted_flag(struct iwl_priv *priv, 369int iwl_set_decrypted_flag(struct iwl_priv *priv,
354 struct ieee80211_hdr *hdr, 370 struct ieee80211_hdr *hdr,
355 u32 decrypt_res, 371 u32 decrypt_res,
@@ -364,7 +380,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
364 struct ieee80211_vif *vif, 380 struct ieee80211_vif *vif,
365 struct ieee80211_bss_conf *bss_conf, 381 struct ieee80211_bss_conf *bss_conf,
366 u32 changes); 382 u32 changes);
367int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
368int iwl_commit_rxon(struct iwl_priv *priv); 383int iwl_commit_rxon(struct iwl_priv *priv);
369int iwl_mac_add_interface(struct ieee80211_hw *hw, 384int iwl_mac_add_interface(struct ieee80211_hw *hw,
370 struct ieee80211_vif *vif); 385 struct ieee80211_vif *vif);
@@ -447,20 +462,11 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
447int iwl_rx_queue_space(const struct iwl_rx_queue *q); 462int iwl_rx_queue_space(const struct iwl_rx_queue *q);
448void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 463void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
449/* Handlers */ 464/* Handlers */
450void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
451 struct iwl_rx_mem_buffer *rxb);
452void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 465void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
453 struct iwl_rx_mem_buffer *rxb); 466 struct iwl_rx_mem_buffer *rxb);
454bool iwl_good_plcp_health(struct iwl_priv *priv,
455 struct iwl_rx_packet *pkt);
456bool iwl_good_ack_health(struct iwl_priv *priv,
457 struct iwl_rx_packet *pkt);
458void iwl_recover_from_statistics(struct iwl_priv *priv, 467void iwl_recover_from_statistics(struct iwl_priv *priv,
459 struct iwl_rx_packet *pkt); 468 struct iwl_rx_packet *pkt);
460void iwl_rx_statistics(struct iwl_priv *priv, 469void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
461 struct iwl_rx_mem_buffer *rxb);
462void iwl_reply_statistics(struct iwl_priv *priv,
463 struct iwl_rx_mem_buffer *rxb);
464void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 470void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
465 471
466/* TX helpers */ 472/* TX helpers */
@@ -474,8 +480,6 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
474 dma_addr_t addr, u16 len, u8 reset, u8 pad); 480 dma_addr_t addr, u16 len, u8 reset, u8 pad);
475int iwl_hw_tx_queue_init(struct iwl_priv *priv, 481int iwl_hw_tx_queue_init(struct iwl_priv *priv,
476 struct iwl_tx_queue *txq); 482 struct iwl_tx_queue *txq);
477void iwl_free_tfds_in_queue(struct iwl_priv *priv,
478 int sta_id, int tid, int freed);
479void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 483void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
480int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 484int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
481 int slots_num, u32 txq_id); 485 int slots_num, u32 txq_id);
@@ -495,7 +499,7 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
495 499
496u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv); 500u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
497 501
498u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx); 502u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
499 503
500static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) 504static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
501{ 505{
@@ -526,9 +530,9 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
526 struct cfg80211_scan_request *req); 530 struct cfg80211_scan_request *req);
527void iwl_bg_start_internal_scan(struct work_struct *work); 531void iwl_bg_start_internal_scan(struct work_struct *work);
528void iwl_internal_short_hw_scan(struct iwl_priv *priv); 532void iwl_internal_short_hw_scan(struct iwl_priv *priv);
529int iwl_force_reset(struct iwl_priv *priv, int mode); 533int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
530u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, 534u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
531 const u8 *ie, int ie_len, int left); 535 const u8 *ta, const u8 *ie, int ie_len, int left);
532void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); 536void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
533u16 iwl_get_active_dwell_time(struct iwl_priv *priv, 537u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
534 enum ieee80211_band band, 538 enum ieee80211_band band,
@@ -595,6 +599,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
595} 599}
596 600
597void iwl_bg_monitor_recover(unsigned long data); 601void iwl_bg_monitor_recover(unsigned long data);
602u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
603__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
604 u32 addon, u32 beacon_interval);
598 605
599#ifdef CONFIG_PM 606#ifdef CONFIG_PM
600int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); 607int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -693,7 +700,6 @@ extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
693extern void iwl_send_bt_config(struct iwl_priv *priv); 700extern void iwl_send_bt_config(struct iwl_priv *priv);
694extern int iwl_send_statistics_request(struct iwl_priv *priv, 701extern int iwl_send_statistics_request(struct iwl_priv *priv,
695 u8 flags, bool clear); 702 u8 flags, bool clear);
696extern int iwl_verify_ucode(struct iwl_priv *priv);
697extern int iwl_send_lq_cmd(struct iwl_priv *priv, 703extern int iwl_send_lq_cmd(struct iwl_priv *priv,
698 struct iwl_link_quality_cmd *lq, u8 flags, bool init); 704 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
699void iwl_apm_stop(struct iwl_priv *priv); 705void iwl_apm_stop(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 254c35ae8b38..ecf98e7ac4ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -298,6 +298,7 @@
298#define CSR_HW_REV_TYPE_1000 (0x0000060) 298#define CSR_HW_REV_TYPE_1000 (0x0000060)
299#define CSR_HW_REV_TYPE_6x00 (0x0000070) 299#define CSR_HW_REV_TYPE_6x00 (0x0000070)
300#define CSR_HW_REV_TYPE_6x50 (0x0000080) 300#define CSR_HW_REV_TYPE_6x50 (0x0000080)
301#define CSR_HW_REV_TYPE_6x50g2 (0x0000084)
301#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0) 302#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
302#define CSR_HW_REV_TYPE_NONE (0x00000F0) 303#define CSR_HW_REV_TYPE_NONE (0x00000F0)
303 304
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 9659c5d01df9..e96a1bb12783 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,27 +106,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
106 .open = iwl_dbgfs_open_file_generic, \ 106 .open = iwl_dbgfs_open_file_generic, \
107}; 107};
108 108
109int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
110{
111 int p = 0;
112
113 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
114 le32_to_cpu(priv->statistics.flag));
115 if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
116 p += scnprintf(buf + p, bufsz - p,
117 "\tStatistics have been cleared\n");
118 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
119 (le32_to_cpu(priv->statistics.flag) &
120 UCODE_STATISTICS_FREQUENCY_MSK)
121 ? "2.4 GHz" : "5.2 GHz");
122 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
123 (le32_to_cpu(priv->statistics.flag) &
124 UCODE_STATISTICS_NARROW_BAND_MSK)
125 ? "enabled" : "disabled");
126 return p;
127}
128EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
129
130static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file, 109static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
131 char __user *user_buf, 110 char __user *user_buf,
132 size_t count, loff_t *ppos) { 111 size_t count, loff_t *ppos) {
@@ -330,45 +309,35 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
330 309
331 for (i = 0; i < max_sta; i++) { 310 for (i = 0; i < max_sta; i++) {
332 station = &priv->stations[i]; 311 station = &priv->stations[i];
333 if (station->used) { 312 if (!station->used)
334 pos += scnprintf(buf + pos, bufsz - pos, 313 continue;
335 "station %d:\ngeneral data:\n", i+1); 314 pos += scnprintf(buf + pos, bufsz - pos,
336 pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n", 315 "station %d - addr: %pM, flags: %#x\n",
337 station->sta.sta.sta_id); 316 i, station->sta.sta.addr,
338 pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n", 317 station->sta.station_flags_msk);
339 station->sta.mode); 318 pos += scnprintf(buf + pos, bufsz - pos,
340 pos += scnprintf(buf + pos, bufsz - pos, 319 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
341 "flags: 0x%x\n", 320 pos += scnprintf(buf + pos, bufsz - pos,
342 station->sta.station_flags_msk); 321 "start_idx\tbitmap\t\t\trate_n_flags\n");
343 pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n");
344 pos += scnprintf(buf + pos, bufsz - pos,
345 "seq_num\t\ttxq_id");
346 pos += scnprintf(buf + pos, bufsz - pos,
347 "\tframe_count\twait_for_ba\t");
348 pos += scnprintf(buf + pos, bufsz - pos,
349 "start_idx\tbitmap0\t");
350 pos += scnprintf(buf + pos, bufsz - pos,
351 "bitmap1\trate_n_flags");
352 pos += scnprintf(buf + pos, bufsz - pos, "\n");
353 322
354 for (j = 0; j < MAX_TID_COUNT; j++) { 323 for (j = 0; j < MAX_TID_COUNT; j++) {
355 pos += scnprintf(buf + pos, bufsz - pos, 324 pos += scnprintf(buf + pos, bufsz - pos,
356 "[%d]:\t\t%u", j, 325 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
357 station->tid[j].seq_number); 326 j, station->tid[j].seq_number,
358 pos += scnprintf(buf + pos, bufsz - pos, 327 station->tid[j].agg.txq_id,
359 "\t%u\t\t%u\t\t%u\t\t", 328 station->tid[j].agg.frame_count,
360 station->tid[j].agg.txq_id, 329 station->tid[j].tfds_in_queue,
361 station->tid[j].agg.frame_count, 330 station->tid[j].agg.start_idx,
362 station->tid[j].agg.wait_for_ba); 331 station->tid[j].agg.bitmap,
332 station->tid[j].agg.rate_n_flags);
333
334 if (station->tid[j].agg.wait_for_ba)
363 pos += scnprintf(buf + pos, bufsz - pos, 335 pos += scnprintf(buf + pos, bufsz - pos,
364 "%u\t%llu\t%u", 336 " - waitforba");
365 station->tid[j].agg.start_idx,
366 (unsigned long long)station->tid[j].agg.bitmap,
367 station->tid[j].agg.rate_n_flags);
368 pos += scnprintf(buf + pos, bufsz - pos, "\n");
369 }
370 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 337 pos += scnprintf(buf + pos, bufsz - pos, "\n");
371 } 338 }
339
340 pos += scnprintf(buf + pos, bufsz - pos, "\n");
372 } 341 }
373 342
374 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 343 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1049,8 +1018,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1049 rxq->write); 1018 rxq->write);
1050 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", 1019 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1051 rxq->free_count); 1020 rxq->free_count);
1052 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", 1021 if (rxq->rb_stts) {
1022 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1053 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); 1023 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1024 } else {
1025 pos += scnprintf(buf + pos, bufsz - pos,
1026 "closed_rb_num: Not Allocated\n");
1027 }
1054 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1028 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1055} 1029}
1056 1030
@@ -1293,7 +1267,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
1293 char __user *user_buf, 1267 char __user *user_buf,
1294 size_t count, loff_t *ppos) { 1268 size_t count, loff_t *ppos) {
1295 1269
1296 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1270 struct iwl_priv *priv = file->private_data;
1297 int pos = 0; 1271 int pos = 0;
1298 char buf[128]; 1272 char buf[128];
1299 const size_t bufsz = sizeof(buf); 1273 const size_t bufsz = sizeof(buf);
@@ -1343,7 +1317,7 @@ static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
1343 char __user *user_buf, 1317 char __user *user_buf,
1344 size_t count, loff_t *ppos) { 1318 size_t count, loff_t *ppos) {
1345 1319
1346 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1320 struct iwl_priv *priv = file->private_data;
1347 int len = 0; 1321 int len = 0;
1348 char buf[20]; 1322 char buf[20];
1349 1323
@@ -1355,7 +1329,7 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
1355 char __user *user_buf, 1329 char __user *user_buf,
1356 size_t count, loff_t *ppos) { 1330 size_t count, loff_t *ppos) {
1357 1331
1358 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1332 struct iwl_priv *priv = file->private_data;
1359 int len = 0; 1333 int len = 0;
1360 char buf[20]; 1334 char buf[20];
1361 1335
@@ -1368,7 +1342,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1368 char __user *user_buf, 1342 char __user *user_buf,
1369 size_t count, loff_t *ppos) 1343 size_t count, loff_t *ppos)
1370{ 1344{
1371 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1345 struct iwl_priv *priv = file->private_data;
1372 char *buf; 1346 char *buf;
1373 int pos = 0; 1347 int pos = 0;
1374 ssize_t ret = -EFAULT; 1348 ssize_t ret = -EFAULT;
@@ -1430,7 +1404,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
1430 char __user *user_buf, 1404 char __user *user_buf,
1431 size_t count, loff_t *ppos) { 1405 size_t count, loff_t *ppos) {
1432 1406
1433 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1407 struct iwl_priv *priv = file->private_data;
1434 int pos = 0; 1408 int pos = 0;
1435 char buf[12]; 1409 char buf[12];
1436 const size_t bufsz = sizeof(buf); 1410 const size_t bufsz = sizeof(buf);
@@ -1456,10 +1430,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
1456 return -EFAULT; 1430 return -EFAULT;
1457 if (sscanf(buf, "%d", &plcp) != 1) 1431 if (sscanf(buf, "%d", &plcp) != 1)
1458 return -EINVAL; 1432 return -EINVAL;
1459 if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || 1433 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
1460 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) 1434 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
1461 priv->cfg->plcp_delta_threshold = 1435 priv->cfg->plcp_delta_threshold =
1462 IWL_MAX_PLCP_ERR_THRESHOLD_DEF; 1436 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
1463 else 1437 else
1464 priv->cfg->plcp_delta_threshold = plcp; 1438 priv->cfg->plcp_delta_threshold = plcp;
1465 return count; 1439 return count;
@@ -1513,7 +1487,7 @@ static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
1513 switch (reset) { 1487 switch (reset) {
1514 case IWL_RF_RESET: 1488 case IWL_RF_RESET:
1515 case IWL_FW_RESET: 1489 case IWL_FW_RESET:
1516 ret = iwl_force_reset(priv, reset); 1490 ret = iwl_force_reset(priv, reset, true);
1517 break; 1491 break;
1518 default: 1492 default:
1519 return -EINVAL; 1493 return -EINVAL;
@@ -1521,6 +1495,40 @@ static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
1521 return ret ? ret : count; 1495 return ret ? ret : count;
1522} 1496}
1523 1497
1498static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
1499 const char __user *user_buf,
1500 size_t count, loff_t *ppos) {
1501
1502 struct iwl_priv *priv = file->private_data;
1503 char buf[8];
1504 int buf_size;
1505 int flush;
1506
1507 memset(buf, 0, sizeof(buf));
1508 buf_size = min(count, sizeof(buf) - 1);
1509 if (copy_from_user(buf, user_buf, buf_size))
1510 return -EFAULT;
1511 if (sscanf(buf, "%d", &flush) != 1)
1512 return -EINVAL;
1513
1514 if (iwl_is_rfkill(priv))
1515 return -EFAULT;
1516
1517 priv->cfg->ops->lib->dev_txfifo_flush(priv, IWL_DROP_ALL);
1518
1519 return count;
1520}
1521
1522static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1523 char __user *user_buf,
1524 size_t count, loff_t *ppos)
1525{
1526 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1527
1528 return priv->cfg->ops->lib->debugfs_ops.bt_stats_read(file,
1529 user_buf, count, ppos);
1530}
1531
1524DEBUGFS_READ_FILE_OPS(rx_statistics); 1532DEBUGFS_READ_FILE_OPS(rx_statistics);
1525DEBUGFS_READ_FILE_OPS(tx_statistics); 1533DEBUGFS_READ_FILE_OPS(tx_statistics);
1526DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1534DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1542,6 +1550,8 @@ DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
1542DEBUGFS_READ_WRITE_FILE_OPS(force_reset); 1550DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1543DEBUGFS_READ_FILE_OPS(rxon_flags); 1551DEBUGFS_READ_FILE_OPS(rxon_flags);
1544DEBUGFS_READ_FILE_OPS(rxon_filter_flags); 1552DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1553DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
1554DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
1545 1555
1546/* 1556/*
1547 * Create the debugfs files and directories 1557 * Create the debugfs files and directories
@@ -1600,6 +1610,8 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1600 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); 1610 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1601 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); 1611 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1602 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); 1612 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1613 if (priv->cfg->ops->lib->dev_txfifo_flush)
1614 DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
1603 1615
1604 if (priv->cfg->sensitivity_calib_by_driver) 1616 if (priv->cfg->sensitivity_calib_by_driver)
1605 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); 1617 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
@@ -1607,6 +1619,8 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1607 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 1619 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1608 if (priv->cfg->ucode_tracing) 1620 if (priv->cfg->ucode_tracing)
1609 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); 1621 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1622 if (priv->cfg->bt_statistics)
1623 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
1610 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 1624 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1611 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 1625 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1612 if (priv->cfg->sensitivity_calib_by_driver) 1626 if (priv->cfg->sensitivity_calib_by_driver)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index f3f3473c5c7e..f35bcad56e36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -48,25 +48,6 @@
48#include "iwl-power.h" 48#include "iwl-power.h"
49#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
50 50
51/* configuration for the iwl4965 */
52extern struct iwl_cfg iwl4965_agn_cfg;
53extern struct iwl_cfg iwl5300_agn_cfg;
54extern struct iwl_cfg iwl5100_agn_cfg;
55extern struct iwl_cfg iwl5350_agn_cfg;
56extern struct iwl_cfg iwl5100_bgn_cfg;
57extern struct iwl_cfg iwl5100_abg_cfg;
58extern struct iwl_cfg iwl5150_agn_cfg;
59extern struct iwl_cfg iwl5150_abg_cfg;
60extern struct iwl_cfg iwl6000g2a_2agn_cfg;
61extern struct iwl_cfg iwl6000i_2agn_cfg;
62extern struct iwl_cfg iwl6000i_2abg_cfg;
63extern struct iwl_cfg iwl6000i_2bg_cfg;
64extern struct iwl_cfg iwl6000_3agn_cfg;
65extern struct iwl_cfg iwl6050_2agn_cfg;
66extern struct iwl_cfg iwl6050_2abg_cfg;
67extern struct iwl_cfg iwl1000_bgn_cfg;
68extern struct iwl_cfg iwl1000_bg_cfg;
69
70struct iwl_tx_queue; 51struct iwl_tx_queue;
71 52
72/* CT-KILL constants */ 53/* CT-KILL constants */
@@ -133,8 +114,8 @@ struct iwl_cmd_meta {
133 * structure is stored at the end of the shared queue memory. */ 114 * structure is stored at the end of the shared queue memory. */
134 u32 flags; 115 u32 flags;
135 116
136 DECLARE_PCI_UNMAP_ADDR(mapping) 117 DEFINE_DMA_UNMAP_ADDR(mapping);
137 DECLARE_PCI_UNMAP_LEN(len) 118 DEFINE_DMA_UNMAP_LEN(len);
138}; 119};
139 120
140/* 121/*
@@ -157,11 +138,11 @@ struct iwl_queue {
157 * space more than this */ 138 * space more than this */
158 int high_mark; /* high watermark, stop queue if free 139 int high_mark; /* high watermark, stop queue if free
159 * space less than this */ 140 * space less than this */
160} __attribute__ ((packed)); 141} __packed;
161 142
162/* One for each TFD */ 143/* One for each TFD */
163struct iwl_tx_info { 144struct iwl_tx_info {
164 struct sk_buff *skb[IWL_NUM_OF_TBS - 1]; 145 struct sk_buff *skb;
165}; 146};
166 147
167/** 148/**
@@ -343,8 +324,8 @@ struct iwl_device_cmd {
343 struct iwl_tx_cmd tx; 324 struct iwl_tx_cmd tx;
344 struct iwl6000_channel_switch_cmd chswitch; 325 struct iwl6000_channel_switch_cmd chswitch;
345 u8 payload[DEF_CMD_PAYLOAD_SIZE]; 326 u8 payload[DEF_CMD_PAYLOAD_SIZE];
346 } __attribute__ ((packed)) cmd; 327 } __packed cmd;
347} __attribute__ ((packed)); 328} __packed;
348 329
349#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) 330#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
350 331
@@ -367,7 +348,7 @@ struct iwl_host_cmd {
367/** 348/**
368 * struct iwl_rx_queue - Rx queue 349 * struct iwl_rx_queue - Rx queue
369 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 350 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
370 * @dma_addr: bus address of buffer of receive buffer descriptors (rbd) 351 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
371 * @read: Shared index to newest available Rx buffer 352 * @read: Shared index to newest available Rx buffer
372 * @write: Shared index to oldest written Rx packet 353 * @write: Shared index to oldest written Rx packet
373 * @free_count: Number of pre-allocated buffers in rx_free 354 * @free_count: Number of pre-allocated buffers in rx_free
@@ -381,7 +362,7 @@ struct iwl_host_cmd {
381 */ 362 */
382struct iwl_rx_queue { 363struct iwl_rx_queue {
383 __le32 *bd; 364 __le32 *bd;
384 dma_addr_t dma_addr; 365 dma_addr_t bd_dma;
385 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 366 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
386 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 367 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
387 u32 read; 368 u32 read;
@@ -433,7 +414,7 @@ struct iwl_ht_agg {
433 414
434 415
435struct iwl_tid_data { 416struct iwl_tid_data {
436 u16 seq_number; 417 u16 seq_number; /* agn only */
437 u16 tfds_in_queue; 418 u16 tfds_in_queue;
438 struct iwl_ht_agg agg; 419 struct iwl_ht_agg agg;
439}; 420};
@@ -583,6 +564,14 @@ enum iwl_ucode_tlv_type {
583 IWL_UCODE_TLV_INIT_DATA = 4, 564 IWL_UCODE_TLV_INIT_DATA = 4,
584 IWL_UCODE_TLV_BOOT = 5, 565 IWL_UCODE_TLV_BOOT = 5,
585 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ 566 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
567 IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
568 IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
569 IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
570 IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
571 IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
572 IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
573 IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
574 IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
586}; 575};
587 576
588struct iwl_ucode_tlv { 577struct iwl_ucode_tlv {
@@ -590,7 +579,7 @@ struct iwl_ucode_tlv {
590 __le16 alternative; /* see comment */ 579 __le16 alternative; /* see comment */
591 __le32 length; /* not including type/length fields */ 580 __le32 length; /* not including type/length fields */
592 u8 data[0]; 581 u8 data[0];
593} __attribute__ ((packed)); 582} __packed;
594 583
595#define IWL_TLV_UCODE_MAGIC 0x0a4c5749 584#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
596 585
@@ -675,6 +664,7 @@ struct iwl_sensitivity_ranges {
675 * @sw_crypto: 0 for hw, 1 for sw 664 * @sw_crypto: 0 for hw, 1 for sw
676 * @max_xxx_size: for ucode uses 665 * @max_xxx_size: for ucode uses
677 * @ct_kill_threshold: temperature threshold 666 * @ct_kill_threshold: temperature threshold
667 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
678 * @calib_init_cfg: setup initial calibrations for the hw 668 * @calib_init_cfg: setup initial calibrations for the hw
679 * @struct iwl_sensitivity_ranges: range of sensitivity values 669 * @struct iwl_sensitivity_ranges: range of sensitivity values
680 */ 670 */
@@ -701,6 +691,7 @@ struct iwl_hw_params {
701 u32 ct_kill_threshold; /* value in hw-dependent units */ 691 u32 ct_kill_threshold; /* value in hw-dependent units */
702 u32 ct_kill_exit_threshold; /* value in hw-dependent units */ 692 u32 ct_kill_exit_threshold; /* value in hw-dependent units */
703 /* for 1000, 6000 series and up */ 693 /* for 1000, 6000 series and up */
694 u16 beacon_time_tsf_bits;
704 u32 calib_init_cfg; 695 u32 calib_init_cfg;
705 const struct iwl_sensitivity_ranges *sens; 696 const struct iwl_sensitivity_ranges *sens;
706}; 697};
@@ -1047,11 +1038,12 @@ struct iwl_event_log {
1047 * This is the threshold value of plcp error rate per 100mSecs. It is 1038 * This is the threshold value of plcp error rate per 100mSecs. It is
1048 * used to set and check for the validity of plcp_delta. 1039 * used to set and check for the validity of plcp_delta.
1049 */ 1040 */
1050#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0) 1041#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
1051#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) 1042#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
1052#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) 1043#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
1053#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) 1044#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
1054#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) 1045#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
1046#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
1055 1047
1056#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) 1048#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
1057#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) 1049#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
@@ -1075,6 +1067,20 @@ struct iwl_force_reset {
1075 unsigned long last_force_reset_jiffies; 1067 unsigned long last_force_reset_jiffies;
1076}; 1068};
1077 1069
1070/* extend beacon time format bit shifting */
1071/*
1072 * for _3945 devices
1073 * bits 31:24 - extended
1074 * bits 23:0 - interval
1075 */
1076#define IWL3945_EXT_BEACON_TIME_POS 24
1077/*
1078 * for _agn devices
1079 * bits 31:22 - extended
1080 * bits 21:0 - interval
1081 */
1082#define IWLAGN_EXT_BEACON_TIME_POS 22
1083
1078struct iwl_priv { 1084struct iwl_priv {
1079 1085
1080 /* ieee device used by generic ieee processing code */ 1086 /* ieee device used by generic ieee processing code */
@@ -1109,7 +1115,7 @@ struct iwl_priv {
1109 /* force reset */ 1115 /* force reset */
1110 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; 1116 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1111 1117
1112 /* we allocate array of iwl4965_channel_info for NIC's valid channels. 1118 /* we allocate array of iwl_channel_info for NIC's valid channels.
1113 * Access via channel # using indirect index array */ 1119 * Access via channel # using indirect index array */
1114 struct iwl_channel_info *channel_info; /* channel info array */ 1120 struct iwl_channel_info *channel_info; /* channel info array */
1115 u8 channel_count; /* # of channels */ 1121 u8 channel_count; /* # of channels */
@@ -1127,6 +1133,7 @@ struct iwl_priv {
1127 void *scan_cmd; 1133 void *scan_cmd;
1128 enum ieee80211_band scan_band; 1134 enum ieee80211_band scan_band;
1129 struct cfg80211_scan_request *scan_request; 1135 struct cfg80211_scan_request *scan_request;
1136 struct ieee80211_vif *scan_vif;
1130 bool is_internal_short_scan; 1137 bool is_internal_short_scan;
1131 u8 scan_tx_ant[IEEE80211_NUM_BANDS]; 1138 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1132 u8 mgmt_tx_ant; 1139 u8 mgmt_tx_ant;
@@ -1147,6 +1154,9 @@ struct iwl_priv {
1147 u32 hw_wa_rev; 1154 u32 hw_wa_rev;
1148 u8 rev_id; 1155 u8 rev_id;
1149 1156
1157 /* EEPROM MAC addresses */
1158 struct mac_address addresses[2];
1159
1150 /* uCode images, save to reload in case of failure */ 1160 /* uCode images, save to reload in case of failure */
1151 int fw_index; /* firmware we're trying to load */ 1161 int fw_index; /* firmware we're trying to load */
1152 u32 ucode_ver; /* version of ucode, copy of 1162 u32 ucode_ver; /* version of ucode, copy of
@@ -1174,7 +1184,7 @@ struct iwl_priv {
1174 struct iwl_switch_rxon switch_rxon; 1184 struct iwl_switch_rxon switch_rxon;
1175 1185
1176 /* 1st responses from initialize and runtime uCode images. 1186 /* 1st responses from initialize and runtime uCode images.
1177 * 4965's initialize alive response contains some calibration data. */ 1187 * _agn's initialize alive response contains some calibration data. */
1178 struct iwl_init_alive_resp card_alive_init; 1188 struct iwl_init_alive_resp card_alive_init;
1179 struct iwl_alive_resp card_alive; 1189 struct iwl_alive_resp card_alive;
1180 1190
@@ -1188,7 +1198,9 @@ struct iwl_priv {
1188 u8 start_calib; 1198 u8 start_calib;
1189 struct iwl_sensitivity_data sensitivity_data; 1199 struct iwl_sensitivity_data sensitivity_data;
1190 struct iwl_chain_noise_data chain_noise_data; 1200 struct iwl_chain_noise_data chain_noise_data;
1201 bool enhance_sensitivity_table;
1191 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 1202 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1203 __le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES];
1192 1204
1193 struct iwl_ht_config current_ht_config; 1205 struct iwl_ht_config current_ht_config;
1194 1206
@@ -1220,18 +1232,12 @@ struct iwl_priv {
1220 struct iwl_power_mgr power_data; 1232 struct iwl_power_mgr power_data;
1221 struct iwl_tt_mgmt thermal_throttle; 1233 struct iwl_tt_mgmt thermal_throttle;
1222 1234
1223 struct iwl_notif_statistics statistics;
1224#ifdef CONFIG_IWLWIFI_DEBUG
1225 struct iwl_notif_statistics accum_statistics;
1226 struct iwl_notif_statistics delta_statistics;
1227 struct iwl_notif_statistics max_delta;
1228#endif
1229
1230 /* context information */ 1235 /* context information */
1231 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ 1236 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1232 u8 mac_addr[ETH_ALEN];
1233 1237
1234 /*station table variables */ 1238 /* station table variables */
1239
1240 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1235 spinlock_t sta_lock; 1241 spinlock_t sta_lock;
1236 int num_stations; 1242 int num_stations;
1237 struct iwl_station_entry stations[IWL_STATION_COUNT]; 1243 struct iwl_station_entry stations[IWL_STATION_COUNT];
@@ -1273,7 +1279,7 @@ struct iwl_priv {
1273 struct delayed_work rfkill_poll; 1279 struct delayed_work rfkill_poll;
1274 1280
1275 struct iwl3945_notif_statistics statistics; 1281 struct iwl3945_notif_statistics statistics;
1276#ifdef CONFIG_IWLWIFI_DEBUG 1282#ifdef CONFIG_IWLWIFI_DEBUGFS
1277 struct iwl3945_notif_statistics accum_statistics; 1283 struct iwl3945_notif_statistics accum_statistics;
1278 struct iwl3945_notif_statistics delta_statistics; 1284 struct iwl3945_notif_statistics delta_statistics;
1279 struct iwl3945_notif_statistics max_delta; 1285 struct iwl3945_notif_statistics max_delta;
@@ -1315,6 +1321,28 @@ struct iwl_priv {
1315 bool last_phy_res_valid; 1321 bool last_phy_res_valid;
1316 1322
1317 struct completion firmware_loading_complete; 1323 struct completion firmware_loading_complete;
1324
1325 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
1326 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
1327
1328 /*
1329 * chain noise reset and gain commands are the
1330 * two extra calibration commands follows the standard
1331 * phy calibration commands
1332 */
1333 u8 phy_calib_chain_noise_reset_cmd;
1334 u8 phy_calib_chain_noise_gain_cmd;
1335
1336 struct iwl_notif_statistics statistics;
1337 struct iwl_bt_notif_statistics statistics_bt;
1338#ifdef CONFIG_IWLWIFI_DEBUGFS
1339 struct iwl_notif_statistics accum_statistics;
1340 struct iwl_notif_statistics delta_statistics;
1341 struct iwl_notif_statistics max_delta;
1342 struct iwl_bt_notif_statistics accum_statistics_bt;
1343 struct iwl_bt_notif_statistics delta_statistics_bt;
1344 struct iwl_bt_notif_statistics max_delta_bt;
1345#endif
1318 } _agn; 1346 } _agn;
1319#endif 1347#endif
1320 }; 1348 };
@@ -1336,6 +1364,7 @@ struct iwl_priv {
1336 struct work_struct ct_enter; 1364 struct work_struct ct_enter;
1337 struct work_struct ct_exit; 1365 struct work_struct ct_exit;
1338 struct work_struct start_internal_scan; 1366 struct work_struct start_internal_scan;
1367 struct work_struct tx_flush;
1339 1368
1340 struct tasklet_struct irq_tasklet; 1369 struct tasklet_struct irq_tasklet;
1341 1370
@@ -1353,9 +1382,7 @@ struct iwl_priv {
1353 /* debugging info */ 1382 /* debugging info */
1354 u32 debug_level; /* per device debugging will override global 1383 u32 debug_level; /* per device debugging will override global
1355 iwl_debug_level if set */ 1384 iwl_debug_level if set */
1356 u32 framecnt_to_us; 1385#endif /* CONFIG_IWLWIFI_DEBUG */
1357 atomic_t restrict_refcnt;
1358 bool disable_ht40;
1359#ifdef CONFIG_IWLWIFI_DEBUGFS 1386#ifdef CONFIG_IWLWIFI_DEBUGFS
1360 /* debugfs */ 1387 /* debugfs */
1361 u16 tx_traffic_idx; 1388 u16 tx_traffic_idx;
@@ -1364,8 +1391,8 @@ struct iwl_priv {
1364 u8 *rx_traffic; 1391 u8 *rx_traffic;
1365 struct dentry *debugfs_dir; 1392 struct dentry *debugfs_dir;
1366 u32 dbgfs_sram_offset, dbgfs_sram_len; 1393 u32 dbgfs_sram_offset, dbgfs_sram_len;
1394 bool disable_ht40;
1367#endif /* CONFIG_IWLWIFI_DEBUGFS */ 1395#endif /* CONFIG_IWLWIFI_DEBUGFS */
1368#endif /* CONFIG_IWLWIFI_DEBUG */
1369 1396
1370 struct work_struct txpower_work; 1397 struct work_struct txpower_work;
1371 u32 disable_sens_cal; 1398 u32 disable_sens_cal;
@@ -1419,9 +1446,9 @@ static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
1419static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv, 1446static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1420 int txq_id, int idx) 1447 int txq_id, int idx)
1421{ 1448{
1422 if (priv->txq[txq_id].txb[idx].skb[0]) 1449 if (priv->txq[txq_id].txb[idx].skb)
1423 return (struct ieee80211_hdr *)priv->txq[txq_id]. 1450 return (struct ieee80211_hdr *)priv->txq[txq_id].
1424 txb[idx].skb[0]->data; 1451 txb[idx].skb->data;
1425 return NULL; 1452 return NULL;
1426} 1453}
1427 1454
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index ee11452519e6..a45d02e555cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -629,6 +629,9 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
629 calib_ver < priv->cfg->eeprom_calib_ver) 629 calib_ver < priv->cfg->eeprom_calib_ver)
630 goto err; 630 goto err;
631 631
632 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
633 eeprom_ver, calib_ver);
634
632 return 0; 635 return 0;
633err: 636err:
634 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", 637 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 95aa202c85e3..a4772aff51fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -118,7 +118,7 @@ enum {
118struct iwl_eeprom_channel { 118struct iwl_eeprom_channel {
119 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ 119 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
120 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 120 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
121} __attribute__ ((packed)); 121} __packed;
122 122
123/** 123/**
124 * iwl_eeprom_enhanced_txpwr structure 124 * iwl_eeprom_enhanced_txpwr structure
@@ -144,7 +144,7 @@ struct iwl_eeprom_enhanced_txpwr {
144 s8 reserved; 144 s8 reserved;
145 s8 mimo2_max; 145 s8 mimo2_max;
146 s8 mimo3_max; 146 s8 mimo3_max;
147} __attribute__ ((packed)); 147} __packed;
148 148
149/* 3945 Specific */ 149/* 3945 Specific */
150#define EEPROM_3945_EEPROM_VERSION (0x2f) 150#define EEPROM_3945_EEPROM_VERSION (0x2f)
@@ -276,6 +276,10 @@ struct iwl_eeprom_enhanced_txpwr {
276#define EEPROM_6050_TX_POWER_VERSION (4) 276#define EEPROM_6050_TX_POWER_VERSION (4)
277#define EEPROM_6050_EEPROM_VERSION (0x532) 277#define EEPROM_6050_EEPROM_VERSION (0x532)
278 278
279/* 6x50g2 Specific */
280#define EEPROM_6050G2_TX_POWER_VERSION (6)
281#define EEPROM_6050G2_EEPROM_VERSION (0x553)
282
279/* 6x00g2 Specific */ 283/* 6x00g2 Specific */
280#define EEPROM_6000G2_TX_POWER_VERSION (6) 284#define EEPROM_6000G2_TX_POWER_VERSION (6)
281#define EEPROM_6000G2_EEPROM_VERSION (0x709) 285#define EEPROM_6000G2_EEPROM_VERSION (0x709)
@@ -312,7 +316,7 @@ struct iwl_eeprom_calib_measure {
312 u8 gain_idx; /* Index into gain table */ 316 u8 gain_idx; /* Index into gain table */
313 u8 actual_pow; /* Measured RF output power, half-dBm */ 317 u8 actual_pow; /* Measured RF output power, half-dBm */
314 s8 pa_det; /* Power amp detector level (not used) */ 318 s8 pa_det; /* Power amp detector level (not used) */
315} __attribute__ ((packed)); 319} __packed;
316 320
317 321
318/* 322/*
@@ -328,7 +332,7 @@ struct iwl_eeprom_calib_ch_info {
328 struct iwl_eeprom_calib_measure 332 struct iwl_eeprom_calib_measure
329 measurements[EEPROM_TX_POWER_TX_CHAINS] 333 measurements[EEPROM_TX_POWER_TX_CHAINS]
330 [EEPROM_TX_POWER_MEASUREMENTS]; 334 [EEPROM_TX_POWER_MEASUREMENTS];
331} __attribute__ ((packed)); 335} __packed;
332 336
333/* 337/*
334 * txpower subband info. 338 * txpower subband info.
@@ -345,7 +349,7 @@ struct iwl_eeprom_calib_subband_info {
345 u8 ch_to; /* channel number of highest channel in subband */ 349 u8 ch_to; /* channel number of highest channel in subband */
346 struct iwl_eeprom_calib_ch_info ch1; 350 struct iwl_eeprom_calib_ch_info ch1;
347 struct iwl_eeprom_calib_ch_info ch2; 351 struct iwl_eeprom_calib_ch_info ch2;
348} __attribute__ ((packed)); 352} __packed;
349 353
350 354
351/* 355/*
@@ -374,7 +378,7 @@ struct iwl_eeprom_calib_info {
374 __le16 voltage; /* signed */ 378 __le16 voltage; /* signed */
375 struct iwl_eeprom_calib_subband_info 379 struct iwl_eeprom_calib_subband_info
376 band_info[EEPROM_TX_POWER_BANDS]; 380 band_info[EEPROM_TX_POWER_BANDS];
377} __attribute__ ((packed)); 381} __packed;
378 382
379 383
380#define ADDRESS_MSK 0x0000FFFF 384#define ADDRESS_MSK 0x0000FFFF
@@ -398,6 +402,7 @@ struct iwl_eeprom_calib_info {
398#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ 402#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
399#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ 403#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
400#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */ 404#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */
405#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
401 406
402/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ 407/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
403#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ 408#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 113c3669b9ce..55b8370bc6d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -398,12 +398,7 @@
398 */ 398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) 399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400 400
401#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24) 401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
402#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
403
404#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
405 (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
406 FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
407 402
408/* Tx service channels */ 403/* Tx service channels */
409#define FH_SRVC_CHNL (9) 404#define FH_SRVC_CHNL (9)
@@ -449,7 +444,7 @@ struct iwl_rb_status {
449 __le16 finished_rb_num; 444 __le16 finished_rb_num;
450 __le16 finished_fr_nam; 445 __le16 finished_fr_nam;
451 __le32 __unused; /* 3945 only */ 446 __le32 __unused; /* 3945 only */
452} __attribute__ ((packed)); 447} __packed;
453 448
454 449
455#define TFD_QUEUE_SIZE_MAX (256) 450#define TFD_QUEUE_SIZE_MAX (256)
@@ -475,7 +470,7 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
475struct iwl_tfd_tb { 470struct iwl_tfd_tb {
476 __le32 lo; 471 __le32 lo;
477 __le16 hi_n_len; 472 __le16 hi_n_len;
478} __attribute__((packed)); 473} __packed;
479 474
480/** 475/**
481 * struct iwl_tfd 476 * struct iwl_tfd
@@ -510,7 +505,7 @@ struct iwl_tfd {
510 u8 num_tbs; 505 u8 num_tbs;
511 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; 506 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
512 __le32 __pad; 507 __le32 __pad;
513} __attribute__ ((packed)); 508} __packed;
514 509
515/* Keep Warm Size */ 510/* Keep Warm Size */
516#define IWL_KW_SIZE 0x1000 /* 4k */ 511#define IWL_KW_SIZE 0x1000 /* 4k */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 51f89e7ba681..258d059ef41f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -49,6 +49,7 @@ const char *get_cmd_string(u8 cmd)
49 IWL_CMD(REPLY_ADD_STA); 49 IWL_CMD(REPLY_ADD_STA);
50 IWL_CMD(REPLY_REMOVE_STA); 50 IWL_CMD(REPLY_REMOVE_STA);
51 IWL_CMD(REPLY_REMOVE_ALL_STA); 51 IWL_CMD(REPLY_REMOVE_ALL_STA);
52 IWL_CMD(REPLY_TXFIFO_FLUSH);
52 IWL_CMD(REPLY_WEPKEY); 53 IWL_CMD(REPLY_WEPKEY);
53 IWL_CMD(REPLY_3945_RX); 54 IWL_CMD(REPLY_3945_RX);
54 IWL_CMD(REPLY_TX); 55 IWL_CMD(REPLY_TX);
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 3ff6b9d25a10..621abe3c5afc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -92,6 +92,11 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
92static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev, 92static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
93 struct fw_desc *desc) 93 struct fw_desc *desc)
94{ 94{
95 if (!desc->len) {
96 desc->v_addr = NULL;
97 return -EINVAL;
98 }
99
95 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len, 100 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
96 &desc->p_addr, GFP_KERNEL); 101 &desc->p_addr, GFP_KERNEL);
97 return (desc->v_addr != NULL) ? 0 : -ENOMEM; 102 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
@@ -170,4 +175,26 @@ static inline void iwl_enable_interrupts(struct iwl_priv *priv)
170 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask); 175 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
171} 176}
172 177
178/**
179 * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
180 * @priv -- pointer to iwl_priv data structure
181 * @tsf_bits -- number of bits need to shift for masking)
182 */
183static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
184 u16 tsf_bits)
185{
186 return (1 << tsf_bits) - 1;
187}
188
189/**
190 * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
191 * @priv -- pointer to iwl_priv data structure
192 * @tsf_bits -- number of bits need to shift for masking)
193 */
194static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
195 u16 tsf_bits)
196{
197 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
198}
199
173#endif /* __iwl_helpers_h__ */ 200#endif /* __iwl_helpers_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 0a5d7cf25196..79773e353baa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -175,7 +175,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
175 INIT_LIST_HEAD(&rxq->rx_used); 175 INIT_LIST_HEAD(&rxq->rx_used);
176 176
177 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 177 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
178 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr, 178 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
179 GFP_KERNEL); 179 GFP_KERNEL);
180 if (!rxq->bd) 180 if (!rxq->bd)
181 goto err_bd; 181 goto err_bd;
@@ -199,32 +199,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
199 199
200err_rb: 200err_rb:
201 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 201 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
202 rxq->dma_addr); 202 rxq->bd_dma);
203err_bd: 203err_bd:
204 return -ENOMEM; 204 return -ENOMEM;
205} 205}
206EXPORT_SYMBOL(iwl_rx_queue_alloc); 206EXPORT_SYMBOL(iwl_rx_queue_alloc);
207 207
208void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
209 struct iwl_rx_mem_buffer *rxb)
210
211{
212 struct iwl_rx_packet *pkt = rxb_addr(rxb);
213 struct iwl_missed_beacon_notif *missed_beacon;
214
215 missed_beacon = &pkt->u.missed_beacon;
216 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
217 priv->missed_beacon_threshold) {
218 IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
219 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
220 le32_to_cpu(missed_beacon->total_missed_becons),
221 le32_to_cpu(missed_beacon->num_recvd_beacons),
222 le32_to_cpu(missed_beacon->num_expected_beacons));
223 if (!test_bit(STATUS_SCANNING, &priv->status))
224 iwl_init_sensitivity(priv);
225 }
226}
227EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
228 208
229void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 209void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
230 struct iwl_rx_mem_buffer *rxb) 210 struct iwl_rx_mem_buffer *rxb)
@@ -243,161 +223,6 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
243} 223}
244EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif); 224EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
245 225
246
247
248/* Calculate noise level, based on measurements during network silence just
249 * before arriving beacon. This measurement can be done only if we know
250 * exactly when to expect beacons, therefore only when we're associated. */
251static void iwl_rx_calc_noise(struct iwl_priv *priv)
252{
253 struct statistics_rx_non_phy *rx_info
254 = &(priv->statistics.rx.general);
255 int num_active_rx = 0;
256 int total_silence = 0;
257 int bcn_silence_a =
258 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
259 int bcn_silence_b =
260 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
261 int bcn_silence_c =
262 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
263 int last_rx_noise;
264
265 if (bcn_silence_a) {
266 total_silence += bcn_silence_a;
267 num_active_rx++;
268 }
269 if (bcn_silence_b) {
270 total_silence += bcn_silence_b;
271 num_active_rx++;
272 }
273 if (bcn_silence_c) {
274 total_silence += bcn_silence_c;
275 num_active_rx++;
276 }
277
278 /* Average among active antennas */
279 if (num_active_rx)
280 last_rx_noise = (total_silence / num_active_rx) - 107;
281 else
282 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
283
284 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
285 bcn_silence_a, bcn_silence_b, bcn_silence_c,
286 last_rx_noise);
287}
288
289#ifdef CONFIG_IWLWIFI_DEBUG
290/*
291 * based on the assumption of all statistics counter are in DWORD
292 * FIXME: This function is for debugging, do not deal with
293 * the case of counters roll-over.
294 */
295static void iwl_accumulative_statistics(struct iwl_priv *priv,
296 __le32 *stats)
297{
298 int i;
299 __le32 *prev_stats;
300 u32 *accum_stats;
301 u32 *delta, *max_delta;
302
303 prev_stats = (__le32 *)&priv->statistics;
304 accum_stats = (u32 *)&priv->accum_statistics;
305 delta = (u32 *)&priv->delta_statistics;
306 max_delta = (u32 *)&priv->max_delta;
307
308 for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
309 i += sizeof(__le32), stats++, prev_stats++, delta++,
310 max_delta++, accum_stats++) {
311 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
312 *delta = (le32_to_cpu(*stats) -
313 le32_to_cpu(*prev_stats));
314 *accum_stats += *delta;
315 if (*delta > *max_delta)
316 *max_delta = *delta;
317 }
318 }
319
320 /* reset accumulative statistics for "no-counter" type statistics */
321 priv->accum_statistics.general.temperature =
322 priv->statistics.general.temperature;
323 priv->accum_statistics.general.temperature_m =
324 priv->statistics.general.temperature_m;
325 priv->accum_statistics.general.ttl_timestamp =
326 priv->statistics.general.ttl_timestamp;
327 priv->accum_statistics.tx.tx_power.ant_a =
328 priv->statistics.tx.tx_power.ant_a;
329 priv->accum_statistics.tx.tx_power.ant_b =
330 priv->statistics.tx.tx_power.ant_b;
331 priv->accum_statistics.tx.tx_power.ant_c =
332 priv->statistics.tx.tx_power.ant_c;
333}
334#endif
335
336#define REG_RECALIB_PERIOD (60)
337
338/**
339 * iwl_good_plcp_health - checks for plcp error.
340 *
341 * When the plcp error is exceeding the thresholds, reset the radio
342 * to improve the throughput.
343 */
344bool iwl_good_plcp_health(struct iwl_priv *priv,
345 struct iwl_rx_packet *pkt)
346{
347 bool rc = true;
348 int combined_plcp_delta;
349 unsigned int plcp_msec;
350 unsigned long plcp_received_jiffies;
351
352 /*
353 * check for plcp_err and trigger radio reset if it exceeds
354 * the plcp error threshold plcp_delta.
355 */
356 plcp_received_jiffies = jiffies;
357 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
358 (long) priv->plcp_jiffies);
359 priv->plcp_jiffies = plcp_received_jiffies;
360 /*
361 * check to make sure plcp_msec is not 0 to prevent division
362 * by zero.
363 */
364 if (plcp_msec) {
365 combined_plcp_delta =
366 (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
367 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
368 (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
369 le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
370
371 if ((combined_plcp_delta > 0) &&
372 ((combined_plcp_delta * 100) / plcp_msec) >
373 priv->cfg->plcp_delta_threshold) {
374 /*
375 * if plcp_err exceed the threshold,
376 * the following data is printed in csv format:
377 * Text: plcp_err exceeded %d,
378 * Received ofdm.plcp_err,
379 * Current ofdm.plcp_err,
380 * Received ofdm_ht.plcp_err,
381 * Current ofdm_ht.plcp_err,
382 * combined_plcp_delta,
383 * plcp_msec
384 */
385 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
386 "%u, %u, %u, %u, %d, %u mSecs\n",
387 priv->cfg->plcp_delta_threshold,
388 le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
389 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
390 le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
391 le32_to_cpu(
392 priv->statistics.rx.ofdm_ht.plcp_err),
393 combined_plcp_delta, plcp_msec);
394 rc = false;
395 }
396 }
397 return rc;
398}
399EXPORT_SYMBOL(iwl_good_plcp_health);
400
401void iwl_recover_from_statistics(struct iwl_priv *priv, 226void iwl_recover_from_statistics(struct iwl_priv *priv,
402 struct iwl_rx_packet *pkt) 227 struct iwl_rx_packet *pkt)
403{ 228{
@@ -413,7 +238,7 @@ void iwl_recover_from_statistics(struct iwl_priv *priv,
413 */ 238 */
414 IWL_ERR(priv, "low ack count detected, " 239 IWL_ERR(priv, "low ack count detected, "
415 "restart firmware\n"); 240 "restart firmware\n");
416 if (!iwl_force_reset(priv, IWL_FW_RESET)) 241 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
417 return; 242 return;
418 } 243 }
419 } 244 }
@@ -424,76 +249,13 @@ void iwl_recover_from_statistics(struct iwl_priv *priv,
424 * high plcp error detected 249 * high plcp error detected
425 * reset Radio 250 * reset Radio
426 */ 251 */
427 iwl_force_reset(priv, IWL_RF_RESET); 252 iwl_force_reset(priv, IWL_RF_RESET, false);
428 } 253 }
429 } 254 }
430 } 255 }
431} 256}
432EXPORT_SYMBOL(iwl_recover_from_statistics); 257EXPORT_SYMBOL(iwl_recover_from_statistics);
433 258
434void iwl_rx_statistics(struct iwl_priv *priv,
435 struct iwl_rx_mem_buffer *rxb)
436{
437 int change;
438 struct iwl_rx_packet *pkt = rxb_addr(rxb);
439
440
441 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
442 (int)sizeof(priv->statistics),
443 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
444
445 change = ((priv->statistics.general.temperature !=
446 pkt->u.stats.general.temperature) ||
447 ((priv->statistics.flag &
448 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
449 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
450
451#ifdef CONFIG_IWLWIFI_DEBUG
452 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
453#endif
454 iwl_recover_from_statistics(priv, pkt);
455
456 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
457
458 set_bit(STATUS_STATISTICS, &priv->status);
459
460 /* Reschedule the statistics timer to occur in
461 * REG_RECALIB_PERIOD seconds to ensure we get a
462 * thermal update even if the uCode doesn't give
463 * us one */
464 mod_timer(&priv->statistics_periodic, jiffies +
465 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
466
467 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
468 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
469 iwl_rx_calc_noise(priv);
470 queue_work(priv->workqueue, &priv->run_time_calib_work);
471 }
472 if (priv->cfg->ops->lib->temp_ops.temperature && change)
473 priv->cfg->ops->lib->temp_ops.temperature(priv);
474}
475EXPORT_SYMBOL(iwl_rx_statistics);
476
477void iwl_reply_statistics(struct iwl_priv *priv,
478 struct iwl_rx_mem_buffer *rxb)
479{
480 struct iwl_rx_packet *pkt = rxb_addr(rxb);
481
482 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
483#ifdef CONFIG_IWLWIFI_DEBUG
484 memset(&priv->accum_statistics, 0,
485 sizeof(struct iwl_notif_statistics));
486 memset(&priv->delta_statistics, 0,
487 sizeof(struct iwl_notif_statistics));
488 memset(&priv->max_delta, 0,
489 sizeof(struct iwl_notif_statistics));
490#endif
491 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
492 }
493 iwl_rx_statistics(priv, rxb);
494}
495EXPORT_SYMBOL(iwl_reply_statistics);
496
497/* 259/*
498 * returns non-zero if packet should be dropped 260 * returns non-zero if packet should be dropped
499 */ 261 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 386c5f96eff8..b0c6b0473901 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -333,7 +333,8 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
333 goto out_unlock; 333 goto out_unlock;
334 } 334 }
335 335
336 if (test_bit(STATUS_SCANNING, &priv->status)) { 336 if (test_bit(STATUS_SCANNING, &priv->status) &&
337 !priv->is_internal_short_scan) {
337 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 338 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
338 ret = -EAGAIN; 339 ret = -EAGAIN;
339 goto out_unlock; 340 goto out_unlock;
@@ -348,8 +349,16 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
348 /* mac80211 will only ask for one band at a time */ 349 /* mac80211 will only ask for one band at a time */
349 priv->scan_band = req->channels[0]->band; 350 priv->scan_band = req->channels[0]->band;
350 priv->scan_request = req; 351 priv->scan_request = req;
352 priv->scan_vif = vif;
351 353
352 ret = iwl_scan_initiate(priv, vif); 354 /*
355 * If an internal scan is in progress, just set
356 * up the scan_request as per above.
357 */
358 if (priv->is_internal_short_scan)
359 ret = 0;
360 else
361 ret = iwl_scan_initiate(priv, vif);
353 362
354 IWL_DEBUG_MAC80211(priv, "leave\n"); 363 IWL_DEBUG_MAC80211(priv, "leave\n");
355 364
@@ -420,11 +429,10 @@ void iwl_bg_scan_check(struct work_struct *data)
420 return; 429 return;
421 430
422 mutex_lock(&priv->mutex); 431 mutex_lock(&priv->mutex);
423 if (test_bit(STATUS_SCANNING, &priv->status) || 432 if (test_bit(STATUS_SCANNING, &priv->status) &&
424 test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 433 !test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
425 IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting " 434 IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n",
426 "adapter (%dms)\n", 435 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
427 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
428 436
429 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 437 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
430 iwl_send_scan_abort(priv); 438 iwl_send_scan_abort(priv);
@@ -438,7 +446,7 @@ EXPORT_SYMBOL(iwl_bg_scan_check);
438 */ 446 */
439 447
440u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, 448u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
441 const u8 *ies, int ie_len, int left) 449 const u8 *ta, const u8 *ies, int ie_len, int left)
442{ 450{
443 int len = 0; 451 int len = 0;
444 u8 *pos = NULL; 452 u8 *pos = NULL;
@@ -451,7 +459,7 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
451 459
452 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 460 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
453 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN); 461 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
454 memcpy(frame->sa, priv->mac_addr, ETH_ALEN); 462 memcpy(frame->sa, ta, ETH_ALEN);
455 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN); 463 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
456 frame->seq_ctrl = 0; 464 frame->seq_ctrl = 0;
457 465
@@ -489,12 +497,11 @@ void iwl_bg_abort_scan(struct work_struct *work)
489 !test_bit(STATUS_GEO_CONFIGURED, &priv->status)) 497 !test_bit(STATUS_GEO_CONFIGURED, &priv->status))
490 return; 498 return;
491 499
492 mutex_lock(&priv->mutex); 500 cancel_delayed_work(&priv->scan_check);
493
494 cancel_delayed_work_sync(&priv->scan_check);
495 set_bit(STATUS_SCAN_ABORTING, &priv->status);
496 iwl_send_scan_abort(priv);
497 501
502 mutex_lock(&priv->mutex);
503 if (test_bit(STATUS_SCAN_ABORTING, &priv->status))
504 iwl_send_scan_abort(priv);
498 mutex_unlock(&priv->mutex); 505 mutex_unlock(&priv->mutex);
499} 506}
500EXPORT_SYMBOL(iwl_bg_abort_scan); 507EXPORT_SYMBOL(iwl_bg_abort_scan);
@@ -514,7 +521,30 @@ void iwl_bg_scan_completed(struct work_struct *work)
514 priv->is_internal_short_scan = false; 521 priv->is_internal_short_scan = false;
515 IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); 522 IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
516 internal = true; 523 internal = true;
524 } else {
525 priv->scan_request = NULL;
526 priv->scan_vif = NULL;
517 } 527 }
528
529 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
530 goto out;
531
532 if (internal && priv->scan_request)
533 iwl_scan_initiate(priv, priv->scan_vif);
534
535 /* Since setting the TXPOWER may have been deferred while
536 * performing the scan, fire one off */
537 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
538
539 /*
540 * Since setting the RXON may have been deferred while
541 * performing the scan, fire one off if needed
542 */
543 if (memcmp(&priv->active_rxon,
544 &priv->staging_rxon, sizeof(priv->staging_rxon)))
545 iwlcore_commit_rxon(priv);
546
547 out:
518 mutex_unlock(&priv->mutex); 548 mutex_unlock(&priv->mutex);
519 549
520 /* 550 /*
@@ -524,15 +554,6 @@ void iwl_bg_scan_completed(struct work_struct *work)
524 */ 554 */
525 if (!internal) 555 if (!internal)
526 ieee80211_scan_completed(priv->hw, false); 556 ieee80211_scan_completed(priv->hw, false);
527
528 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
529 return;
530
531 /* Since setting the TXPOWER may have been deferred while
532 * performing the scan, fire one off */
533 mutex_lock(&priv->mutex);
534 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
535 mutex_unlock(&priv->mutex);
536} 557}
537EXPORT_SYMBOL(iwl_bg_scan_completed); 558EXPORT_SYMBOL(iwl_bg_scan_completed);
538 559
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index af6babee2891..c4ca0b5d77da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -42,7 +42,7 @@ struct ieee80211_basic_report {
42 __le64 start_time; 42 __le64 start_time;
43 __le16 duration; 43 __le16 duration;
44 u8 map; 44 u8 map;
45} __attribute__ ((packed)); 45} __packed;
46 46
47enum { /* ieee80211_measurement_request.mode */ 47enum { /* ieee80211_measurement_request.mode */
48 /* Bit 0 is reserved */ 48 /* Bit 0 is reserved */
@@ -63,13 +63,13 @@ struct ieee80211_measurement_params {
63 u8 channel; 63 u8 channel;
64 __le64 start_time; 64 __le64 start_time;
65 __le16 duration; 65 __le16 duration;
66} __attribute__ ((packed)); 66} __packed;
67 67
68struct ieee80211_info_element { 68struct ieee80211_info_element {
69 u8 id; 69 u8 id;
70 u8 len; 70 u8 len;
71 u8 data[0]; 71 u8 data[0];
72} __attribute__ ((packed)); 72} __packed;
73 73
74struct ieee80211_measurement_request { 74struct ieee80211_measurement_request {
75 struct ieee80211_info_element ie; 75 struct ieee80211_info_element ie;
@@ -77,7 +77,7 @@ struct ieee80211_measurement_request {
77 u8 mode; 77 u8 mode;
78 u8 type; 78 u8 type;
79 struct ieee80211_measurement_params params[0]; 79 struct ieee80211_measurement_params params[0];
80} __attribute__ ((packed)); 80} __packed;
81 81
82struct ieee80211_measurement_report { 82struct ieee80211_measurement_report {
83 struct ieee80211_info_element ie; 83 struct ieee80211_info_element ie;
@@ -87,6 +87,6 @@ struct ieee80211_measurement_report {
87 union { 87 union {
88 struct ieee80211_basic_report basic[0]; 88 struct ieee80211_basic_report basic[0];
89 } u; 89 } u;
90} __attribute__ ((packed)); 90} __packed;
91 91
92#endif 92#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c27c13fbb1ae..9511f03f07e0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -30,6 +30,7 @@
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/lockdep.h>
33 34
34#include "iwl-dev.h" 35#include "iwl-dev.h"
35#include "iwl-core.h" 36#include "iwl-core.h"
@@ -54,18 +55,19 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
54 } 55 }
55} 56}
56 57
57static void iwl_process_add_sta_resp(struct iwl_priv *priv, 58static int iwl_process_add_sta_resp(struct iwl_priv *priv,
58 struct iwl_addsta_cmd *addsta, 59 struct iwl_addsta_cmd *addsta,
59 struct iwl_rx_packet *pkt, 60 struct iwl_rx_packet *pkt,
60 bool sync) 61 bool sync)
61{ 62{
62 u8 sta_id = addsta->sta.sta_id; 63 u8 sta_id = addsta->sta.sta_id;
63 unsigned long flags; 64 unsigned long flags;
65 int ret = -EIO;
64 66
65 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 67 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
66 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 68 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
67 pkt->hdr.flags); 69 pkt->hdr.flags);
68 return; 70 return ret;
69 } 71 }
70 72
71 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", 73 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
@@ -77,6 +79,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
77 case ADD_STA_SUCCESS_MSK: 79 case ADD_STA_SUCCESS_MSK:
78 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); 80 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
79 iwl_sta_ucode_activate(priv, sta_id); 81 iwl_sta_ucode_activate(priv, sta_id);
82 ret = 0;
80 break; 83 break;
81 case ADD_STA_NO_ROOM_IN_TABLE: 84 case ADD_STA_NO_ROOM_IN_TABLE:
82 IWL_ERR(priv, "Adding station %d failed, no room in table.\n", 85 IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
@@ -114,6 +117,8 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
114 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 117 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
115 addsta->sta.addr); 118 addsta->sta.addr);
116 spin_unlock_irqrestore(&priv->sta_lock, flags); 119 spin_unlock_irqrestore(&priv->sta_lock, flags);
120
121 return ret;
117} 122}
118 123
119static void iwl_add_sta_callback(struct iwl_priv *priv, 124static void iwl_add_sta_callback(struct iwl_priv *priv,
@@ -145,8 +150,10 @@ int iwl_send_add_sta(struct iwl_priv *priv,
145 150
146 if (flags & CMD_ASYNC) 151 if (flags & CMD_ASYNC)
147 cmd.callback = iwl_add_sta_callback; 152 cmd.callback = iwl_add_sta_callback;
148 else 153 else {
149 cmd.flags |= CMD_WANT_SKB; 154 cmd.flags |= CMD_WANT_SKB;
155 might_sleep();
156 }
150 157
151 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); 158 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
152 ret = iwl_send_cmd(priv, &cmd); 159 ret = iwl_send_cmd(priv, &cmd);
@@ -156,7 +163,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
156 163
157 if (ret == 0) { 164 if (ret == 0) {
158 pkt = (struct iwl_rx_packet *)cmd.reply_page; 165 pkt = (struct iwl_rx_packet *)cmd.reply_page;
159 iwl_process_add_sta_resp(priv, sta, pkt, true); 166 ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
160 } 167 }
161 iwl_free_pages(priv, cmd.reply_page); 168 iwl_free_pages(priv, cmd.reply_page);
162 169
@@ -311,10 +318,10 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
311 struct ieee80211_sta_ht_cap *ht_info, 318 struct ieee80211_sta_ht_cap *ht_info,
312 u8 *sta_id_r) 319 u8 *sta_id_r)
313{ 320{
314 struct iwl_station_entry *station;
315 unsigned long flags_spin; 321 unsigned long flags_spin;
316 int ret = 0; 322 int ret = 0;
317 u8 sta_id; 323 u8 sta_id;
324 struct iwl_addsta_cmd sta_cmd;
318 325
319 *sta_id_r = 0; 326 *sta_id_r = 0;
320 spin_lock_irqsave(&priv->sta_lock, flags_spin); 327 spin_lock_irqsave(&priv->sta_lock, flags_spin);
@@ -347,14 +354,15 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
347 } 354 }
348 355
349 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; 356 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
350 station = &priv->stations[sta_id]; 357 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
351 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 358 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
352 359
353 /* Add station to device's station table */ 360 /* Add station to device's station table */
354 ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC); 361 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
355 if (ret) { 362 if (ret) {
356 IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
357 spin_lock_irqsave(&priv->sta_lock, flags_spin); 363 spin_lock_irqsave(&priv->sta_lock, flags_spin);
364 IWL_ERR(priv, "Adding station %pM failed.\n",
365 priv->stations[sta_id].sta.sta.addr);
358 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 366 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
359 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 367 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
360 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 368 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -488,7 +496,7 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
488} 496}
489 497
490static int iwl_send_remove_station(struct iwl_priv *priv, 498static int iwl_send_remove_station(struct iwl_priv *priv,
491 struct iwl_station_entry *station) 499 const u8 *addr, int sta_id)
492{ 500{
493 struct iwl_rx_packet *pkt; 501 struct iwl_rx_packet *pkt;
494 int ret; 502 int ret;
@@ -505,7 +513,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
505 513
506 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 514 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
507 rm_sta_cmd.num_sta = 1; 515 rm_sta_cmd.num_sta = 1;
508 memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN); 516 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
509 517
510 cmd.flags |= CMD_WANT_SKB; 518 cmd.flags |= CMD_WANT_SKB;
511 519
@@ -525,7 +533,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
525 switch (pkt->u.rem_sta.status) { 533 switch (pkt->u.rem_sta.status) {
526 case REM_STA_SUCCESS_MSK: 534 case REM_STA_SUCCESS_MSK:
527 spin_lock_irqsave(&priv->sta_lock, flags_spin); 535 spin_lock_irqsave(&priv->sta_lock, flags_spin);
528 iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id); 536 iwl_sta_ucode_deactivate(priv, sta_id);
529 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 537 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
530 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 538 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
531 break; 539 break;
@@ -546,7 +554,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
546int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, 554int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
547 const u8 *addr) 555 const u8 *addr)
548{ 556{
549 struct iwl_station_entry *station;
550 unsigned long flags; 557 unsigned long flags;
551 558
552 if (!iwl_is_ready(priv)) { 559 if (!iwl_is_ready(priv)) {
@@ -592,10 +599,9 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
592 599
593 BUG_ON(priv->num_stations < 0); 600 BUG_ON(priv->num_stations < 0);
594 601
595 station = &priv->stations[sta_id];
596 spin_unlock_irqrestore(&priv->sta_lock, flags); 602 spin_unlock_irqrestore(&priv->sta_lock, flags);
597 603
598 return iwl_send_remove_station(priv, station); 604 return iwl_send_remove_station(priv, addr, sta_id);
599out_err: 605out_err:
600 spin_unlock_irqrestore(&priv->sta_lock, flags); 606 spin_unlock_irqrestore(&priv->sta_lock, flags);
601 return -EINVAL; 607 return -EINVAL;
@@ -643,11 +649,13 @@ EXPORT_SYMBOL(iwl_clear_ucode_stations);
643 */ 649 */
644void iwl_restore_stations(struct iwl_priv *priv) 650void iwl_restore_stations(struct iwl_priv *priv)
645{ 651{
646 struct iwl_station_entry *station; 652 struct iwl_addsta_cmd sta_cmd;
653 struct iwl_link_quality_cmd lq;
647 unsigned long flags_spin; 654 unsigned long flags_spin;
648 int i; 655 int i;
649 bool found = false; 656 bool found = false;
650 int ret; 657 int ret;
658 bool send_lq;
651 659
652 if (!iwl_is_ready(priv)) { 660 if (!iwl_is_ready(priv)) {
653 IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n"); 661 IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
@@ -669,13 +677,20 @@ void iwl_restore_stations(struct iwl_priv *priv)
669 677
670 for (i = 0; i < priv->hw_params.max_stations; i++) { 678 for (i = 0; i < priv->hw_params.max_stations; i++) {
671 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { 679 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
680 memcpy(&sta_cmd, &priv->stations[i].sta,
681 sizeof(struct iwl_addsta_cmd));
682 send_lq = false;
683 if (priv->stations[i].lq) {
684 memcpy(&lq, priv->stations[i].lq,
685 sizeof(struct iwl_link_quality_cmd));
686 send_lq = true;
687 }
672 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 688 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
673 station = &priv->stations[i]; 689 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
674 ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
675 if (ret) { 690 if (ret) {
676 IWL_ERR(priv, "Adding station %pM failed.\n",
677 station->sta.sta.addr);
678 spin_lock_irqsave(&priv->sta_lock, flags_spin); 691 spin_lock_irqsave(&priv->sta_lock, flags_spin);
692 IWL_ERR(priv, "Adding station %pM failed.\n",
693 priv->stations[i].sta.sta.addr);
679 priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE; 694 priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
680 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 695 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
681 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 696 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -684,8 +699,8 @@ void iwl_restore_stations(struct iwl_priv *priv)
684 * Rate scaling has already been initialized, send 699 * Rate scaling has already been initialized, send
685 * current LQ command 700 * current LQ command
686 */ 701 */
687 if (station->lq) 702 if (send_lq)
688 iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true); 703 iwl_send_lq_cmd(priv, &lq, CMD_SYNC, true);
689 spin_lock_irqsave(&priv->sta_lock, flags_spin); 704 spin_lock_irqsave(&priv->sta_lock, flags_spin);
690 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 705 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
691 } 706 }
@@ -823,7 +838,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
823{ 838{
824 unsigned long flags; 839 unsigned long flags;
825 __le16 key_flags = 0; 840 __le16 key_flags = 0;
826 int ret; 841 struct iwl_addsta_cmd sta_cmd;
842
843 lockdep_assert_held(&priv->mutex);
827 844
828 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 845 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
829 846
@@ -863,11 +880,10 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
863 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 880 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
864 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 881 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
865 882
866 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 883 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
867
868 spin_unlock_irqrestore(&priv->sta_lock, flags); 884 spin_unlock_irqrestore(&priv->sta_lock, flags);
869 885
870 return ret; 886 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
871} 887}
872 888
873static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv, 889static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
@@ -876,7 +892,9 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
876{ 892{
877 unsigned long flags; 893 unsigned long flags;
878 __le16 key_flags = 0; 894 __le16 key_flags = 0;
879 int ret; 895 struct iwl_addsta_cmd sta_cmd;
896
897 lockdep_assert_held(&priv->mutex);
880 898
881 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); 899 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
882 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 900 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -911,11 +929,10 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
911 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 929 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
912 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 930 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
913 931
914 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 932 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
915
916 spin_unlock_irqrestore(&priv->sta_lock, flags); 933 spin_unlock_irqrestore(&priv->sta_lock, flags);
917 934
918 return ret; 935 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
919} 936}
920 937
921static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 938static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -972,24 +989,16 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
972 unsigned long flags; 989 unsigned long flags;
973 int i; 990 int i;
974 991
975 if (sta) {
976 sta_id = iwl_sta_id(sta);
977
978 if (sta_id == IWL_INVALID_STATION) {
979 IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n",
980 sta->addr);
981 return;
982 }
983 } else
984 sta_id = priv->hw_params.bcast_sta_id;
985
986
987 if (iwl_scan_cancel(priv)) { 992 if (iwl_scan_cancel(priv)) {
988 /* cancel scan failed, just live w/ bad key and rely 993 /* cancel scan failed, just live w/ bad key and rely
989 briefly on SW decryption */ 994 briefly on SW decryption */
990 return; 995 return;
991 } 996 }
992 997
998 sta_id = iwl_sta_id_or_broadcast(priv, sta);
999 if (sta_id == IWL_INVALID_STATION)
1000 return;
1001
993 spin_lock_irqsave(&priv->sta_lock, flags); 1002 spin_lock_irqsave(&priv->sta_lock, flags);
994 1003
995 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; 1004 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
@@ -1013,9 +1022,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1013 u8 sta_id) 1022 u8 sta_id)
1014{ 1023{
1015 unsigned long flags; 1024 unsigned long flags;
1016 int ret = 0;
1017 u16 key_flags; 1025 u16 key_flags;
1018 u8 keyidx; 1026 u8 keyidx;
1027 struct iwl_addsta_cmd sta_cmd;
1028
1029 lockdep_assert_held(&priv->mutex);
1019 1030
1020 priv->key_mapping_key--; 1031 priv->key_mapping_key--;
1021 1032
@@ -1062,9 +1073,10 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1062 spin_unlock_irqrestore(&priv->sta_lock, flags); 1073 spin_unlock_irqrestore(&priv->sta_lock, flags);
1063 return 0; 1074 return 0;
1064 } 1075 }
1065 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 1076 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1066 spin_unlock_irqrestore(&priv->sta_lock, flags); 1077 spin_unlock_irqrestore(&priv->sta_lock, flags);
1067 return ret; 1078
1079 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1068} 1080}
1069EXPORT_SYMBOL(iwl_remove_dynamic_key); 1081EXPORT_SYMBOL(iwl_remove_dynamic_key);
1070 1082
@@ -1073,6 +1085,8 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1073{ 1085{
1074 int ret; 1086 int ret;
1075 1087
1088 lockdep_assert_held(&priv->mutex);
1089
1076 priv->key_mapping_key++; 1090 priv->key_mapping_key++;
1077 keyconf->hw_key_idx = HW_KEY_DYNAMIC; 1091 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
1078 1092
@@ -1245,6 +1259,36 @@ int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
1245} 1259}
1246EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station); 1260EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
1247 1261
1262/**
1263 * iwl_update_bcast_station - update broadcast station's LQ command
1264 *
1265 * Only used by iwlagn. Placed here to have all bcast station management
1266 * code together.
1267 */
1268int iwl_update_bcast_station(struct iwl_priv *priv)
1269{
1270 unsigned long flags;
1271 struct iwl_link_quality_cmd *link_cmd;
1272 u8 sta_id = priv->hw_params.bcast_sta_id;
1273
1274 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
1275 if (!link_cmd) {
1276 IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
1277 return -ENOMEM;
1278 }
1279
1280 spin_lock_irqsave(&priv->sta_lock, flags);
1281 if (priv->stations[sta_id].lq)
1282 kfree(priv->stations[sta_id].lq);
1283 else
1284 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
1285 priv->stations[sta_id].lq = link_cmd;
1286 spin_unlock_irqrestore(&priv->sta_lock, flags);
1287
1288 return 0;
1289}
1290EXPORT_SYMBOL_GPL(iwl_update_bcast_station);
1291
1248void iwl_dealloc_bcast_station(struct iwl_priv *priv) 1292void iwl_dealloc_bcast_station(struct iwl_priv *priv)
1249{ 1293{
1250 unsigned long flags; 1294 unsigned long flags;
@@ -1268,18 +1312,22 @@ EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
1268/** 1312/**
1269 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table 1313 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
1270 */ 1314 */
1271void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) 1315int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1272{ 1316{
1273 unsigned long flags; 1317 unsigned long flags;
1318 struct iwl_addsta_cmd sta_cmd;
1319
1320 lockdep_assert_held(&priv->mutex);
1274 1321
1275 /* Remove "disable" flag, to enable Tx for this TID */ 1322 /* Remove "disable" flag, to enable Tx for this TID */
1276 spin_lock_irqsave(&priv->sta_lock, flags); 1323 spin_lock_irqsave(&priv->sta_lock, flags);
1277 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; 1324 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1278 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); 1325 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
1279 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1326 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1327 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1280 spin_unlock_irqrestore(&priv->sta_lock, flags); 1328 spin_unlock_irqrestore(&priv->sta_lock, flags);
1281 1329
1282 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 1330 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1283} 1331}
1284EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid); 1332EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
1285 1333
@@ -1288,6 +1336,9 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1288{ 1336{
1289 unsigned long flags; 1337 unsigned long flags;
1290 int sta_id; 1338 int sta_id;
1339 struct iwl_addsta_cmd sta_cmd;
1340
1341 lockdep_assert_held(&priv->mutex);
1291 1342
1292 sta_id = iwl_sta_id(sta); 1343 sta_id = iwl_sta_id(sta);
1293 if (sta_id == IWL_INVALID_STATION) 1344 if (sta_id == IWL_INVALID_STATION)
@@ -1299,10 +1350,10 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1299 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; 1350 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
1300 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); 1351 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
1301 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1352 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1353 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1302 spin_unlock_irqrestore(&priv->sta_lock, flags); 1354 spin_unlock_irqrestore(&priv->sta_lock, flags);
1303 1355
1304 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 1356 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1305 CMD_ASYNC);
1306} 1357}
1307EXPORT_SYMBOL(iwl_sta_rx_agg_start); 1358EXPORT_SYMBOL(iwl_sta_rx_agg_start);
1308 1359
@@ -1311,6 +1362,9 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1311{ 1362{
1312 unsigned long flags; 1363 unsigned long flags;
1313 int sta_id; 1364 int sta_id;
1365 struct iwl_addsta_cmd sta_cmd;
1366
1367 lockdep_assert_held(&priv->mutex);
1314 1368
1315 sta_id = iwl_sta_id(sta); 1369 sta_id = iwl_sta_id(sta);
1316 if (sta_id == IWL_INVALID_STATION) { 1370 if (sta_id == IWL_INVALID_STATION) {
@@ -1323,10 +1377,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1323 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; 1377 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
1324 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; 1378 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
1325 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1379 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1380 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1326 spin_unlock_irqrestore(&priv->sta_lock, flags); 1381 spin_unlock_irqrestore(&priv->sta_lock, flags);
1327 1382
1328 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 1383 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1329 CMD_ASYNC);
1330} 1384}
1331EXPORT_SYMBOL(iwl_sta_rx_agg_stop); 1385EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
1332 1386
@@ -1340,9 +1394,9 @@ void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1340 priv->stations[sta_id].sta.sta.modify_mask = 0; 1394 priv->stations[sta_id].sta.sta.modify_mask = 0;
1341 priv->stations[sta_id].sta.sleep_tx_count = 0; 1395 priv->stations[sta_id].sta.sleep_tx_count = 0;
1342 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1396 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1397 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1343 spin_unlock_irqrestore(&priv->sta_lock, flags); 1398 spin_unlock_irqrestore(&priv->sta_lock, flags);
1344 1399
1345 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1346} 1400}
1347EXPORT_SYMBOL(iwl_sta_modify_ps_wake); 1401EXPORT_SYMBOL(iwl_sta_modify_ps_wake);
1348 1402
@@ -1357,9 +1411,9 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
1357 STA_MODIFY_SLEEP_TX_COUNT_MSK; 1411 STA_MODIFY_SLEEP_TX_COUNT_MSK;
1358 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); 1412 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
1359 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1413 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1414 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1360 spin_unlock_irqrestore(&priv->sta_lock, flags); 1415 spin_unlock_irqrestore(&priv->sta_lock, flags);
1361 1416
1362 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1363} 1417}
1364EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count); 1418EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
1365 1419
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index dc43ebd1f1fd..d38a350ba0bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -60,6 +60,7 @@ void iwl_restore_stations(struct iwl_priv *priv);
60void iwl_clear_ucode_stations(struct iwl_priv *priv); 60void iwl_clear_ucode_stations(struct iwl_priv *priv);
61int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq); 61int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
62void iwl_dealloc_bcast_station(struct iwl_priv *priv); 62void iwl_dealloc_bcast_station(struct iwl_priv *priv);
63int iwl_update_bcast_station(struct iwl_priv *priv);
63int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 64int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
64int iwl_send_add_sta(struct iwl_priv *priv, 65int iwl_send_add_sta(struct iwl_priv *priv,
65 struct iwl_addsta_cmd *sta, u8 flags); 66 struct iwl_addsta_cmd *sta, u8 flags);
@@ -73,7 +74,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
73 const u8 *addr); 74 const u8 *addr);
74int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 75int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
75 struct ieee80211_sta *sta); 76 struct ieee80211_sta *sta);
76void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); 77int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
77int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, 78int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
78 int tid, u16 ssn); 79 int tid, u16 ssn);
79int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, 80int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -118,4 +119,33 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
118 119
119 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id; 120 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
120} 121}
122
123/**
124 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
125 * @priv: iwl priv
126 * @sta: mac80211 station
127 *
128 * In certain circumstances mac80211 passes a station pointer
129 * that may be %NULL, for example during TX or key setup. In
130 * that case, we need to use the broadcast station, so this
131 * inline wraps that pattern.
132 */
133static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
134 struct ieee80211_sta *sta)
135{
136 int sta_id;
137
138 if (!sta)
139 return priv->hw_params.bcast_sta_id;
140
141 sta_id = iwl_sta_id(sta);
142
143 /*
144 * mac80211 should not be passing a partially
145 * initialised station!
146 */
147 WARN_ON(sta_id == IWL_INVALID_STATION);
148
149 return sta_id;
150}
121#endif /* __iwl_sta_h__ */ 151#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 1ece2ea09773..a81989c06983 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -77,21 +77,6 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
77} 77}
78EXPORT_SYMBOL(iwl_txq_update_write_ptr); 78EXPORT_SYMBOL(iwl_txq_update_write_ptr);
79 79
80
81void iwl_free_tfds_in_queue(struct iwl_priv *priv,
82 int sta_id, int tid, int freed)
83{
84 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
85 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
86 else {
87 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
88 priv->stations[sta_id].tid[tid].tfds_in_queue,
89 freed);
90 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
91 }
92}
93EXPORT_SYMBOL(iwl_free_tfds_in_queue);
94
95/** 80/**
96 * iwl_tx_queue_free - Deallocate DMA queue. 81 * iwl_tx_queue_free - Deallocate DMA queue.
97 * @txq: Transmit queue to deallocate. 82 * @txq: Transmit queue to deallocate.
@@ -169,15 +154,15 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
169 } 154 }
170 155
171 pci_unmap_single(priv->pci_dev, 156 pci_unmap_single(priv->pci_dev,
172 pci_unmap_addr(&txq->meta[i], mapping), 157 dma_unmap_addr(&txq->meta[i], mapping),
173 pci_unmap_len(&txq->meta[i], len), 158 dma_unmap_len(&txq->meta[i], len),
174 PCI_DMA_BIDIRECTIONAL); 159 PCI_DMA_BIDIRECTIONAL);
175 } 160 }
176 if (huge) { 161 if (huge) {
177 i = q->n_window; 162 i = q->n_window;
178 pci_unmap_single(priv->pci_dev, 163 pci_unmap_single(priv->pci_dev,
179 pci_unmap_addr(&txq->meta[i], mapping), 164 dma_unmap_addr(&txq->meta[i], mapping),
180 pci_unmap_len(&txq->meta[i], len), 165 dma_unmap_len(&txq->meta[i], len),
181 PCI_DMA_BIDIRECTIONAL); 166 PCI_DMA_BIDIRECTIONAL);
182 } 167 }
183 168
@@ -287,7 +272,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
287 /* Driver private data, only for Tx (not command) queues, 272 /* Driver private data, only for Tx (not command) queues,
288 * not shared with device. */ 273 * not shared with device. */
289 if (id != IWL_CMD_QUEUE_NUM) { 274 if (id != IWL_CMD_QUEUE_NUM) {
290 txq->txb = kmalloc(sizeof(txq->txb[0]) * 275 txq->txb = kzalloc(sizeof(txq->txb[0]) *
291 TFD_QUEUE_SIZE_MAX, GFP_KERNEL); 276 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
292 if (!txq->txb) { 277 if (!txq->txb) {
293 IWL_ERR(priv, "kmalloc for auxiliary BD " 278 IWL_ERR(priv, "kmalloc for auxiliary BD "
@@ -531,8 +516,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
531 516
532 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, 517 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
533 fix_size, PCI_DMA_BIDIRECTIONAL); 518 fix_size, PCI_DMA_BIDIRECTIONAL);
534 pci_unmap_addr_set(out_meta, mapping, phys_addr); 519 dma_unmap_addr_set(out_meta, mapping, phys_addr);
535 pci_unmap_len_set(out_meta, len, fix_size); 520 dma_unmap_len_set(out_meta, len, fix_size);
536 521
537 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags); 522 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
538 523
@@ -626,8 +611,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
626 meta = &txq->meta[cmd_index]; 611 meta = &txq->meta[cmd_index];
627 612
628 pci_unmap_single(priv->pci_dev, 613 pci_unmap_single(priv->pci_dev,
629 pci_unmap_addr(meta, mapping), 614 dma_unmap_addr(meta, mapping),
630 pci_unmap_len(meta, len), 615 dma_unmap_len(meta, len),
631 PCI_DMA_BIDIRECTIONAL); 616 PCI_DMA_BIDIRECTIONAL);
632 617
633 /* Input error checking is done when commands are added to queue. */ 618 /* Input error checking is done when commands are added to queue. */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index a27872de4106..d24eb47d3705 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -27,6 +27,8 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#include <linux/kernel.h> 32#include <linux/kernel.h>
31#include <linux/module.h> 33#include <linux/module.h>
32#include <linux/init.h> 34#include <linux/init.h>
@@ -197,6 +199,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
197static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) 199static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
198{ 200{
199 unsigned long flags; 201 unsigned long flags;
202 struct iwl_addsta_cmd sta_cmd;
200 203
201 spin_lock_irqsave(&priv->sta_lock, flags); 204 spin_lock_irqsave(&priv->sta_lock, flags);
202 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); 205 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -205,11 +208,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
205 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 208 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
206 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 209 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
207 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 210 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
211 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
208 spin_unlock_irqrestore(&priv->sta_lock, flags); 212 spin_unlock_irqrestore(&priv->sta_lock, flags);
209 213
210 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); 214 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
211 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0); 215 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
212 return 0;
213} 216}
214 217
215static int iwl3945_set_dynamic_key(struct iwl_priv *priv, 218static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -310,9 +313,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
310 int left) 313 int left)
311{ 314{
312 315
313 if (!iwl_is_associated(priv) || !priv->ibss_beacon || 316 if (!iwl_is_associated(priv) || !priv->ibss_beacon)
314 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
315 (priv->iw_mode != NL80211_IFTYPE_AP)))
316 return 0; 317 return 0;
317 318
318 if (priv->ibss_beacon->len > left) 319 if (priv->ibss_beacon->len > left)
@@ -474,10 +475,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
474 u8 unicast; 475 u8 unicast;
475 u8 sta_id; 476 u8 sta_id;
476 u8 tid = 0; 477 u8 tid = 0;
477 u16 seq_number = 0;
478 __le16 fc; 478 __le16 fc;
479 u8 wait_write_ptr = 0; 479 u8 wait_write_ptr = 0;
480 u8 *qc = NULL;
481 unsigned long flags; 480 unsigned long flags;
482 481
483 spin_lock_irqsave(&priv->lock, flags); 482 spin_lock_irqsave(&priv->lock, flags);
@@ -510,10 +509,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
510 hdr_len = ieee80211_hdrlen(fc); 509 hdr_len = ieee80211_hdrlen(fc);
511 510
512 /* Find index into station table for destination station */ 511 /* Find index into station table for destination station */
513 if (!info->control.sta) 512 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
514 sta_id = priv->hw_params.bcast_sta_id;
515 else
516 sta_id = iwl_sta_id(info->control.sta);
517 if (sta_id == IWL_INVALID_STATION) { 513 if (sta_id == IWL_INVALID_STATION) {
518 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 514 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
519 hdr->addr1); 515 hdr->addr1);
@@ -523,16 +519,10 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
523 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); 519 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
524 520
525 if (ieee80211_is_data_qos(fc)) { 521 if (ieee80211_is_data_qos(fc)) {
526 qc = ieee80211_get_qos_ctl(hdr); 522 u8 *qc = ieee80211_get_qos_ctl(hdr);
527 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 523 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
528 if (unlikely(tid >= MAX_TID_COUNT)) 524 if (unlikely(tid >= MAX_TID_COUNT))
529 goto drop; 525 goto drop;
530 seq_number = priv->stations[sta_id].tid[tid].seq_number &
531 IEEE80211_SCTL_SEQ;
532 hdr->seq_ctrl = cpu_to_le16(seq_number) |
533 (hdr->seq_ctrl &
534 cpu_to_le16(IEEE80211_SCTL_FRAG));
535 seq_number += 0x10;
536 } 526 }
537 527
538 /* Descriptor for chosen Tx queue */ 528 /* Descriptor for chosen Tx queue */
@@ -548,7 +538,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
548 538
549 /* Set up driver data for this TFD */ 539 /* Set up driver data for this TFD */
550 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 540 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
551 txq->txb[q->write_ptr].skb[0] = skb; 541 txq->txb[q->write_ptr].skb = skb;
552 542
553 /* Init first empty entry in queue's array of Tx/cmd buffers */ 543 /* Init first empty entry in queue's array of Tx/cmd buffers */
554 out_cmd = txq->cmd[idx]; 544 out_cmd = txq->cmd[idx];
@@ -591,8 +581,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
591 581
592 if (!ieee80211_has_morefrags(hdr->frame_control)) { 582 if (!ieee80211_has_morefrags(hdr->frame_control)) {
593 txq->need_update = 1; 583 txq->need_update = 1;
594 if (qc)
595 priv->stations[sta_id].tid[tid].seq_number = seq_number;
596 } else { 584 } else {
597 wait_write_ptr = 1; 585 wait_write_ptr = 1;
598 txq->need_update = 0; 586 txq->need_update = 0;
@@ -631,8 +619,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
631 len, PCI_DMA_TODEVICE); 619 len, PCI_DMA_TODEVICE);
632 /* we do not map meta data ... so we can safely access address to 620 /* we do not map meta data ... so we can safely access address to
633 * provide to unmap command*/ 621 * provide to unmap command*/
634 pci_unmap_addr_set(out_meta, mapping, txcmd_phys); 622 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
635 pci_unmap_len_set(out_meta, len, len); 623 dma_unmap_len_set(out_meta, len, len);
636 624
637 /* Add buffer containing Tx command and MAC(!) header to TFD's 625 /* Add buffer containing Tx command and MAC(!) header to TFD's
638 * first entry */ 626 * first entry */
@@ -677,55 +665,6 @@ drop:
677 return -1; 665 return -1;
678} 666}
679 667
680#define BEACON_TIME_MASK_LOW 0x00FFFFFF
681#define BEACON_TIME_MASK_HIGH 0xFF000000
682#define TIME_UNIT 1024
683
684/*
685 * extended beacon time format
686 * time in usec will be changed into a 32-bit value in 8:24 format
687 * the high 1 byte is the beacon counts
688 * the lower 3 bytes is the time in usec within one beacon interval
689 */
690
691static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
692{
693 u32 quot;
694 u32 rem;
695 u32 interval = beacon_interval * 1024;
696
697 if (!interval || !usec)
698 return 0;
699
700 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
701 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
702
703 return (quot << 24) + rem;
704}
705
706/* base is usually what we get from ucode with each received frame,
707 * the same as HW timer counter counting down
708 */
709
710static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
711{
712 u32 base_low = base & BEACON_TIME_MASK_LOW;
713 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
714 u32 interval = beacon_interval * TIME_UNIT;
715 u32 res = (base & BEACON_TIME_MASK_HIGH) +
716 (addon & BEACON_TIME_MASK_HIGH);
717
718 if (base_low > addon_low)
719 res += base_low - addon_low;
720 else if (base_low < addon_low) {
721 res += interval + base_low - addon_low;
722 res += (1 << 24);
723 } else
724 res += (1 << 24);
725
726 return cpu_to_le32(res);
727}
728
729static int iwl3945_get_measurement(struct iwl_priv *priv, 668static int iwl3945_get_measurement(struct iwl_priv *priv,
730 struct ieee80211_measurement_params *params, 669 struct ieee80211_measurement_params *params,
731 u8 type) 670 u8 type)
@@ -743,8 +682,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
743 int duration = le16_to_cpu(params->duration); 682 int duration = le16_to_cpu(params->duration);
744 683
745 if (iwl_is_associated(priv)) 684 if (iwl_is_associated(priv))
746 add_time = 685 add_time = iwl_usecs_to_beacons(priv,
747 iwl3945_usecs_to_beacons(
748 le64_to_cpu(params->start_time) - priv->_3945.last_tsf, 686 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
749 le16_to_cpu(priv->rxon_timing.beacon_interval)); 687 le16_to_cpu(priv->rxon_timing.beacon_interval));
750 688
@@ -759,8 +697,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
759 697
760 if (iwl_is_associated(priv)) 698 if (iwl_is_associated(priv))
761 spectrum.start_time = 699 spectrum.start_time =
762 iwl3945_add_beacon_time(priv->_3945.last_beacon_time, 700 iwl_add_beacon_time(priv,
763 add_time, 701 priv->_3945.last_beacon_time, add_time,
764 le16_to_cpu(priv->rxon_timing.beacon_interval)); 702 le16_to_cpu(priv->rxon_timing.beacon_interval));
765 else 703 else
766 spectrum.start_time = 0; 704 spectrum.start_time = 0;
@@ -1233,7 +1171,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1233 } 1171 }
1234 1172
1235 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 1173 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1236 rxq->dma_addr); 1174 rxq->bd_dma);
1237 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), 1175 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1238 rxq->rb_stts, rxq->rb_stts_dma); 1176 rxq->rb_stts, rxq->rb_stts_dma);
1239 rxq->bd = NULL; 1177 rxq->bd = NULL;
@@ -1314,6 +1252,8 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1314 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); 1252 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1315 1253
1316 while (i != r) { 1254 while (i != r) {
1255 int len;
1256
1317 rxb = rxq->queue[i]; 1257 rxb = rxq->queue[i];
1318 1258
1319 /* If an RXB doesn't have a Rx queue slot associated with it, 1259 /* If an RXB doesn't have a Rx queue slot associated with it,
@@ -1328,8 +1268,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1328 PCI_DMA_FROMDEVICE); 1268 PCI_DMA_FROMDEVICE);
1329 pkt = rxb_addr(rxb); 1269 pkt = rxb_addr(rxb);
1330 1270
1331 trace_iwlwifi_dev_rx(priv, pkt, 1271 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1332 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 1272 len += sizeof(u32); /* account for status word */
1273 trace_iwlwifi_dev_rx(priv, pkt, len);
1333 1274
1334 /* Reclaim a command buffer only if this packet is a response 1275 /* Reclaim a command buffer only if this packet is a response
1335 * to a (driver-originated) command. 1276 * to a (driver-originated) command.
@@ -1483,7 +1424,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1483 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); 1424 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1484 1425
1485 IWL_ERR(priv, 1426 IWL_ERR(priv,
1486 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1427 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1487 desc_lookup(desc), desc, time, blink1, blink2, 1428 desc_lookup(desc), desc, time, blink1, blink2,
1488 ilink1, ilink2, data1); 1429 ilink1, ilink2, data1);
1489 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, 1430 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
@@ -2942,7 +2883,10 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2942 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 2883 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2943 2884
2944 spin_lock_irqsave(&priv->lock, flags); 2885 spin_lock_irqsave(&priv->lock, flags);
2945 interval = vif ? vif->bss_conf.beacon_int : 0; 2886 if (priv->is_internal_short_scan)
2887 interval = 0;
2888 else
2889 interval = vif->bss_conf.beacon_int;
2946 spin_unlock_irqrestore(&priv->lock, flags); 2890 spin_unlock_irqrestore(&priv->lock, flags);
2947 2891
2948 scan->suspend_time = 0; 2892 scan->suspend_time = 0;
@@ -3022,14 +2966,16 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
3022 scan->tx_cmd.len = cpu_to_le16( 2966 scan->tx_cmd.len = cpu_to_le16(
3023 iwl_fill_probe_req(priv, 2967 iwl_fill_probe_req(priv,
3024 (struct ieee80211_mgmt *)scan->data, 2968 (struct ieee80211_mgmt *)scan->data,
2969 vif->addr,
3025 priv->scan_request->ie, 2970 priv->scan_request->ie,
3026 priv->scan_request->ie_len, 2971 priv->scan_request->ie_len,
3027 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2972 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
3028 } else { 2973 } else {
2974 /* use bcast addr, will not be transmitted but must be valid */
3029 scan->tx_cmd.len = cpu_to_le16( 2975 scan->tx_cmd.len = cpu_to_le16(
3030 iwl_fill_probe_req(priv, 2976 iwl_fill_probe_req(priv,
3031 (struct ieee80211_mgmt *)scan->data, 2977 (struct ieee80211_mgmt *)scan->data,
3032 NULL, 0, 2978 iwl_bcast_addr, NULL, 0,
3033 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2979 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
3034 } 2980 }
3035 /* select Rx antennas */ 2981 /* select Rx antennas */
@@ -3158,19 +3104,16 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3158 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3104 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3159 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3105 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3160 3106
3161 if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 3107 if (vif->bss_conf.use_short_preamble)
3162 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3108 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3163 else 3109 else
3164 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3110 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3165 3111
3166 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3112 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
3167 if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) 3113 if (vif->bss_conf.use_short_slot)
3168 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3114 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
3169 else 3115 else
3170 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3116 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3171
3172 if (vif->type == NL80211_IFTYPE_ADHOC)
3173 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3174 } 3117 }
3175 3118
3176 iwlcore_commit_rxon(priv); 3119 iwlcore_commit_rxon(priv);
@@ -3334,8 +3277,7 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3334 3277
3335 priv->staging_rxon.assoc_id = 0; 3278 priv->staging_rxon.assoc_id = 0;
3336 3279
3337 if (vif->bss_conf.assoc_capability & 3280 if (vif->bss_conf.use_short_preamble)
3338 WLAN_CAPABILITY_SHORT_PREAMBLE)
3339 priv->staging_rxon.flags |= 3281 priv->staging_rxon.flags |=
3340 RXON_FLG_SHORT_PREAMBLE_MSK; 3282 RXON_FLG_SHORT_PREAMBLE_MSK;
3341 else 3283 else
@@ -3343,17 +3285,12 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3343 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3285 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3344 3286
3345 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3287 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
3346 if (vif->bss_conf.assoc_capability & 3288 if (vif->bss_conf.use_short_slot)
3347 WLAN_CAPABILITY_SHORT_SLOT_TIME)
3348 priv->staging_rxon.flags |= 3289 priv->staging_rxon.flags |=
3349 RXON_FLG_SHORT_SLOT_MSK; 3290 RXON_FLG_SHORT_SLOT_MSK;
3350 else 3291 else
3351 priv->staging_rxon.flags &= 3292 priv->staging_rxon.flags &=
3352 ~RXON_FLG_SHORT_SLOT_MSK; 3293 ~RXON_FLG_SHORT_SLOT_MSK;
3353
3354 if (vif->type == NL80211_IFTYPE_ADHOC)
3355 priv->staging_rxon.flags &=
3356 ~RXON_FLG_SHORT_SLOT_MSK;
3357 } 3294 }
3358 /* restore RXON assoc */ 3295 /* restore RXON assoc */
3359 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3296 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
@@ -3386,17 +3323,9 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3386 static_key = !iwl_is_associated(priv); 3323 static_key = !iwl_is_associated(priv);
3387 3324
3388 if (!static_key) { 3325 if (!static_key) {
3389 if (!sta) { 3326 sta_id = iwl_sta_id_or_broadcast(priv, sta);
3390 sta_id = priv->hw_params.bcast_sta_id; 3327 if (sta_id == IWL_INVALID_STATION)
3391 } else { 3328 return -EINVAL;
3392 sta_id = iwl_sta_id(sta);
3393 if (sta_id == IWL_INVALID_STATION) {
3394 IWL_DEBUG_MAC80211(priv,
3395 "leave - %pM not in station map.\n",
3396 sta->addr);
3397 return -EINVAL;
3398 }
3399 }
3400 } 3329 }
3401 3330
3402 mutex_lock(&priv->mutex); 3331 mutex_lock(&priv->mutex);
@@ -4006,7 +3935,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4006 * space for this driver's private structure */ 3935 * space for this driver's private structure */
4007 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops); 3936 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
4008 if (hw == NULL) { 3937 if (hw == NULL) {
4009 printk(KERN_ERR DRV_NAME "Can not allocate network device\n"); 3938 pr_err("Can not allocate network device\n");
4010 err = -ENOMEM; 3939 err = -ENOMEM;
4011 goto out; 3940 goto out;
4012 } 3941 }
@@ -4028,9 +3957,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4028 priv->pci_dev = pdev; 3957 priv->pci_dev = pdev;
4029 priv->inta_mask = CSR_INI_SET_MASK; 3958 priv->inta_mask = CSR_INI_SET_MASK;
4030 3959
4031#ifdef CONFIG_IWLWIFI_DEBUG
4032 atomic_set(&priv->restrict_refcnt, 0);
4033#endif
4034 if (iwl_alloc_traffic_mem(priv)) 3960 if (iwl_alloc_traffic_mem(priv))
4035 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 3961 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
4036 3962
@@ -4099,9 +4025,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4099 } 4025 }
4100 /* MAC Address location in EEPROM same for 3945/4965 */ 4026 /* MAC Address location in EEPROM same for 3945/4965 */
4101 eeprom = (struct iwl3945_eeprom *)priv->eeprom; 4027 eeprom = (struct iwl3945_eeprom *)priv->eeprom;
4102 memcpy(priv->mac_addr, eeprom->mac_address, ETH_ALEN); 4028 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
4103 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); 4029 SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
4104 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
4105 4030
4106 /*********************** 4031 /***********************
4107 * 5. Setup HW Constants 4032 * 5. Setup HW Constants
@@ -4302,19 +4227,18 @@ static int __init iwl3945_init(void)
4302{ 4227{
4303 4228
4304 int ret; 4229 int ret;
4305 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); 4230 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
4306 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); 4231 pr_info(DRV_COPYRIGHT "\n");
4307 4232
4308 ret = iwl3945_rate_control_register(); 4233 ret = iwl3945_rate_control_register();
4309 if (ret) { 4234 if (ret) {
4310 printk(KERN_ERR DRV_NAME 4235 pr_err("Unable to register rate control algorithm: %d\n", ret);
4311 "Unable to register rate control algorithm: %d\n", ret);
4312 return ret; 4236 return ret;
4313 } 4237 }
4314 4238
4315 ret = pci_register_driver(&iwl3945_driver); 4239 ret = pci_register_driver(&iwl3945_driver);
4316 if (ret) { 4240 if (ret) {
4317 printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n"); 4241 pr_err("Unable to initialize PCI module\n");
4318 goto error_register; 4242 goto error_register;
4319 } 4243 }
4320 4244
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 902e95f70f6e..60619678f4ec 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -670,20 +670,24 @@ static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
670} 670}
671 671
672static int iwm_cfg80211_set_txpower(struct wiphy *wiphy, 672static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
673 enum tx_power_setting type, int dbm) 673 enum nl80211_tx_power_setting type, int mbm)
674{ 674{
675 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 675 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
676 int ret; 676 int ret;
677 677
678 switch (type) { 678 switch (type) {
679 case TX_POWER_AUTOMATIC: 679 case NL80211_TX_POWER_AUTOMATIC:
680 return 0; 680 return 0;
681 case TX_POWER_FIXED: 681 case NL80211_TX_POWER_FIXED:
682 if (mbm < 0 || (mbm % 100))
683 return -EOPNOTSUPP;
684
682 if (!test_bit(IWM_STATUS_READY, &iwm->status)) 685 if (!test_bit(IWM_STATUS_READY, &iwm->status))
683 return 0; 686 return 0;
684 687
685 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, 688 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
686 CFG_TX_PWR_LIMIT_USR, dbm * 2); 689 CFG_TX_PWR_LIMIT_USR,
690 MBM_TO_DBM(mbm) * 2);
687 if (ret < 0) 691 if (ret < 0)
688 return ret; 692 return ret;
689 693
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 7e16bcf59978..6421689f5e8e 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -56,7 +56,7 @@
56 56
57struct iwm_umac_cmd_reset { 57struct iwm_umac_cmd_reset {
58 __le32 flags; 58 __le32 flags;
59} __attribute__ ((packed)); 59} __packed;
60 60
61#define UMAC_PARAM_TBL_ORD_FIX 0x0 61#define UMAC_PARAM_TBL_ORD_FIX 0x0
62#define UMAC_PARAM_TBL_ORD_VAR 0x1 62#define UMAC_PARAM_TBL_ORD_VAR 0x1
@@ -220,37 +220,37 @@ struct iwm_umac_cmd_set_param_fix {
220 __le16 tbl; 220 __le16 tbl;
221 __le16 key; 221 __le16 key;
222 __le32 value; 222 __le32 value;
223} __attribute__ ((packed)); 223} __packed;
224 224
225struct iwm_umac_cmd_set_param_var { 225struct iwm_umac_cmd_set_param_var {
226 __le16 tbl; 226 __le16 tbl;
227 __le16 key; 227 __le16 key;
228 __le16 len; 228 __le16 len;
229 __le16 reserved; 229 __le16 reserved;
230} __attribute__ ((packed)); 230} __packed;
231 231
232struct iwm_umac_cmd_get_param { 232struct iwm_umac_cmd_get_param {
233 __le16 tbl; 233 __le16 tbl;
234 __le16 key; 234 __le16 key;
235} __attribute__ ((packed)); 235} __packed;
236 236
237struct iwm_umac_cmd_get_param_resp { 237struct iwm_umac_cmd_get_param_resp {
238 __le16 tbl; 238 __le16 tbl;
239 __le16 key; 239 __le16 key;
240 __le16 len; 240 __le16 len;
241 __le16 reserved; 241 __le16 reserved;
242} __attribute__ ((packed)); 242} __packed;
243 243
244struct iwm_umac_cmd_eeprom_proxy_hdr { 244struct iwm_umac_cmd_eeprom_proxy_hdr {
245 __le32 type; 245 __le32 type;
246 __le32 offset; 246 __le32 offset;
247 __le32 len; 247 __le32 len;
248} __attribute__ ((packed)); 248} __packed;
249 249
250struct iwm_umac_cmd_eeprom_proxy { 250struct iwm_umac_cmd_eeprom_proxy {
251 struct iwm_umac_cmd_eeprom_proxy_hdr hdr; 251 struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
252 u8 buf[0]; 252 u8 buf[0];
253} __attribute__ ((packed)); 253} __packed;
254 254
255#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1 255#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
256#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2 256#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
@@ -267,13 +267,13 @@ struct iwm_umac_channel_info {
267 u8 reserved; 267 u8 reserved;
268 u8 flags; 268 u8 flags;
269 __le32 channels_mask; 269 __le32 channels_mask;
270} __attribute__ ((packed)); 270} __packed;
271 271
272struct iwm_umac_cmd_get_channel_list { 272struct iwm_umac_cmd_get_channel_list {
273 __le16 count; 273 __le16 count;
274 __le16 reserved; 274 __le16 reserved;
275 struct iwm_umac_channel_info ch[0]; 275 struct iwm_umac_channel_info ch[0];
276} __attribute__ ((packed)); 276} __packed;
277 277
278 278
279/* UMAC WiFi interface commands */ 279/* UMAC WiFi interface commands */
@@ -304,7 +304,7 @@ struct iwm_umac_ssid {
304 u8 ssid_len; 304 u8 ssid_len;
305 u8 ssid[IEEE80211_MAX_SSID_LEN]; 305 u8 ssid[IEEE80211_MAX_SSID_LEN];
306 u8 reserved[3]; 306 u8 reserved[3];
307} __attribute__ ((packed)); 307} __packed;
308 308
309struct iwm_umac_cmd_scan_request { 309struct iwm_umac_cmd_scan_request {
310 struct iwm_umac_wifi_if hdr; 310 struct iwm_umac_wifi_if hdr;
@@ -314,7 +314,7 @@ struct iwm_umac_cmd_scan_request {
314 u8 timeout; /* In seconds */ 314 u8 timeout; /* In seconds */
315 u8 reserved; 315 u8 reserved;
316 struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX]; 316 struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
317} __attribute__ ((packed)); 317} __packed;
318 318
319#define UMAC_CIPHER_TYPE_NONE 0xFF 319#define UMAC_CIPHER_TYPE_NONE 0xFF
320#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00 320#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
@@ -357,7 +357,7 @@ struct iwm_umac_security {
357 u8 ucast_cipher; 357 u8 ucast_cipher;
358 u8 mcast_cipher; 358 u8 mcast_cipher;
359 u8 flags; 359 u8 flags;
360} __attribute__ ((packed)); 360} __packed;
361 361
362struct iwm_umac_ibss { 362struct iwm_umac_ibss {
363 u8 beacon_interval; /* in millisecond */ 363 u8 beacon_interval; /* in millisecond */
@@ -366,7 +366,7 @@ struct iwm_umac_ibss {
366 u8 band; 366 u8 band;
367 u8 channel; 367 u8 channel;
368 u8 reserved[3]; 368 u8 reserved[3];
369} __attribute__ ((packed)); 369} __packed;
370 370
371#define UMAC_MODE_BSS 0 371#define UMAC_MODE_BSS 0
372#define UMAC_MODE_IBSS 1 372#define UMAC_MODE_IBSS 1
@@ -385,13 +385,13 @@ struct iwm_umac_profile {
385 __le16 flags; 385 __le16 flags;
386 u8 wireless_mode; 386 u8 wireless_mode;
387 u8 bss_num; 387 u8 bss_num;
388} __attribute__ ((packed)); 388} __packed;
389 389
390struct iwm_umac_invalidate_profile { 390struct iwm_umac_invalidate_profile {
391 struct iwm_umac_wifi_if hdr; 391 struct iwm_umac_wifi_if hdr;
392 u8 reason; 392 u8 reason;
393 u8 reserved[3]; 393 u8 reserved[3];
394} __attribute__ ((packed)); 394} __packed;
395 395
396/* Encryption key commands */ 396/* Encryption key commands */
397struct iwm_umac_key_wep40 { 397struct iwm_umac_key_wep40 {
@@ -400,7 +400,7 @@ struct iwm_umac_key_wep40 {
400 u8 key[WLAN_KEY_LEN_WEP40]; 400 u8 key[WLAN_KEY_LEN_WEP40];
401 u8 static_key; 401 u8 static_key;
402 u8 reserved[2]; 402 u8 reserved[2];
403} __attribute__ ((packed)); 403} __packed;
404 404
405struct iwm_umac_key_wep104 { 405struct iwm_umac_key_wep104 {
406 struct iwm_umac_wifi_if hdr; 406 struct iwm_umac_wifi_if hdr;
@@ -408,7 +408,7 @@ struct iwm_umac_key_wep104 {
408 u8 key[WLAN_KEY_LEN_WEP104]; 408 u8 key[WLAN_KEY_LEN_WEP104];
409 u8 static_key; 409 u8 static_key;
410 u8 reserved[2]; 410 u8 reserved[2];
411} __attribute__ ((packed)); 411} __packed;
412 412
413#define IWM_TKIP_KEY_SIZE 16 413#define IWM_TKIP_KEY_SIZE 16
414#define IWM_TKIP_MIC_SIZE 8 414#define IWM_TKIP_MIC_SIZE 8
@@ -420,7 +420,7 @@ struct iwm_umac_key_tkip {
420 u8 tkip_key[IWM_TKIP_KEY_SIZE]; 420 u8 tkip_key[IWM_TKIP_KEY_SIZE];
421 u8 mic_rx_key[IWM_TKIP_MIC_SIZE]; 421 u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
422 u8 mic_tx_key[IWM_TKIP_MIC_SIZE]; 422 u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
423} __attribute__ ((packed)); 423} __packed;
424 424
425struct iwm_umac_key_ccmp { 425struct iwm_umac_key_ccmp {
426 struct iwm_umac_wifi_if hdr; 426 struct iwm_umac_wifi_if hdr;
@@ -428,27 +428,27 @@ struct iwm_umac_key_ccmp {
428 u8 iv_count[6]; 428 u8 iv_count[6];
429 u8 reserved[2]; 429 u8 reserved[2];
430 u8 key[WLAN_KEY_LEN_CCMP]; 430 u8 key[WLAN_KEY_LEN_CCMP];
431} __attribute__ ((packed)); 431} __packed;
432 432
433struct iwm_umac_key_remove { 433struct iwm_umac_key_remove {
434 struct iwm_umac_wifi_if hdr; 434 struct iwm_umac_wifi_if hdr;
435 struct iwm_umac_key_hdr key_hdr; 435 struct iwm_umac_key_hdr key_hdr;
436} __attribute__ ((packed)); 436} __packed;
437 437
438struct iwm_umac_tx_key_id { 438struct iwm_umac_tx_key_id {
439 struct iwm_umac_wifi_if hdr; 439 struct iwm_umac_wifi_if hdr;
440 u8 key_idx; 440 u8 key_idx;
441 u8 reserved[3]; 441 u8 reserved[3];
442} __attribute__ ((packed)); 442} __packed;
443 443
444struct iwm_umac_pwr_trigger { 444struct iwm_umac_pwr_trigger {
445 struct iwm_umac_wifi_if hdr; 445 struct iwm_umac_wifi_if hdr;
446 __le32 reseved; 446 __le32 reseved;
447} __attribute__ ((packed)); 447} __packed;
448 448
449struct iwm_umac_cmd_stats_req { 449struct iwm_umac_cmd_stats_req {
450 __le32 flags; 450 __le32 flags;
451} __attribute__ ((packed)); 451} __packed;
452 452
453struct iwm_umac_cmd_stop_resume_tx { 453struct iwm_umac_cmd_stop_resume_tx {
454 u8 flags; 454 u8 flags;
@@ -456,7 +456,7 @@ struct iwm_umac_cmd_stop_resume_tx {
456 __le16 stop_resume_tid_msk; 456 __le16 stop_resume_tid_msk;
457 __le16 last_seq_num[IWM_UMAC_TID_NR]; 457 __le16 last_seq_num[IWM_UMAC_TID_NR];
458 u16 reserved; 458 u16 reserved;
459} __attribute__ ((packed)); 459} __packed;
460 460
461#define IWM_CMD_PMKID_ADD 1 461#define IWM_CMD_PMKID_ADD 1
462#define IWM_CMD_PMKID_DEL 2 462#define IWM_CMD_PMKID_DEL 2
@@ -468,7 +468,7 @@ struct iwm_umac_pmkid_update {
468 u8 bssid[ETH_ALEN]; 468 u8 bssid[ETH_ALEN];
469 __le16 reserved; 469 __le16 reserved;
470 u8 pmkid[WLAN_PMKID_LEN]; 470 u8 pmkid[WLAN_PMKID_LEN];
471} __attribute__ ((packed)); 471} __packed;
472 472
473/* LMAC commands */ 473/* LMAC commands */
474int iwm_read_mac(struct iwm_priv *iwm, u8 *mac); 474int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 9531b18cf72a..907ac890997c 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -54,7 +54,7 @@
54 * LMAC. If you look at LMAC commands you'll se that they 54 * LMAC. If you look at LMAC commands you'll se that they
55 * are actually regular iwlwifi target commands encapsulated 55 * are actually regular iwlwifi target commands encapsulated
56 * into a special UMAC command called UMAC passthrough. 56 * into a special UMAC command called UMAC passthrough.
57 * This is due to the fact the the host talks exclusively 57 * This is due to the fact the host talks exclusively
58 * to the UMAC and so there needs to be a special UMAC 58 * to the UMAC and so there needs to be a special UMAC
59 * command for talking to the LMAC. 59 * command for talking to the LMAC.
60 * This is how a wifi command is layed out: 60 * This is how a wifi command is layed out:
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 13266c3842f8..51d7efa15ae6 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -162,7 +162,7 @@ struct iwm_umac_key_hdr {
162 u8 mac[ETH_ALEN]; 162 u8 mac[ETH_ALEN];
163 u8 key_idx; 163 u8 key_idx;
164 u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */ 164 u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
165} __attribute__ ((packed)); 165} __packed;
166 166
167struct iwm_key { 167struct iwm_key {
168 struct iwm_umac_key_hdr hdr; 168 struct iwm_umac_key_hdr hdr;
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index a855a99e49b8..5ddcdf8c70c0 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -43,7 +43,7 @@ struct iwm_lmac_hdr {
43 u8 id; 43 u8 id;
44 u8 flags; 44 u8 flags;
45 __le16 seq_num; 45 __le16 seq_num;
46} __attribute__ ((packed)); 46} __packed;
47 47
48/* LMAC commands */ 48/* LMAC commands */
49#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1 49#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1
@@ -54,23 +54,23 @@ struct iwm_lmac_cal_cfg_elt {
54 __le32 send_res; /* 1 for sending back results */ 54 __le32 send_res; /* 1 for sending back results */
55 __le32 apply_res; /* 1 for applying calibration results to HW */ 55 __le32 apply_res; /* 1 for applying calibration results to HW */
56 __le32 reserved; 56 __le32 reserved;
57} __attribute__ ((packed)); 57} __packed;
58 58
59struct iwm_lmac_cal_cfg_status { 59struct iwm_lmac_cal_cfg_status {
60 struct iwm_lmac_cal_cfg_elt init; 60 struct iwm_lmac_cal_cfg_elt init;
61 struct iwm_lmac_cal_cfg_elt periodic; 61 struct iwm_lmac_cal_cfg_elt periodic;
62 __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */ 62 __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
63} __attribute__ ((packed)); 63} __packed;
64 64
65struct iwm_lmac_cal_cfg_cmd { 65struct iwm_lmac_cal_cfg_cmd {
66 struct iwm_lmac_cal_cfg_status ucode_cfg; 66 struct iwm_lmac_cal_cfg_status ucode_cfg;
67 struct iwm_lmac_cal_cfg_status driver_cfg; 67 struct iwm_lmac_cal_cfg_status driver_cfg;
68 __le32 reserved; 68 __le32 reserved;
69} __attribute__ ((packed)); 69} __packed;
70 70
71struct iwm_lmac_cal_cfg_resp { 71struct iwm_lmac_cal_cfg_resp {
72 __le32 status; 72 __le32 status;
73} __attribute__ ((packed)); 73} __packed;
74 74
75#define IWM_CARD_STATE_SW_HW_ENABLED 0x00 75#define IWM_CARD_STATE_SW_HW_ENABLED 0x00
76#define IWM_CARD_STATE_HW_DISABLED 0x01 76#define IWM_CARD_STATE_HW_DISABLED 0x01
@@ -80,7 +80,7 @@ struct iwm_lmac_cal_cfg_resp {
80 80
81struct iwm_lmac_card_state { 81struct iwm_lmac_card_state {
82 __le32 flags; 82 __le32 flags;
83} __attribute__ ((packed)); 83} __packed;
84 84
85/** 85/**
86 * COEX_PRIORITY_TABLE_CMD 86 * COEX_PRIORITY_TABLE_CMD
@@ -131,7 +131,7 @@ struct coex_event {
131 u8 win_med_prio; 131 u8 win_med_prio;
132 u8 reserved; 132 u8 reserved;
133 u8 flags; 133 u8 flags;
134} __attribute__ ((packed)); 134} __packed;
135 135
136#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1 136#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1
137#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4 137#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4
@@ -142,7 +142,7 @@ struct iwm_coex_prio_table_cmd {
142 u8 flags; 142 u8 flags;
143 u8 reserved[3]; 143 u8 reserved[3];
144 struct coex_event sta_prio[COEX_EVENTS_NUM]; 144 struct coex_event sta_prio[COEX_EVENTS_NUM];
145} __attribute__ ((packed)); 145} __packed;
146 146
147/* Coexistence definitions 147/* Coexistence definitions
148 * 148 *
@@ -192,7 +192,7 @@ struct iwm_ct_kill_cfg_cmd {
192 u32 exit_threshold; 192 u32 exit_threshold;
193 u32 reserved; 193 u32 reserved;
194 u32 entry_threshold; 194 u32 entry_threshold;
195} __attribute__ ((packed)); 195} __packed;
196 196
197 197
198/* LMAC OP CODES */ 198/* LMAC OP CODES */
@@ -428,7 +428,7 @@ struct iwm_lmac_calib_hdr {
428 u8 first_grp; 428 u8 first_grp;
429 u8 grp_num; 429 u8 grp_num;
430 u8 all_data_valid; 430 u8 all_data_valid;
431} __attribute__ ((packed)); 431} __packed;
432 432
433#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7 433#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7
434#define IWM_CALIB_FREQ_GROUPS_NR 5 434#define IWM_CALIB_FREQ_GROUPS_NR 5
@@ -437,20 +437,20 @@ struct iwm_lmac_calib_hdr {
437struct iwm_calib_rxiq_entry { 437struct iwm_calib_rxiq_entry {
438 u16 ptam_postdist_ars; 438 u16 ptam_postdist_ars;
439 u16 ptam_postdist_arc; 439 u16 ptam_postdist_arc;
440} __attribute__ ((packed)); 440} __packed;
441 441
442struct iwm_calib_rxiq_group { 442struct iwm_calib_rxiq_group {
443 struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR]; 443 struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
444} __attribute__ ((packed)); 444} __packed;
445 445
446struct iwm_lmac_calib_rxiq { 446struct iwm_lmac_calib_rxiq {
447 struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR]; 447 struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
448} __attribute__ ((packed)); 448} __packed;
449 449
450struct iwm_calib_rxiq { 450struct iwm_calib_rxiq {
451 struct iwm_lmac_calib_hdr hdr; 451 struct iwm_lmac_calib_hdr hdr;
452 struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR]; 452 struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
453} __attribute__ ((packed)); 453} __packed;
454 454
455#define LMAC_STA_ID_SEED 0x0f 455#define LMAC_STA_ID_SEED 0x0f
456#define LMAC_STA_ID_POS 0 456#define LMAC_STA_ID_POS 0
@@ -463,7 +463,7 @@ struct iwm_lmac_power_report {
463 u8 pa_integ_res_A[3]; 463 u8 pa_integ_res_A[3];
464 u8 pa_integ_res_B[3]; 464 u8 pa_integ_res_B[3];
465 u8 pa_integ_res_C[3]; 465 u8 pa_integ_res_C[3];
466} __attribute__ ((packed)); 466} __packed;
467 467
468struct iwm_lmac_tx_resp { 468struct iwm_lmac_tx_resp {
469 u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */ 469 u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */
@@ -479,6 +479,6 @@ struct iwm_lmac_tx_resp {
479 u8 ra_tid; 479 u8 ra_tid;
480 __le16 frame_ctl; 480 __le16 frame_ctl;
481 __le32 status; 481 __le32 status;
482} __attribute__ ((packed)); 482} __packed;
483 483
484#endif 484#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index e1184deca559..c02fcedea9fa 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -321,14 +321,14 @@ iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
321 return ERR_PTR(-ENOMEM); 321 return ERR_PTR(-ENOMEM);
322 } 322 }
323 323
324 ticket_node->ticket = kzalloc(sizeof(struct iwm_rx_ticket), GFP_KERNEL); 324 ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket),
325 GFP_KERNEL);
325 if (!ticket_node->ticket) { 326 if (!ticket_node->ticket) {
326 IWM_ERR(iwm, "Couldn't allocate RX ticket\n"); 327 IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
327 kfree(ticket_node); 328 kfree(ticket_node);
328 return ERR_PTR(-ENOMEM); 329 return ERR_PTR(-ENOMEM);
329 } 330 }
330 331
331 memcpy(ticket_node->ticket, ticket, sizeof(struct iwm_rx_ticket));
332 INIT_LIST_HEAD(&ticket_node->node); 332 INIT_LIST_HEAD(&ticket_node->node);
333 333
334 return ticket_node; 334 return ticket_node;
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 0cbba3ecc813..4a137d334a42 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -42,19 +42,19 @@
42struct iwm_udma_in_hdr { 42struct iwm_udma_in_hdr {
43 __le32 cmd; 43 __le32 cmd;
44 __le32 size; 44 __le32 size;
45} __attribute__ ((packed)); 45} __packed;
46 46
47struct iwm_udma_out_nonwifi_hdr { 47struct iwm_udma_out_nonwifi_hdr {
48 __le32 cmd; 48 __le32 cmd;
49 __le32 addr; 49 __le32 addr;
50 __le32 op1_sz; 50 __le32 op1_sz;
51 __le32 op2; 51 __le32 op2;
52} __attribute__ ((packed)); 52} __packed;
53 53
54struct iwm_udma_out_wifi_hdr { 54struct iwm_udma_out_wifi_hdr {
55 __le32 cmd; 55 __le32 cmd;
56 __le32 meta_data; 56 __le32 meta_data;
57} __attribute__ ((packed)); 57} __packed;
58 58
59/* Sequence numbering */ 59/* Sequence numbering */
60#define UMAC_WIFI_SEQ_NUM_BASE 1 60#define UMAC_WIFI_SEQ_NUM_BASE 1
@@ -408,12 +408,12 @@ struct iwm_rx_ticket {
408 __le16 flags; 408 __le16 flags;
409 u8 payload_offset; /* includes: MAC header, pad, IV */ 409 u8 payload_offset; /* includes: MAC header, pad, IV */
410 u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */ 410 u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
411} __attribute__ ((packed)); 411} __packed;
412 412
413struct iwm_rx_mpdu_hdr { 413struct iwm_rx_mpdu_hdr {
414 __le16 len; 414 __le16 len;
415 __le16 reserved; 415 __le16 reserved;
416} __attribute__ ((packed)); 416} __packed;
417 417
418/* UMAC SW WIFI API */ 418/* UMAC SW WIFI API */
419 419
@@ -421,31 +421,31 @@ struct iwm_dev_cmd_hdr {
421 u8 cmd; 421 u8 cmd;
422 u8 flags; 422 u8 flags;
423 __le16 seq_num; 423 __le16 seq_num;
424} __attribute__ ((packed)); 424} __packed;
425 425
426struct iwm_umac_fw_cmd_hdr { 426struct iwm_umac_fw_cmd_hdr {
427 __le32 meta_data; 427 __le32 meta_data;
428 struct iwm_dev_cmd_hdr cmd; 428 struct iwm_dev_cmd_hdr cmd;
429} __attribute__ ((packed)); 429} __packed;
430 430
431struct iwm_umac_wifi_out_hdr { 431struct iwm_umac_wifi_out_hdr {
432 struct iwm_udma_out_wifi_hdr hw_hdr; 432 struct iwm_udma_out_wifi_hdr hw_hdr;
433 struct iwm_umac_fw_cmd_hdr sw_hdr; 433 struct iwm_umac_fw_cmd_hdr sw_hdr;
434} __attribute__ ((packed)); 434} __packed;
435 435
436struct iwm_umac_nonwifi_out_hdr { 436struct iwm_umac_nonwifi_out_hdr {
437 struct iwm_udma_out_nonwifi_hdr hw_hdr; 437 struct iwm_udma_out_nonwifi_hdr hw_hdr;
438} __attribute__ ((packed)); 438} __packed;
439 439
440struct iwm_umac_wifi_in_hdr { 440struct iwm_umac_wifi_in_hdr {
441 struct iwm_udma_in_hdr hw_hdr; 441 struct iwm_udma_in_hdr hw_hdr;
442 struct iwm_umac_fw_cmd_hdr sw_hdr; 442 struct iwm_umac_fw_cmd_hdr sw_hdr;
443} __attribute__ ((packed)); 443} __packed;
444 444
445struct iwm_umac_nonwifi_in_hdr { 445struct iwm_umac_nonwifi_in_hdr {
446 struct iwm_udma_in_hdr hw_hdr; 446 struct iwm_udma_in_hdr hw_hdr;
447 __le32 time_stamp; 447 __le32 time_stamp;
448} __attribute__ ((packed)); 448} __packed;
449 449
450#define IWM_UMAC_PAGE_SIZE 0x200 450#define IWM_UMAC_PAGE_SIZE 0x200
451 451
@@ -521,7 +521,7 @@ struct iwm_umac_notif_wifi_if {
521 u8 status; 521 u8 status;
522 u8 flags; 522 u8 flags;
523 __le16 buf_size; 523 __le16 buf_size;
524} __attribute__ ((packed)); 524} __packed;
525 525
526#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1 526#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1
527#define UMAC_ROAM_REASON_AP_DEAUTH 0x2 527#define UMAC_ROAM_REASON_AP_DEAUTH 0x2
@@ -535,7 +535,7 @@ struct iwm_umac_notif_assoc_start {
535 __le32 roam_reason; 535 __le32 roam_reason;
536 u8 bssid[ETH_ALEN]; 536 u8 bssid[ETH_ALEN];
537 u8 reserved[2]; 537 u8 reserved[2];
538} __attribute__ ((packed)); 538} __packed;
539 539
540#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0 540#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0
541#define UMAC_ASSOC_COMPLETE_FAILURE 0x1 541#define UMAC_ASSOC_COMPLETE_FAILURE 0x1
@@ -546,7 +546,7 @@ struct iwm_umac_notif_assoc_complete {
546 u8 bssid[ETH_ALEN]; 546 u8 bssid[ETH_ALEN];
547 u8 band; 547 u8 band;
548 u8 channel; 548 u8 channel;
549} __attribute__ ((packed)); 549} __packed;
550 550
551#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0 551#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0
552#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1 552#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1
@@ -556,7 +556,7 @@ struct iwm_umac_notif_assoc_complete {
556struct iwm_umac_notif_profile_invalidate { 556struct iwm_umac_notif_profile_invalidate {
557 struct iwm_umac_notif_wifi_if mlme_hdr; 557 struct iwm_umac_notif_wifi_if mlme_hdr;
558 __le32 reason; 558 __le32 reason;
559} __attribute__ ((packed)); 559} __packed;
560 560
561#define UMAC_SCAN_RESULT_SUCCESS 0x0 561#define UMAC_SCAN_RESULT_SUCCESS 0x0
562#define UMAC_SCAN_RESULT_ABORTED 0x1 562#define UMAC_SCAN_RESULT_ABORTED 0x1
@@ -568,7 +568,7 @@ struct iwm_umac_notif_scan_complete {
568 __le32 type; 568 __le32 type;
569 __le32 result; 569 __le32 result;
570 u8 seq_num; 570 u8 seq_num;
571} __attribute__ ((packed)); 571} __packed;
572 572
573#define UMAC_OPCODE_ADD_MODIFY 0x0 573#define UMAC_OPCODE_ADD_MODIFY 0x0
574#define UMAC_OPCODE_REMOVE 0x1 574#define UMAC_OPCODE_REMOVE 0x1
@@ -582,7 +582,7 @@ struct iwm_umac_notif_sta_info {
582 u8 mac_addr[ETH_ALEN]; 582 u8 mac_addr[ETH_ALEN];
583 u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */ 583 u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
584 u8 flags; 584 u8 flags;
585} __attribute__ ((packed)); 585} __packed;
586 586
587#define UMAC_BAND_2GHZ 0 587#define UMAC_BAND_2GHZ 0
588#define UMAC_BAND_5GHZ 1 588#define UMAC_BAND_5GHZ 1
@@ -601,7 +601,7 @@ struct iwm_umac_notif_bss_info {
601 s8 rssi; 601 s8 rssi;
602 u8 reserved; 602 u8 reserved;
603 u8 frame_buf[1]; 603 u8 frame_buf[1];
604} __attribute__ ((packed)); 604} __packed;
605 605
606#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff 606#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff
607#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00 607#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00
@@ -614,13 +614,13 @@ struct iwm_umac_notif_bss_removed {
614 struct iwm_umac_notif_wifi_if mlme_hdr; 614 struct iwm_umac_notif_wifi_if mlme_hdr;
615 __le32 count; 615 __le32 count;
616 __le16 entries[0]; 616 __le16 entries[0];
617} __attribute__ ((packed)); 617} __packed;
618 618
619struct iwm_umac_notif_mgt_frame { 619struct iwm_umac_notif_mgt_frame {
620 struct iwm_umac_notif_wifi_if mlme_hdr; 620 struct iwm_umac_notif_wifi_if mlme_hdr;
621 __le16 len; 621 __le16 len;
622 u8 frame[1]; 622 u8 frame[1];
623} __attribute__ ((packed)); 623} __packed;
624 624
625struct iwm_umac_notif_alive { 625struct iwm_umac_notif_alive {
626 struct iwm_umac_wifi_in_hdr hdr; 626 struct iwm_umac_wifi_in_hdr hdr;
@@ -630,13 +630,13 @@ struct iwm_umac_notif_alive {
630 __le16 reserved2; 630 __le16 reserved2;
631 __le16 page_grp_count; 631 __le16 page_grp_count;
632 __le32 page_grp_state[IWM_MACS_OUT_GROUPS]; 632 __le32 page_grp_state[IWM_MACS_OUT_GROUPS];
633} __attribute__ ((packed)); 633} __packed;
634 634
635struct iwm_umac_notif_init_complete { 635struct iwm_umac_notif_init_complete {
636 struct iwm_umac_wifi_in_hdr hdr; 636 struct iwm_umac_wifi_in_hdr hdr;
637 __le16 status; 637 __le16 status;
638 __le16 reserved; 638 __le16 reserved;
639} __attribute__ ((packed)); 639} __packed;
640 640
641/* error categories */ 641/* error categories */
642enum { 642enum {
@@ -667,12 +667,12 @@ struct iwm_fw_error_hdr {
667 __le32 dbm_buf_end; 667 __le32 dbm_buf_end;
668 __le32 dbm_buf_write_ptr; 668 __le32 dbm_buf_write_ptr;
669 __le32 dbm_buf_cycle_cnt; 669 __le32 dbm_buf_cycle_cnt;
670} __attribute__ ((packed)); 670} __packed;
671 671
672struct iwm_umac_notif_error { 672struct iwm_umac_notif_error {
673 struct iwm_umac_wifi_in_hdr hdr; 673 struct iwm_umac_wifi_in_hdr hdr;
674 struct iwm_fw_error_hdr err; 674 struct iwm_fw_error_hdr err;
675} __attribute__ ((packed)); 675} __packed;
676 676
677#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0 677#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0
678#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff 678#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff
@@ -687,20 +687,20 @@ struct iwm_umac_notif_page_dealloc {
687 struct iwm_umac_wifi_in_hdr hdr; 687 struct iwm_umac_wifi_in_hdr hdr;
688 __le32 changes; 688 __le32 changes;
689 __le32 grp_info[IWM_MACS_OUT_GROUPS]; 689 __le32 grp_info[IWM_MACS_OUT_GROUPS];
690} __attribute__ ((packed)); 690} __packed;
691 691
692struct iwm_umac_notif_wifi_status { 692struct iwm_umac_notif_wifi_status {
693 struct iwm_umac_wifi_in_hdr hdr; 693 struct iwm_umac_wifi_in_hdr hdr;
694 __le16 status; 694 __le16 status;
695 __le16 reserved; 695 __le16 reserved;
696} __attribute__ ((packed)); 696} __packed;
697 697
698struct iwm_umac_notif_rx_ticket { 698struct iwm_umac_notif_rx_ticket {
699 struct iwm_umac_wifi_in_hdr hdr; 699 struct iwm_umac_wifi_in_hdr hdr;
700 u8 num_tickets; 700 u8 num_tickets;
701 u8 reserved[3]; 701 u8 reserved[3];
702 struct iwm_rx_ticket tickets[1]; 702 struct iwm_rx_ticket tickets[1];
703} __attribute__ ((packed)); 703} __packed;
704 704
705/* Tx/Rx rates window (number of max of last update window per second) */ 705/* Tx/Rx rates window (number of max of last update window per second) */
706#define UMAC_NTF_RATE_SAMPLE_NR 4 706#define UMAC_NTF_RATE_SAMPLE_NR 4
@@ -758,7 +758,7 @@ struct iwm_umac_notif_stats {
758 __le32 roam_unassoc; 758 __le32 roam_unassoc;
759 __le32 roam_deauth; 759 __le32 roam_deauth;
760 __le32 roam_ap_loadblance; 760 __le32 roam_ap_loadblance;
761} __attribute__ ((packed)); 761} __packed;
762 762
763#define UMAC_STOP_TX_FLAG 0x1 763#define UMAC_STOP_TX_FLAG 0x1
764#define UMAC_RESUME_TX_FLAG 0x2 764#define UMAC_RESUME_TX_FLAG 0x2
@@ -770,7 +770,7 @@ struct iwm_umac_notif_stop_resume_tx {
770 u8 flags; /* UMAC_*_TX_FLAG_* */ 770 u8 flags; /* UMAC_*_TX_FLAG_* */
771 u8 sta_id; 771 u8 sta_id;
772 __le16 stop_resume_tid_msk; /* tid bitmask */ 772 __le16 stop_resume_tid_msk; /* tid bitmask */
773} __attribute__ ((packed)); 773} __packed;
774 774
775#define UMAC_MAX_NUM_PMKIDS 4 775#define UMAC_MAX_NUM_PMKIDS 4
776 776
@@ -779,7 +779,7 @@ struct iwm_umac_wifi_if {
779 u8 oid; 779 u8 oid;
780 u8 flags; 780 u8 flags;
781 __le16 buf_size; 781 __le16 buf_size;
782} __attribute__ ((packed)); 782} __packed;
783 783
784#define IWM_SEQ_NUM_HOST_MSK 0x0000 784#define IWM_SEQ_NUM_HOST_MSK 0x0000
785#define IWM_SEQ_NUM_UMAC_MSK 0x4000 785#define IWM_SEQ_NUM_UMAC_MSK 0x4000
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 45e870e33117..f7d01bfa2e4a 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,4 +1,3 @@
1libertas-y += assoc.o
2libertas-y += cfg.o 1libertas-y += cfg.o
3libertas-y += cmd.o 2libertas-y += cmd.o
4libertas-y += cmdresp.o 3libertas-y += cmdresp.o
@@ -6,9 +5,7 @@ libertas-y += debugfs.o
6libertas-y += ethtool.o 5libertas-y += ethtool.o
7libertas-y += main.o 6libertas-y += main.o
8libertas-y += rx.o 7libertas-y += rx.o
9libertas-y += scan.o
10libertas-y += tx.o 8libertas-y += tx.o
11libertas-y += wext.o
12libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o 9libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
13 10
14usb8xxx-objs += if_usb.o 11usb8xxx-objs += if_usb.o
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index 2726c044430f..60fd1afe89ac 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -226,6 +226,18 @@ setuserscan
226 All entries in the scan table (not just the new scan data when keep=1) 226 All entries in the scan table (not just the new scan data when keep=1)
227 will be displayed upon completion by use of the getscantable ioctl. 227 will be displayed upon completion by use of the getscantable ioctl.
228 228
229hostsleep
230 This command is used to enable/disable host sleep.
231 Note: Host sleep parameters should be configured using
232 "ethtool -s ethX wol X" command before enabling host sleep.
233
234 Path: /sys/kernel/debug/libertas_wireless/ethX/
235
236 Usage:
237 cat hostsleep: reads the current hostsleep state
238 echo "1" > hostsleep : enable host sleep.
239 echo "0" > hostsleep : disable host sleep
240
229======================== 241========================
230IWCONFIG COMMANDS 242IWCONFIG COMMANDS
231======================== 243========================
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
deleted file mode 100644
index aa06070e5eab..000000000000
--- a/drivers/net/wireless/libertas/assoc.c
+++ /dev/null
@@ -1,2264 +0,0 @@
1/* Copyright (C) 2006, Red Hat, Inc. */
2
3#include <linux/types.h>
4#include <linux/etherdevice.h>
5#include <linux/ieee80211.h>
6#include <linux/if_arp.h>
7#include <linux/slab.h>
8#include <net/lib80211.h>
9
10#include "assoc.h"
11#include "decl.h"
12#include "host.h"
13#include "scan.h"
14#include "cmd.h"
15
16static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) =
17 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
18static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
19 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
20
21/* The firmware needs the following bits masked out of the beacon-derived
22 * capability field when associating/joining to a BSS:
23 * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
24 */
25#define CAPINFO_MASK (~(0xda00))
26
27/**
28 * 802.11b/g supported bitrates (in 500Kb/s units)
29 */
30u8 lbs_bg_rates[MAX_RATES] =
31 { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
320x00, 0x00 };
33
34
35static int assoc_helper_wep_keys(struct lbs_private *priv,
36 struct assoc_request *assoc_req);
37
38/**
39 * @brief This function finds common rates between rates and card rates.
40 *
41 * It will fill common rates in rates as output if found.
42 *
43 * NOTE: Setting the MSB of the basic rates need to be taken
44 * care, either before or after calling this function
45 *
46 * @param priv A pointer to struct lbs_private structure
47 * @param rates the buffer which keeps input and output
48 * @param rates_size the size of rates buffer; new size of buffer on return,
49 * which will be less than or equal to original rates_size
50 *
51 * @return 0 on success, or -1 on error
52 */
53static int get_common_rates(struct lbs_private *priv,
54 u8 *rates,
55 u16 *rates_size)
56{
57 int i, j;
58 u8 intersection[MAX_RATES];
59 u16 intersection_size;
60 u16 num_rates = 0;
61
62 intersection_size = min_t(u16, *rates_size, ARRAY_SIZE(intersection));
63
64 /* Allow each rate from 'rates' that is supported by the hardware */
65 for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && lbs_bg_rates[i]; i++) {
66 for (j = 0; j < intersection_size && rates[j]; j++) {
67 if (rates[j] == lbs_bg_rates[i])
68 intersection[num_rates++] = rates[j];
69 }
70 }
71
72 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
73 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", lbs_bg_rates,
74 ARRAY_SIZE(lbs_bg_rates));
75 lbs_deb_hex(LBS_DEB_JOIN, "common rates", intersection, num_rates);
76 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
77
78 if (!priv->enablehwauto) {
79 for (i = 0; i < num_rates; i++) {
80 if (intersection[i] == priv->cur_rate)
81 goto done;
82 }
83 lbs_pr_alert("Previously set fixed data rate %#x isn't "
84 "compatible with the network.\n", priv->cur_rate);
85 return -1;
86 }
87
88done:
89 memset(rates, 0, *rates_size);
90 *rates_size = num_rates;
91 memcpy(rates, intersection, num_rates);
92 return 0;
93}
94
95
96/**
97 * @brief Sets the MSB on basic rates as the firmware requires
98 *
99 * Scan through an array and set the MSB for basic data rates.
100 *
101 * @param rates buffer of data rates
102 * @param len size of buffer
103 */
104static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
105{
106 int i;
107
108 for (i = 0; i < len; i++) {
109 if (rates[i] == 0x02 || rates[i] == 0x04 ||
110 rates[i] == 0x0b || rates[i] == 0x16)
111 rates[i] |= 0x80;
112 }
113}
114
115
116static u8 iw_auth_to_ieee_auth(u8 auth)
117{
118 if (auth == IW_AUTH_ALG_OPEN_SYSTEM)
119 return 0x00;
120 else if (auth == IW_AUTH_ALG_SHARED_KEY)
121 return 0x01;
122 else if (auth == IW_AUTH_ALG_LEAP)
123 return 0x80;
124
125 lbs_deb_join("%s: invalid auth alg 0x%X\n", __func__, auth);
126 return 0;
127}
128
129/**
130 * @brief This function prepares the authenticate command. AUTHENTICATE only
131 * sets the authentication suite for future associations, as the firmware
132 * handles authentication internally during the ASSOCIATE command.
133 *
134 * @param priv A pointer to struct lbs_private structure
135 * @param bssid The peer BSSID with which to authenticate
136 * @param auth The authentication mode to use (from wireless.h)
137 *
138 * @return 0 or -1
139 */
140static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth)
141{
142 struct cmd_ds_802_11_authenticate cmd;
143 int ret = -1;
144
145 lbs_deb_enter(LBS_DEB_JOIN);
146
147 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
148 memcpy(cmd.bssid, bssid, ETH_ALEN);
149
150 cmd.authtype = iw_auth_to_ieee_auth(auth);
151
152 lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n", bssid, cmd.authtype);
153
154 ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
155
156 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
157 return ret;
158}
159
160
161int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
162 struct assoc_request *assoc)
163{
164 struct cmd_ds_802_11_set_wep cmd;
165 int ret = 0;
166
167 lbs_deb_enter(LBS_DEB_CMD);
168
169 memset(&cmd, 0, sizeof(cmd));
170 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
171 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
172
173 cmd.action = cpu_to_le16(cmd_action);
174
175 if (cmd_action == CMD_ACT_ADD) {
176 int i;
177
178 /* default tx key index */
179 cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
180 CMD_WEP_KEY_INDEX_MASK);
181
182 /* Copy key types and material to host command structure */
183 for (i = 0; i < 4; i++) {
184 struct enc_key *pkey = &assoc->wep_keys[i];
185
186 switch (pkey->len) {
187 case KEY_LEN_WEP_40:
188 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
189 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
190 lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
191 break;
192 case KEY_LEN_WEP_104:
193 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
194 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
195 lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
196 break;
197 case 0:
198 break;
199 default:
200 lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
201 i, pkey->len);
202 ret = -1;
203 goto done;
204 break;
205 }
206 }
207 } else if (cmd_action == CMD_ACT_REMOVE) {
208 /* ACT_REMOVE clears _all_ WEP keys */
209
210 /* default tx key index */
211 cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
212 CMD_WEP_KEY_INDEX_MASK);
213 lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
214 }
215
216 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
217done:
218 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
219 return ret;
220}
221
222int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
223 uint16_t *enable)
224{
225 struct cmd_ds_802_11_enable_rsn cmd;
226 int ret;
227
228 lbs_deb_enter(LBS_DEB_CMD);
229
230 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
231 cmd.action = cpu_to_le16(cmd_action);
232
233 if (cmd_action == CMD_ACT_GET)
234 cmd.enable = 0;
235 else {
236 if (*enable)
237 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
238 else
239 cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
240 lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
241 }
242
243 ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
244 if (!ret && cmd_action == CMD_ACT_GET)
245 *enable = le16_to_cpu(cmd.enable);
246
247 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
248 return ret;
249}
250
251static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
252 struct enc_key *key)
253{
254 lbs_deb_enter(LBS_DEB_CMD);
255
256 if (key->flags & KEY_INFO_WPA_ENABLED)
257 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
258 if (key->flags & KEY_INFO_WPA_UNICAST)
259 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
260 if (key->flags & KEY_INFO_WPA_MCAST)
261 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
262
263 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
264 keyparam->keytypeid = cpu_to_le16(key->type);
265 keyparam->keylen = cpu_to_le16(key->len);
266 memcpy(keyparam->key, key->key, key->len);
267
268 /* Length field doesn't include the {type,length} header */
269 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
270 lbs_deb_leave(LBS_DEB_CMD);
271}
272
273int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
274 struct assoc_request *assoc)
275{
276 struct cmd_ds_802_11_key_material cmd;
277 int ret = 0;
278 int index = 0;
279
280 lbs_deb_enter(LBS_DEB_CMD);
281
282 cmd.action = cpu_to_le16(cmd_action);
283 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
284
285 if (cmd_action == CMD_ACT_GET) {
286 cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
287 } else {
288 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
289
290 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
291 set_one_wpa_key(&cmd.keyParamSet[index],
292 &assoc->wpa_unicast_key);
293 index++;
294 }
295
296 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
297 set_one_wpa_key(&cmd.keyParamSet[index],
298 &assoc->wpa_mcast_key);
299 index++;
300 }
301
302 /* The common header and as many keys as we included */
303 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
304 keyParamSet[index]));
305 }
306 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
307 /* Copy the returned key to driver private data */
308 if (!ret && cmd_action == CMD_ACT_GET) {
309 void *buf_ptr = cmd.keyParamSet;
310 void *resp_end = &(&cmd)[1];
311
312 while (buf_ptr < resp_end) {
313 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
314 struct enc_key *key;
315 uint16_t param_set_len = le16_to_cpu(keyparam->length);
316 uint16_t key_len = le16_to_cpu(keyparam->keylen);
317 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
318 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
319 void *end;
320
321 end = (void *)keyparam + sizeof(keyparam->type)
322 + sizeof(keyparam->length) + param_set_len;
323
324 /* Make sure we don't access past the end of the IEs */
325 if (end > resp_end)
326 break;
327
328 if (key_flags & KEY_INFO_WPA_UNICAST)
329 key = &priv->wpa_unicast_key;
330 else if (key_flags & KEY_INFO_WPA_MCAST)
331 key = &priv->wpa_mcast_key;
332 else
333 break;
334
335 /* Copy returned key into driver */
336 memset(key, 0, sizeof(struct enc_key));
337 if (key_len > sizeof(key->key))
338 break;
339 key->type = key_type;
340 key->flags = key_flags;
341 key->len = key_len;
342 memcpy(key->key, keyparam->key, key->len);
343
344 buf_ptr = end + 1;
345 }
346 }
347
348 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
349 return ret;
350}
351
352static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
353{
354/* Bit Rate
355* 15:13 Reserved
356* 12 54 Mbps
357* 11 48 Mbps
358* 10 36 Mbps
359* 9 24 Mbps
360* 8 18 Mbps
361* 7 12 Mbps
362* 6 9 Mbps
363* 5 6 Mbps
364* 4 Reserved
365* 3 11 Mbps
366* 2 5.5 Mbps
367* 1 2 Mbps
368* 0 1 Mbps
369**/
370
371 uint16_t ratemask;
372 int i = lbs_data_rate_to_fw_index(rate);
373 if (lower_rates_ok)
374 ratemask = (0x1fef >> (12 - i));
375 else
376 ratemask = (1 << i);
377 return cpu_to_le16(ratemask);
378}
379
380int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
381 uint16_t cmd_action)
382{
383 struct cmd_ds_802_11_rate_adapt_rateset cmd;
384 int ret;
385
386 lbs_deb_enter(LBS_DEB_CMD);
387
388 if (!priv->cur_rate && !priv->enablehwauto)
389 return -EINVAL;
390
391 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
392
393 cmd.action = cpu_to_le16(cmd_action);
394 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
395 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
396 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
397 if (!ret && cmd_action == CMD_ACT_GET)
398 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
399
400 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
401 return ret;
402}
403
404/**
405 * @brief Set the data rate
406 *
407 * @param priv A pointer to struct lbs_private structure
408 * @param rate The desired data rate, or 0 to clear a locked rate
409 *
410 * @return 0 on success, error on failure
411 */
412int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
413{
414 struct cmd_ds_802_11_data_rate cmd;
415 int ret = 0;
416
417 lbs_deb_enter(LBS_DEB_CMD);
418
419 memset(&cmd, 0, sizeof(cmd));
420 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
421
422 if (rate > 0) {
423 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
424 cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
425 if (cmd.rates[0] == 0) {
426 lbs_deb_cmd("DATA_RATE: invalid requested rate of"
427 " 0x%02X\n", rate);
428 ret = 0;
429 goto out;
430 }
431 lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
432 } else {
433 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
434 lbs_deb_cmd("DATA_RATE: setting auto\n");
435 }
436
437 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
438 if (ret)
439 goto out;
440
441 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
442
443 /* FIXME: get actual rates FW can do if this command actually returns
444 * all data rates supported.
445 */
446 priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
447 lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
448
449out:
450 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
451 return ret;
452}
453
454
455int lbs_cmd_802_11_rssi(struct lbs_private *priv,
456 struct cmd_ds_command *cmd)
457{
458
459 lbs_deb_enter(LBS_DEB_CMD);
460 cmd->command = cpu_to_le16(CMD_802_11_RSSI);
461 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
462 sizeof(struct cmd_header));
463 cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
464
465 /* reset Beacon SNR/NF/RSSI values */
466 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
467 priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
468 priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
469 priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
470 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
471 priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
472
473 lbs_deb_leave(LBS_DEB_CMD);
474 return 0;
475}
476
477int lbs_ret_802_11_rssi(struct lbs_private *priv,
478 struct cmd_ds_command *resp)
479{
480 struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
481
482 lbs_deb_enter(LBS_DEB_CMD);
483
484 /* store the non average value */
485 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
486 priv->NF[TYPE_BEACON][TYPE_NOAVG] =
487 get_unaligned_le16(&rssirsp->noisefloor);
488
489 priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
490 priv->NF[TYPE_BEACON][TYPE_AVG] =
491 get_unaligned_le16(&rssirsp->avgnoisefloor);
492
493 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
494 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
495 priv->NF[TYPE_BEACON][TYPE_NOAVG]);
496
497 priv->RSSI[TYPE_BEACON][TYPE_AVG] =
498 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
499 priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
500
501 lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
502 priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
503 priv->RSSI[TYPE_BEACON][TYPE_AVG]);
504
505 lbs_deb_leave(LBS_DEB_CMD);
506 return 0;
507}
508
509
510int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
511 struct cmd_ds_command *cmd,
512 u16 cmd_action)
513{
514 struct cmd_ds_802_11_beacon_control
515 *bcn_ctrl = &cmd->params.bcn_ctrl;
516
517 lbs_deb_enter(LBS_DEB_CMD);
518 cmd->size =
519 cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
520 + sizeof(struct cmd_header));
521 cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
522
523 bcn_ctrl->action = cpu_to_le16(cmd_action);
524 bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
525 bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
526
527 lbs_deb_leave(LBS_DEB_CMD);
528 return 0;
529}
530
531int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
532 struct cmd_ds_command *resp)
533{
534 struct cmd_ds_802_11_beacon_control *bcn_ctrl =
535 &resp->params.bcn_ctrl;
536
537 lbs_deb_enter(LBS_DEB_CMD);
538
539 if (bcn_ctrl->action == CMD_ACT_GET) {
540 priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
541 priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
542 }
543
544 lbs_deb_enter(LBS_DEB_CMD);
545 return 0;
546}
547
548
549
550static int lbs_assoc_post(struct lbs_private *priv,
551 struct cmd_ds_802_11_associate_response *resp)
552{
553 int ret = 0;
554 union iwreq_data wrqu;
555 struct bss_descriptor *bss;
556 u16 status_code;
557
558 lbs_deb_enter(LBS_DEB_ASSOC);
559
560 if (!priv->in_progress_assoc_req) {
561 lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
562 ret = -1;
563 goto done;
564 }
565 bss = &priv->in_progress_assoc_req->bss;
566
567 /*
568 * Older FW versions map the IEEE 802.11 Status Code in the association
569 * response to the following values returned in resp->statuscode:
570 *
571 * IEEE Status Code Marvell Status Code
572 * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
573 * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
574 * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
575 * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
576 * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
577 * others -> 0x0003 ASSOC_RESULT_REFUSED
578 *
579 * Other response codes:
580 * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
581 * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
582 * association response from the AP)
583 */
584
585 status_code = le16_to_cpu(resp->statuscode);
586 if (priv->fwrelease < 0x09000000) {
587 switch (status_code) {
588 case 0x00:
589 break;
590 case 0x01:
591 lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
592 break;
593 case 0x02:
594 lbs_deb_assoc("ASSOC_RESP: internal timer "
595 "expired while waiting for the AP\n");
596 break;
597 case 0x03:
598 lbs_deb_assoc("ASSOC_RESP: association "
599 "refused by AP\n");
600 break;
601 case 0x04:
602 lbs_deb_assoc("ASSOC_RESP: authentication "
603 "refused by AP\n");
604 break;
605 default:
606 lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
607 " unknown\n", status_code);
608 break;
609 }
610 } else {
611 /* v9+ returns the AP's association response */
612 lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x\n", status_code);
613 }
614
615 if (status_code) {
616 lbs_mac_event_disconnected(priv);
617 ret = status_code;
618 goto done;
619 }
620
621 lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP",
622 (void *) (resp + sizeof (resp->hdr)),
623 le16_to_cpu(resp->hdr.size) - sizeof (resp->hdr));
624
625 /* Send a Media Connected event, according to the Spec */
626 priv->connect_status = LBS_CONNECTED;
627
628 /* Update current SSID and BSSID */
629 memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
630 priv->curbssparams.ssid_len = bss->ssid_len;
631 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
632
633 priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
634 priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
635
636 memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
637 memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
638 priv->nextSNRNF = 0;
639 priv->numSNRNF = 0;
640
641 netif_carrier_on(priv->dev);
642 if (!priv->tx_pending_len)
643 netif_wake_queue(priv->dev);
644
645 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
646 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
647 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
648
649done:
650 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
651 return ret;
652}
653
654/**
655 * @brief This function prepares an association-class command.
656 *
657 * @param priv A pointer to struct lbs_private structure
658 * @param assoc_req The association request describing the BSS to associate
659 * or reassociate with
660 * @param command The actual command, either CMD_802_11_ASSOCIATE or
661 * CMD_802_11_REASSOCIATE
662 *
663 * @return 0 or -1
664 */
665static int lbs_associate(struct lbs_private *priv,
666 struct assoc_request *assoc_req,
667 u16 command)
668{
669 struct cmd_ds_802_11_associate cmd;
670 int ret = 0;
671 struct bss_descriptor *bss = &assoc_req->bss;
672 u8 *pos = &(cmd.iebuf[0]);
673 u16 tmpcap, tmplen, tmpauth;
674 struct mrvl_ie_ssid_param_set *ssid;
675 struct mrvl_ie_ds_param_set *ds;
676 struct mrvl_ie_cf_param_set *cf;
677 struct mrvl_ie_rates_param_set *rates;
678 struct mrvl_ie_rsn_param_set *rsn;
679 struct mrvl_ie_auth_type *auth;
680
681 lbs_deb_enter(LBS_DEB_ASSOC);
682
683 BUG_ON((command != CMD_802_11_ASSOCIATE) &&
684 (command != CMD_802_11_REASSOCIATE));
685
686 memset(&cmd, 0, sizeof(cmd));
687 cmd.hdr.command = cpu_to_le16(command);
688
689 /* Fill in static fields */
690 memcpy(cmd.bssid, bss->bssid, ETH_ALEN);
691 cmd.listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
692
693 /* Capability info */
694 tmpcap = (bss->capability & CAPINFO_MASK);
695 if (bss->mode == IW_MODE_INFRA)
696 tmpcap |= WLAN_CAPABILITY_ESS;
697 cmd.capability = cpu_to_le16(tmpcap);
698 lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
699
700 /* SSID */
701 ssid = (struct mrvl_ie_ssid_param_set *) pos;
702 ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
703 tmplen = bss->ssid_len;
704 ssid->header.len = cpu_to_le16(tmplen);
705 memcpy(ssid->ssid, bss->ssid, tmplen);
706 pos += sizeof(ssid->header) + tmplen;
707
708 ds = (struct mrvl_ie_ds_param_set *) pos;
709 ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
710 ds->header.len = cpu_to_le16(1);
711 ds->channel = bss->phy.ds.channel;
712 pos += sizeof(ds->header) + 1;
713
714 cf = (struct mrvl_ie_cf_param_set *) pos;
715 cf->header.type = cpu_to_le16(TLV_TYPE_CF);
716 tmplen = sizeof(*cf) - sizeof (cf->header);
717 cf->header.len = cpu_to_le16(tmplen);
718 /* IE payload should be zeroed, firmware fills it in for us */
719 pos += sizeof(*cf);
720
721 rates = (struct mrvl_ie_rates_param_set *) pos;
722 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
723 tmplen = min_t(u16, ARRAY_SIZE(bss->rates), MAX_RATES);
724 memcpy(&rates->rates, &bss->rates, tmplen);
725 if (get_common_rates(priv, rates->rates, &tmplen)) {
726 ret = -1;
727 goto done;
728 }
729 pos += sizeof(rates->header) + tmplen;
730 rates->header.len = cpu_to_le16(tmplen);
731 lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
732
733 /* Copy the infra. association rates into Current BSS state structure */
734 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
735 memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
736
737 /* Set MSB on basic rates as the firmware requires, but _after_
738 * copying to current bss rates.
739 */
740 lbs_set_basic_rate_flags(rates->rates, tmplen);
741
742 /* Firmware v9+ indicate authentication suites as a TLV */
743 if (priv->fwrelease >= 0x09000000) {
744 auth = (struct mrvl_ie_auth_type *) pos;
745 auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
746 auth->header.len = cpu_to_le16(2);
747 tmpauth = iw_auth_to_ieee_auth(priv->secinfo.auth_mode);
748 auth->auth = cpu_to_le16(tmpauth);
749 pos += sizeof(auth->header) + 2;
750
751 lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n",
752 bss->bssid, priv->secinfo.auth_mode);
753 }
754
755 /* WPA/WPA2 IEs */
756 if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
757 rsn = (struct mrvl_ie_rsn_param_set *) pos;
758 /* WPA_IE or WPA2_IE */
759 rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
760 tmplen = (u16) assoc_req->wpa_ie[1];
761 rsn->header.len = cpu_to_le16(tmplen);
762 memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
763 lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: WPA/RSN IE", (u8 *) rsn,
764 sizeof(rsn->header) + tmplen);
765 pos += sizeof(rsn->header) + tmplen;
766 }
767
768 cmd.hdr.size = cpu_to_le16((sizeof(cmd) - sizeof(cmd.iebuf)) +
769 (u16)(pos - (u8 *) &cmd.iebuf));
770
771 /* update curbssparams */
772 priv->channel = bss->phy.ds.channel;
773
774 ret = lbs_cmd_with_response(priv, command, &cmd);
775 if (ret == 0) {
776 ret = lbs_assoc_post(priv,
777 (struct cmd_ds_802_11_associate_response *) &cmd);
778 }
779
780done:
781 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
782 return ret;
783}
784
785/**
786 * @brief Associate to a specific BSS discovered in a scan
787 *
788 * @param priv A pointer to struct lbs_private structure
789 * @param assoc_req The association request describing the BSS to associate with
790 *
791 * @return 0-success, otherwise fail
792 */
793static int lbs_try_associate(struct lbs_private *priv,
794 struct assoc_request *assoc_req)
795{
796 int ret;
797 u8 preamble = RADIO_PREAMBLE_LONG;
798
799 lbs_deb_enter(LBS_DEB_ASSOC);
800
801 /* FW v9 and higher indicate authentication suites as a TLV in the
802 * association command, not as a separate authentication command.
803 */
804 if (priv->fwrelease < 0x09000000) {
805 ret = lbs_set_authentication(priv, assoc_req->bss.bssid,
806 priv->secinfo.auth_mode);
807 if (ret)
808 goto out;
809 }
810
811 /* Use short preamble only when both the BSS and firmware support it */
812 if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
813 preamble = RADIO_PREAMBLE_SHORT;
814
815 ret = lbs_set_radio(priv, preamble, 1);
816 if (ret)
817 goto out;
818
819 ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
820 /* If the association fails with current auth mode, let's
821 * try by changing the auth mode
822 */
823 if ((priv->authtype_auto) &&
824 (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
825 (assoc_req->secinfo.wep_enabled) &&
826 (priv->connect_status != LBS_CONNECTED)) {
827 if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
828 priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
829 else
830 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
831 if (!assoc_helper_wep_keys(priv, assoc_req))
832 ret = lbs_associate(priv, assoc_req,
833 CMD_802_11_ASSOCIATE);
834 }
835
836 if (ret)
837 ret = -1;
838out:
839 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
840 return ret;
841}
842
/**
 * @brief Process the firmware response to an ad-hoc start or join command
 *
 * On success, records the joined/started BSSID and SSID in the current
 * BSS parameters, marks the interface connected, wakes the TX queue and
 * emits an SIOCGIWAP wireless event to userspace.  On failure, reports a
 * disconnect if we were previously connected.
 *
 * @param priv A pointer to struct lbs_private structure
 * @param resp The AD_HOC_START/AD_HOC_JOIN command response from firmware
 *
 * @return 0 on success, -1 on failure
 */
static int lbs_adhoc_post(struct lbs_private *priv,
			  struct cmd_ds_802_11_ad_hoc_result *resp)
{
	int ret = 0;
	u16 command = le16_to_cpu(resp->hdr.command);
	u16 result = le16_to_cpu(resp->hdr.result);
	union iwreq_data wrqu;
	struct bss_descriptor *bss;
	DECLARE_SSID_BUF(ssid);

	lbs_deb_enter(LBS_DEB_JOIN);

	/* The response is only meaningful while an association request is
	 * in flight; without one there is no BSS descriptor to update. */
	if (!priv->in_progress_assoc_req) {
		lbs_deb_join("ADHOC_RESP: no in-progress association "
			"request\n");
		ret = -1;
		goto done;
	}
	bss = &priv->in_progress_assoc_req->bss;

	/*
	 * Join result code 0 --> SUCCESS
	 */
	if (result) {
		lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
		if (priv->connect_status == LBS_CONNECTED)
			lbs_mac_event_disconnected(priv);
		ret = -1;
		goto done;
	}

	/* Send a Media Connected event, according to the Spec */
	priv->connect_status = LBS_CONNECTED;

	if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
		/* Update the created network descriptor with the new BSSID */
		memcpy(bss->bssid, resp->bssid, ETH_ALEN);
	}

	/* Set the BSSID from the joined/started descriptor */
	memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);

	/* Set the new SSID to current SSID */
	memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
	priv->curbssparams.ssid_len = bss->ssid_len;

	netif_carrier_on(priv->dev);
	/* Only restart TX if nothing is already queued for transmission */
	if (!priv->tx_pending_len)
		netif_wake_queue(priv->dev);

	/* Notify userspace (e.g. wpa_supplicant) of the new AP address */
	memset(&wrqu, 0, sizeof(wrqu));
	memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);

	lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
		     print_ssid(ssid, bss->ssid, bss->ssid_len),
		     priv->curbssparams.bssid,
		     priv->channel);

done:
	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
	return ret;
}
907
908/**
909 * @brief Join an adhoc network found in a previous scan
910 *
911 * @param priv A pointer to struct lbs_private structure
912 * @param assoc_req The association request describing the BSS to join
913 *
914 * @return 0 on success, error on failure
915 */
916static int lbs_adhoc_join(struct lbs_private *priv,
917 struct assoc_request *assoc_req)
918{
919 struct cmd_ds_802_11_ad_hoc_join cmd;
920 struct bss_descriptor *bss = &assoc_req->bss;
921 u8 preamble = RADIO_PREAMBLE_LONG;
922 DECLARE_SSID_BUF(ssid);
923 u16 ratesize = 0;
924 int ret = 0;
925
926 lbs_deb_enter(LBS_DEB_ASSOC);
927
928 lbs_deb_join("current SSID '%s', ssid length %u\n",
929 print_ssid(ssid, priv->curbssparams.ssid,
930 priv->curbssparams.ssid_len),
931 priv->curbssparams.ssid_len);
932 lbs_deb_join("requested ssid '%s', ssid length %u\n",
933 print_ssid(ssid, bss->ssid, bss->ssid_len),
934 bss->ssid_len);
935
936 /* check if the requested SSID is already joined */
937 if (priv->curbssparams.ssid_len &&
938 !lbs_ssid_cmp(priv->curbssparams.ssid,
939 priv->curbssparams.ssid_len,
940 bss->ssid, bss->ssid_len) &&
941 (priv->mode == IW_MODE_ADHOC) &&
942 (priv->connect_status == LBS_CONNECTED)) {
943 union iwreq_data wrqu;
944
945 lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as "
946 "current, not attempting to re-join");
947
948 /* Send the re-association event though, because the association
949 * request really was successful, even if just a null-op.
950 */
951 memset(&wrqu, 0, sizeof(wrqu));
952 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid,
953 ETH_ALEN);
954 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
955 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
956 goto out;
957 }
958
959 /* Use short preamble only when both the BSS and firmware support it */
960 if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
961 lbs_deb_join("AdhocJoin: Short preamble\n");
962 preamble = RADIO_PREAMBLE_SHORT;
963 }
964
965 ret = lbs_set_radio(priv, preamble, 1);
966 if (ret)
967 goto out;
968
969 lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
970 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
971
972 priv->adhoccreate = 0;
973 priv->channel = bss->channel;
974
975 /* Build the join command */
976 memset(&cmd, 0, sizeof(cmd));
977 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
978
979 cmd.bss.type = CMD_BSS_TYPE_IBSS;
980 cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
981
982 memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
983 memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
984
985 memcpy(&cmd.bss.ds, &bss->phy.ds, sizeof(struct ieee_ie_ds_param_set));
986
987 memcpy(&cmd.bss.ibss, &bss->ss.ibss,
988 sizeof(struct ieee_ie_ibss_param_set));
989
990 cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
991 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
992 bss->capability, CAPINFO_MASK);
993
994 /* information on BSSID descriptor passed to FW */
995 lbs_deb_join("ADHOC_J_CMD: BSSID = %pM, SSID = '%s'\n",
996 cmd.bss.bssid, cmd.bss.ssid);
997
998 /* Only v8 and below support setting these */
999 if (priv->fwrelease < 0x09000000) {
1000 /* failtimeout */
1001 cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
1002 /* probedelay */
1003 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1004 }
1005
1006 /* Copy Data rates from the rates recorded in scan response */
1007 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
1008 ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), ARRAY_SIZE (bss->rates));
1009 memcpy(cmd.bss.rates, bss->rates, ratesize);
1010 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
1011 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
1012 ret = -1;
1013 goto out;
1014 }
1015
1016 /* Copy the ad-hoc creation rates into Current BSS state structure */
1017 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1018 memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize);
1019
1020 /* Set MSB on basic rates as the firmware requires, but _after_
1021 * copying to current bss rates.
1022 */
1023 lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
1024
1025 cmd.bss.ibss.atimwindow = bss->atimwindow;
1026
1027 if (assoc_req->secinfo.wep_enabled) {
1028 u16 tmp = le16_to_cpu(cmd.bss.capability);
1029 tmp |= WLAN_CAPABILITY_PRIVACY;
1030 cmd.bss.capability = cpu_to_le16(tmp);
1031 }
1032
1033 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
1034 __le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM);
1035
1036 /* wake up first */
1037 ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
1038 CMD_ACT_SET, 0, 0,
1039 &local_ps_mode);
1040 if (ret) {
1041 ret = -1;
1042 goto out;
1043 }
1044 }
1045
1046 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
1047 if (ret == 0) {
1048 ret = lbs_adhoc_post(priv,
1049 (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
1050 }
1051
1052out:
1053 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1054 return ret;
1055}
1056
1057/**
1058 * @brief Start an Adhoc Network
1059 *
1060 * @param priv A pointer to struct lbs_private structure
1061 * @param assoc_req The association request describing the BSS to start
1062 *
1063 * @return 0 on success, error on failure
1064 */
1065static int lbs_adhoc_start(struct lbs_private *priv,
1066 struct assoc_request *assoc_req)
1067{
1068 struct cmd_ds_802_11_ad_hoc_start cmd;
1069 u8 preamble = RADIO_PREAMBLE_SHORT;
1070 size_t ratesize = 0;
1071 u16 tmpcap = 0;
1072 int ret = 0;
1073 DECLARE_SSID_BUF(ssid);
1074
1075 lbs_deb_enter(LBS_DEB_ASSOC);
1076
1077 ret = lbs_set_radio(priv, preamble, 1);
1078 if (ret)
1079 goto out;
1080
1081 /* Build the start command */
1082 memset(&cmd, 0, sizeof(cmd));
1083 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1084
1085 memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len);
1086
1087 lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n",
1088 print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
1089 assoc_req->ssid_len);
1090
1091 cmd.bsstype = CMD_BSS_TYPE_IBSS;
1092
1093 if (priv->beacon_period == 0)
1094 priv->beacon_period = MRVDRV_BEACON_INTERVAL;
1095 cmd.beaconperiod = cpu_to_le16(priv->beacon_period);
1096
1097 WARN_ON(!assoc_req->channel);
1098
1099 /* set Physical parameter set */
1100 cmd.ds.header.id = WLAN_EID_DS_PARAMS;
1101 cmd.ds.header.len = 1;
1102 cmd.ds.channel = assoc_req->channel;
1103
1104 /* set IBSS parameter set */
1105 cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
1106 cmd.ibss.header.len = 2;
1107 cmd.ibss.atimwindow = cpu_to_le16(0);
1108
1109 /* set capability info */
1110 tmpcap = WLAN_CAPABILITY_IBSS;
1111 if (assoc_req->secinfo.wep_enabled ||
1112 assoc_req->secinfo.WPAenabled ||
1113 assoc_req->secinfo.WPA2enabled) {
1114 lbs_deb_join("ADHOC_START: WEP/WPA enabled, privacy on\n");
1115 tmpcap |= WLAN_CAPABILITY_PRIVACY;
1116 } else
1117 lbs_deb_join("ADHOC_START: WEP disabled, privacy off\n");
1118
1119 cmd.capability = cpu_to_le16(tmpcap);
1120
1121 /* Only v8 and below support setting probe delay */
1122 if (priv->fwrelease < 0x09000000)
1123 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1124
1125 ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates));
1126 memcpy(cmd.rates, lbs_bg_rates, ratesize);
1127
1128 /* Copy the ad-hoc creating rates into Current BSS state structure */
1129 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1130 memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize);
1131
1132 /* Set MSB on basic rates as the firmware requires, but _after_
1133 * copying to current bss rates.
1134 */
1135 lbs_set_basic_rate_flags(cmd.rates, ratesize);
1136
1137 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
1138 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
1139
1140 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
1141 assoc_req->channel, assoc_req->band);
1142
1143 priv->adhoccreate = 1;
1144 priv->mode = IW_MODE_ADHOC;
1145
1146 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
1147 if (ret == 0)
1148 ret = lbs_adhoc_post(priv,
1149 (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
1150
1151out:
1152 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1153 return ret;
1154}
1155
1156/**
1157 * @brief Stop and Ad-Hoc network and exit Ad-Hoc mode
1158 *
1159 * @param priv A pointer to struct lbs_private structure
1160 * @return 0 on success, or an error
1161 */
1162int lbs_adhoc_stop(struct lbs_private *priv)
1163{
1164 struct cmd_ds_802_11_ad_hoc_stop cmd;
1165 int ret;
1166
1167 lbs_deb_enter(LBS_DEB_JOIN);
1168
1169 memset(&cmd, 0, sizeof (cmd));
1170 cmd.hdr.size = cpu_to_le16 (sizeof (cmd));
1171
1172 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
1173
1174 /* Clean up everything even if there was an error */
1175 lbs_mac_event_disconnected(priv);
1176
1177 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1178 return ret;
1179}
1180
1181static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
1182 struct bss_descriptor *match_bss)
1183{
1184 if (!secinfo->wep_enabled &&
1185 !secinfo->WPAenabled && !secinfo->WPA2enabled &&
1186 match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
1187 match_bss->rsn_ie[0] != WLAN_EID_RSN &&
1188 !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
1189 return 1;
1190 else
1191 return 0;
1192}
1193
1194static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
1195 struct bss_descriptor *match_bss)
1196{
1197 if (secinfo->wep_enabled &&
1198 !secinfo->WPAenabled && !secinfo->WPA2enabled &&
1199 (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
1200 return 1;
1201 else
1202 return 0;
1203}
1204
1205static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
1206 struct bss_descriptor *match_bss)
1207{
1208 if (!secinfo->wep_enabled && secinfo->WPAenabled &&
1209 (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
1210 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
1211 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
1212 )
1213 return 1;
1214 else
1215 return 0;
1216}
1217
1218static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
1219 struct bss_descriptor *match_bss)
1220{
1221 if (!secinfo->wep_enabled && secinfo->WPA2enabled &&
1222 (match_bss->rsn_ie[0] == WLAN_EID_RSN)
1223 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
1224 (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
1225 )
1226 return 1;
1227 else
1228 return 0;
1229}
1230
1231static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
1232 struct bss_descriptor *match_bss)
1233{
1234 if (!secinfo->wep_enabled &&
1235 !secinfo->WPAenabled && !secinfo->WPA2enabled &&
1236 (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
1237 (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
1238 (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
1239 return 1;
1240 else
1241 return 0;
1242}
1243
1244/**
1245 * @brief Check if a scanned network compatible with the driver settings
1246 *
1247 * WEP WPA WPA2 ad-hoc encrypt Network
1248 * enabled enabled enabled AES mode privacy WPA WPA2 Compatible
1249 * 0 0 0 0 NONE 0 0 0 yes No security
1250 * 1 0 0 0 NONE 1 0 0 yes Static WEP
1251 * 0 1 0 0 x 1x 1 x yes WPA
1252 * 0 0 1 0 x 1x x 1 yes WPA2
1253 * 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES
1254 * 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP
1255 *
1256 *
1257 * @param priv A pointer to struct lbs_private
1258 * @param index Index in scantable to check against current driver settings
1259 * @param mode Network mode: Infrastructure or IBSS
1260 *
1261 * @return Index in scantable, or error code if negative
1262 */
1263static int is_network_compatible(struct lbs_private *priv,
1264 struct bss_descriptor *bss, uint8_t mode)
1265{
1266 int matched = 0;
1267
1268 lbs_deb_enter(LBS_DEB_SCAN);
1269
1270 if (bss->mode != mode)
1271 goto done;
1272
1273 matched = match_bss_no_security(&priv->secinfo, bss);
1274 if (matched)
1275 goto done;
1276 matched = match_bss_static_wep(&priv->secinfo, bss);
1277 if (matched)
1278 goto done;
1279 matched = match_bss_wpa(&priv->secinfo, bss);
1280 if (matched) {
1281 lbs_deb_scan("is_network_compatible() WPA: wpa_ie 0x%x "
1282 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
1283 "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
1284 priv->secinfo.wep_enabled ? "e" : "d",
1285 priv->secinfo.WPAenabled ? "e" : "d",
1286 priv->secinfo.WPA2enabled ? "e" : "d",
1287 (bss->capability & WLAN_CAPABILITY_PRIVACY));
1288 goto done;
1289 }
1290 matched = match_bss_wpa2(&priv->secinfo, bss);
1291 if (matched) {
1292 lbs_deb_scan("is_network_compatible() WPA2: wpa_ie 0x%x "
1293 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
1294 "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
1295 priv->secinfo.wep_enabled ? "e" : "d",
1296 priv->secinfo.WPAenabled ? "e" : "d",
1297 priv->secinfo.WPA2enabled ? "e" : "d",
1298 (bss->capability & WLAN_CAPABILITY_PRIVACY));
1299 goto done;
1300 }
1301 matched = match_bss_dynamic_wep(&priv->secinfo, bss);
1302 if (matched) {
1303 lbs_deb_scan("is_network_compatible() dynamic WEP: "
1304 "wpa_ie 0x%x wpa2_ie 0x%x privacy 0x%x\n",
1305 bss->wpa_ie[0], bss->rsn_ie[0],
1306 (bss->capability & WLAN_CAPABILITY_PRIVACY));
1307 goto done;
1308 }
1309
1310 /* bss security settings don't match those configured on card */
1311 lbs_deb_scan("is_network_compatible() FAILED: wpa_ie 0x%x "
1312 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s privacy 0x%x\n",
1313 bss->wpa_ie[0], bss->rsn_ie[0],
1314 priv->secinfo.wep_enabled ? "e" : "d",
1315 priv->secinfo.WPAenabled ? "e" : "d",
1316 priv->secinfo.WPA2enabled ? "e" : "d",
1317 (bss->capability & WLAN_CAPABILITY_PRIVACY));
1318
1319done:
1320 lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched);
1321 return matched;
1322}
1323
1324/**
1325 * @brief This function finds a specific compatible BSSID in the scan list
1326 *
1327 * Used in association code
1328 *
1329 * @param priv A pointer to struct lbs_private
1330 * @param bssid BSSID to find in the scan list
1331 * @param mode Network mode: Infrastructure or IBSS
1332 *
1333 * @return index in BSSID list, or error return code (< 0)
1334 */
1335static struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv,
1336 uint8_t *bssid, uint8_t mode)
1337{
1338 struct bss_descriptor *iter_bss;
1339 struct bss_descriptor *found_bss = NULL;
1340
1341 lbs_deb_enter(LBS_DEB_SCAN);
1342
1343 if (!bssid)
1344 goto out;
1345
1346 lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN);
1347
1348 /* Look through the scan table for a compatible match. The loop will
1349 * continue past a matched bssid that is not compatible in case there
1350 * is an AP with multiple SSIDs assigned to the same BSSID
1351 */
1352 mutex_lock(&priv->lock);
1353 list_for_each_entry(iter_bss, &priv->network_list, list) {
1354 if (compare_ether_addr(iter_bss->bssid, bssid))
1355 continue; /* bssid doesn't match */
1356 switch (mode) {
1357 case IW_MODE_INFRA:
1358 case IW_MODE_ADHOC:
1359 if (!is_network_compatible(priv, iter_bss, mode))
1360 break;
1361 found_bss = iter_bss;
1362 break;
1363 default:
1364 found_bss = iter_bss;
1365 break;
1366 }
1367 }
1368 mutex_unlock(&priv->lock);
1369
1370out:
1371 lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
1372 return found_bss;
1373}
1374
1375/**
1376 * @brief This function finds ssid in ssid list.
1377 *
1378 * Used in association code
1379 *
1380 * @param priv A pointer to struct lbs_private
1381 * @param ssid SSID to find in the list
1382 * @param bssid BSSID to qualify the SSID selection (if provided)
1383 * @param mode Network mode: Infrastructure or IBSS
1384 *
1385 * @return index in BSSID list
1386 */
1387static struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv,
1388 uint8_t *ssid, uint8_t ssid_len,
1389 uint8_t *bssid, uint8_t mode,
1390 int channel)
1391{
1392 u32 bestrssi = 0;
1393 struct bss_descriptor *iter_bss = NULL;
1394 struct bss_descriptor *found_bss = NULL;
1395 struct bss_descriptor *tmp_oldest = NULL;
1396
1397 lbs_deb_enter(LBS_DEB_SCAN);
1398
1399 mutex_lock(&priv->lock);
1400
1401 list_for_each_entry(iter_bss, &priv->network_list, list) {
1402 if (!tmp_oldest ||
1403 (iter_bss->last_scanned < tmp_oldest->last_scanned))
1404 tmp_oldest = iter_bss;
1405
1406 if (lbs_ssid_cmp(iter_bss->ssid, iter_bss->ssid_len,
1407 ssid, ssid_len) != 0)
1408 continue; /* ssid doesn't match */
1409 if (bssid && compare_ether_addr(iter_bss->bssid, bssid) != 0)
1410 continue; /* bssid doesn't match */
1411 if ((channel > 0) && (iter_bss->channel != channel))
1412 continue; /* channel doesn't match */
1413
1414 switch (mode) {
1415 case IW_MODE_INFRA:
1416 case IW_MODE_ADHOC:
1417 if (!is_network_compatible(priv, iter_bss, mode))
1418 break;
1419
1420 if (bssid) {
1421 /* Found requested BSSID */
1422 found_bss = iter_bss;
1423 goto out;
1424 }
1425
1426 if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
1427 bestrssi = SCAN_RSSI(iter_bss->rssi);
1428 found_bss = iter_bss;
1429 }
1430 break;
1431 case IW_MODE_AUTO:
1432 default:
1433 if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
1434 bestrssi = SCAN_RSSI(iter_bss->rssi);
1435 found_bss = iter_bss;
1436 }
1437 break;
1438 }
1439 }
1440
1441out:
1442 mutex_unlock(&priv->lock);
1443 lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
1444 return found_bss;
1445}
1446
/**
 * @brief Associate (or start an ad-hoc network) by SSID
 *
 * Scans for the requested SSID, then either associates (infrastructure),
 * joins the found ad-hoc network, or starts a new ad-hoc network if the
 * SSID was not found.
 *
 * @param priv A pointer to struct lbs_private
 * @param assoc_req The association request carrying the SSID and mode
 *
 * @return 0 on success or when no action was taken, error otherwise
 */
static int assoc_helper_essid(struct lbs_private *priv,
                              struct assoc_request * assoc_req)
{
	int ret = 0;
	struct bss_descriptor * bss;
	int channel = -1;
	DECLARE_SSID_BUF(ssid);

	lbs_deb_enter(LBS_DEB_ASSOC);

	/* FIXME: take channel into account when picking SSIDs if a channel
	 * is set.
	 */

	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
		channel = assoc_req->channel;

	lbs_deb_assoc("SSID '%s' requested\n",
	              print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len));
	if (assoc_req->mode == IW_MODE_INFRA) {
		/* Refresh the scan table for this SSID before searching it */
		lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
			assoc_req->ssid_len);

		bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
				assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel);
		if (bss != NULL) {
			memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
			ret = lbs_try_associate(priv, assoc_req);
		} else {
			lbs_deb_assoc("SSID not found; cannot associate\n");
		}
	} else if (assoc_req->mode == IW_MODE_ADHOC) {
		/* Scan for the network, do not save previous results. Stale
		 * scan data will cause us to join a non-existent adhoc network
		 */
		lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
			assoc_req->ssid_len);

		/* Search for the requested SSID in the scan table */
		bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
				assoc_req->ssid_len, NULL, IW_MODE_ADHOC, channel);
		if (bss != NULL) {
			lbs_deb_assoc("SSID found, will join\n");
			memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
			lbs_adhoc_join(priv, assoc_req);
		} else {
			/* else send START command */
			lbs_deb_assoc("SSID not found, creating adhoc network\n");
			memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
				IEEE80211_MAX_SSID_LEN);
			assoc_req->bss.ssid_len = assoc_req->ssid_len;
			lbs_adhoc_start(priv, assoc_req);
		}
	}

	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
	return ret;
}
1505
1506
1507static int assoc_helper_bssid(struct lbs_private *priv,
1508 struct assoc_request * assoc_req)
1509{
1510 int ret = 0;
1511 struct bss_descriptor * bss;
1512
1513 lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %pM", assoc_req->bssid);
1514
1515 /* Search for index position in list for requested MAC */
1516 bss = lbs_find_bssid_in_list(priv, assoc_req->bssid,
1517 assoc_req->mode);
1518 if (bss == NULL) {
1519 lbs_deb_assoc("ASSOC: WAP: BSSID %pM not found, "
1520 "cannot associate.\n", assoc_req->bssid);
1521 goto out;
1522 }
1523
1524 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
1525 if (assoc_req->mode == IW_MODE_INFRA) {
1526 ret = lbs_try_associate(priv, assoc_req);
1527 lbs_deb_assoc("ASSOC: lbs_try_associate(bssid) returned %d\n",
1528 ret);
1529 } else if (assoc_req->mode == IW_MODE_ADHOC) {
1530 lbs_adhoc_join(priv, assoc_req);
1531 }
1532
1533out:
1534 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1535 return ret;
1536}
1537
1538
1539static int assoc_helper_associate(struct lbs_private *priv,
1540 struct assoc_request * assoc_req)
1541{
1542 int ret = 0, done = 0;
1543
1544 lbs_deb_enter(LBS_DEB_ASSOC);
1545
1546 /* If we're given and 'any' BSSID, try associating based on SSID */
1547
1548 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
1549 if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
1550 compare_ether_addr(bssid_off, assoc_req->bssid)) {
1551 ret = assoc_helper_bssid(priv, assoc_req);
1552 done = 1;
1553 }
1554 }
1555
1556 if (!done && test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
1557 ret = assoc_helper_essid(priv, assoc_req);
1558 }
1559
1560 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1561 return ret;
1562}
1563
1564
1565static int assoc_helper_mode(struct lbs_private *priv,
1566 struct assoc_request * assoc_req)
1567{
1568 int ret = 0;
1569
1570 lbs_deb_enter(LBS_DEB_ASSOC);
1571
1572 if (assoc_req->mode == priv->mode)
1573 goto done;
1574
1575 if (assoc_req->mode == IW_MODE_INFRA) {
1576 if (priv->psstate != PS_STATE_FULL_POWER)
1577 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
1578 priv->psmode = LBS802_11POWERMODECAM;
1579 }
1580
1581 priv->mode = assoc_req->mode;
1582 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
1583 assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
1584
1585done:
1586 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1587 return ret;
1588}
1589
/**
 * @brief Switch the radio to the channel requested for association
 *
 * If a mesh interface exists, its configuration is stopped before the
 * channel change and restarted afterwards, because the firmware refuses
 * channel changes while the mesh is configured.
 *
 * @param priv A pointer to struct lbs_private
 * @param assoc_req The association request carrying the target channel
 *
 * @return 0 on success, error on failure
 */
static int assoc_helper_channel(struct lbs_private *priv,
                                struct assoc_request * assoc_req)
{
	int ret = 0;

	lbs_deb_enter(LBS_DEB_ASSOC);

	/* Refresh the cached channel from firmware before comparing */
	ret = lbs_update_channel(priv);
	if (ret) {
		lbs_deb_assoc("ASSOC: channel: error getting channel.\n");
		goto done;
	}

	if (assoc_req->channel == priv->channel)
		goto done;

	if (priv->mesh_dev) {
		/* Change mesh channel first; 21.p21 firmware won't let
		   you change channel otherwise (even though it'll return
		   an error to this */
		lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
				assoc_req->channel);
	}

	lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
		      priv->channel, assoc_req->channel);

	ret = lbs_set_channel(priv, assoc_req->channel);
	if (ret < 0)
		lbs_deb_assoc("ASSOC: channel: error setting channel.\n");

	/* FIXME: shouldn't need to grab the channel _again_ after setting
	 * it since the firmware is supposed to return the new channel, but
	 * whatever... */
	ret = lbs_update_channel(priv);
	if (ret) {
		lbs_deb_assoc("ASSOC: channel: error getting channel.\n");
		goto done;
	}

	/* Bail out (but still restore mesh) if the firmware did not
	 * actually move to the requested channel */
	if (assoc_req->channel != priv->channel) {
		lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
		              assoc_req->channel);
		goto restore_mesh;
	}

	if (assoc_req->secinfo.wep_enabled &&
	    (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
	     assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
		/* Make sure WEP keys are re-sent to firmware */
		set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
	}

	/* Must restart/rejoin adhoc networks after channel change */
	set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);

 restore_mesh:
	if (priv->mesh_dev)
		lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
				priv->channel);

 done:
	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
	return ret;
}
1655
1656
/**
 * @brief Push the request's WEP keys to firmware and into driver state
 *
 * Installs (or removes, when no key is set) the WEP keys, toggles the
 * MAC's WEP packet filter accordingly, and mirrors the keys into
 * priv->wep_keys under the driver lock.
 *
 * @param priv A pointer to struct lbs_private
 * @param assoc_req The association request carrying the WEP keys
 *
 * @return 0 on success, error on failure
 */
static int assoc_helper_wep_keys(struct lbs_private *priv,
				 struct assoc_request *assoc_req)
{
	int i;
	int ret = 0;

	lbs_deb_enter(LBS_DEB_ASSOC);

	/* Set or remove WEP keys */
	if (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
	    assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)
		ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_ADD, assoc_req);
	else
		ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_REMOVE, assoc_req);

	if (ret)
		goto out;

	/* enable/disable the MAC's WEP packet filter */
	if (assoc_req->secinfo.wep_enabled)
		priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE;
	else
		priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE;

	lbs_set_mac_control(priv);

	/* Mirror the keys into driver state under the lock so readers see
	 * a consistent key set */
	mutex_lock(&priv->lock);

	/* Copy WEP keys into priv wep key fields */
	for (i = 0; i < 4; i++) {
		memcpy(&priv->wep_keys[i], &assoc_req->wep_keys[i],
		       sizeof(struct enc_key));
	}
	priv->wep_tx_keyidx = assoc_req->wep_tx_keyidx;

	mutex_unlock(&priv->lock);

out:
	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
	return ret;
}
1698
1699static int assoc_helper_secinfo(struct lbs_private *priv,
1700 struct assoc_request * assoc_req)
1701{
1702 int ret = 0;
1703 uint16_t do_wpa;
1704 uint16_t rsn = 0;
1705
1706 lbs_deb_enter(LBS_DEB_ASSOC);
1707
1708 memcpy(&priv->secinfo, &assoc_req->secinfo,
1709 sizeof(struct lbs_802_11_security));
1710
1711 lbs_set_mac_control(priv);
1712
1713 /* If RSN is already enabled, don't try to enable it again, since
1714 * ENABLE_RSN resets internal state machines and will clobber the
1715 * 4-way WPA handshake.
1716 */
1717
1718 /* Get RSN enabled/disabled */
1719 ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_GET, &rsn);
1720 if (ret) {
1721 lbs_deb_assoc("Failed to get RSN status: %d\n", ret);
1722 goto out;
1723 }
1724
1725 /* Don't re-enable RSN if it's already enabled */
1726 do_wpa = assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled;
1727 if (do_wpa == rsn)
1728 goto out;
1729
1730 /* Set RSN enabled/disabled */
1731 ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_SET, &do_wpa);
1732
1733out:
1734 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1735 return ret;
1736}
1737
1738
/**
 * @brief Push WPA unicast and multicast keys to firmware
 *
 * @param priv A pointer to struct lbs_private
 * @param assoc_req The association request carrying the WPA keys
 *
 * @return 0 on success, error on failure
 */
static int assoc_helper_wpa_keys(struct lbs_private *priv,
				 struct assoc_request * assoc_req)
{
	int ret = 0;
	/* Save the original flags so they can be restored after each
	 * temporarily-masked key_material command below */
	unsigned int flags = assoc_req->flags;

	lbs_deb_enter(LBS_DEB_ASSOC);

	/* Work around older firmware bug where WPA unicast and multicast
	 * keys must be set independently. Seen in SDIO parts with firmware
	 * version 5.0.11p0.
	 */

	if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
		/* Temporarily hide the mcast key so only the unicast key
		 * is sent, then restore the saved flags */
		clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
		ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
		assoc_req->flags = flags;
	}

	if (ret)
		goto out;

	memcpy(&priv->wpa_unicast_key, &assoc_req->wpa_unicast_key,
	       sizeof(struct enc_key));

	if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) {
		/* Same trick in the other direction for the mcast key */
		clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);

		ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
		assoc_req->flags = flags;

		memcpy(&priv->wpa_mcast_key, &assoc_req->wpa_mcast_key,
		       sizeof(struct enc_key));
	}

out:
	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
	return ret;
}
1778
1779
1780static int assoc_helper_wpa_ie(struct lbs_private *priv,
1781 struct assoc_request * assoc_req)
1782{
1783 int ret = 0;
1784
1785 lbs_deb_enter(LBS_DEB_ASSOC);
1786
1787 if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
1788 memcpy(&priv->wpa_ie, &assoc_req->wpa_ie, assoc_req->wpa_ie_len);
1789 priv->wpa_ie_len = assoc_req->wpa_ie_len;
1790 } else {
1791 memset(&priv->wpa_ie, 0, MAX_WPA_IE_LEN);
1792 priv->wpa_ie_len = 0;
1793 }
1794
1795 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1796 return ret;
1797}
1798
1799
1800static int should_deauth_infrastructure(struct lbs_private *priv,
1801 struct assoc_request * assoc_req)
1802{
1803 int ret = 0;
1804
1805 if (priv->connect_status != LBS_CONNECTED)
1806 return 0;
1807
1808 lbs_deb_enter(LBS_DEB_ASSOC);
1809 if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
1810 lbs_deb_assoc("Deauthenticating due to new SSID\n");
1811 ret = 1;
1812 goto out;
1813 }
1814
1815 if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
1816 if (priv->secinfo.auth_mode != assoc_req->secinfo.auth_mode) {
1817 lbs_deb_assoc("Deauthenticating due to new security\n");
1818 ret = 1;
1819 goto out;
1820 }
1821 }
1822
1823 if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
1824 lbs_deb_assoc("Deauthenticating due to new BSSID\n");
1825 ret = 1;
1826 goto out;
1827 }
1828
1829 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
1830 lbs_deb_assoc("Deauthenticating due to channel switch\n");
1831 ret = 1;
1832 goto out;
1833 }
1834
1835 /* FIXME: deal with 'auto' mode somehow */
1836 if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
1837 if (assoc_req->mode != IW_MODE_INFRA) {
1838 lbs_deb_assoc("Deauthenticating due to leaving "
1839 "infra mode\n");
1840 ret = 1;
1841 goto out;
1842 }
1843 }
1844
1845out:
1846 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1847 return ret;
1848}
1849
1850
1851static int should_stop_adhoc(struct lbs_private *priv,
1852 struct assoc_request * assoc_req)
1853{
1854 lbs_deb_enter(LBS_DEB_ASSOC);
1855
1856 if (priv->connect_status != LBS_CONNECTED)
1857 return 0;
1858
1859 if (lbs_ssid_cmp(priv->curbssparams.ssid,
1860 priv->curbssparams.ssid_len,
1861 assoc_req->ssid, assoc_req->ssid_len) != 0)
1862 return 1;
1863
1864 /* FIXME: deal with 'auto' mode somehow */
1865 if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
1866 if (assoc_req->mode != IW_MODE_ADHOC)
1867 return 1;
1868 }
1869
1870 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
1871 if (assoc_req->channel != priv->channel)
1872 return 1;
1873 }
1874
1875 lbs_deb_leave(LBS_DEB_ASSOC);
1876 return 0;
1877}
1878
1879
1880/**
1881 * @brief This function finds the best SSID in the Scan List
1882 *
1883 * Search the scan table for the best SSID that also matches the current
1884 * adapter network preference (infrastructure or adhoc)
1885 *
1886 * @param priv A pointer to struct lbs_private
1887 *
1888 * @return index in BSSID list
1889 */
1890static struct bss_descriptor *lbs_find_best_ssid_in_list(
1891 struct lbs_private *priv, uint8_t mode)
1892{
1893 uint8_t bestrssi = 0;
1894 struct bss_descriptor *iter_bss;
1895 struct bss_descriptor *best_bss = NULL;
1896
1897 lbs_deb_enter(LBS_DEB_SCAN);
1898
1899 mutex_lock(&priv->lock);
1900
1901 list_for_each_entry(iter_bss, &priv->network_list, list) {
1902 switch (mode) {
1903 case IW_MODE_INFRA:
1904 case IW_MODE_ADHOC:
1905 if (!is_network_compatible(priv, iter_bss, mode))
1906 break;
1907 if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
1908 break;
1909 bestrssi = SCAN_RSSI(iter_bss->rssi);
1910 best_bss = iter_bss;
1911 break;
1912 case IW_MODE_AUTO:
1913 default:
1914 if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
1915 break;
1916 bestrssi = SCAN_RSSI(iter_bss->rssi);
1917 best_bss = iter_bss;
1918 break;
1919 }
1920 }
1921
1922 mutex_unlock(&priv->lock);
1923 lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss);
1924 return best_bss;
1925}
1926
1927/**
1928 * @brief Find the best AP
1929 *
1930 * Used from association worker.
1931 *
1932 * @param priv A pointer to struct lbs_private structure
1933 * @param pSSID A pointer to AP's ssid
1934 *
1935 * @return 0--success, otherwise--fail
1936 */
1937static int lbs_find_best_network_ssid(struct lbs_private *priv,
1938 uint8_t *out_ssid, uint8_t *out_ssid_len, uint8_t preferred_mode,
1939 uint8_t *out_mode)
1940{
1941 int ret = -1;
1942 struct bss_descriptor *found;
1943
1944 lbs_deb_enter(LBS_DEB_SCAN);
1945
1946 priv->scan_ssid_len = 0;
1947 lbs_scan_networks(priv, 1);
1948 if (priv->surpriseremoved)
1949 goto out;
1950
1951 found = lbs_find_best_ssid_in_list(priv, preferred_mode);
1952 if (found && (found->ssid_len > 0)) {
1953 memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
1954 *out_ssid_len = found->ssid_len;
1955 *out_mode = found->mode;
1956 ret = 0;
1957 }
1958
1959out:
1960 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
1961 return ret;
1962}
1963
1964
/*
 * lbs_association_worker - deferred work that applies a queued association
 * request to the firmware.
 *
 * Takes ownership of priv->pending_assoc_req under priv->lock, resolves an
 * "any" SSID via a scan if needed, deauthenticates / stops the current
 * network when the new configuration requires it, pushes each flagged
 * attribute to the firmware in the order the firmware expects, and finally
 * triggers the association itself.  The request is freed here regardless
 * of outcome.
 */
void lbs_association_worker(struct work_struct *work)
{
	struct lbs_private *priv = container_of(work, struct lbs_private,
		assoc_work.work);
	struct assoc_request * assoc_req = NULL;
	int ret = 0;
	int find_any_ssid = 0;
	DECLARE_SSID_BUF(ssid);

	lbs_deb_enter(LBS_DEB_ASSOC);

	/* Atomically claim the pending request and publish it as
	 * in-progress so concurrent configuration paths can see it. */
	mutex_lock(&priv->lock);
	assoc_req = priv->pending_assoc_req;
	priv->pending_assoc_req = NULL;
	priv->in_progress_assoc_req = assoc_req;
	mutex_unlock(&priv->lock);

	if (!assoc_req)
		goto done;

	lbs_deb_assoc(
	    "Association Request:\n"
	    " flags: 0x%08lx\n"
	    " SSID: '%s'\n"
	    " chann: %d\n"
	    " band: %d\n"
	    " mode: %d\n"
	    " BSSID: %pM\n"
	    " secinfo: %s%s%s\n"
	    " auth_mode: %d\n",
	    assoc_req->flags,
	    print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
	    assoc_req->channel, assoc_req->band, assoc_req->mode,
	    assoc_req->bssid,
	    assoc_req->secinfo.WPAenabled ? " WPA" : "",
	    assoc_req->secinfo.WPA2enabled ? " WPA2" : "",
	    assoc_req->secinfo.wep_enabled ? " WEP" : "",
	    assoc_req->secinfo.auth_mode);

	/* If 'any' SSID was specified, find an SSID to associate with */
	if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
	    !assoc_req->ssid_len)
		find_any_ssid = 1;

	/* But don't use 'any' SSID if there's a valid locked BSSID to use */
	if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
		if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
		    compare_ether_addr(assoc_req->bssid, bssid_off))
			find_any_ssid = 0;
	}

	if (find_any_ssid) {
		u8 new_mode = assoc_req->mode;

		/* Scan for the strongest network; on success its SSID is
		 * written straight into the request. */
		ret = lbs_find_best_network_ssid(priv, assoc_req->ssid,
				&assoc_req->ssid_len, assoc_req->mode, &new_mode);
		if (ret) {
			lbs_deb_assoc("Could not find best network\n");
			ret = -ENETUNREACH;
			goto out;
		}

		/* Ensure we switch to the mode of the AP */
		if (assoc_req->mode == IW_MODE_AUTO) {
			set_bit(ASSOC_FLAG_MODE, &assoc_req->flags);
			assoc_req->mode = new_mode;
		}
	}

	/*
	 * Check if the attributes being changing require deauthentication
	 * from the currently associated infrastructure access point.
	 */
	if (priv->mode == IW_MODE_INFRA) {
		if (should_deauth_infrastructure(priv, assoc_req)) {
			ret = lbs_cmd_80211_deauthenticate(priv,
							   priv->curbssparams.bssid,
							   WLAN_REASON_DEAUTH_LEAVING);
			if (ret) {
				lbs_deb_assoc("Deauthentication due to new "
					"configuration request failed: %d\n",
					ret);
			}
		}
	} else if (priv->mode == IW_MODE_ADHOC) {
		if (should_stop_adhoc(priv, assoc_req)) {
			ret = lbs_adhoc_stop(priv);
			if (ret) {
				lbs_deb_assoc("Teardown of AdHoc network due to "
					"new configuration request failed: %d\n",
					ret);
			}

		}
	}

	/* Send the various configuration bits to the firmware.
	 * Each helper bails out the whole worker on failure. */
	if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
		ret = assoc_helper_mode(priv, assoc_req);
		if (ret)
			goto out;
	}

	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
		ret = assoc_helper_channel(priv, assoc_req);
		if (ret)
			goto out;
	}

	if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
		ret = assoc_helper_secinfo(priv, assoc_req);
		if (ret)
			goto out;
	}

	if (test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) {
		ret = assoc_helper_wpa_ie(priv, assoc_req);
		if (ret)
			goto out;
	}

	/*
	 * v10 FW wants WPA keys to be set/cleared before WEP key operations,
	 * otherwise it will fail to correctly associate to WEP networks.
	 * Other firmware versions don't appear to care.
	 */
	if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
	    test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
		ret = assoc_helper_wpa_keys(priv, assoc_req);
		if (ret)
			goto out;
	}

	if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
	    test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
		ret = assoc_helper_wep_keys(priv, assoc_req);
		if (ret)
			goto out;
	}


	/* SSID/BSSID should be the _last_ config option set, because they
	 * trigger the association attempt.
	 */
	if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
	    test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
		int success = 1;

		ret = assoc_helper_associate(priv, assoc_req);
		if (ret) {
			lbs_deb_assoc("ASSOC: association unsuccessful: %d\n",
				ret);
			success = 0;
		}

		/* Even a "successful" command may leave us unconnected */
		if (priv->connect_status != LBS_CONNECTED) {
			lbs_deb_assoc("ASSOC: association unsuccessful, "
				"not connected\n");
			success = 0;
		}

		if (success) {
			lbs_deb_assoc("associated to %pM\n",
				priv->curbssparams.bssid);
			/* Kick off an RSSI query so signal data is fresh */
			lbs_prepare_and_send_command(priv,
				CMD_802_11_RSSI,
				0, CMD_OPTION_WAITFORRSP, 0, NULL);
		} else {
			ret = -1;
		}
	}

out:
	if (ret) {
		lbs_deb_assoc("ASSOC: reconfiguration attempt unsuccessful: %d\n",
			ret);
	}

	/* Clear the in-progress marker and release the request */
	mutex_lock(&priv->lock);
	priv->in_progress_assoc_req = NULL;
	mutex_unlock(&priv->lock);
	kfree(assoc_req);

done:
	lbs_deb_leave(LBS_DEB_ASSOC);
}
2151
2152
/*
 * Return the pending association request, allocating one if none exists.
 * Every attribute the caller has not explicitly flagged in assoc_req->flags
 * is seeded from the adapter's current configuration, so the request always
 * describes a complete target state.
 *
 * Caller MUST hold any necessary locks
 */
struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
{
	struct assoc_request * assoc_req;

	lbs_deb_enter(LBS_DEB_ASSOC);
	/* Allocate lazily; a previously queued, not-yet-dispatched
	 * request is reused and amended instead. */
	if (!priv->pending_assoc_req) {
		priv->pending_assoc_req = kzalloc(sizeof(struct assoc_request),
						  GFP_KERNEL);
		if (!priv->pending_assoc_req) {
			lbs_pr_info("Not enough memory to allocate association"
				" request!\n");
			return NULL;
		}
	}

	/* Copy current configuration attributes to the association request,
	 * but don't overwrite any that are already set.
	 */
	assoc_req = priv->pending_assoc_req;
	if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
		memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
		       IEEE80211_MAX_SSID_LEN);
		assoc_req->ssid_len = priv->curbssparams.ssid_len;
	}

	if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
		assoc_req->channel = priv->channel;

	if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
		assoc_req->band = priv->curbssparams.band;

	if (!test_bit(ASSOC_FLAG_MODE, &assoc_req->flags))
		assoc_req->mode = priv->mode;

	if (!test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
		memcpy(&assoc_req->bssid, priv->curbssparams.bssid,
			ETH_ALEN);
	}

	/* All four WEP key slots are copied together */
	if (!test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)) {
		int i;
		for (i = 0; i < 4; i++) {
			memcpy(&assoc_req->wep_keys[i], &priv->wep_keys[i],
			       sizeof(struct enc_key));
		}
	}

	if (!test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags))
		assoc_req->wep_tx_keyidx = priv->wep_tx_keyidx;

	if (!test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) {
		memcpy(&assoc_req->wpa_mcast_key, &priv->wpa_mcast_key,
		       sizeof(struct enc_key));
	}

	if (!test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
		memcpy(&assoc_req->wpa_unicast_key, &priv->wpa_unicast_key,
		       sizeof(struct enc_key));
	}

	if (!test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
		memcpy(&assoc_req->secinfo, &priv->secinfo,
		       sizeof(struct lbs_802_11_security));
	}

	if (!test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) {
		memcpy(&assoc_req->wpa_ie, &priv->wpa_ie,
		       MAX_WPA_IE_LEN);
		assoc_req->wpa_ie_len = priv->wpa_ie_len;
	}

	lbs_deb_leave(LBS_DEB_ASSOC);
	return assoc_req;
}
2230
2231
2232/**
2233 * @brief Deauthenticate from a specific BSS
2234 *
2235 * @param priv A pointer to struct lbs_private structure
2236 * @param bssid The specific BSS to deauthenticate from
2237 * @param reason The 802.11 sec. 7.3.1.7 Reason Code for deauthenticating
2238 *
2239 * @return 0 on success, error on failure
2240 */
2241int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
2242 u16 reason)
2243{
2244 struct cmd_ds_802_11_deauthenticate cmd;
2245 int ret;
2246
2247 lbs_deb_enter(LBS_DEB_JOIN);
2248
2249 memset(&cmd, 0, sizeof(cmd));
2250 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
2251 memcpy(cmd.macaddr, &bssid[0], ETH_ALEN);
2252 cmd.reasoncode = cpu_to_le16(reason);
2253
2254 ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd);
2255
2256 /* Clean up everything even if there was an error; can't assume that
2257 * we're still authenticated to the AP after trying to deauth.
2258 */
2259 lbs_mac_event_disconnected(priv);
2260
2261 lbs_deb_leave(LBS_DEB_JOIN);
2262 return ret;
2263}
2264
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
deleted file mode 100644
index 40621b789fc5..000000000000
--- a/drivers/net/wireless/libertas/assoc.h
+++ /dev/null
@@ -1,155 +0,0 @@
/* Copyright (C) 2006, Red Hat, Inc. */

#ifndef _LBS_ASSOC_H_
#define _LBS_ASSOC_H_


#include "defs.h"
#include "host.h"


struct lbs_private;

/*
 * In theory, the IE is limited to the IE length, 255,
 * but in practice 64 bytes are enough.
 */
#define MAX_WPA_IE_LEN 64



/** Security configuration: which auth/cipher schemes are in effect */
struct lbs_802_11_security {
	u8 WPAenabled;
	u8 WPA2enabled;
	u8 wep_enabled;
	u8 auth_mode;
	u32 key_mgmt;
};

/** Current Basic Service Set State Structure */
struct current_bss_params {
	/** bssid */
	u8 bssid[ETH_ALEN];
	/** ssid (NUL-padded; length tracked separately in ssid_len) */
	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
	u8 ssid_len;

	/** band */
	u8 band;
	/** channel is directly in priv->channel */
	/** zero-terminated array of supported data rates */
	u8 rates[MAX_RATES + 1];
};

/**
 * @brief Structure used to store information for each beacon/probe response
 */
struct bss_descriptor {
	u8 bssid[ETH_ALEN];

	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
	u8 ssid_len;

	u16 capability;
	u32 rssi;
	u32 channel;
	u16 beaconperiod;
	__le16 atimwindow;

	/* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
	u8 mode;

	/* zero-terminated array of supported data rates */
	u8 rates[MAX_RATES + 1];

	/* jiffies timestamp used to age out stale scan entries */
	unsigned long last_scanned;

	union ieee_phy_param_set phy;
	union ieee_ss_param_set ss;

	u8 wpa_ie[MAX_WPA_IE_LEN];
	size_t wpa_ie_len;
	u8 rsn_ie[MAX_WPA_IE_LEN];
	size_t rsn_ie_len;

	u8 mesh;

	struct list_head list;
};

/** Association request
 *
 * Encapsulates all the options that describe a specific association request
 * or configuration of the wireless card's radio, mode, and security settings.
 */
struct assoc_request {
/* Bit numbers for 'flags'; a set bit means the corresponding field below
 * was explicitly requested and must be pushed to the firmware. */
#define ASSOC_FLAG_SSID			1
#define ASSOC_FLAG_CHANNEL		2
#define ASSOC_FLAG_BAND			3
#define ASSOC_FLAG_MODE			4
#define ASSOC_FLAG_BSSID		5
#define ASSOC_FLAG_WEP_KEYS		6
#define ASSOC_FLAG_WEP_TX_KEYIDX	7
#define ASSOC_FLAG_WPA_MCAST_KEY	8
#define ASSOC_FLAG_WPA_UCAST_KEY	9
#define ASSOC_FLAG_SECINFO		10
#define ASSOC_FLAG_WPA_IE		11
	unsigned long flags;

	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
	u8 ssid_len;
	u8 channel;
	u8 band;
	u8 mode;
	u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));

	/** WEP keys */
	struct enc_key wep_keys[4];
	u16 wep_tx_keyidx;

	/** WPA keys */
	struct enc_key wpa_mcast_key;
	struct enc_key wpa_unicast_key;

	struct lbs_802_11_security secinfo;

	/** WPA Information Elements*/
	u8 wpa_ie[MAX_WPA_IE_LEN];
	u8 wpa_ie_len;

	/* BSS to associate with for infrastructure or Ad-Hoc join */
	struct bss_descriptor bss;
};


extern u8 lbs_bg_rates[MAX_RATES];

void lbs_association_worker(struct work_struct *work);
struct assoc_request *lbs_get_association_request(struct lbs_private *priv);

int lbs_adhoc_stop(struct lbs_private *priv);

int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
				 u8 bssid[ETH_ALEN], u16 reason);

int lbs_cmd_802_11_rssi(struct lbs_private *priv,
			struct cmd_ds_command *cmd);
int lbs_ret_802_11_rssi(struct lbs_private *priv,
			struct cmd_ds_command *resp);

int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
		     struct cmd_ds_command *cmd,
		     u16 cmd_action);
int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
			    struct cmd_ds_command *resp);

int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
			   struct assoc_request *assoc);

int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
			      uint16_t *enable);

int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
				struct assoc_request *assoc);

#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 9d5d3ccf08c8..25f902760980 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -7,8 +7,11 @@
7 */ 7 */
8 8
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/ieee80211.h>
10#include <net/cfg80211.h> 11#include <net/cfg80211.h>
12#include <asm/unaligned.h>
11 13
14#include "decl.h"
12#include "cfg.h" 15#include "cfg.h"
13#include "cmd.h" 16#include "cmd.h"
14 17
@@ -39,26 +42,27 @@ static struct ieee80211_channel lbs_2ghz_channels[] = {
39 CHAN2G(14, 2484, 0), 42 CHAN2G(14, 2484, 0),
40}; 43};
41 44
42#define RATETAB_ENT(_rate, _rateid, _flags) { \ 45#define RATETAB_ENT(_rate, _hw_value, _flags) { \
43 .bitrate = (_rate), \ 46 .bitrate = (_rate), \
44 .hw_value = (_rateid), \ 47 .hw_value = (_hw_value), \
45 .flags = (_flags), \ 48 .flags = (_flags), \
46} 49}
47 50
48 51
52/* Table 6 in section 3.2.1.1 */
49static struct ieee80211_rate lbs_rates[] = { 53static struct ieee80211_rate lbs_rates[] = {
50 RATETAB_ENT(10, 0x1, 0), 54 RATETAB_ENT(10, 0, 0),
51 RATETAB_ENT(20, 0x2, 0), 55 RATETAB_ENT(20, 1, 0),
52 RATETAB_ENT(55, 0x4, 0), 56 RATETAB_ENT(55, 2, 0),
53 RATETAB_ENT(110, 0x8, 0), 57 RATETAB_ENT(110, 3, 0),
54 RATETAB_ENT(60, 0x10, 0), 58 RATETAB_ENT(60, 9, 0),
55 RATETAB_ENT(90, 0x20, 0), 59 RATETAB_ENT(90, 6, 0),
56 RATETAB_ENT(120, 0x40, 0), 60 RATETAB_ENT(120, 7, 0),
57 RATETAB_ENT(180, 0x80, 0), 61 RATETAB_ENT(180, 8, 0),
58 RATETAB_ENT(240, 0x100, 0), 62 RATETAB_ENT(240, 9, 0),
59 RATETAB_ENT(360, 0x200, 0), 63 RATETAB_ENT(360, 10, 0),
60 RATETAB_ENT(480, 0x400, 0), 64 RATETAB_ENT(480, 11, 0),
61 RATETAB_ENT(540, 0x800, 0), 65 RATETAB_ENT(540, 12, 0),
62}; 66};
63 67
64static struct ieee80211_supported_band lbs_band_2ghz = { 68static struct ieee80211_supported_band lbs_band_2ghz = {
@@ -76,22 +80,1616 @@ static const u32 cipher_suites[] = {
76 WLAN_CIPHER_SUITE_CCMP, 80 WLAN_CIPHER_SUITE_CCMP,
77}; 81};
78 82
83/* Time to stay on the channel */
84#define LBS_DWELL_PASSIVE 100
85#define LBS_DWELL_ACTIVE 40
79 86
80 87
88/***************************************************************************
89 * Misc utility functions
90 *
91 * TLVs are Marvell specific. They are very similar to IEs, they have the
92 * same structure: type, length, data*. The only difference: for IEs, the
93 * type and length are u8, but for TLVs they're __le16.
94 */
95
/*
 * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
 * in the firmware spec
 */
static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
{
	int ret = -ENOTSUPP;

	switch (auth_type) {
	case NL80211_AUTHTYPE_OPEN_SYSTEM:
	case NL80211_AUTHTYPE_SHARED_KEY:
		/* These two use the same numeric values on both sides */
		ret = auth_type;
		break;
	case NL80211_AUTHTYPE_AUTOMATIC:
		/* Firmware has no "automatic"; fall back to open system */
		ret = NL80211_AUTHTYPE_OPEN_SYSTEM;
		break;
	case NL80211_AUTHTYPE_NETWORK_EAP:
		/* Marvell-specific auth type value */
		ret = 0x80;
		break;
	default:
		/* silence compiler */
		break;
	}
	/* NOTE(review): the u8 return type truncates -ENOTSUPP for
	 * unsupported auth types, so callers cannot detect the error
	 * and a garbage auth value goes into the TLV — confirm whether
	 * the error path is reachable from the callers. */
	return ret;
}
121
122
123/* Various firmware commands need the list of supported rates, but with
124 the hight-bit set for basic rates */
125static int lbs_add_rates(u8 *rates)
126{
127 size_t i;
128
129 for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
130 u8 rate = lbs_rates[i].bitrate / 5;
131 if (rate == 0x02 || rate == 0x04 ||
132 rate == 0x0b || rate == 0x16)
133 rate |= 0x80;
134 rates[i] = rate;
135 }
136 return ARRAY_SIZE(lbs_rates);
137}
138
139
140/***************************************************************************
141 * TLV utility functions
142 *
143 * TLVs are Marvell specific. They are very similar to IEs, they have the
144 * same structure: type, length, data*. The only difference: for IEs, the
145 * type and length are u8, but for TLVs they're __le16.
146 */
147
148
149/*
150 * Add ssid TLV
151 */
152#define LBS_MAX_SSID_TLV_SIZE \
153 (sizeof(struct mrvl_ie_header) \
154 + IEEE80211_MAX_SSID_LEN)
155
156static int lbs_add_ssid_tlv(u8 *tlv, const u8 *ssid, int ssid_len)
157{
158 struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
159
160 /*
161 * TLV-ID SSID 00 00
162 * length 06 00
163 * ssid 4d 4e 54 45 53 54
164 */
165 ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
166 ssid_tlv->header.len = cpu_to_le16(ssid_len);
167 memcpy(ssid_tlv->ssid, ssid, ssid_len);
168 return sizeof(ssid_tlv->header) + ssid_len;
169}
170
171
172/*
173 * Add channel list TLV (section 8.4.2)
174 *
175 * Actual channel data comes from priv->wdev->wiphy->channels.
176 */
177#define LBS_MAX_CHANNEL_LIST_TLV_SIZE \
178 (sizeof(struct mrvl_ie_header) \
179 + (LBS_SCAN_BEFORE_NAP * sizeof(struct chanscanparamset)))
180
/*
 * Append a channel-list TLV covering priv->scan_channel up to (not
 * including) @last_channel, using the channels from priv->scan_req.
 *
 * Side effect: advances priv->scan_channel, so the next scan chunk
 * resumes where this one stopped.
 *
 * @active_scan: nonzero requests active scanning with the short dwell;
 *               otherwise passive scanning with the long dwell is used.
 *
 * Returns the number of bytes written at @tlv.
 */
static int lbs_add_channel_list_tlv(struct lbs_private *priv, u8 *tlv,
	int last_channel, int active_scan)
{
	int chanscanparamsize = sizeof(struct chanscanparamset) *
		(last_channel - priv->scan_channel);

	struct mrvl_ie_header *header = (void *) tlv;

	/*
	 * TLV-ID CHANLIST 01 01
	 * length 0e 00
	 * channel 00 01 00 00 00 64 00
	 * radio type 00
	 * channel 01
	 * scan type 00
	 * min scan time 00 00
	 * max scan time 64 00
	 * channel 2 00 02 00 00 00 64 00
	 *
	 */

	header->type = cpu_to_le16(TLV_TYPE_CHANLIST);
	header->len = cpu_to_le16(chanscanparamsize);
	tlv += sizeof(struct mrvl_ie_header);

	/* lbs_deb_scan("scan: channels %d to %d\n", priv->scan_channel,
	   last_channel); */
	memset(tlv, 0, chanscanparamsize);

	while (priv->scan_channel < last_channel) {
		struct chanscanparamset *param = (void *) tlv;

		param->radiotype = CMD_SCAN_RADIO_TYPE_BG;
		/* hw_value of the cfg80211 channel is the 802.11 channel
		 * number the firmware expects */
		param->channumber =
			priv->scan_req->channels[priv->scan_channel]->hw_value;
		if (active_scan) {
			param->maxscantime = cpu_to_le16(LBS_DWELL_ACTIVE);
		} else {
			param->chanscanmode.passivescan = 1;
			param->maxscantime = cpu_to_le16(LBS_DWELL_PASSIVE);
		}
		tlv += sizeof(struct chanscanparamset);
		priv->scan_channel++;
	}
	return sizeof(struct mrvl_ie_header) + chanscanparamsize;
}
227
228
229/*
230 * Add rates TLV
231 *
232 * The rates are in lbs_bg_rates[], but for the 802.11b
233 * rates the high bit is set. We add this TLV only because
234 * there's a firmware which otherwise doesn't report all
235 * APs in range.
236 */
237#define LBS_MAX_RATES_TLV_SIZE \
238 (sizeof(struct mrvl_ie_header) \
239 + (ARRAY_SIZE(lbs_rates)))
240
241/* Adds a TLV with all rates the hardware supports */
242static int lbs_add_supported_rates_tlv(u8 *tlv)
243{
244 size_t i;
245 struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
246
247 /*
248 * TLV-ID RATES 01 00
249 * length 0e 00
250 * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c
251 */
252 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
253 tlv += sizeof(rate_tlv->header);
254 i = lbs_add_rates(tlv);
255 tlv += i;
256 rate_tlv->header.len = cpu_to_le16(i);
257 return sizeof(rate_tlv->header) + i;
258}
259
260
/*
 * Adds a TLV with all rates the hardware *and* BSS supports.
 *
 * The intersection is taken against the BSS's Supported Rates IE; when
 * that IE is absent, the four basic 802.11b rates are used as a safe
 * fallback.  The basic-rate bit (0x80) from the AP's IE is preserved.
 *
 * Returns the number of bytes written at @tlv.
 */
static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss)
{
	struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
	const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
	int n;

	/*
	 * 01 00                   TLV_TYPE_RATES
	 * 04 00                   len
	 * 82 84 8b 96             rates
	 */
	rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
	tlv += sizeof(rate_tlv->header);

	if (!rates_eid) {
		/* Fallback: add basic 802.11b rates */
		*tlv++ = 0x82;
		*tlv++ = 0x84;
		*tlv++ = 0x8b;
		*tlv++ = 0x96;
		n = 4;
	} else {
		int hw, ap;
		u8 ap_max = rates_eid[1];	/* IE length = rate count */
		n = 0;
		for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
			u8 hw_rate = lbs_rates[hw].bitrate / 5;
			for (ap = 0; ap < ap_max; ap++) {
				/* Mask off the basic-rate bit for comparison */
				if (hw_rate == (rates_eid[ap+2] & 0x7f)) {
					*tlv++ = rates_eid[ap+2];
					n++;
				}
			}
		}
	}

	rate_tlv->header.len = cpu_to_le16(n);
	return sizeof(rate_tlv->header) + n;
}
303
304
305/*
306 * Add auth type TLV.
307 *
308 * This is only needed for newer firmware (V9 and up).
309 */
310#define LBS_MAX_AUTH_TYPE_TLV_SIZE \
311 sizeof(struct mrvl_ie_auth_type)
312
313static int lbs_add_auth_type_tlv(u8 *tlv, enum nl80211_auth_type auth_type)
314{
315 struct mrvl_ie_auth_type *auth = (void *) tlv;
316
317 /*
318 * 1f 01 TLV_TYPE_AUTH_TYPE
319 * 01 00 len
320 * 01 auth type
321 */
322 auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
323 auth->header.len = cpu_to_le16(sizeof(*auth)-sizeof(auth->header));
324 auth->auth = cpu_to_le16(lbs_auth_to_authtype(auth_type));
325 return sizeof(*auth);
326}
327
328
329/*
330 * Add channel (phy ds) TLV
331 */
332#define LBS_MAX_CHANNEL_TLV_SIZE \
333 sizeof(struct mrvl_ie_header)
334
335static int lbs_add_channel_tlv(u8 *tlv, u8 channel)
336{
337 struct mrvl_ie_ds_param_set *ds = (void *) tlv;
338
339 /*
340 * 03 00 TLV_TYPE_PHY_DS
341 * 01 00 len
342 * 06 channel
343 */
344 ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
345 ds->header.len = cpu_to_le16(sizeof(*ds)-sizeof(ds->header));
346 ds->channel = channel;
347 return sizeof(*ds);
348}
349
350
351/*
352 * Add (empty) CF param TLV of the form:
353 */
354#define LBS_MAX_CF_PARAM_TLV_SIZE \
355 sizeof(struct mrvl_ie_header)
356
357static int lbs_add_cf_param_tlv(u8 *tlv)
358{
359 struct mrvl_ie_cf_param_set *cf = (void *)tlv;
360
361 /*
362 * 04 00 TLV_TYPE_CF
363 * 06 00 len
364 * 00 cfpcnt
365 * 00 cfpperiod
366 * 00 00 cfpmaxduration
367 * 00 00 cfpdurationremaining
368 */
369 cf->header.type = cpu_to_le16(TLV_TYPE_CF);
370 cf->header.len = cpu_to_le16(sizeof(*cf)-sizeof(cf->header));
371 return sizeof(*cf);
372}
373
374/*
375 * Add WPA TLV
376 */
377#define LBS_MAX_WPA_TLV_SIZE \
378 (sizeof(struct mrvl_ie_header) \
379 + 128 /* TODO: I guessed the size */)
380
381static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
382{
383 size_t tlv_len;
384
385 /*
386 * We need just convert an IE to an TLV. IEs use u8 for the header,
387 * u8 type
388 * u8 len
389 * u8[] data
390 * but TLVs use __le16 instead:
391 * __le16 type
392 * __le16 len
393 * u8[] data
394 */
395 *tlv++ = *ie++;
396 *tlv++ = 0;
397 tlv_len = *tlv++ = *ie++;
398 *tlv++ = 0;
399 while (tlv_len--)
400 *tlv++ = *ie++;
401 /* the TLV is two bytes larger than the IE */
402 return ie_len + 2;
403}
404
405/***************************************************************************
406 * Set Channel
407 */
408
81static int lbs_cfg_set_channel(struct wiphy *wiphy, 409static int lbs_cfg_set_channel(struct wiphy *wiphy,
82 struct net_device *netdev, 410 struct net_device *netdev,
83 struct ieee80211_channel *chan, 411 struct ieee80211_channel *channel,
84 enum nl80211_channel_type channel_type) 412 enum nl80211_channel_type channel_type)
85{ 413{
86 struct lbs_private *priv = wiphy_priv(wiphy); 414 struct lbs_private *priv = wiphy_priv(wiphy);
87 int ret = -ENOTSUPP; 415 int ret = -ENOTSUPP;
88 416
89 lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type); 417 lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
418 channel->center_freq, channel_type);
90 419
91 if (channel_type != NL80211_CHAN_NO_HT) 420 if (channel_type != NL80211_CHAN_NO_HT)
92 goto out; 421 goto out;
93 422
94 ret = lbs_set_channel(priv, chan->hw_value); 423 ret = lbs_set_channel(priv, channel->hw_value);
424
425 out:
426 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
427 return ret;
428}
429
430
431
432/***************************************************************************
433 * Scanning
434 */
435
436/*
437 * When scanning, the firmware doesn't send a nul packet with the power-safe
438 * bit to the AP. So we cannot stay away from our current channel too long,
439 * otherwise we loose data. So take a "nap" while scanning every other
440 * while.
441 */
442#define LBS_SCAN_BEFORE_NAP 4
443
444
445/*
446 * When the firmware reports back a scan-result, it gives us an "u8 rssi",
447 * which isn't really an RSSI, as it becomes larger when moving away from
448 * the AP. Anyway, we need to convert that into mBm.
449 */
450#define LBS_SCAN_RSSI_TO_MBM(rssi) \
451 ((-(int)rssi + 3)*100)
452
453static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
454 struct cmd_header *resp)
455{
456 struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
457 int bsssize;
458 const u8 *pos;
459 u16 nr_sets;
460 const u8 *tsfdesc;
461 int tsfsize;
462 int i;
463 int ret = -EILSEQ;
464
465 lbs_deb_enter(LBS_DEB_CFG80211);
466
467 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize);
468 nr_sets = le16_to_cpu(resp->size);
469
470 /*
471 * The general layout of the scan response is described in chapter
472 * 5.7.1. Basically we have a common part, then any number of BSS
473 * descriptor sections. Finally we have section with the same number
474 * of TSFs.
475 *
476 * cmd_ds_802_11_scan_rsp
477 * cmd_header
478 * pos_size
479 * nr_sets
480 * bssdesc 1
481 * bssid
482 * rssi
483 * timestamp
484 * intvl
485 * capa
486 * IEs
487 * bssdesc 2
488 * bssdesc n
489 * MrvlIEtypes_TsfFimestamp_t
490 * TSF for BSS 1
491 * TSF for BSS 2
492 * TSF for BSS n
493 */
494
495 pos = scanresp->bssdesc_and_tlvbuffer;
496
497 tsfdesc = pos + bsssize;
498 tsfsize = 4 + 8 * scanresp->nr_sets;
499
500 /* Validity check: we expect a Marvell-Local TLV */
501 i = get_unaligned_le16(tsfdesc);
502 tsfdesc += 2;
503 if (i != TLV_TYPE_TSFTIMESTAMP)
504 goto done;
505 /* Validity check: the TLV holds TSF values with 8 bytes each, so
506 * the size in the TLV must match the nr_sets value */
507 i = get_unaligned_le16(tsfdesc);
508 tsfdesc += 2;
509 if (i / 8 != scanresp->nr_sets)
510 goto done;
511
512 for (i = 0; i < scanresp->nr_sets; i++) {
513 const u8 *bssid;
514 const u8 *ie;
515 int left;
516 int ielen;
517 int rssi;
518 u16 intvl;
519 u16 capa;
520 int chan_no = -1;
521 const u8 *ssid = NULL;
522 u8 ssid_len = 0;
523 DECLARE_SSID_BUF(ssid_buf);
524
525 int len = get_unaligned_le16(pos);
526 pos += 2;
527
528 /* BSSID */
529 bssid = pos;
530 pos += ETH_ALEN;
531 /* RSSI */
532 rssi = *pos++;
533 /* Packet time stamp */
534 pos += 8;
535 /* Beacon interval */
536 intvl = get_unaligned_le16(pos);
537 pos += 2;
538 /* Capabilities */
539 capa = get_unaligned_le16(pos);
540 pos += 2;
541
542 /* To find out the channel, we must parse the IEs */
543 ie = pos;
544 /* 6+1+8+2+2: size of BSSID, RSSI, time stamp, beacon
545 interval, capabilities */
546 ielen = left = len - (6 + 1 + 8 + 2 + 2);
547 while (left >= 2) {
548 u8 id, elen;
549 id = *pos++;
550 elen = *pos++;
551 left -= 2;
552 if (elen > left || elen == 0)
553 goto done;
554 if (id == WLAN_EID_DS_PARAMS)
555 chan_no = *pos;
556 if (id == WLAN_EID_SSID) {
557 ssid = pos;
558 ssid_len = elen;
559 }
560 left -= elen;
561 pos += elen;
562 }
563
564 /* No channel, no luck */
565 if (chan_no != -1) {
566 struct wiphy *wiphy = priv->wdev->wiphy;
567 int freq = ieee80211_channel_to_frequency(chan_no);
568 struct ieee80211_channel *channel =
569 ieee80211_get_channel(wiphy, freq);
570
571 lbs_deb_scan("scan: %pM, capa %04x, chan %2d, %s, "
572 "%d dBm\n",
573 bssid, capa, chan_no,
574 print_ssid(ssid_buf, ssid, ssid_len),
575 LBS_SCAN_RSSI_TO_MBM(rssi)/100);
576
577 if (channel ||
578 !(channel->flags & IEEE80211_CHAN_DISABLED))
579 cfg80211_inform_bss(wiphy, channel,
580 bssid, le64_to_cpu(*(__le64 *)tsfdesc),
581 capa, intvl, ie, ielen,
582 LBS_SCAN_RSSI_TO_MBM(rssi),
583 GFP_KERNEL);
584 }
585 tsfdesc += 8;
586 }
587 ret = 0;
588
589 done:
590 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
591 return ret;
592}
593
594
595/*
596 * Our scan command contains a TLV, consting of a SSID TLV, a channel list
597 * TLV and a rates TLV. Determine the maximum size of them:
598 */
599#define LBS_SCAN_MAX_CMD_SIZE \
600 (sizeof(struct cmd_ds_802_11_scan) \
601 + LBS_MAX_SSID_TLV_SIZE \
602 + LBS_MAX_CHANNEL_LIST_TLV_SIZE \
603 + LBS_MAX_RATES_TLV_SIZE)
604
605/*
606 * Assumes priv->scan_req is initialized and valid
607 * Assumes priv->scan_channel is initialized
608 */
/*
 * Assumes priv->scan_req is initialized and valid
 * Assumes priv->scan_channel is initialized
 *
 * Scans one chunk of up to LBS_SCAN_BEFORE_NAP channels, pausing the
 * network queue while off-channel, and re-queues itself until all
 * channels of the request have been covered.
 */
static void lbs_scan_worker(struct work_struct *work)
{
	struct lbs_private *priv =
		container_of(work, struct lbs_private, scan_work.work);
	struct cmd_ds_802_11_scan *scan_cmd;
	u8 *tlv; /* pointer into our current, growing TLV storage area */
	int last_channel;
	int running, carrier;

	lbs_deb_enter(LBS_DEB_SCAN);

	scan_cmd = kzalloc(LBS_SCAN_MAX_CMD_SIZE, GFP_KERNEL);
	if (scan_cmd == NULL)
		goto out_no_scan_cmd;

	/* prepare fixed part of scan command */
	scan_cmd->bsstype = CMD_BSS_TYPE_ANY;

	/* stop network while we're away from our main channel */
	running = !netif_queue_stopped(priv->dev);
	carrier = netif_carrier_ok(priv->dev);
	if (running)
		netif_stop_queue(priv->dev);
	if (carrier)
		netif_carrier_off(priv->dev);

	/* prepare fixed part of scan command */
	tlv = scan_cmd->tlvbuffer;

	/* add SSID TLV (only for a directed scan with at least one SSID) */
	if (priv->scan_req->n_ssids)
		tlv += lbs_add_ssid_tlv(tlv,
					priv->scan_req->ssids[0].ssid,
					priv->scan_req->ssids[0].ssid_len);

	/* add channel TLVs — at most LBS_SCAN_BEFORE_NAP channels per
	 * chunk; lbs_add_channel_list_tlv advances priv->scan_channel */
	last_channel = priv->scan_channel + LBS_SCAN_BEFORE_NAP;
	if (last_channel > priv->scan_req->n_channels)
		last_channel = priv->scan_req->n_channels;
	tlv += lbs_add_channel_list_tlv(priv, tlv, last_channel,
					priv->scan_req->n_ssids);

	/* add rates TLV */
	tlv += lbs_add_supported_rates_tlv(tlv);

	/* More channels remain: schedule the next chunk after a "nap"
	 * back on the main channel */
	if (priv->scan_channel < priv->scan_req->n_channels) {
		cancel_delayed_work(&priv->scan_work);
		queue_delayed_work(priv->work_thread, &priv->scan_work,
				   msecs_to_jiffies(300));
	}

	/* This is the final data we are about to send */
	scan_cmd->hdr.size = cpu_to_le16(tlv - (u8 *)scan_cmd);
	lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd,
		    sizeof(*scan_cmd));
	lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer,
		    tlv - scan_cmd->tlvbuffer);

	__lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr,
		  le16_to_cpu(scan_cmd->hdr.size),
		  lbs_ret_scan, 0);

	if (priv->scan_channel >= priv->scan_req->n_channels) {
		/* Mark scan done */
		cfg80211_scan_done(priv->scan_req, false);
		priv->scan_req = NULL;
	}

	/* Restart network */
	if (carrier)
		netif_carrier_on(priv->dev);
	if (running && !priv->tx_pending_len)
		netif_wake_queue(priv->dev);

	kfree(scan_cmd);

 out_no_scan_cmd:
	lbs_deb_leave(LBS_DEB_SCAN);
}
688
689
690static int lbs_cfg_scan(struct wiphy *wiphy,
691 struct net_device *dev,
692 struct cfg80211_scan_request *request)
693{
694 struct lbs_private *priv = wiphy_priv(wiphy);
695 int ret = 0;
696
697 lbs_deb_enter(LBS_DEB_CFG80211);
698
699 if (priv->scan_req || delayed_work_pending(&priv->scan_work)) {
700 /* old scan request not yet processed */
701 ret = -EAGAIN;
702 goto out;
703 }
704
705 lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n",
706 request->n_ssids, request->n_channels, request->ie_len);
707
708 priv->scan_channel = 0;
709 queue_delayed_work(priv->work_thread, &priv->scan_work,
710 msecs_to_jiffies(50));
711
712 if (priv->surpriseremoved)
713 ret = -EIO;
714
715 priv->scan_req = request;
716
717 out:
718 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
719 return ret;
720}
721
722
723
724
725/***************************************************************************
726 * Events
727 */
728
729void lbs_send_disconnect_notification(struct lbs_private *priv)
730{
731 lbs_deb_enter(LBS_DEB_CFG80211);
732
733 cfg80211_disconnected(priv->dev,
734 0,
735 NULL, 0,
736 GFP_KERNEL);
737
738 lbs_deb_leave(LBS_DEB_CFG80211);
739}
740
741void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
742{
743 lbs_deb_enter(LBS_DEB_CFG80211);
744
745 cfg80211_michael_mic_failure(priv->dev,
746 priv->assoc_bss,
747 event == MACREG_INT_CODE_MIC_ERR_MULTICAST ?
748 NL80211_KEYTYPE_GROUP :
749 NL80211_KEYTYPE_PAIRWISE,
750 -1,
751 NULL,
752 GFP_KERNEL);
753
754 lbs_deb_leave(LBS_DEB_CFG80211);
755}
756
757
758
759
760/***************************************************************************
761 * Connect/disconnect
762 */
763
764
765/*
766 * This removes all WEP keys
767 */
768static int lbs_remove_wep_keys(struct lbs_private *priv)
769{
770 struct cmd_ds_802_11_set_wep cmd;
771 int ret;
772
773 lbs_deb_enter(LBS_DEB_CFG80211);
774
775 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
777 cmd.keyindex = cpu_to_le16(priv->wep_tx_key);
778 cmd.action = cpu_to_le16(CMD_ACT_REMOVE);
779
780 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
781
782 lbs_deb_leave(LBS_DEB_CFG80211);
783 return ret;
784}
785
786/*
787 * Set WEP keys
788 */
789static int lbs_set_wep_keys(struct lbs_private *priv)
790{
791 struct cmd_ds_802_11_set_wep cmd;
792 int i;
793 int ret;
794
795 lbs_deb_enter(LBS_DEB_CFG80211);
796
797 /*
798 * command 13 00
799 * size 50 00
800 * sequence xx xx
801 * result 00 00
802 * action 02 00 ACT_ADD
803 * transmit key 00 00
804 * type for key 1 01 WEP40
805 * type for key 2 00
806 * type for key 3 00
807 * type for key 4 00
808 * key 1 39 39 39 39 39 00 00 00
809 * 00 00 00 00 00 00 00 00
810 * key 2 00 00 00 00 00 00 00 00
811 * 00 00 00 00 00 00 00 00
812 * key 3 00 00 00 00 00 00 00 00
813 * 00 00 00 00 00 00 00 00
814 * key 4 00 00 00 00 00 00 00 00
815 */
816 if (priv->wep_key_len[0] || priv->wep_key_len[1] ||
817 priv->wep_key_len[2] || priv->wep_key_len[3]) {
818 /* Only set wep keys if we have at least one of them */
819 memset(&cmd, 0, sizeof(cmd));
820 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
821 cmd.keyindex = cpu_to_le16(priv->wep_tx_key);
822 cmd.action = cpu_to_le16(CMD_ACT_ADD);
823
824 for (i = 0; i < 4; i++) {
825 switch (priv->wep_key_len[i]) {
826 case WLAN_KEY_LEN_WEP40:
827 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
828 break;
829 case WLAN_KEY_LEN_WEP104:
830 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
831 break;
832 default:
833 cmd.keytype[i] = 0;
834 break;
835 }
836 memcpy(cmd.keymaterial[i], priv->wep_key[i],
837 priv->wep_key_len[i]);
838 }
839
840 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
841 } else {
842 /* Otherwise remove all wep keys */
843 ret = lbs_remove_wep_keys(priv);
844 }
845
846 lbs_deb_leave(LBS_DEB_CFG80211);
847 return ret;
848}
849
850
851/*
852 * Enable/Disable RSN status
853 */
854static int lbs_enable_rsn(struct lbs_private *priv, int enable)
855{
856 struct cmd_ds_802_11_enable_rsn cmd;
857 int ret;
858
859 lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", enable);
860
861 /*
862 * cmd 2f 00
863 * size 0c 00
864 * sequence xx xx
865 * result 00 00
866 * action 01 00 ACT_SET
867 * enable 01 00
868 */
869 memset(&cmd, 0, sizeof(cmd));
870 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
871 cmd.action = cpu_to_le16(CMD_ACT_SET);
872 cmd.enable = cpu_to_le16(enable);
873
874 ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
875
876 lbs_deb_leave(LBS_DEB_CFG80211);
877 return ret;
878}
879
880
/*
 * Set WPA/WPA2 key material
 */

/* like "struct cmd_ds_802_11_key_material", but with cmd_header. Once we
 * get rid of WEXT, this should go into host.h */

struct cmd_key_material {
	struct cmd_header hdr;

	__le16 action;				/* CMD_ACT_SET (see lbs_set_key_material) */
	struct MrvlIEtype_keyParamSet param;	/* single key-parameter TLV */
} __packed;
894
895static int lbs_set_key_material(struct lbs_private *priv,
896 int key_type,
897 int key_info,
898 u8 *key, u16 key_len)
899{
900 struct cmd_key_material cmd;
901 int ret;
902
903 lbs_deb_enter(LBS_DEB_CFG80211);
904
905 /*
906 * Example for WPA (TKIP):
907 *
908 * cmd 5e 00
909 * size 34 00
910 * sequence xx xx
911 * result 00 00
912 * action 01 00
913 * TLV type 00 01 key param
914 * length 00 26
915 * key type 01 00 TKIP
916 * key info 06 00 UNICAST | ENABLED
917 * key len 20 00
918 * key 32 bytes
919 */
920 memset(&cmd, 0, sizeof(cmd));
921 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
922 cmd.action = cpu_to_le16(CMD_ACT_SET);
923 cmd.param.type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
924 cmd.param.length = cpu_to_le16(sizeof(cmd.param) - 4);
925 cmd.param.keytypeid = cpu_to_le16(key_type);
926 cmd.param.keyinfo = cpu_to_le16(key_info);
927 cmd.param.keylen = cpu_to_le16(key_len);
928 if (key && key_len)
929 memcpy(cmd.param.key, key, key_len);
930
931 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
932
933 lbs_deb_leave(LBS_DEB_CFG80211);
934 return ret;
935}
936
937
938/*
939 * Sets the auth type (open, shared, etc) in the firmware. That
940 * we use CMD_802_11_AUTHENTICATE is misleading, this firmware
941 * command doesn't send an authentication frame at all, it just
942 * stores the auth_type.
943 */
944static int lbs_set_authtype(struct lbs_private *priv,
945 struct cfg80211_connect_params *sme)
946{
947 struct cmd_ds_802_11_authenticate cmd;
948 int ret;
949
950 lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", sme->auth_type);
951
952 /*
953 * cmd 11 00
954 * size 19 00
955 * sequence xx xx
956 * result 00 00
957 * BSS id 00 13 19 80 da 30
958 * auth type 00
959 * reserved 00 00 00 00 00 00 00 00 00 00
960 */
961 memset(&cmd, 0, sizeof(cmd));
962 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
963 if (sme->bssid)
964 memcpy(cmd.bssid, sme->bssid, ETH_ALEN);
965 /* convert auth_type */
966 ret = lbs_auth_to_authtype(sme->auth_type);
967 if (ret < 0)
968 goto done;
969
970 cmd.authtype = ret;
971 ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
972
973 done:
974 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
975 return ret;
976}
977
978
/*
 * Create association request
 */

/*
 * Worst-case size of a CMD_802_11_ASSOCIATE command: the fixed part of
 * the command (minus the 512-byte iebuf placeholder) plus the maximum
 * size of every TLV that lbs_associate() may append.
 */
#define LBS_ASSOC_MAX_CMD_SIZE                     \
	(sizeof(struct cmd_ds_802_11_associate)    \
	 - 512 /* cmd_ds_802_11_associate.iebuf */ \
	 + LBS_MAX_SSID_TLV_SIZE                   \
	 + LBS_MAX_CHANNEL_TLV_SIZE                \
	 + LBS_MAX_CF_PARAM_TLV_SIZE               \
	 + LBS_MAX_AUTH_TYPE_TLV_SIZE              \
	 + LBS_MAX_WPA_TLV_SIZE)
990
991static int lbs_associate(struct lbs_private *priv,
992 struct cfg80211_bss *bss,
993 struct cfg80211_connect_params *sme)
994{
995 struct cmd_ds_802_11_associate_response *resp;
996 struct cmd_ds_802_11_associate *cmd = kzalloc(LBS_ASSOC_MAX_CMD_SIZE,
997 GFP_KERNEL);
998 const u8 *ssid_eid;
999 size_t len, resp_ie_len;
1000 int status;
1001 int ret;
1002 u8 *pos = &(cmd->iebuf[0]);
1003
1004 lbs_deb_enter(LBS_DEB_CFG80211);
1005
1006 if (!cmd) {
1007 ret = -ENOMEM;
1008 goto done;
1009 }
1010
1011 /*
1012 * cmd 50 00
1013 * length 34 00
1014 * sequence xx xx
1015 * result 00 00
1016 * BSS id 00 13 19 80 da 30
1017 * capabilities 11 00
1018 * listen interval 0a 00
1019 * beacon interval 00 00
1020 * DTIM period 00
1021 * TLVs xx (up to 512 bytes)
1022 */
1023 cmd->hdr.command = cpu_to_le16(CMD_802_11_ASSOCIATE);
1024
1025 /* Fill in static fields */
1026 memcpy(cmd->bssid, bss->bssid, ETH_ALEN);
1027 cmd->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
1028 cmd->capability = cpu_to_le16(bss->capability);
1029
1030 /* add SSID TLV */
1031 ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
1032 if (ssid_eid)
1033 pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]);
1034 else
1035 lbs_deb_assoc("no SSID\n");
1036
1037 /* add DS param TLV */
1038 if (bss->channel)
1039 pos += lbs_add_channel_tlv(pos, bss->channel->hw_value);
1040 else
1041 lbs_deb_assoc("no channel\n");
1042
1043 /* add (empty) CF param TLV */
1044 pos += lbs_add_cf_param_tlv(pos);
1045
1046 /* add rates TLV */
1047 pos += lbs_add_common_rates_tlv(pos, bss);
1048
1049 /* add auth type TLV */
1050 if (priv->fwrelease >= 0x09000000)
1051 pos += lbs_add_auth_type_tlv(pos, sme->auth_type);
1052
1053 /* add WPA/WPA2 TLV */
1054 if (sme->ie && sme->ie_len)
1055 pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len);
1056
1057 len = (sizeof(*cmd) - sizeof(cmd->iebuf)) +
1058 (u16)(pos - (u8 *) &cmd->iebuf);
1059 cmd->hdr.size = cpu_to_le16(len);
1060
1061 /* store for later use */
1062 memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN);
1063
1064 ret = lbs_cmd_with_response(priv, CMD_802_11_ASSOCIATE, cmd);
1065 if (ret)
1066 goto done;
1067
1068
1069 /* generate connect message to cfg80211 */
1070
1071 resp = (void *) cmd; /* recast for easier field access */
1072 status = le16_to_cpu(resp->statuscode);
1073
1074 /* Convert statis code of old firmware */
1075 if (priv->fwrelease < 0x09000000)
1076 switch (status) {
1077 case 0:
1078 break;
1079 case 1:
1080 lbs_deb_assoc("invalid association parameters\n");
1081 status = WLAN_STATUS_CAPS_UNSUPPORTED;
1082 break;
1083 case 2:
1084 lbs_deb_assoc("timer expired while waiting for AP\n");
1085 status = WLAN_STATUS_AUTH_TIMEOUT;
1086 break;
1087 case 3:
1088 lbs_deb_assoc("association refused by AP\n");
1089 status = WLAN_STATUS_ASSOC_DENIED_UNSPEC;
1090 break;
1091 case 4:
1092 lbs_deb_assoc("authentication refused by AP\n");
1093 status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
1094 break;
1095 default:
1096 lbs_deb_assoc("association failure %d\n", status);
1097 status = WLAN_STATUS_UNSPECIFIED_FAILURE;
1098 }
1099
1100 lbs_deb_assoc("status %d, capability 0x%04x\n", status,
1101 le16_to_cpu(resp->capability));
1102
1103 resp_ie_len = le16_to_cpu(resp->hdr.size)
1104 - sizeof(resp->hdr)
1105 - 6;
1106 cfg80211_connect_result(priv->dev,
1107 priv->assoc_bss,
1108 sme->ie, sme->ie_len,
1109 resp->iebuf, resp_ie_len,
1110 status,
1111 GFP_KERNEL);
1112
1113 if (status == 0) {
1114 /* TODO: get rid of priv->connect_status */
1115 priv->connect_status = LBS_CONNECTED;
1116 netif_carrier_on(priv->dev);
1117 if (!priv->tx_pending_len)
1118 netif_tx_wake_all_queues(priv->dev);
1119 }
1120
1121
1122done:
1123 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1124 return ret;
1125}
1126
1127
1128
1129static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
1130 struct cfg80211_connect_params *sme)
1131{
1132 struct lbs_private *priv = wiphy_priv(wiphy);
1133 struct cfg80211_bss *bss = NULL;
1134 int ret = 0;
1135 u8 preamble = RADIO_PREAMBLE_SHORT;
1136
1137 lbs_deb_enter(LBS_DEB_CFG80211);
1138
1139 if (sme->bssid) {
1140 bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
1141 sme->ssid, sme->ssid_len,
1142 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
1143 } else {
1144 /*
1145 * Here we have an impedance mismatch. The firmware command
1146 * CMD_802_11_ASSOCIATE always needs a BSSID, it cannot
1147 * connect otherwise. However, for the connect-API of
1148 * cfg80211 the bssid is purely optional. We don't get one,
1149 * except the user specifies one on the "iw" command line.
1150 *
1151 * If we don't got one, we could initiate a scan and look
1152 * for the best matching cfg80211_bss entry.
1153 *
1154 * Or, better yet, net/wireless/sme.c get's rewritten into
1155 * something more generally useful.
1156 */
1157 lbs_pr_err("TODO: no BSS specified\n");
1158 ret = -ENOTSUPP;
1159 goto done;
1160 }
1161
1162
1163 if (!bss) {
1164 lbs_pr_err("assicate: bss %pM not in scan results\n",
1165 sme->bssid);
1166 ret = -ENOENT;
1167 goto done;
1168 }
1169 lbs_deb_assoc("trying %pM", sme->bssid);
1170 lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n",
1171 sme->crypto.cipher_group,
1172 sme->key_idx, sme->key_len);
1173
1174 /* As this is a new connection, clear locally stored WEP keys */
1175 priv->wep_tx_key = 0;
1176 memset(priv->wep_key, 0, sizeof(priv->wep_key));
1177 memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
1178
1179 /* set/remove WEP keys */
1180 switch (sme->crypto.cipher_group) {
1181 case WLAN_CIPHER_SUITE_WEP40:
1182 case WLAN_CIPHER_SUITE_WEP104:
1183 /* Store provided WEP keys in priv-> */
1184 priv->wep_tx_key = sme->key_idx;
1185 priv->wep_key_len[sme->key_idx] = sme->key_len;
1186 memcpy(priv->wep_key[sme->key_idx], sme->key, sme->key_len);
1187 /* Set WEP keys and WEP mode */
1188 lbs_set_wep_keys(priv);
1189 priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE;
1190 lbs_set_mac_control(priv);
1191 /* No RSN mode for WEP */
1192 lbs_enable_rsn(priv, 0);
1193 break;
1194 case 0: /* there's no WLAN_CIPHER_SUITE_NONE definition */
1195 /*
1196 * If we don't have no WEP, no WPA and no WPA2,
1197 * we remove all keys like in the WPA/WPA2 setup,
1198 * we just don't set RSN.
1199 *
1200 * Therefore: fall-throught
1201 */
1202 case WLAN_CIPHER_SUITE_TKIP:
1203 case WLAN_CIPHER_SUITE_CCMP:
1204 /* Remove WEP keys and WEP mode */
1205 lbs_remove_wep_keys(priv);
1206 priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE;
1207 lbs_set_mac_control(priv);
1208
1209 /* clear the WPA/WPA2 keys */
1210 lbs_set_key_material(priv,
1211 KEY_TYPE_ID_WEP, /* doesn't matter */
1212 KEY_INFO_WPA_UNICAST,
1213 NULL, 0);
1214 lbs_set_key_material(priv,
1215 KEY_TYPE_ID_WEP, /* doesn't matter */
1216 KEY_INFO_WPA_MCAST,
1217 NULL, 0);
1218 /* RSN mode for WPA/WPA2 */
1219 lbs_enable_rsn(priv, sme->crypto.cipher_group != 0);
1220 break;
1221 default:
1222 lbs_pr_err("unsupported cipher group 0x%x\n",
1223 sme->crypto.cipher_group);
1224 ret = -ENOTSUPP;
1225 goto done;
1226 }
1227
1228 lbs_set_authtype(priv, sme);
1229 lbs_set_radio(priv, preamble, 1);
1230
1231 /* Do the actual association */
1232 lbs_associate(priv, bss, sme);
1233
1234 done:
1235 if (bss)
1236 cfg80211_put_bss(bss);
1237 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1238 return ret;
1239}
1240
1241static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
1242 u16 reason_code)
1243{
1244 struct lbs_private *priv = wiphy_priv(wiphy);
1245 struct cmd_ds_802_11_deauthenticate cmd;
1246
1247 lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code);
1248
1249 /* store for lbs_cfg_ret_disconnect() */
1250 priv->disassoc_reason = reason_code;
1251
1252 memset(&cmd, 0, sizeof(cmd));
1253 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1254 /* Mildly ugly to use a locally store my own BSSID ... */
1255 memcpy(cmd.macaddr, &priv->assoc_bss, ETH_ALEN);
1256 cmd.reasoncode = cpu_to_le16(reason_code);
1257
1258 if (lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd))
1259 return -EFAULT;
1260
1261 cfg80211_disconnected(priv->dev,
1262 priv->disassoc_reason,
1263 NULL, 0,
1264 GFP_KERNEL);
1265 priv->connect_status = LBS_DISCONNECTED;
1266
1267 return 0;
1268}
1269
1270
1271static int lbs_cfg_set_default_key(struct wiphy *wiphy,
1272 struct net_device *netdev,
1273 u8 key_index)
1274{
1275 struct lbs_private *priv = wiphy_priv(wiphy);
1276
1277 lbs_deb_enter(LBS_DEB_CFG80211);
1278
1279 if (key_index != priv->wep_tx_key) {
1280 lbs_deb_assoc("set_default_key: to %d\n", key_index);
1281 priv->wep_tx_key = key_index;
1282 lbs_set_wep_keys(priv);
1283 }
1284
1285 return 0;
1286}
1287
1288
1289static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
1290 u8 idx, const u8 *mac_addr,
1291 struct key_params *params)
1292{
1293 struct lbs_private *priv = wiphy_priv(wiphy);
1294 u16 key_info;
1295 u16 key_type;
1296 int ret = 0;
1297
1298 lbs_deb_enter(LBS_DEB_CFG80211);
1299
1300 lbs_deb_assoc("add_key: cipher 0x%x, mac_addr %pM\n",
1301 params->cipher, mac_addr);
1302 lbs_deb_assoc("add_key: key index %d, key len %d\n",
1303 idx, params->key_len);
1304 if (params->key_len)
1305 lbs_deb_hex(LBS_DEB_CFG80211, "KEY",
1306 params->key, params->key_len);
1307
1308 lbs_deb_assoc("add_key: seq len %d\n", params->seq_len);
1309 if (params->seq_len)
1310 lbs_deb_hex(LBS_DEB_CFG80211, "SEQ",
1311 params->seq, params->seq_len);
1312
1313 switch (params->cipher) {
1314 case WLAN_CIPHER_SUITE_WEP40:
1315 case WLAN_CIPHER_SUITE_WEP104:
1316 /* actually compare if something has changed ... */
1317 if ((priv->wep_key_len[idx] != params->key_len) ||
1318 memcmp(priv->wep_key[idx],
1319 params->key, params->key_len) != 0) {
1320 priv->wep_key_len[idx] = params->key_len;
1321 memcpy(priv->wep_key[idx],
1322 params->key, params->key_len);
1323 lbs_set_wep_keys(priv);
1324 }
1325 break;
1326 case WLAN_CIPHER_SUITE_TKIP:
1327 case WLAN_CIPHER_SUITE_CCMP:
1328 key_info = KEY_INFO_WPA_ENABLED | ((idx == 0)
1329 ? KEY_INFO_WPA_UNICAST
1330 : KEY_INFO_WPA_MCAST);
1331 key_type = (params->cipher == WLAN_CIPHER_SUITE_TKIP)
1332 ? KEY_TYPE_ID_TKIP
1333 : KEY_TYPE_ID_AES;
1334 lbs_set_key_material(priv,
1335 key_type,
1336 key_info,
1337 params->key, params->key_len);
1338 break;
1339 default:
1340 lbs_pr_err("unhandled cipher 0x%x\n", params->cipher);
1341 ret = -ENOTSUPP;
1342 break;
1343 }
1344
1345 return ret;
1346}
1347
1348
/*
 * cfg80211 .del_key handler. Deliberately a no-op: all keys are cleared
 * wholesale on every lbs_cfg_connect() anyway (see #ifdef'd rationale).
 */
static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
			   u8 key_index, const u8 *mac_addr)
{

	lbs_deb_enter(LBS_DEB_CFG80211);

	lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n",
		      key_index, mac_addr);

#ifdef TODO
	struct lbs_private *priv = wiphy_priv(wiphy);
	/*
	 * I think we can keep this a NO-OP, because:
	 *
	 * - we clear all keys whenever we do lbs_cfg_connect() anyway
	 * - neither "iw" nor "wpa_supplicant" will call this during
	 *   an ongoing connection
	 * - TODO: but I have to check if this is still true when
	 *   I set the AP to periodic re-keying
	 * - we've not kzalloc()ed anything when we've added a key at
	 *   lbs_cfg_connect() or lbs_cfg_add_key().
	 *
	 * This causes lbs_cfg_del_key() only to be called at disconnect
	 * time, where we'd just waste time deleting a key that is not
	 * going to be used anyway.
	 */
	if (key_index < 3 && priv->wep_key_len[key_index]) {
		priv->wep_key_len[key_index] = 0;
		lbs_set_wep_keys(priv);
	}
#endif

	return 0;
}
1383
1384
1385/***************************************************************************
1386 * Get station
1387 */
1388
1389static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
1390 u8 *mac, struct station_info *sinfo)
1391{
1392 struct lbs_private *priv = wiphy_priv(wiphy);
1393 s8 signal, noise;
1394 int ret;
1395 size_t i;
1396
1397 lbs_deb_enter(LBS_DEB_CFG80211);
1398
1399 sinfo->filled |= STATION_INFO_TX_BYTES |
1400 STATION_INFO_TX_PACKETS |
1401 STATION_INFO_RX_BYTES |
1402 STATION_INFO_RX_PACKETS;
1403 sinfo->tx_bytes = priv->dev->stats.tx_bytes;
1404 sinfo->tx_packets = priv->dev->stats.tx_packets;
1405 sinfo->rx_bytes = priv->dev->stats.rx_bytes;
1406 sinfo->rx_packets = priv->dev->stats.rx_packets;
1407
1408 /* Get current RSSI */
1409 ret = lbs_get_rssi(priv, &signal, &noise);
1410 if (ret == 0) {
1411 sinfo->signal = signal;
1412 sinfo->filled |= STATION_INFO_SIGNAL;
1413 }
1414
1415 /* Convert priv->cur_rate from hw_value to NL80211 value */
1416 for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
1417 if (priv->cur_rate == lbs_rates[i].hw_value) {
1418 sinfo->txrate.legacy = lbs_rates[i].bitrate;
1419 sinfo->filled |= STATION_INFO_TX_BITRATE;
1420 break;
1421 }
1422 }
1423
1424 return 0;
1425}
1426
1427
1428
1429
1430/***************************************************************************
1431 * "Site survey", here just current channel and noise level
1432 */
1433
1434static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
1435 int idx, struct survey_info *survey)
1436{
1437 struct lbs_private *priv = wiphy_priv(wiphy);
1438 s8 signal, noise;
1439 int ret;
1440
1441 if (idx != 0)
1442 ret = -ENOENT;
1443
1444 lbs_deb_enter(LBS_DEB_CFG80211);
1445
1446 survey->channel = ieee80211_get_channel(wiphy,
1447 ieee80211_channel_to_frequency(priv->channel));
1448
1449 ret = lbs_get_rssi(priv, &signal, &noise);
1450 if (ret == 0) {
1451 survey->filled = SURVEY_INFO_NOISE_DBM;
1452 survey->noise = noise;
1453 }
1454
1455 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1456 return ret;
1457}
1458
1459
1460
1461
1462/***************************************************************************
1463 * Change interface
1464 */
1465
1466static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev,
1467 enum nl80211_iftype type, u32 *flags,
1468 struct vif_params *params)
1469{
1470 struct lbs_private *priv = wiphy_priv(wiphy);
1471 int ret = 0;
1472
1473 lbs_deb_enter(LBS_DEB_CFG80211);
1474
1475 switch (type) {
1476 case NL80211_IFTYPE_MONITOR:
1477 ret = lbs_set_monitor_mode(priv, 1);
1478 break;
1479 case NL80211_IFTYPE_STATION:
1480 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
1481 ret = lbs_set_monitor_mode(priv, 0);
1482 if (!ret)
1483 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1);
1484 break;
1485 case NL80211_IFTYPE_ADHOC:
1486 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
1487 ret = lbs_set_monitor_mode(priv, 0);
1488 if (!ret)
1489 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2);
1490 break;
1491 default:
1492 ret = -ENOTSUPP;
1493 }
1494
1495 if (!ret)
1496 priv->wdev->iftype = type;
1497
1498 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1499 return ret;
1500}
1501
1502
1503
1504/***************************************************************************
1505 * IBSS (Ad-Hoc)
1506 */
1507
/* The firmware needs the following bits masked out of the beacon-derived
 * capability field when associating/joining to a BSS:
 * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
 * (0xda00 sets exactly bits 9, 11, 12, 14 and 15)
 */
#define CAPINFO_MASK (~(0xda00))
1513
1514
/*
 * Common tail of IBSS join/start: report the BSS to cfg80211 (with a
 * locally fabricated IE blob, as the firmware gives us none), remember
 * the SSID, signal the IBSS join and re-enable the TX path.
 */
static void lbs_join_post(struct lbs_private *priv,
			  struct cfg80211_ibss_params *params,
			  u8 *bssid, u16 capability)
{
	u8 fake_ie[2 + IEEE80211_MAX_SSID_LEN + /* ssid */
		   2 + 4 +                      /* basic rates */
		   2 + 1 +                      /* DS parameter */
		   2 + 2 +                      /* atim */
		   2 + 8];                      /* extended rates */
	u8 *fake = fake_ie;

	lbs_deb_enter(LBS_DEB_CFG80211);

	/*
	 * For cfg80211_inform_bss, we'll need a fake IE, as we can't get
	 * the real IE from the firmware. So we fabricate a fake IE based on
	 * what the firmware actually sends (sniffed with wireshark).
	 */
	/* Fake SSID IE */
	*fake++ = WLAN_EID_SSID;
	*fake++ = params->ssid_len;
	memcpy(fake, params->ssid, params->ssid_len);
	fake += params->ssid_len;
	/* Fake supported basic rates IE (1/2/5.5/11 Mbps, basic flag set) */
	*fake++ = WLAN_EID_SUPP_RATES;
	*fake++ = 4;
	*fake++ = 0x82;
	*fake++ = 0x84;
	*fake++ = 0x8b;
	*fake++ = 0x96;
	/* Fake DS channel IE */
	*fake++ = WLAN_EID_DS_PARAMS;
	*fake++ = 1;
	*fake++ = params->channel->hw_value;
	/* Fake IBSS params IE */
	*fake++ = WLAN_EID_IBSS_PARAMS;
	*fake++ = 2;
	*fake++ = 0; /* ATIM=0 */
	*fake++ = 0;
	/* Fake extended rates IE, TODO: don't add this for 802.11b only,
	 * but I don't know how this could be checked */
	*fake++ = WLAN_EID_EXT_SUPP_RATES;
	*fake++ = 8;
	*fake++ = 0x0c;
	*fake++ = 0x12;
	*fake++ = 0x18;
	*fake++ = 0x24;
	*fake++ = 0x30;
	*fake++ = 0x48;
	*fake++ = 0x60;
	*fake++ = 0x6c;
	lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);

	cfg80211_inform_bss(priv->wdev->wiphy,
			    params->channel,
			    bssid,
			    0,
			    capability,
			    params->beacon_interval,
			    fake_ie, fake - fake_ie,
			    0, GFP_KERNEL);

	/* remember the SSID cfg80211 expects us to be on */
	memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
	priv->wdev->ssid_len = params->ssid_len;

	cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);

	/* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
	priv->connect_status = LBS_CONNECTED;
	netif_carrier_on(priv->dev);
	if (!priv->tx_pending_len)
		netif_wake_queue(priv->dev);

	lbs_deb_leave(LBS_DEB_CFG80211);
}
1590
/*
 * Join an already-beaconing IBSS via CMD_802_11_AD_HOC_JOIN, using the
 * parameters of the BSS found in the scan results.
 */
static int lbs_ibss_join_existing(struct lbs_private *priv,
	struct cfg80211_ibss_params *params,
	struct cfg80211_bss *bss)
{
	const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
	struct cmd_ds_802_11_ad_hoc_join cmd;
	u8 preamble = RADIO_PREAMBLE_SHORT;
	int ret = 0;

	lbs_deb_enter(LBS_DEB_CFG80211);

	/* TODO: set preamble based on scan result */
	ret = lbs_set_radio(priv, preamble, 1);
	if (ret)
		goto out;

	/*
	 * Example CMD_802_11_AD_HOC_JOIN command:
	 *
	 * command         2c 00         CMD_802_11_AD_HOC_JOIN
	 * size            65 00
	 * sequence        xx xx
	 * result          00 00
	 * bssid           02 27 27 97 2f 96
	 * ssid            49 42 53 53 00 00 00 00
	 *                 00 00 00 00 00 00 00 00
	 *                 00 00 00 00 00 00 00 00
	 *                 00 00 00 00 00 00 00 00
	 * type            02            CMD_BSS_TYPE_IBSS
	 * beacon period   64 00
	 * dtim period     00
	 * timestamp       00 00 00 00 00 00 00 00
	 * localtime       00 00 00 00 00 00 00 00
	 * IE DS           03
	 * IE DS len       01
	 * IE DS channel   01
	 * reserved        00 00 00 00
	 * IE IBSS         06
	 * IE IBSS len     02
	 * IE IBSS atim    00 00
	 * reserved        00 00 00 00
	 * capability      02 00
	 * rates           82 84 8b 96 0c 12 18 24 30 48 60 6c 00
	 * fail timeout    ff 00
	 * probe delay     00 00
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));

	memcpy(cmd.bss.bssid, bss->bssid, ETH_ALEN);
	memcpy(cmd.bss.ssid, params->ssid, params->ssid_len);
	cmd.bss.type = CMD_BSS_TYPE_IBSS;
	cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval);
	cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS;
	cmd.bss.ds.header.len = 1;
	cmd.bss.ds.channel = params->channel->hw_value;
	cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS;
	cmd.bss.ibss.header.len = 2;
	cmd.bss.ibss.atimwindow = 0;
	cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);

	/* set rates to the intersection of our rates and the rates in the
	   bss */
	if (!rates_eid) {
		lbs_add_rates(cmd.bss.rates);
	} else {
		int hw, i;
		u8 rates_max = rates_eid[1];
		u8 *rates = cmd.bss.rates;
		for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
			u8 hw_rate = lbs_rates[hw].bitrate / 5;
			for (i = 0; i < rates_max; i++) {
				if (hw_rate == (rates_eid[i+2] & 0x7f)) {
					u8 rate = rates_eid[i+2];
					/* mark the 802.11b rates as basic */
					if (rate == 0x02 || rate == 0x04 ||
					    rate == 0x0b || rate == 0x16)
						rate |= 0x80;
					*rates++ = rate;
				}
			}
		}
	}

	/* Only v8 and below support setting this */
	if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) {
		cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
	}
	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
	if (ret)
		goto out;

	/*
	 * This is a sample response to CMD_802_11_AD_HOC_JOIN:
	 *
	 * response        2c 80
	 * size            09 00
	 * sequence        xx xx
	 * result          00 00
	 * reserved        00
	 */
	lbs_join_post(priv, params, bss->bssid, bss->capability);
95 1693
96 out: 1694 out:
97 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 1695 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -100,9 +1698,169 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
100 1698
101 1699
102 1700
/*
 * Start a brand-new IBSS via CMD_802_11_AD_HOC_START; the firmware
 * picks the BSSID, which it returns in the command response.
 */
static int lbs_ibss_start_new(struct lbs_private *priv,
	struct cfg80211_ibss_params *params)
{
	struct cmd_ds_802_11_ad_hoc_start cmd;
	struct cmd_ds_802_11_ad_hoc_result *resp =
		(struct cmd_ds_802_11_ad_hoc_result *) &cmd;
	u8 preamble = RADIO_PREAMBLE_SHORT;
	int ret = 0;
	u16 capability;

	lbs_deb_enter(LBS_DEB_CFG80211);

	ret = lbs_set_radio(priv, preamble, 1);
	if (ret)
		goto out;

	/*
	 * Example CMD_802_11_AD_HOC_START command:
	 *
	 * command         2b 00         CMD_802_11_AD_HOC_START
	 * size            b1 00
	 * sequence        xx xx
	 * result          00 00
	 * ssid            54 45 53 54 00 00 00 00
	 *                 00 00 00 00 00 00 00 00
	 *                 00 00 00 00 00 00 00 00
	 *                 00 00 00 00 00 00 00 00
	 * bss type        02
	 * beacon period   64 00
	 * dtim period     00
	 * IE IBSS         06
	 * IE IBSS len     02
	 * IE IBSS atim    00 00
	 * reserved        00 00 00 00
	 * IE DS           03
	 * IE DS len       01
	 * IE DS channel   01
	 * reserved        00 00 00 00
	 * probe delay     00 00
	 * capability      02 00
	 * rates           82 84 8b 96   (basic rates with have bit 7 set)
	 *                 0c 12 18 24 30 48 60 6c
	 * padding         100 bytes
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	memcpy(cmd.ssid, params->ssid, params->ssid_len);
	cmd.bsstype = CMD_BSS_TYPE_IBSS;
	cmd.beaconperiod = cpu_to_le16(params->beacon_interval);
	cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
	cmd.ibss.header.len = 2;
	cmd.ibss.atimwindow = 0;
	cmd.ds.header.id = WLAN_EID_DS_PARAMS;
	cmd.ds.header.len = 1;
	cmd.ds.channel = params->channel->hw_value;
	/* Only v8 and below support setting probe delay */
	if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8)
		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
	/* TODO: mix in WLAN_CAPABILITY_PRIVACY */
	capability = WLAN_CAPABILITY_IBSS;
	cmd.capability = cpu_to_le16(capability);
	lbs_add_rates(cmd.rates);


	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
	if (ret)
		goto out;

	/*
	 * This is a sample response to CMD_802_11_AD_HOC_START (the old
	 * comment wrongly said AD_HOC_JOIN):
	 *
	 * response        2b 80
	 * size            14 00
	 * sequence        xx xx
	 * result          00 00
	 * reserved        00
	 * bssid           02 2b 7b 0f 86 0e
	 */
	lbs_join_post(priv, params, resp->bssid, capability);

 out:
	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
	return ret;
}
1785
1786
1787static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1788 struct cfg80211_ibss_params *params)
1789{
1790 struct lbs_private *priv = wiphy_priv(wiphy);
1791 int ret = 0;
1792 struct cfg80211_bss *bss;
1793 DECLARE_SSID_BUF(ssid_buf);
1794
1795 lbs_deb_enter(LBS_DEB_CFG80211);
1796
1797 if (!params->channel) {
1798 ret = -ENOTSUPP;
1799 goto out;
1800 }
1801
1802 ret = lbs_set_channel(priv, params->channel->hw_value);
1803 if (ret)
1804 goto out;
1805
1806 /* Search if someone is beaconing. This assumes that the
1807 * bss list is populated already */
1808 bss = cfg80211_get_bss(wiphy, params->channel, params->bssid,
1809 params->ssid, params->ssid_len,
1810 WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
1811
1812 if (bss) {
1813 ret = lbs_ibss_join_existing(priv, params, bss);
1814 cfg80211_put_bss(bss);
1815 } else
1816 ret = lbs_ibss_start_new(priv, params);
1817
1818
1819 out:
1820 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1821 return ret;
1822}
1823
1824
1825static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1826{
1827 struct lbs_private *priv = wiphy_priv(wiphy);
1828 struct cmd_ds_802_11_ad_hoc_stop cmd;
1829 int ret = 0;
1830
1831 lbs_deb_enter(LBS_DEB_CFG80211);
1832
1833 memset(&cmd, 0, sizeof(cmd));
1834 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1835 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
1836
1837 /* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
1838 lbs_mac_event_disconnected(priv);
1839
1840 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1841 return ret;
1842}
1843
1844
1845
1846
1847/***************************************************************************
1848 * Initialization
1849 */
103 1850
104static struct cfg80211_ops lbs_cfg80211_ops = { 1851static struct cfg80211_ops lbs_cfg80211_ops = {
105 .set_channel = lbs_cfg_set_channel, 1852 .set_channel = lbs_cfg_set_channel,
1853 .scan = lbs_cfg_scan,
1854 .connect = lbs_cfg_connect,
1855 .disconnect = lbs_cfg_disconnect,
1856 .add_key = lbs_cfg_add_key,
1857 .del_key = lbs_cfg_del_key,
1858 .set_default_key = lbs_cfg_set_default_key,
1859 .get_station = lbs_cfg_get_station,
1860 .dump_survey = lbs_get_survey,
1861 .change_virtual_intf = lbs_change_intf,
1862 .join_ibss = lbs_join_ibss,
1863 .leave_ibss = lbs_leave_ibss,
106}; 1864};
107 1865
108 1866
@@ -142,6 +1900,36 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev)
142} 1900}
143 1901
144 1902
1903static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv)
1904{
1905 struct region_code_mapping {
1906 const char *cn;
1907 int code;
1908 };
1909
1910 /* Section 5.17.2 */
1911 static struct region_code_mapping regmap[] = {
1912 {"US ", 0x10}, /* US FCC */
1913 {"CA ", 0x20}, /* Canada */
1914 {"EU ", 0x30}, /* ETSI */
1915 {"ES ", 0x31}, /* Spain */
1916 {"FR ", 0x32}, /* France */
1917 {"JP ", 0x40}, /* Japan */
1918 };
1919 size_t i;
1920
1921 lbs_deb_enter(LBS_DEB_CFG80211);
1922
1923 for (i = 0; i < ARRAY_SIZE(regmap); i++)
1924 if (regmap[i].code == priv->regioncode) {
1925 regulatory_hint(priv->wdev->wiphy, regmap[i].cn);
1926 break;
1927 }
1928
1929 lbs_deb_leave(LBS_DEB_CFG80211);
1930}
1931
1932
145/* 1933/*
146 * This function gets called after lbs_setup_firmware() determined the 1934 * This function gets called after lbs_setup_firmware() determined the
147 * firmware capabilities. So we can setup the wiphy according to our 1935 * firmware capabilities. So we can setup the wiphy according to our
@@ -157,10 +1945,12 @@ int lbs_cfg_register(struct lbs_private *priv)
157 wdev->wiphy->max_scan_ssids = 1; 1945 wdev->wiphy->max_scan_ssids = 1;
158 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 1946 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
159 1947
160 /* TODO: BIT(NL80211_IFTYPE_ADHOC); */ 1948 wdev->wiphy->interface_modes =
161 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1949 BIT(NL80211_IFTYPE_STATION) |
1950 BIT(NL80211_IFTYPE_ADHOC);
1951 if (lbs_rtap_supported(priv))
1952 wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
162 1953
163 /* TODO: honor priv->regioncode */
164 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz; 1954 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
165 1955
166 /* 1956 /*
@@ -169,6 +1959,7 @@ int lbs_cfg_register(struct lbs_private *priv)
169 */ 1959 */
170 wdev->wiphy->cipher_suites = cipher_suites; 1960 wdev->wiphy->cipher_suites = cipher_suites;
171 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 1961 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
1962 wdev->wiphy->reg_notifier = lbs_reg_notifier;
172 1963
173 ret = wiphy_register(wdev->wiphy); 1964 ret = wiphy_register(wdev->wiphy);
174 if (ret < 0) 1965 if (ret < 0)
@@ -180,10 +1971,36 @@ int lbs_cfg_register(struct lbs_private *priv)
180 if (ret) 1971 if (ret)
181 lbs_pr_err("cannot register network device\n"); 1972 lbs_pr_err("cannot register network device\n");
182 1973
1974 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
1975
1976 lbs_cfg_set_regulatory_hint(priv);
1977
183 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 1978 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
184 return ret; 1979 return ret;
185} 1980}
186 1981
1982int lbs_reg_notifier(struct wiphy *wiphy,
1983 struct regulatory_request *request)
1984{
1985 struct lbs_private *priv = wiphy_priv(wiphy);
1986 int ret;
1987
1988 lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
1989 "callback for domain %c%c\n", request->alpha2[0],
1990 request->alpha2[1]);
1991
1992 ret = lbs_set_11d_domain_info(priv, request, wiphy->bands);
1993
1994 lbs_deb_leave(LBS_DEB_CFG80211);
1995 return ret;
1996}
1997
1998void lbs_scan_deinit(struct lbs_private *priv)
1999{
2000 lbs_deb_enter(LBS_DEB_CFG80211);
2001 cancel_delayed_work_sync(&priv->scan_work);
2002}
2003
187 2004
188void lbs_cfg_free(struct lbs_private *priv) 2005void lbs_cfg_free(struct lbs_private *priv)
189{ 2006{
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index e09a193a34d6..4f46bb744bee 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -1,16 +1,21 @@
1#ifndef __LBS_CFG80211_H__ 1#ifndef __LBS_CFG80211_H__
2#define __LBS_CFG80211_H__ 2#define __LBS_CFG80211_H__
3 3
4#include "dev.h" 4struct device;
5struct lbs_private;
6struct regulatory_request;
7struct wiphy;
5 8
6struct wireless_dev *lbs_cfg_alloc(struct device *dev); 9struct wireless_dev *lbs_cfg_alloc(struct device *dev);
7int lbs_cfg_register(struct lbs_private *priv); 10int lbs_cfg_register(struct lbs_private *priv);
8void lbs_cfg_free(struct lbs_private *priv); 11void lbs_cfg_free(struct lbs_private *priv);
9 12
10int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, 13int lbs_reg_notifier(struct wiphy *wiphy,
11 u8 ssid_len); 14 struct regulatory_request *request);
12int lbs_scan_networks(struct lbs_private *priv, int full_scan);
13void lbs_cfg_scan_worker(struct work_struct *work);
14 15
16void lbs_send_disconnect_notification(struct lbs_private *priv);
17void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
18
19void lbs_scan_deinit(struct lbs_private *priv);
15 20
16#endif 21#endif
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index cdb9b9650d73..70745928f3f8 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -6,18 +6,14 @@
6#include <linux/kfifo.h> 6#include <linux/kfifo.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/if_arp.h>
9 10
10#include "host.h"
11#include "decl.h" 11#include "decl.h"
12#include "defs.h" 12#include "cfg.h"
13#include "dev.h"
14#include "assoc.h"
15#include "wext.h"
16#include "scan.h"
17#include "cmd.h" 13#include "cmd.h"
18 14
19 15#define CAL_NF(nf) ((s32)(-(s32)(nf)))
20static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv); 16#define CAL_RSSI(snr, nf) ((s32)((s32)(snr) + CAL_NF(nf)))
21 17
22/** 18/**
23 * @brief Simple callback that copies response back into command 19 * @brief Simple callback that copies response back into command
@@ -70,6 +66,8 @@ static u8 is_command_allowed_in_ps(u16 cmd)
70 switch (cmd) { 66 switch (cmd) {
71 case CMD_802_11_RSSI: 67 case CMD_802_11_RSSI:
72 return 1; 68 return 1;
69 case CMD_802_11_HOST_SLEEP_CFG:
70 return 1;
73 default: 71 default:
74 break; 72 break;
75 } 73 }
@@ -77,30 +75,6 @@ static u8 is_command_allowed_in_ps(u16 cmd)
77} 75}
78 76
79/** 77/**
80 * @brief This function checks if the command is allowed.
81 *
82 * @param priv A pointer to lbs_private structure
83 * @return allowed or not allowed.
84 */
85
86static int lbs_is_cmd_allowed(struct lbs_private *priv)
87{
88 int ret = 1;
89
90 lbs_deb_enter(LBS_DEB_CMD);
91
92 if (!priv->is_auto_deep_sleep_enabled) {
93 if (priv->is_deep_sleep) {
94 lbs_deb_cmd("command not allowed in deep sleep\n");
95 ret = 0;
96 }
97 }
98
99 lbs_deb_leave(LBS_DEB_CMD);
100 return ret;
101}
102
103/**
104 * @brief Updates the hardware details like MAC address and regulatory region 78 * @brief Updates the hardware details like MAC address and regulatory region
105 * 79 *
106 * @param priv A pointer to struct lbs_private structure 80 * @param priv A pointer to struct lbs_private structure
@@ -175,16 +149,28 @@ int lbs_update_hw_spec(struct lbs_private *priv)
175 if (priv->mesh_dev) 149 if (priv->mesh_dev)
176 memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN); 150 memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
177 151
178 if (lbs_set_regiontable(priv, priv->regioncode, 0)) {
179 ret = -1;
180 goto out;
181 }
182
183out: 152out:
184 lbs_deb_leave(LBS_DEB_CMD); 153 lbs_deb_leave(LBS_DEB_CMD);
185 return ret; 154 return ret;
186} 155}
187 156
157static int lbs_ret_host_sleep_cfg(struct lbs_private *priv, unsigned long dummy,
158 struct cmd_header *resp)
159{
160 lbs_deb_enter(LBS_DEB_CMD);
161 if (priv->is_host_sleep_activated) {
162 priv->is_host_sleep_configured = 0;
163 if (priv->psstate == PS_STATE_FULL_POWER) {
164 priv->is_host_sleep_activated = 0;
165 wake_up_interruptible(&priv->host_sleep_q);
166 }
167 } else {
168 priv->is_host_sleep_configured = 1;
169 }
170 lbs_deb_leave(LBS_DEB_CMD);
171 return 0;
172}
173
188int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, 174int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
189 struct wol_config *p_wol_config) 175 struct wol_config *p_wol_config)
190{ 176{
@@ -202,12 +188,11 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
202 else 188 else
203 cmd_config.wol_conf.action = CMD_ACT_ACTION_NONE; 189 cmd_config.wol_conf.action = CMD_ACT_ACTION_NONE;
204 190
205 ret = lbs_cmd_with_response(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config); 191 ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config.hdr,
192 le16_to_cpu(cmd_config.hdr.size),
193 lbs_ret_host_sleep_cfg, 0);
206 if (!ret) { 194 if (!ret) {
207 if (criteria) { 195 if (p_wol_config)
208 lbs_deb_cmd("Set WOL criteria to %x\n", criteria);
209 priv->wol_criteria = criteria;
210 } else
211 memcpy((uint8_t *) p_wol_config, 196 memcpy((uint8_t *) p_wol_config,
212 (uint8_t *)&cmd_config.wol_conf, 197 (uint8_t *)&cmd_config.wol_conf,
213 sizeof(struct wol_config)); 198 sizeof(struct wol_config));
@@ -219,42 +204,49 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
219} 204}
220EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg); 205EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg);
221 206
222static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd, 207/**
223 u16 cmd_action) 208 * @brief Sets the Power Save mode
209 *
210 * @param priv A pointer to struct lbs_private structure
211 * @param cmd_action The Power Save operation (PS_MODE_ACTION_ENTER_PS or
212 * PS_MODE_ACTION_EXIT_PS)
213 * @param block Whether to block on a response or not
214 *
215 * @return 0 on success, error on failure
216 */
217int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block)
224{ 218{
225 struct cmd_ds_802_11_ps_mode *psm = &cmd->params.psmode; 219 struct cmd_ds_802_11_ps_mode cmd;
220 int ret = 0;
226 221
227 lbs_deb_enter(LBS_DEB_CMD); 222 lbs_deb_enter(LBS_DEB_CMD);
228 223
229 cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); 224 memset(&cmd, 0, sizeof(cmd));
230 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + 225 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
231 sizeof(struct cmd_header)); 226 cmd.action = cpu_to_le16(cmd_action);
232 psm->action = cpu_to_le16(cmd_action);
233 psm->multipledtim = 0;
234 switch (cmd_action) {
235 case CMD_SUBCMD_ENTER_PS:
236 lbs_deb_cmd("PS command:" "SubCode- Enter PS\n");
237
238 psm->locallisteninterval = 0;
239 psm->nullpktinterval = 0;
240 psm->multipledtim =
241 cpu_to_le16(MRVDRV_DEFAULT_MULTIPLE_DTIM);
242 break;
243
244 case CMD_SUBCMD_EXIT_PS:
245 lbs_deb_cmd("PS command:" "SubCode- Exit PS\n");
246 break;
247
248 case CMD_SUBCMD_SLEEP_CONFIRMED:
249 lbs_deb_cmd("PS command: SubCode- sleep confirm\n");
250 break;
251 227
252 default: 228 if (cmd_action == PS_MODE_ACTION_ENTER_PS) {
253 break; 229 lbs_deb_cmd("PS_MODE: action ENTER_PS\n");
230 cmd.multipledtim = cpu_to_le16(1); /* Default DTIM multiple */
231 } else if (cmd_action == PS_MODE_ACTION_EXIT_PS) {
232 lbs_deb_cmd("PS_MODE: action EXIT_PS\n");
233 } else {
234 /* We don't handle CONFIRM_SLEEP here because it needs to
235 * be fastpathed to the firmware.
236 */
237 lbs_deb_cmd("PS_MODE: unknown action 0x%X\n", cmd_action);
238 ret = -EOPNOTSUPP;
239 goto out;
254 } 240 }
255 241
256 lbs_deb_leave(LBS_DEB_CMD); 242 if (block)
257 return 0; 243 ret = lbs_cmd_with_response(priv, CMD_802_11_PS_MODE, &cmd);
244 else
245 lbs_cmd_async(priv, CMD_802_11_PS_MODE, &cmd.hdr, sizeof (cmd));
246
247out:
248 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
249 return ret;
258} 250}
259 251
260int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 252int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
@@ -353,6 +345,65 @@ int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep)
353 return ret; 345 return ret;
354} 346}
355 347
348static int lbs_ret_host_sleep_activate(struct lbs_private *priv,
349 unsigned long dummy,
350 struct cmd_header *cmd)
351{
352 lbs_deb_enter(LBS_DEB_FW);
353 priv->is_host_sleep_activated = 1;
354 wake_up_interruptible(&priv->host_sleep_q);
355 lbs_deb_leave(LBS_DEB_FW);
356 return 0;
357}
358
359int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep)
360{
361 struct cmd_header cmd;
362 int ret = 0;
363 uint32_t criteria = EHS_REMOVE_WAKEUP;
364
365 lbs_deb_enter(LBS_DEB_CMD);
366
367 if (host_sleep) {
368 if (priv->is_host_sleep_activated != 1) {
369 memset(&cmd, 0, sizeof(cmd));
370 ret = lbs_host_sleep_cfg(priv, priv->wol_criteria,
371 (struct wol_config *)NULL);
372 if (ret) {
373 lbs_pr_info("Host sleep configuration failed: "
374 "%d\n", ret);
375 return ret;
376 }
377 if (priv->psstate == PS_STATE_FULL_POWER) {
378 ret = __lbs_cmd(priv,
379 CMD_802_11_HOST_SLEEP_ACTIVATE,
380 &cmd,
381 sizeof(cmd),
382 lbs_ret_host_sleep_activate, 0);
383 if (ret)
384 lbs_pr_info("HOST_SLEEP_ACTIVATE "
385 "failed: %d\n", ret);
386 }
387
388 if (!wait_event_interruptible_timeout(
389 priv->host_sleep_q,
390 priv->is_host_sleep_activated,
391 (10 * HZ))) {
392 lbs_pr_err("host_sleep_q: timer expired\n");
393 ret = -1;
394 }
395 } else {
396 lbs_pr_err("host sleep: already enabled\n");
397 }
398 } else {
399 if (priv->is_host_sleep_activated)
400 ret = lbs_host_sleep_cfg(priv, criteria,
401 (struct wol_config *)NULL);
402 }
403
404 return ret;
405}
406
356/** 407/**
357 * @brief Set an SNMP MIB value 408 * @brief Set an SNMP MIB value
358 * 409 *
@@ -509,23 +560,35 @@ int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
509 return ret; 560 return ret;
510} 561}
511 562
512static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd, 563/**
513 u16 cmd_action, void *pdata_buf) 564 * @brief Enable or disable monitor mode (only implemented on OLPC usb8388 FW)
565 *
566 * @param priv A pointer to struct lbs_private structure
567 * @param enable 1 to enable monitor mode, 0 to disable
568 *
569 * @return 0 on success, error on failure
570 */
571int lbs_set_monitor_mode(struct lbs_private *priv, int enable)
514{ 572{
515 struct cmd_ds_802_11_monitor_mode *monitor = &cmd->params.monitor; 573 struct cmd_ds_802_11_monitor_mode cmd;
574 int ret;
575
576 memset(&cmd, 0, sizeof(cmd));
577 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
578 cmd.action = cpu_to_le16(CMD_ACT_SET);
579 if (enable)
580 cmd.mode = cpu_to_le16(0x1);
516 581
517 cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); 582 lbs_deb_cmd("SET_MONITOR_MODE: %d\n", enable);
518 cmd->size =
519 cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) +
520 sizeof(struct cmd_header));
521 583
522 monitor->action = cpu_to_le16(cmd_action); 584 ret = lbs_cmd_with_response(priv, CMD_802_11_MONITOR_MODE, &cmd);
523 if (cmd_action == CMD_ACT_SET) { 585 if (ret == 0) {
524 monitor->mode = 586 priv->dev->type = enable ? ARPHRD_IEEE80211_RADIOTAP :
525 cpu_to_le16((u16) (*(u32 *) pdata_buf)); 587 ARPHRD_ETHER;
526 } 588 }
527 589
528 return 0; 590 lbs_deb_leave(LBS_DEB_CMD);
591 return ret;
529} 592}
530 593
531/** 594/**
@@ -610,78 +673,242 @@ out:
610 return ret; 673 return ret;
611} 674}
612 675
613static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr, 676/**
614 u8 cmd_action, void *pdata_buf) 677 * @brief Get current RSSI and noise floor
678 *
679 * @param priv A pointer to struct lbs_private structure
680 * @param rssi On successful return, signal level in mBm
681 *
682 * @return The channel on success, error on failure
683 */
684int lbs_get_rssi(struct lbs_private *priv, s8 *rssi, s8 *nf)
615{ 685{
616 struct lbs_offset_value *offval; 686 struct cmd_ds_802_11_rssi cmd;
687 int ret = 0;
617 688
618 lbs_deb_enter(LBS_DEB_CMD); 689 lbs_deb_enter(LBS_DEB_CMD);
619 690
620 offval = (struct lbs_offset_value *)pdata_buf; 691 BUG_ON(rssi == NULL);
692 BUG_ON(nf == NULL);
621 693
622 switch (le16_to_cpu(cmdptr->command)) { 694 memset(&cmd, 0, sizeof(cmd));
623 case CMD_MAC_REG_ACCESS: 695 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
624 { 696 /* Average SNR over last 8 beacons */
625 struct cmd_ds_mac_reg_access *macreg; 697 cmd.n_or_snr = cpu_to_le16(8);
626 698
627 cmdptr->size = 699 ret = lbs_cmd_with_response(priv, CMD_802_11_RSSI, &cmd);
628 cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access) 700 if (ret == 0) {
629 + sizeof(struct cmd_header)); 701 *nf = CAL_NF(le16_to_cpu(cmd.nf));
630 macreg = 702 *rssi = CAL_RSSI(le16_to_cpu(cmd.n_or_snr), le16_to_cpu(cmd.nf));
631 (struct cmd_ds_mac_reg_access *)&cmdptr->params. 703 }
632 macreg;
633 704
634 macreg->action = cpu_to_le16(cmd_action); 705 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
635 macreg->offset = cpu_to_le16((u16) offval->offset); 706 return ret;
636 macreg->value = cpu_to_le32(offval->value); 707}
637 708
638 break; 709/**
639 } 710 * @brief Send regulatory and 802.11d domain information to the firmware
711 *
712 * @param priv pointer to struct lbs_private
713 * @param request cfg80211 regulatory request structure
714 * @param bands the device's supported bands and channels
715 *
716 * @return 0 on success, error code on failure
717*/
718int lbs_set_11d_domain_info(struct lbs_private *priv,
719 struct regulatory_request *request,
720 struct ieee80211_supported_band **bands)
721{
722 struct cmd_ds_802_11d_domain_info cmd;
723 struct mrvl_ie_domain_param_set *domain = &cmd.domain;
724 struct ieee80211_country_ie_triplet *t;
725 enum ieee80211_band band;
726 struct ieee80211_channel *ch;
727 u8 num_triplet = 0;
728 u8 num_parsed_chan = 0;
729 u8 first_channel = 0, next_chan = 0, max_pwr = 0;
730 u8 i, flag = 0;
731 size_t triplet_size;
732 int ret;
640 733
641 case CMD_BBP_REG_ACCESS: 734 lbs_deb_enter(LBS_DEB_11D);
642 {
643 struct cmd_ds_bbp_reg_access *bbpreg;
644 735
645 cmdptr->size = 736 memset(&cmd, 0, sizeof(cmd));
646 cpu_to_le16(sizeof 737 cmd.action = cpu_to_le16(CMD_ACT_SET);
647 (struct cmd_ds_bbp_reg_access)
648 + sizeof(struct cmd_header));
649 bbpreg =
650 (struct cmd_ds_bbp_reg_access *)&cmdptr->params.
651 bbpreg;
652 738
653 bbpreg->action = cpu_to_le16(cmd_action); 739 lbs_deb_11d("Setting country code '%c%c'\n",
654 bbpreg->offset = cpu_to_le16((u16) offval->offset); 740 request->alpha2[0], request->alpha2[1]);
655 bbpreg->value = (u8) offval->value;
656 741
657 break; 742 domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
658 }
659 743
660 case CMD_RF_REG_ACCESS: 744 /* Set country code */
661 { 745 domain->country_code[0] = request->alpha2[0];
662 struct cmd_ds_rf_reg_access *rfreg; 746 domain->country_code[1] = request->alpha2[1];
747 domain->country_code[2] = ' ';
663 748
664 cmdptr->size = 749 /* Now set up the channel triplets; firmware is somewhat picky here
665 cpu_to_le16(sizeof 750 * and doesn't validate channel numbers and spans; hence it would
666 (struct cmd_ds_rf_reg_access) + 751 * interpret a triplet of (36, 4, 20) as channels 36, 37, 38, 39. Since
667 sizeof(struct cmd_header)); 752 * the last 3 aren't valid channels, the driver is responsible for
668 rfreg = 753 * splitting that up into 4 triplet pairs of (36, 1, 20) + (40, 1, 20)
669 (struct cmd_ds_rf_reg_access *)&cmdptr->params. 754 * etc.
670 rfreg; 755 */
756 for (band = 0;
757 (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
758 band++) {
759
760 if (!bands[band])
761 continue;
762
763 for (i = 0;
764 (i < bands[band]->n_channels) && (num_triplet < MAX_11D_TRIPLETS);
765 i++) {
766 ch = &bands[band]->channels[i];
767 if (ch->flags & IEEE80211_CHAN_DISABLED)
768 continue;
769
770 if (!flag) {
771 flag = 1;
772 next_chan = first_channel = (u32) ch->hw_value;
773 max_pwr = ch->max_power;
774 num_parsed_chan = 1;
775 continue;
776 }
671 777
672 rfreg->action = cpu_to_le16(cmd_action); 778 if ((ch->hw_value == next_chan + 1) &&
673 rfreg->offset = cpu_to_le16((u16) offval->offset); 779 (ch->max_power == max_pwr)) {
674 rfreg->value = (u8) offval->value; 780 /* Consolidate adjacent channels */
781 next_chan++;
782 num_parsed_chan++;
783 } else {
784 /* Add this triplet */
785 lbs_deb_11d("11D triplet (%d, %d, %d)\n",
786 first_channel, num_parsed_chan,
787 max_pwr);
788 t = &domain->triplet[num_triplet];
789 t->chans.first_channel = first_channel;
790 t->chans.num_channels = num_parsed_chan;
791 t->chans.max_power = max_pwr;
792 num_triplet++;
793 flag = 0;
794 }
795 }
675 796
676 break; 797 if (flag) {
798 /* Add last triplet */
799 lbs_deb_11d("11D triplet (%d, %d, %d)\n", first_channel,
800 num_parsed_chan, max_pwr);
801 t = &domain->triplet[num_triplet];
802 t->chans.first_channel = first_channel;
803 t->chans.num_channels = num_parsed_chan;
804 t->chans.max_power = max_pwr;
805 num_triplet++;
677 } 806 }
807 }
678 808
679 default: 809 lbs_deb_11d("# triplets %d\n", num_triplet);
680 break; 810
811 /* Set command header sizes */
812 triplet_size = num_triplet * sizeof(struct ieee80211_country_ie_triplet);
813 domain->header.len = cpu_to_le16(sizeof(domain->country_code) +
814 triplet_size);
815
816 lbs_deb_hex(LBS_DEB_11D, "802.11D domain param set",
817 (u8 *) &cmd.domain.country_code,
818 le16_to_cpu(domain->header.len));
819
820 cmd.hdr.size = cpu_to_le16(sizeof(cmd.hdr) +
821 sizeof(cmd.action) +
822 sizeof(cmd.domain.header) +
823 sizeof(cmd.domain.country_code) +
824 triplet_size);
825
826 ret = lbs_cmd_with_response(priv, CMD_802_11D_DOMAIN_INFO, &cmd);
827
828 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
829 return ret;
830}
831
832/**
833 * @brief Read a MAC, Baseband, or RF register
834 *
835 * @param priv pointer to struct lbs_private
836 * @param cmd register command, one of CMD_MAC_REG_ACCESS,
837 * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS
838 * @param offset byte offset of the register to get
839 * @param value on success, the value of the register at 'offset'
840 *
841 * @return 0 on success, error code on failure
842*/
843int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value)
844{
845 struct cmd_ds_reg_access cmd;
846 int ret = 0;
847
848 lbs_deb_enter(LBS_DEB_CMD);
849
850 BUG_ON(value == NULL);
851
852 memset(&cmd, 0, sizeof(cmd));
853 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
854 cmd.action = cpu_to_le16(CMD_ACT_GET);
855
856 if (reg != CMD_MAC_REG_ACCESS &&
857 reg != CMD_BBP_REG_ACCESS &&
858 reg != CMD_RF_REG_ACCESS) {
859 ret = -EINVAL;
860 goto out;
681 } 861 }
682 862
683 lbs_deb_leave(LBS_DEB_CMD); 863 ret = lbs_cmd_with_response(priv, reg, &cmd);
684 return 0; 864 if (ret) {
865 if (reg == CMD_BBP_REG_ACCESS || reg == CMD_RF_REG_ACCESS)
866 *value = cmd.value.bbp_rf;
867 else if (reg == CMD_MAC_REG_ACCESS)
868 *value = le32_to_cpu(cmd.value.mac);
869 }
870
871out:
872 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
873 return ret;
874}
875
876/**
877 * @brief Write a MAC, Baseband, or RF register
878 *
879 * @param priv pointer to struct lbs_private
880 * @param cmd register command, one of CMD_MAC_REG_ACCESS,
881 * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS
882 * @param offset byte offset of the register to set
883 * @param value the value to write to the register at 'offset'
884 *
885 * @return 0 on success, error code on failure
886*/
887int lbs_set_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 value)
888{
889 struct cmd_ds_reg_access cmd;
890 int ret = 0;
891
892 lbs_deb_enter(LBS_DEB_CMD);
893
894 memset(&cmd, 0, sizeof(cmd));
895 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
896 cmd.action = cpu_to_le16(CMD_ACT_SET);
897
898 if (reg == CMD_BBP_REG_ACCESS || reg == CMD_RF_REG_ACCESS)
899 cmd.value.bbp_rf = (u8) (value & 0xFF);
900 else if (reg == CMD_MAC_REG_ACCESS)
901 cmd.value.mac = cpu_to_le32(value);
902 else {
903 ret = -EINVAL;
904 goto out;
905 }
906
907 ret = lbs_cmd_with_response(priv, reg, &cmd);
908
909out:
910 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
911 return ret;
685} 912}
686 913
687static void lbs_queue_cmd(struct lbs_private *priv, 914static void lbs_queue_cmd(struct lbs_private *priv,
@@ -704,14 +931,17 @@ static void lbs_queue_cmd(struct lbs_private *priv,
704 931
705 /* Exit_PS command needs to be queued in the header always. */ 932 /* Exit_PS command needs to be queued in the header always. */
706 if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) { 933 if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) {
707 struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf[1]; 934 struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf;
708 935
709 if (psm->action == cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { 936 if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
710 if (priv->psstate != PS_STATE_FULL_POWER) 937 if (priv->psstate != PS_STATE_FULL_POWER)
711 addtail = 0; 938 addtail = 0;
712 } 939 }
713 } 940 }
714 941
942 if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_WAKEUP_CONFIRM)
943 addtail = 0;
944
715 spin_lock_irqsave(&priv->driver_lock, flags); 945 spin_lock_irqsave(&priv->driver_lock, flags);
716 946
717 if (addtail) 947 if (addtail)
@@ -744,7 +974,6 @@ static void lbs_submit_command(struct lbs_private *priv,
744 974
745 spin_lock_irqsave(&priv->driver_lock, flags); 975 spin_lock_irqsave(&priv->driver_lock, flags);
746 priv->cur_cmd = cmdnode; 976 priv->cur_cmd = cmdnode;
747 priv->cur_cmd_retcode = 0;
748 spin_unlock_irqrestore(&priv->driver_lock, flags); 977 spin_unlock_irqrestore(&priv->driver_lock, flags);
749 978
750 cmdsize = le16_to_cpu(cmd->size); 979 cmdsize = le16_to_cpu(cmd->size);
@@ -817,9 +1046,6 @@ static void lbs_cleanup_and_insert_cmd(struct lbs_private *priv,
817void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd, 1046void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
818 int result) 1047 int result)
819{ 1048{
820 if (cmd == priv->cur_cmd)
821 priv->cur_cmd_retcode = result;
822
823 cmd->result = result; 1049 cmd->result = result;
824 cmd->cmdwaitqwoken = 1; 1050 cmd->cmdwaitqwoken = 1;
825 wake_up_interruptible(&cmd->cmdwait_q); 1051 wake_up_interruptible(&cmd->cmdwait_q);
@@ -887,175 +1113,6 @@ void lbs_set_mac_control(struct lbs_private *priv)
887} 1113}
888 1114
889/** 1115/**
890 * @brief This function prepare the command before send to firmware.
891 *
892 * @param priv A pointer to struct lbs_private structure
893 * @param cmd_no command number
894 * @param cmd_action command action: GET or SET
895 * @param wait_option wait option: wait response or not
896 * @param cmd_oid cmd oid: treated as sub command
897 * @param pdata_buf A pointer to information buffer
898 * @return 0 or -1
899 */
900int lbs_prepare_and_send_command(struct lbs_private *priv,
901 u16 cmd_no,
902 u16 cmd_action,
903 u16 wait_option, u32 cmd_oid, void *pdata_buf)
904{
905 int ret = 0;
906 struct cmd_ctrl_node *cmdnode;
907 struct cmd_ds_command *cmdptr;
908 unsigned long flags;
909
910 lbs_deb_enter(LBS_DEB_HOST);
911
912 if (!priv) {
913 lbs_deb_host("PREP_CMD: priv is NULL\n");
914 ret = -1;
915 goto done;
916 }
917
918 if (priv->surpriseremoved) {
919 lbs_deb_host("PREP_CMD: card removed\n");
920 ret = -1;
921 goto done;
922 }
923
924 if (!lbs_is_cmd_allowed(priv)) {
925 ret = -EBUSY;
926 goto done;
927 }
928
929 cmdnode = lbs_get_cmd_ctrl_node(priv);
930
931 if (cmdnode == NULL) {
932 lbs_deb_host("PREP_CMD: cmdnode is NULL\n");
933
934 /* Wake up main thread to execute next command */
935 wake_up_interruptible(&priv->waitq);
936 ret = -1;
937 goto done;
938 }
939
940 cmdnode->callback = NULL;
941 cmdnode->callback_arg = (unsigned long)pdata_buf;
942
943 cmdptr = (struct cmd_ds_command *)cmdnode->cmdbuf;
944
945 lbs_deb_host("PREP_CMD: command 0x%04x\n", cmd_no);
946
947 /* Set sequence number, command and INT option */
948 priv->seqnum++;
949 cmdptr->seqnum = cpu_to_le16(priv->seqnum);
950
951 cmdptr->command = cpu_to_le16(cmd_no);
952 cmdptr->result = 0;
953
954 switch (cmd_no) {
955 case CMD_802_11_PS_MODE:
956 ret = lbs_cmd_802_11_ps_mode(cmdptr, cmd_action);
957 break;
958
959 case CMD_MAC_REG_ACCESS:
960 case CMD_BBP_REG_ACCESS:
961 case CMD_RF_REG_ACCESS:
962 ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf);
963 break;
964
965 case CMD_802_11_MONITOR_MODE:
966 ret = lbs_cmd_802_11_monitor_mode(cmdptr,
967 cmd_action, pdata_buf);
968 break;
969
970 case CMD_802_11_RSSI:
971 ret = lbs_cmd_802_11_rssi(priv, cmdptr);
972 break;
973
974 case CMD_802_11_SET_AFC:
975 case CMD_802_11_GET_AFC:
976
977 cmdptr->command = cpu_to_le16(cmd_no);
978 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) +
979 sizeof(struct cmd_header));
980
981 memmove(&cmdptr->params.afc,
982 pdata_buf, sizeof(struct cmd_ds_802_11_afc));
983
984 ret = 0;
985 goto done;
986
987 case CMD_802_11_TPC_CFG:
988 cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
989 cmdptr->size =
990 cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) +
991 sizeof(struct cmd_header));
992
993 memmove(&cmdptr->params.tpccfg,
994 pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg));
995
996 ret = 0;
997 break;
998
999#ifdef CONFIG_LIBERTAS_MESH
1000
1001 case CMD_BT_ACCESS:
1002 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
1003 break;
1004
1005 case CMD_FWT_ACCESS:
1006 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1007 break;
1008
1009#endif
1010
1011 case CMD_802_11_BEACON_CTRL:
1012 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1013 break;
1014 case CMD_802_11_DEEP_SLEEP:
1015 cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP);
1016 cmdptr->size = cpu_to_le16(sizeof(struct cmd_header));
1017 break;
1018 default:
1019 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
1020 ret = -1;
1021 break;
1022 }
1023
1024 /* return error, since the command preparation failed */
1025 if (ret != 0) {
1026 lbs_deb_host("PREP_CMD: command preparation failed\n");
1027 lbs_cleanup_and_insert_cmd(priv, cmdnode);
1028 ret = -1;
1029 goto done;
1030 }
1031
1032 cmdnode->cmdwaitqwoken = 0;
1033
1034 lbs_queue_cmd(priv, cmdnode);
1035 wake_up_interruptible(&priv->waitq);
1036
1037 if (wait_option & CMD_OPTION_WAITFORRSP) {
1038 lbs_deb_host("PREP_CMD: wait for response\n");
1039 might_sleep();
1040 wait_event_interruptible(cmdnode->cmdwait_q,
1041 cmdnode->cmdwaitqwoken);
1042 }
1043
1044 spin_lock_irqsave(&priv->driver_lock, flags);
1045 if (priv->cur_cmd_retcode) {
1046 lbs_deb_host("PREP_CMD: command failed with return code %d\n",
1047 priv->cur_cmd_retcode);
1048 priv->cur_cmd_retcode = 0;
1049 ret = -1;
1050 }
1051 spin_unlock_irqrestore(&priv->driver_lock, flags);
1052
1053done:
1054 lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
1055 return ret;
1056}
1057
1058/**
1059 * @brief This function allocates the command buffer and link 1116 * @brief This function allocates the command buffer and link
1060 * it to command free queue. 1117 * it to command free queue.
1061 * 1118 *
@@ -1148,7 +1205,7 @@ done:
1148 * @param priv A pointer to struct lbs_private structure 1205 * @param priv A pointer to struct lbs_private structure
1149 * @return cmd_ctrl_node A pointer to cmd_ctrl_node structure or NULL 1206 * @return cmd_ctrl_node A pointer to cmd_ctrl_node structure or NULL
1150 */ 1207 */
1151static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv) 1208static struct cmd_ctrl_node *lbs_get_free_cmd_node(struct lbs_private *priv)
1152{ 1209{
1153 struct cmd_ctrl_node *tempnode; 1210 struct cmd_ctrl_node *tempnode;
1154 unsigned long flags; 1211 unsigned long flags;
@@ -1231,10 +1288,10 @@ int lbs_execute_next_command(struct lbs_private *priv)
1231 /* 1288 /*
1232 * 1. Non-PS command: 1289 * 1. Non-PS command:
1233 * Queue it. set needtowakeup to TRUE if current state 1290 * Queue it. set needtowakeup to TRUE if current state
1234 * is SLEEP, otherwise call lbs_ps_wakeup to send Exit_PS. 1291 * is SLEEP, otherwise call send EXIT_PS.
1235 * 2. PS command but not Exit_PS: 1292 * 2. PS command but not EXIT_PS:
1236 * Ignore it. 1293 * Ignore it.
1237 * 3. PS command Exit_PS: 1294 * 3. PS command EXIT_PS:
1238 * Set needtowakeup to TRUE if current state is SLEEP, 1295 * Set needtowakeup to TRUE if current state is SLEEP,
1239 * otherwise send this command down to firmware 1296 * otherwise send this command down to firmware
1240 * immediately. 1297 * immediately.
@@ -1248,8 +1305,11 @@ int lbs_execute_next_command(struct lbs_private *priv)
1248 /* w/ new scheme, it will not reach here. 1305 /* w/ new scheme, it will not reach here.
1249 since it is blocked in main_thread. */ 1306 since it is blocked in main_thread. */
1250 priv->needtowakeup = 1; 1307 priv->needtowakeup = 1;
1251 } else 1308 } else {
1252 lbs_ps_wakeup(priv, 0); 1309 lbs_set_ps_mode(priv,
1310 PS_MODE_ACTION_EXIT_PS,
1311 false);
1312 }
1253 1313
1254 ret = 0; 1314 ret = 0;
1255 goto done; 1315 goto done;
@@ -1264,7 +1324,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
1264 "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", 1324 "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n",
1265 psm->action); 1325 psm->action);
1266 if (psm->action != 1326 if (psm->action !=
1267 cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { 1327 cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
1268 lbs_deb_host( 1328 lbs_deb_host(
1269 "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); 1329 "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n");
1270 list_del(&cmdnode->list); 1330 list_del(&cmdnode->list);
@@ -1303,6 +1363,15 @@ int lbs_execute_next_command(struct lbs_private *priv)
1303 * check if in power save mode, if yes, put the device back 1363 * check if in power save mode, if yes, put the device back
1304 * to PS mode 1364 * to PS mode
1305 */ 1365 */
1366#ifdef TODO
1367 /*
1368 * This was the old code for libertas+wext. Someone that
1369 * understands this beast should re-code it in a sane way.
1370 *
1371 * I actually don't understand why this is related to WPA
1372 * and to connection status, shouldn't powering should be
1373 * independ of such things?
1374 */
1306 if ((priv->psmode != LBS802_11POWERMODECAM) && 1375 if ((priv->psmode != LBS802_11POWERMODECAM) &&
1307 (priv->psstate == PS_STATE_FULL_POWER) && 1376 (priv->psstate == PS_STATE_FULL_POWER) &&
1308 ((priv->connect_status == LBS_CONNECTED) || 1377 ((priv->connect_status == LBS_CONNECTED) ||
@@ -1315,15 +1384,19 @@ int lbs_execute_next_command(struct lbs_private *priv)
1315 lbs_deb_host( 1384 lbs_deb_host(
1316 "EXEC_NEXT_CMD: WPA enabled and GTK_SET" 1385 "EXEC_NEXT_CMD: WPA enabled and GTK_SET"
1317 " go back to PS_SLEEP"); 1386 " go back to PS_SLEEP");
1318 lbs_ps_sleep(priv, 0); 1387 lbs_set_ps_mode(priv,
1388 PS_MODE_ACTION_ENTER_PS,
1389 false);
1319 } 1390 }
1320 } else { 1391 } else {
1321 lbs_deb_host( 1392 lbs_deb_host(
1322 "EXEC_NEXT_CMD: cmdpendingq empty, " 1393 "EXEC_NEXT_CMD: cmdpendingq empty, "
1323 "go back to PS_SLEEP"); 1394 "go back to PS_SLEEP");
1324 lbs_ps_sleep(priv, 0); 1395 lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS,
1396 false);
1325 } 1397 }
1326 } 1398 }
1399#endif
1327 } 1400 }
1328 1401
1329 ret = 0; 1402 ret = 0;
@@ -1353,6 +1426,11 @@ static void lbs_send_confirmsleep(struct lbs_private *priv)
1353 /* We don't get a response on the sleep-confirmation */ 1426 /* We don't get a response on the sleep-confirmation */
1354 priv->dnld_sent = DNLD_RES_RECEIVED; 1427 priv->dnld_sent = DNLD_RES_RECEIVED;
1355 1428
1429 if (priv->is_host_sleep_configured) {
1430 priv->is_host_sleep_activated = 1;
1431 wake_up_interruptible(&priv->host_sleep_q);
1432 }
1433
1356 /* If nothing to do, go back to sleep (?) */ 1434 /* If nothing to do, go back to sleep (?) */
1357 if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) 1435 if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx])
1358 priv->psstate = PS_STATE_SLEEP; 1436 priv->psstate = PS_STATE_SLEEP;
@@ -1363,43 +1441,6 @@ out:
1363 lbs_deb_leave(LBS_DEB_HOST); 1441 lbs_deb_leave(LBS_DEB_HOST);
1364} 1442}
1365 1443
1366void lbs_ps_sleep(struct lbs_private *priv, int wait_option)
1367{
1368 lbs_deb_enter(LBS_DEB_HOST);
1369
1370 /*
1371 * PS is currently supported only in Infrastructure mode
1372 * Remove this check if it is to be supported in IBSS mode also
1373 */
1374
1375 lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
1376 CMD_SUBCMD_ENTER_PS, wait_option, 0, NULL);
1377
1378 lbs_deb_leave(LBS_DEB_HOST);
1379}
1380
1381/**
1382 * @brief This function sends Exit_PS command to firmware.
1383 *
1384 * @param priv A pointer to struct lbs_private structure
1385 * @param wait_option wait response or not
1386 * @return n/a
1387 */
1388void lbs_ps_wakeup(struct lbs_private *priv, int wait_option)
1389{
1390 __le32 Localpsmode;
1391
1392 lbs_deb_enter(LBS_DEB_HOST);
1393
1394 Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM);
1395
1396 lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
1397 CMD_SUBCMD_EXIT_PS,
1398 wait_option, 0, &Localpsmode);
1399
1400 lbs_deb_leave(LBS_DEB_HOST);
1401}
1402
1403/** 1444/**
1404 * @brief This function checks condition and prepares to 1445 * @brief This function checks condition and prepares to
1405 * send sleep confirm command to firmware if ok. 1446 * send sleep confirm command to firmware if ok.
@@ -1524,12 +1565,18 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
1524 goto done; 1565 goto done;
1525 } 1566 }
1526 1567
1527 if (!lbs_is_cmd_allowed(priv)) { 1568 /* No commands are allowed in Deep Sleep until we toggle the GPIO
1528 cmdnode = ERR_PTR(-EBUSY); 1569 * to wake up the card and it has signaled that it's ready.
1529 goto done; 1570 */
1571 if (!priv->is_auto_deep_sleep_enabled) {
1572 if (priv->is_deep_sleep) {
1573 lbs_deb_cmd("command not allowed in deep sleep\n");
1574 cmdnode = ERR_PTR(-EBUSY);
1575 goto done;
1576 }
1530 } 1577 }
1531 1578
1532 cmdnode = lbs_get_cmd_ctrl_node(priv); 1579 cmdnode = lbs_get_free_cmd_node(priv);
1533 if (cmdnode == NULL) { 1580 if (cmdnode == NULL) {
1534 lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); 1581 lbs_deb_host("PREP_CMD: cmdnode is NULL\n");
1535 1582
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index cb4138a55fdf..7109d6b717ea 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -3,6 +3,8 @@
3#ifndef _LBS_CMD_H_ 3#ifndef _LBS_CMD_H_
4#define _LBS_CMD_H_ 4#define _LBS_CMD_H_
5 5
6#include <net/cfg80211.h>
7
6#include "host.h" 8#include "host.h"
7#include "dev.h" 9#include "dev.h"
8 10
@@ -37,11 +39,6 @@ struct cmd_ctrl_node {
37#define lbs_cmd_with_response(priv, cmdnr, cmd) \ 39#define lbs_cmd_with_response(priv, cmdnr, cmd) \
38 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd)) 40 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd))
39 41
40int lbs_prepare_and_send_command(struct lbs_private *priv,
41 u16 cmd_no,
42 u16 cmd_action,
43 u16 wait_option, u32 cmd_oid, void *pdata_buf);
44
45void lbs_cmd_async(struct lbs_private *priv, uint16_t command, 42void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
46 struct cmd_header *in_cmd, int in_cmd_size); 43 struct cmd_header *in_cmd, int in_cmd_size);
47 44
@@ -92,10 +89,6 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
92int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 89int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
93 struct sleep_params *sp); 90 struct sleep_params *sp);
94 91
95void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
96
97void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
98
99void lbs_ps_confirm_sleep(struct lbs_private *priv); 92void lbs_ps_confirm_sleep(struct lbs_private *priv);
100 93
101int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); 94int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
@@ -127,4 +120,20 @@ int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
127 120
128int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep); 121int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
129 122
123int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep);
124
125int lbs_set_monitor_mode(struct lbs_private *priv, int enable);
126
127int lbs_get_rssi(struct lbs_private *priv, s8 *snr, s8 *nf);
128
129int lbs_set_11d_domain_info(struct lbs_private *priv,
130 struct regulatory_request *request,
131 struct ieee80211_supported_band **bands);
132
133int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value);
134
135int lbs_set_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 value);
136
137int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block);
138
130#endif /* _LBS_CMD_H */ 139#endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 88f7131d66e9..5e95da9dcc2e 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -5,18 +5,11 @@
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/delay.h> 6#include <linux/delay.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/if_arp.h>
9#include <linux/netdevice.h>
10#include <asm/unaligned.h> 8#include <asm/unaligned.h>
11#include <net/iw_handler.h> 9#include <net/cfg80211.h>
12 10
13#include "host.h" 11#include "cfg.h"
14#include "decl.h"
15#include "cmd.h" 12#include "cmd.h"
16#include "defs.h"
17#include "dev.h"
18#include "assoc.h"
19#include "wext.h"
20 13
21/** 14/**
22 * @brief This function handles disconnect event. it 15 * @brief This function handles disconnect event. it
@@ -38,7 +31,9 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
38 * It causes problem in the Supplicant 31 * It causes problem in the Supplicant
39 */ 32 */
40 msleep_interruptible(1000); 33 msleep_interruptible(1000);
41 lbs_send_disconnect_notification(priv); 34
35 if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
36 lbs_send_disconnect_notification(priv);
42 37
43 /* report disconnect to upper layer */ 38 /* report disconnect to upper layer */
44 netif_stop_queue(priv->dev); 39 netif_stop_queue(priv->dev);
@@ -49,141 +44,16 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
49 priv->currenttxskb = NULL; 44 priv->currenttxskb = NULL;
50 priv->tx_pending_len = 0; 45 priv->tx_pending_len = 0;
51 46
52 /* reset SNR/NF/RSSI values */
53 memset(priv->SNR, 0x00, sizeof(priv->SNR));
54 memset(priv->NF, 0x00, sizeof(priv->NF));
55 memset(priv->RSSI, 0x00, sizeof(priv->RSSI));
56 memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
57 memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
58 priv->nextSNRNF = 0;
59 priv->numSNRNF = 0;
60 priv->connect_status = LBS_DISCONNECTED; 47 priv->connect_status = LBS_DISCONNECTED;
61 48
62 /* Clear out associated SSID and BSSID since connection is
63 * no longer valid.
64 */
65 memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
66 memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
67 priv->curbssparams.ssid_len = 0;
68
69 if (priv->psstate != PS_STATE_FULL_POWER) { 49 if (priv->psstate != PS_STATE_FULL_POWER) {
70 /* make firmware to exit PS mode */ 50 /* make firmware to exit PS mode */
71 lbs_deb_cmd("disconnected, so exit PS mode\n"); 51 lbs_deb_cmd("disconnected, so exit PS mode\n");
72 lbs_ps_wakeup(priv, 0); 52 lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false);
73 } 53 }
74 lbs_deb_leave(LBS_DEB_ASSOC); 54 lbs_deb_leave(LBS_DEB_ASSOC);
75} 55}
76 56
77static int lbs_ret_reg_access(struct lbs_private *priv,
78 u16 type, struct cmd_ds_command *resp)
79{
80 int ret = 0;
81
82 lbs_deb_enter(LBS_DEB_CMD);
83
84 switch (type) {
85 case CMD_RET(CMD_MAC_REG_ACCESS):
86 {
87 struct cmd_ds_mac_reg_access *reg = &resp->params.macreg;
88
89 priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset);
90 priv->offsetvalue.value = le32_to_cpu(reg->value);
91 break;
92 }
93
94 case CMD_RET(CMD_BBP_REG_ACCESS):
95 {
96 struct cmd_ds_bbp_reg_access *reg = &resp->params.bbpreg;
97
98 priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset);
99 priv->offsetvalue.value = reg->value;
100 break;
101 }
102
103 case CMD_RET(CMD_RF_REG_ACCESS):
104 {
105 struct cmd_ds_rf_reg_access *reg = &resp->params.rfreg;
106
107 priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset);
108 priv->offsetvalue.value = reg->value;
109 break;
110 }
111
112 default:
113 ret = -1;
114 }
115
116 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
117 return ret;
118}
119
120static inline int handle_cmd_response(struct lbs_private *priv,
121 struct cmd_header *cmd_response)
122{
123 struct cmd_ds_command *resp = (struct cmd_ds_command *) cmd_response;
124 int ret = 0;
125 unsigned long flags;
126 uint16_t respcmd = le16_to_cpu(resp->command);
127
128 lbs_deb_enter(LBS_DEB_HOST);
129
130 switch (respcmd) {
131 case CMD_RET(CMD_MAC_REG_ACCESS):
132 case CMD_RET(CMD_BBP_REG_ACCESS):
133 case CMD_RET(CMD_RF_REG_ACCESS):
134 ret = lbs_ret_reg_access(priv, respcmd, resp);
135 break;
136
137 case CMD_RET(CMD_802_11_SET_AFC):
138 case CMD_RET(CMD_802_11_GET_AFC):
139 spin_lock_irqsave(&priv->driver_lock, flags);
140 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.afc,
141 sizeof(struct cmd_ds_802_11_afc));
142 spin_unlock_irqrestore(&priv->driver_lock, flags);
143
144 break;
145
146 case CMD_RET(CMD_802_11_BEACON_STOP):
147 break;
148
149 case CMD_RET(CMD_802_11_RSSI):
150 ret = lbs_ret_802_11_rssi(priv, resp);
151 break;
152
153 case CMD_RET(CMD_802_11_TPC_CFG):
154 spin_lock_irqsave(&priv->driver_lock, flags);
155 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
156 sizeof(struct cmd_ds_802_11_tpc_cfg));
157 spin_unlock_irqrestore(&priv->driver_lock, flags);
158 break;
159
160 case CMD_RET(CMD_BT_ACCESS):
161 spin_lock_irqsave(&priv->driver_lock, flags);
162 if (priv->cur_cmd->callback_arg)
163 memcpy((void *)priv->cur_cmd->callback_arg,
164 &resp->params.bt.addr1, 2 * ETH_ALEN);
165 spin_unlock_irqrestore(&priv->driver_lock, flags);
166 break;
167 case CMD_RET(CMD_FWT_ACCESS):
168 spin_lock_irqsave(&priv->driver_lock, flags);
169 if (priv->cur_cmd->callback_arg)
170 memcpy((void *)priv->cur_cmd->callback_arg, &resp->params.fwt,
171 sizeof(resp->params.fwt));
172 spin_unlock_irqrestore(&priv->driver_lock, flags);
173 break;
174 case CMD_RET(CMD_802_11_BEACON_CTRL):
175 ret = lbs_ret_802_11_bcn_ctrl(priv, resp);
176 break;
177
178 default:
179 lbs_pr_err("CMD_RESP: unknown cmd response 0x%04x\n",
180 le16_to_cpu(resp->command));
181 break;
182 }
183 lbs_deb_leave(LBS_DEB_HOST);
184 return ret;
185}
186
187int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) 57int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
188{ 58{
189 uint16_t respcmd, curcmd; 59 uint16_t respcmd, curcmd;
@@ -242,9 +112,6 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
242 del_timer(&priv->command_timer); 112 del_timer(&priv->command_timer);
243 priv->cmd_timed_out = 0; 113 priv->cmd_timed_out = 0;
244 114
245 /* Store the response code to cur_cmd_retcode. */
246 priv->cur_cmd_retcode = result;
247
248 if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { 115 if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) {
249 struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1]; 116 struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1];
250 u16 action = le16_to_cpu(psmode->action); 117 u16 action = le16_to_cpu(psmode->action);
@@ -261,10 +128,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
261 * ad-hoc mode. It takes place in 128 * ad-hoc mode. It takes place in
262 * lbs_execute_next_command(). 129 * lbs_execute_next_command().
263 */ 130 */
264 if (priv->mode == IW_MODE_ADHOC && 131 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR &&
265 action == CMD_SUBCMD_ENTER_PS) 132 action == PS_MODE_ACTION_ENTER_PS)
266 priv->psmode = LBS802_11POWERMODECAM; 133 priv->psmode = LBS802_11POWERMODECAM;
267 } else if (action == CMD_SUBCMD_ENTER_PS) { 134 } else if (action == PS_MODE_ACTION_ENTER_PS) {
268 priv->needtowakeup = 0; 135 priv->needtowakeup = 0;
269 priv->psstate = PS_STATE_AWAKE; 136 priv->psstate = PS_STATE_AWAKE;
270 137
@@ -279,11 +146,12 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
279 146
280 spin_unlock_irqrestore(&priv->driver_lock, flags); 147 spin_unlock_irqrestore(&priv->driver_lock, flags);
281 mutex_unlock(&priv->lock); 148 mutex_unlock(&priv->lock);
282 lbs_ps_wakeup(priv, 0); 149 lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS,
150 false);
283 mutex_lock(&priv->lock); 151 mutex_lock(&priv->lock);
284 spin_lock_irqsave(&priv->driver_lock, flags); 152 spin_lock_irqsave(&priv->driver_lock, flags);
285 } 153 }
286 } else if (action == CMD_SUBCMD_EXIT_PS) { 154 } else if (action == PS_MODE_ACTION_EXIT_PS) {
287 priv->needtowakeup = 0; 155 priv->needtowakeup = 0;
288 priv->psstate = PS_STATE_FULL_POWER; 156 priv->psstate = PS_STATE_FULL_POWER;
289 lbs_deb_host("CMD_RESP: EXIT_PS command response\n"); 157 lbs_deb_host("CMD_RESP: EXIT_PS command response\n");
@@ -324,8 +192,7 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
324 if (priv->cur_cmd && priv->cur_cmd->callback) { 192 if (priv->cur_cmd && priv->cur_cmd->callback) {
325 ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg, 193 ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg,
326 resp); 194 resp);
327 } else 195 }
328 ret = handle_cmd_response(priv, resp);
329 196
330 spin_lock_irqsave(&priv->driver_lock, flags); 197 spin_lock_irqsave(&priv->driver_lock, flags);
331 198
@@ -341,32 +208,10 @@ done:
341 return ret; 208 return ret;
342} 209}
343 210
344static int lbs_send_confirmwake(struct lbs_private *priv)
345{
346 struct cmd_header cmd;
347 int ret = 0;
348
349 lbs_deb_enter(LBS_DEB_HOST);
350
351 cmd.command = cpu_to_le16(CMD_802_11_WAKEUP_CONFIRM);
352 cmd.size = cpu_to_le16(sizeof(cmd));
353 cmd.seqnum = cpu_to_le16(++priv->seqnum);
354 cmd.result = 0;
355
356 lbs_deb_hex(LBS_DEB_HOST, "wake confirm", (u8 *) &cmd,
357 sizeof(cmd));
358
359 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &cmd, sizeof(cmd));
360 if (ret)
361 lbs_pr_alert("SEND_WAKEC_CMD: Host to Card failed for Confirm Wake\n");
362
363 lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
364 return ret;
365}
366
367int lbs_process_event(struct lbs_private *priv, u32 event) 211int lbs_process_event(struct lbs_private *priv, u32 event)
368{ 212{
369 int ret = 0; 213 int ret = 0;
214 struct cmd_header cmd;
370 215
371 lbs_deb_enter(LBS_DEB_CMD); 216 lbs_deb_enter(LBS_DEB_CMD);
372 217
@@ -410,7 +255,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
410 if (priv->reset_deep_sleep_wakeup) 255 if (priv->reset_deep_sleep_wakeup)
411 priv->reset_deep_sleep_wakeup(priv); 256 priv->reset_deep_sleep_wakeup(priv);
412 priv->is_deep_sleep = 0; 257 priv->is_deep_sleep = 0;
413 lbs_send_confirmwake(priv); 258 lbs_cmd_async(priv, CMD_802_11_WAKEUP_CONFIRM, &cmd,
259 sizeof(cmd));
260 priv->is_host_sleep_activated = 0;
261 wake_up_interruptible(&priv->host_sleep_q);
414 break; 262 break;
415 263
416 case MACREG_INT_CODE_DEEP_SLEEP_AWAKE: 264 case MACREG_INT_CODE_DEEP_SLEEP_AWAKE:
@@ -441,7 +289,7 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
441 * in lbs_ps_wakeup() 289 * in lbs_ps_wakeup()
442 */ 290 */
443 lbs_deb_cmd("waking up ...\n"); 291 lbs_deb_cmd("waking up ...\n");
444 lbs_ps_wakeup(priv, 0); 292 lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false);
445 } 293 }
446 break; 294 break;
447 295
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index de2caac11dd6..651a79c8de8a 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -1,18 +1,13 @@
1#include <linux/module.h>
2#include <linux/dcache.h> 1#include <linux/dcache.h>
3#include <linux/debugfs.h> 2#include <linux/debugfs.h>
4#include <linux/delay.h> 3#include <linux/delay.h>
5#include <linux/mm.h> 4#include <linux/mm.h>
6#include <linux/string.h> 5#include <linux/string.h>
7#include <linux/slab.h> 6#include <linux/slab.h>
8#include <net/iw_handler.h>
9#include <net/lib80211.h>
10 7
11#include "dev.h"
12#include "decl.h" 8#include "decl.h"
13#include "host.h"
14#include "debugfs.h"
15#include "cmd.h" 9#include "cmd.h"
10#include "debugfs.h"
16 11
17static struct dentry *lbs_dir; 12static struct dentry *lbs_dir;
18static char *szStates[] = { 13static char *szStates[] = {
@@ -60,51 +55,6 @@ static ssize_t lbs_dev_info(struct file *file, char __user *userbuf,
60 return res; 55 return res;
61} 56}
62 57
63
64static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
65 size_t count, loff_t *ppos)
66{
67 struct lbs_private *priv = file->private_data;
68 size_t pos = 0;
69 int numscansdone = 0, res;
70 unsigned long addr = get_zeroed_page(GFP_KERNEL);
71 char *buf = (char *)addr;
72 DECLARE_SSID_BUF(ssid);
73 struct bss_descriptor * iter_bss;
74 if (!buf)
75 return -ENOMEM;
76
77 pos += snprintf(buf+pos, len-pos,
78 "# | ch | rssi | bssid | cap | Qual | SSID\n");
79
80 mutex_lock(&priv->lock);
81 list_for_each_entry (iter_bss, &priv->network_list, list) {
82 u16 ibss = (iter_bss->capability & WLAN_CAPABILITY_IBSS);
83 u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY);
84 u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT);
85
86 pos += snprintf(buf+pos, len-pos, "%02u| %03d | %04d | %pM |",
87 numscansdone, iter_bss->channel, iter_bss->rssi,
88 iter_bss->bssid);
89 pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability);
90 pos += snprintf(buf+pos, len-pos, "%c%c%c |",
91 ibss ? 'A' : 'I', privacy ? 'P' : ' ',
92 spectrum_mgmt ? 'S' : ' ');
93 pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi));
94 pos += snprintf(buf+pos, len-pos, " %s\n",
95 print_ssid(ssid, iter_bss->ssid,
96 iter_bss->ssid_len));
97
98 numscansdone++;
99 }
100 mutex_unlock(&priv->lock);
101
102 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
103
104 free_page(addr);
105 return res;
106}
107
108static ssize_t lbs_sleepparams_write(struct file *file, 58static ssize_t lbs_sleepparams_write(struct file *file,
109 const char __user *user_buf, size_t count, 59 const char __user *user_buf, size_t count,
110 loff_t *ppos) 60 loff_t *ppos)
@@ -174,6 +124,70 @@ out_unlock:
174 return ret; 124 return ret;
175} 125}
176 126
127static ssize_t lbs_host_sleep_write(struct file *file,
128 const char __user *user_buf, size_t count,
129 loff_t *ppos)
130{
131 struct lbs_private *priv = file->private_data;
132 ssize_t buf_size, ret;
133 int host_sleep;
134 unsigned long addr = get_zeroed_page(GFP_KERNEL);
135 char *buf = (char *)addr;
136 if (!buf)
137 return -ENOMEM;
138
139 buf_size = min(count, len - 1);
140 if (copy_from_user(buf, user_buf, buf_size)) {
141 ret = -EFAULT;
142 goto out_unlock;
143 }
144 ret = sscanf(buf, "%d", &host_sleep);
145 if (ret != 1) {
146 ret = -EINVAL;
147 goto out_unlock;
148 }
149
150 if (host_sleep == 0)
151 ret = lbs_set_host_sleep(priv, 0);
152 else if (host_sleep == 1) {
153 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
154 lbs_pr_info("wake parameters not configured");
155 ret = -EINVAL;
156 goto out_unlock;
157 }
158 ret = lbs_set_host_sleep(priv, 1);
159 } else {
160 lbs_pr_err("invalid option\n");
161 ret = -EINVAL;
162 }
163
164 if (!ret)
165 ret = count;
166
167out_unlock:
168 free_page(addr);
169 return ret;
170}
171
172static ssize_t lbs_host_sleep_read(struct file *file, char __user *userbuf,
173 size_t count, loff_t *ppos)
174{
175 struct lbs_private *priv = file->private_data;
176 ssize_t ret;
177 size_t pos = 0;
178 unsigned long addr = get_zeroed_page(GFP_KERNEL);
179 char *buf = (char *)addr;
180 if (!buf)
181 return -ENOMEM;
182
183 pos += snprintf(buf, len, "%d\n", priv->is_host_sleep_activated);
184
185 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
186
187 free_page(addr);
188 return ret;
189}
190
177/* 191/*
178 * When calling CMD_802_11_SUBSCRIBE_EVENT with CMD_ACT_GET, me might 192 * When calling CMD_802_11_SUBSCRIBE_EVENT with CMD_ACT_GET, me might
179 * get a bunch of vendor-specific TLVs (a.k.a. IEs) back from the 193 * get a bunch of vendor-specific TLVs (a.k.a. IEs) back from the
@@ -432,30 +446,24 @@ static ssize_t lbs_bcnmiss_write(struct file *file, const char __user *userbuf,
432} 446}
433 447
434 448
435
436static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf, 449static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf,
437 size_t count, loff_t *ppos) 450 size_t count, loff_t *ppos)
438{ 451{
439 struct lbs_private *priv = file->private_data; 452 struct lbs_private *priv = file->private_data;
440 struct lbs_offset_value offval;
441 ssize_t pos = 0; 453 ssize_t pos = 0;
442 int ret; 454 int ret;
443 unsigned long addr = get_zeroed_page(GFP_KERNEL); 455 unsigned long addr = get_zeroed_page(GFP_KERNEL);
444 char *buf = (char *)addr; 456 char *buf = (char *)addr;
457 u32 val = 0;
458
445 if (!buf) 459 if (!buf)
446 return -ENOMEM; 460 return -ENOMEM;
447 461
448 offval.offset = priv->mac_offset; 462 ret = lbs_get_reg(priv, CMD_MAC_REG_ACCESS, priv->mac_offset, &val);
449 offval.value = 0;
450
451 ret = lbs_prepare_and_send_command(priv,
452 CMD_MAC_REG_ACCESS, 0,
453 CMD_OPTION_WAITFORRSP, 0, &offval);
454 mdelay(10); 463 mdelay(10);
455 if (!ret) { 464 if (!ret) {
456 pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", 465 pos = snprintf(buf, len, "MAC[0x%x] = 0x%08x\n",
457 priv->mac_offset, priv->offsetvalue.value); 466 priv->mac_offset, val);
458
459 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 467 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
460 } 468 }
461 free_page(addr); 469 free_page(addr);
@@ -493,7 +501,6 @@ static ssize_t lbs_wrmac_write(struct file *file,
493 struct lbs_private *priv = file->private_data; 501 struct lbs_private *priv = file->private_data;
494 ssize_t res, buf_size; 502 ssize_t res, buf_size;
495 u32 offset, value; 503 u32 offset, value;
496 struct lbs_offset_value offval;
497 unsigned long addr = get_zeroed_page(GFP_KERNEL); 504 unsigned long addr = get_zeroed_page(GFP_KERNEL);
498 char *buf = (char *)addr; 505 char *buf = (char *)addr;
499 if (!buf) 506 if (!buf)
@@ -510,11 +517,7 @@ static ssize_t lbs_wrmac_write(struct file *file,
510 goto out_unlock; 517 goto out_unlock;
511 } 518 }
512 519
513 offval.offset = offset; 520 res = lbs_set_reg(priv, CMD_MAC_REG_ACCESS, offset, value);
514 offval.value = value;
515 res = lbs_prepare_and_send_command(priv,
516 CMD_MAC_REG_ACCESS, 1,
517 CMD_OPTION_WAITFORRSP, 0, &offval);
518 mdelay(10); 521 mdelay(10);
519 522
520 if (!res) 523 if (!res)
@@ -528,25 +531,20 @@ static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf,
528 size_t count, loff_t *ppos) 531 size_t count, loff_t *ppos)
529{ 532{
530 struct lbs_private *priv = file->private_data; 533 struct lbs_private *priv = file->private_data;
531 struct lbs_offset_value offval;
532 ssize_t pos = 0; 534 ssize_t pos = 0;
533 int ret; 535 int ret;
534 unsigned long addr = get_zeroed_page(GFP_KERNEL); 536 unsigned long addr = get_zeroed_page(GFP_KERNEL);
535 char *buf = (char *)addr; 537 char *buf = (char *)addr;
538 u32 val;
539
536 if (!buf) 540 if (!buf)
537 return -ENOMEM; 541 return -ENOMEM;
538 542
539 offval.offset = priv->bbp_offset; 543 ret = lbs_get_reg(priv, CMD_BBP_REG_ACCESS, priv->bbp_offset, &val);
540 offval.value = 0;
541
542 ret = lbs_prepare_and_send_command(priv,
543 CMD_BBP_REG_ACCESS, 0,
544 CMD_OPTION_WAITFORRSP, 0, &offval);
545 mdelay(10); 544 mdelay(10);
546 if (!ret) { 545 if (!ret) {
547 pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", 546 pos = snprintf(buf, len, "BBP[0x%x] = 0x%08x\n",
548 priv->bbp_offset, priv->offsetvalue.value); 547 priv->bbp_offset, val);
549
550 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 548 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
551 } 549 }
552 free_page(addr); 550 free_page(addr);
@@ -585,7 +583,6 @@ static ssize_t lbs_wrbbp_write(struct file *file,
585 struct lbs_private *priv = file->private_data; 583 struct lbs_private *priv = file->private_data;
586 ssize_t res, buf_size; 584 ssize_t res, buf_size;
587 u32 offset, value; 585 u32 offset, value;
588 struct lbs_offset_value offval;
589 unsigned long addr = get_zeroed_page(GFP_KERNEL); 586 unsigned long addr = get_zeroed_page(GFP_KERNEL);
590 char *buf = (char *)addr; 587 char *buf = (char *)addr;
591 if (!buf) 588 if (!buf)
@@ -602,11 +599,7 @@ static ssize_t lbs_wrbbp_write(struct file *file,
602 goto out_unlock; 599 goto out_unlock;
603 } 600 }
604 601
605 offval.offset = offset; 602 res = lbs_set_reg(priv, CMD_BBP_REG_ACCESS, offset, value);
606 offval.value = value;
607 res = lbs_prepare_and_send_command(priv,
608 CMD_BBP_REG_ACCESS, 1,
609 CMD_OPTION_WAITFORRSP, 0, &offval);
610 mdelay(10); 603 mdelay(10);
611 604
612 if (!res) 605 if (!res)
@@ -620,25 +613,20 @@ static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf,
620 size_t count, loff_t *ppos) 613 size_t count, loff_t *ppos)
621{ 614{
622 struct lbs_private *priv = file->private_data; 615 struct lbs_private *priv = file->private_data;
623 struct lbs_offset_value offval;
624 ssize_t pos = 0; 616 ssize_t pos = 0;
625 int ret; 617 int ret;
626 unsigned long addr = get_zeroed_page(GFP_KERNEL); 618 unsigned long addr = get_zeroed_page(GFP_KERNEL);
627 char *buf = (char *)addr; 619 char *buf = (char *)addr;
620 u32 val;
621
628 if (!buf) 622 if (!buf)
629 return -ENOMEM; 623 return -ENOMEM;
630 624
631 offval.offset = priv->rf_offset; 625 ret = lbs_get_reg(priv, CMD_RF_REG_ACCESS, priv->rf_offset, &val);
632 offval.value = 0;
633
634 ret = lbs_prepare_and_send_command(priv,
635 CMD_RF_REG_ACCESS, 0,
636 CMD_OPTION_WAITFORRSP, 0, &offval);
637 mdelay(10); 626 mdelay(10);
638 if (!ret) { 627 if (!ret) {
639 pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", 628 pos = snprintf(buf, len, "RF[0x%x] = 0x%08x\n",
640 priv->rf_offset, priv->offsetvalue.value); 629 priv->rf_offset, val);
641
642 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 630 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
643 } 631 }
644 free_page(addr); 632 free_page(addr);
@@ -677,7 +665,6 @@ static ssize_t lbs_wrrf_write(struct file *file,
677 struct lbs_private *priv = file->private_data; 665 struct lbs_private *priv = file->private_data;
678 ssize_t res, buf_size; 666 ssize_t res, buf_size;
679 u32 offset, value; 667 u32 offset, value;
680 struct lbs_offset_value offval;
681 unsigned long addr = get_zeroed_page(GFP_KERNEL); 668 unsigned long addr = get_zeroed_page(GFP_KERNEL);
682 char *buf = (char *)addr; 669 char *buf = (char *)addr;
683 if (!buf) 670 if (!buf)
@@ -694,11 +681,7 @@ static ssize_t lbs_wrrf_write(struct file *file,
694 goto out_unlock; 681 goto out_unlock;
695 } 682 }
696 683
697 offval.offset = offset; 684 res = lbs_set_reg(priv, CMD_RF_REG_ACCESS, offset, value);
698 offval.value = value;
699 res = lbs_prepare_and_send_command(priv,
700 CMD_RF_REG_ACCESS, 1,
701 CMD_OPTION_WAITFORRSP, 0, &offval);
702 mdelay(10); 685 mdelay(10);
703 686
704 if (!res) 687 if (!res)
@@ -723,10 +706,10 @@ struct lbs_debugfs_files {
723 706
724static const struct lbs_debugfs_files debugfs_files[] = { 707static const struct lbs_debugfs_files debugfs_files[] = {
725 { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, 708 { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
726 { "getscantable", 0444, FOPS(lbs_getscantable,
727 write_file_dummy), },
728 { "sleepparams", 0644, FOPS(lbs_sleepparams_read, 709 { "sleepparams", 0644, FOPS(lbs_sleepparams_read,
729 lbs_sleepparams_write), }, 710 lbs_sleepparams_write), },
711 { "hostsleep", 0644, FOPS(lbs_host_sleep_read,
712 lbs_host_sleep_write), },
730}; 713};
731 714
732static const struct lbs_debugfs_files debugfs_events_files[] = { 715static const struct lbs_debugfs_files debugfs_events_files[] = {
@@ -891,7 +874,7 @@ static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf,
891 874
892 p = buf; 875 p = buf;
893 876
894 d = (struct debug_data *)file->private_data; 877 d = file->private_data;
895 878
896 for (i = 0; i < num_of_items; i++) { 879 for (i = 0; i < num_of_items; i++) {
897 if (d[i].size == 1) 880 if (d[i].size == 1)
@@ -930,7 +913,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
930 char *p0; 913 char *p0;
931 char *p1; 914 char *p1;
932 char *p2; 915 char *p2;
933 struct debug_data *d = (struct debug_data *)f->private_data; 916 struct debug_data *d = f->private_data;
934 917
935 pdata = kmalloc(cnt, GFP_KERNEL); 918 pdata = kmalloc(cnt, GFP_KERNEL);
936 if (pdata == NULL) 919 if (pdata == NULL)
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 709ffcad22ad..1d141fefd767 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -1,3 +1,4 @@
1
1/** 2/**
2 * This file contains declaration referring to 3 * This file contains declaration referring to
3 * functions defined in other source files 4 * functions defined in other source files
@@ -12,6 +13,7 @@
12struct lbs_private; 13struct lbs_private;
13struct sk_buff; 14struct sk_buff;
14struct net_device; 15struct net_device;
16struct cmd_ds_command;
15 17
16 18
17/* ethtool.c */ 19/* ethtool.c */
@@ -34,11 +36,13 @@ int lbs_start_card(struct lbs_private *priv);
34void lbs_stop_card(struct lbs_private *priv); 36void lbs_stop_card(struct lbs_private *priv);
35void lbs_host_to_card_done(struct lbs_private *priv); 37void lbs_host_to_card_done(struct lbs_private *priv);
36 38
39int lbs_rtap_supported(struct lbs_private *priv);
40
37int lbs_set_mac_address(struct net_device *dev, void *addr); 41int lbs_set_mac_address(struct net_device *dev, void *addr);
38void lbs_set_multicast_list(struct net_device *dev); 42void lbs_set_multicast_list(struct net_device *dev);
39 43
40int lbs_suspend(struct lbs_private *priv); 44int lbs_suspend(struct lbs_private *priv);
41void lbs_resume(struct lbs_private *priv); 45int lbs_resume(struct lbs_private *priv);
42 46
43void lbs_queue_event(struct lbs_private *priv, u32 event); 47void lbs_queue_event(struct lbs_private *priv, u32 event);
44void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx); 48void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
@@ -49,5 +53,4 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
49u32 lbs_fw_index_to_data_rate(u8 index); 53u32 lbs_fw_index_to_data_rate(u8 index);
50u8 lbs_data_rate_to_fw_index(u32 rate); 54u8 lbs_data_rate_to_fw_index(u32 rate);
51 55
52
53#endif 56#endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index ea3f10ef4e00..d00c728cec47 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -172,11 +172,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
172#define MRVDRV_MAX_BSS_DESCRIPTS 16 172#define MRVDRV_MAX_BSS_DESCRIPTS 16
173#define MRVDRV_MAX_REGION_CODE 6 173#define MRVDRV_MAX_REGION_CODE 6
174 174
175#define MRVDRV_IGNORE_MULTIPLE_DTIM 0xfffe
176#define MRVDRV_MIN_MULTIPLE_DTIM 1
177#define MRVDRV_MAX_MULTIPLE_DTIM 5
178#define MRVDRV_DEFAULT_MULTIPLE_DTIM 1
179
180#define MRVDRV_DEFAULT_LISTEN_INTERVAL 10 175#define MRVDRV_DEFAULT_LISTEN_INTERVAL 10
181 176
182#define MRVDRV_CHANNELS_PER_SCAN 4 177#define MRVDRV_CHANNELS_PER_SCAN 4
@@ -301,19 +296,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
301#define BAND_G (0x02) 296#define BAND_G (0x02)
302#define ALL_802_11_BANDS (BAND_B | BAND_G) 297#define ALL_802_11_BANDS (BAND_B | BAND_G)
303 298
304/** MACRO DEFINITIONS */
305#define CAL_NF(NF) ((s32)(-(s32)(NF)))
306#define CAL_RSSI(SNR, NF) ((s32)((s32)(SNR) + CAL_NF(NF)))
307#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
308
309#define DEFAULT_BCN_AVG_FACTOR 8
310#define DEFAULT_DATA_AVG_FACTOR 8
311#define AVG_SCALE 100
312#define CAL_AVG_SNR_NF(AVG, SNRNF, N) \
313 (((AVG) == 0) ? ((u16)(SNRNF) * AVG_SCALE) : \
314 ((((int)(AVG) * (N -1)) + ((u16)(SNRNF) * \
315 AVG_SCALE)) / N))
316
317#define MAX_RATES 14 299#define MAX_RATES 14
318 300
319#define MAX_LEDS 8 301#define MAX_LEDS 8
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index a54880e4ad2b..3c7e255e18c7 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -7,8 +7,8 @@
7#define _LBS_DEV_H_ 7#define _LBS_DEV_H_
8 8
9#include "mesh.h" 9#include "mesh.h"
10#include "scan.h" 10#include "defs.h"
11#include "assoc.h" 11#include "host.h"
12 12
13#include <linux/kfifo.h> 13#include <linux/kfifo.h>
14 14
@@ -29,7 +29,6 @@ struct lbs_private {
29 /* Basic networking */ 29 /* Basic networking */
30 struct net_device *dev; 30 struct net_device *dev;
31 u32 connect_status; 31 u32 connect_status;
32 int infra_open;
33 struct work_struct mcast_work; 32 struct work_struct mcast_work;
34 u32 nr_of_multicastmacaddr; 33 u32 nr_of_multicastmacaddr;
35 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN]; 34 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
@@ -37,6 +36,9 @@ struct lbs_private {
37 /* CFG80211 */ 36 /* CFG80211 */
38 struct wireless_dev *wdev; 37 struct wireless_dev *wdev;
39 bool wiphy_registered; 38 bool wiphy_registered;
39 struct cfg80211_scan_request *scan_req;
40 u8 assoc_bss[ETH_ALEN];
41 u8 disassoc_reason;
40 42
41 /* Mesh */ 43 /* Mesh */
42 struct net_device *mesh_dev; /* Virtual device */ 44 struct net_device *mesh_dev; /* Virtual device */
@@ -49,10 +51,6 @@ struct lbs_private {
49 u8 mesh_ssid_len; 51 u8 mesh_ssid_len;
50#endif 52#endif
51 53
52 /* Monitor mode */
53 struct net_device *rtap_net_dev;
54 u32 monitormode;
55
56 /* Debugfs */ 54 /* Debugfs */
57 struct dentry *debugfs_dir; 55 struct dentry *debugfs_dir;
58 struct dentry *debugfs_debug; 56 struct dentry *debugfs_debug;
@@ -66,7 +64,6 @@ struct lbs_private {
66 u32 mac_offset; 64 u32 mac_offset;
67 u32 bbp_offset; 65 u32 bbp_offset;
68 u32 rf_offset; 66 u32 rf_offset;
69 struct lbs_offset_value offsetvalue;
70 67
71 /* Power management */ 68 /* Power management */
72 u16 psmode; 69 u16 psmode;
@@ -75,6 +72,7 @@ struct lbs_private {
75 72
76 /* Deep sleep */ 73 /* Deep sleep */
77 int is_deep_sleep; 74 int is_deep_sleep;
75 int deep_sleep_required;
78 int is_auto_deep_sleep_enabled; 76 int is_auto_deep_sleep_enabled;
79 int wakeup_dev_required; 77 int wakeup_dev_required;
80 int is_activity_detected; 78 int is_activity_detected;
@@ -82,6 +80,11 @@ struct lbs_private {
82 wait_queue_head_t ds_awake_q; 80 wait_queue_head_t ds_awake_q;
83 struct timer_list auto_deepsleep_timer; 81 struct timer_list auto_deepsleep_timer;
84 82
83 /* Host sleep*/
84 int is_host_sleep_configured;
85 int is_host_sleep_activated;
86 wait_queue_head_t host_sleep_q;
87
85 /* Hardware access */ 88 /* Hardware access */
86 void *card; 89 void *card;
87 u8 fw_ready; 90 u8 fw_ready;
@@ -108,12 +111,10 @@ struct lbs_private {
108 struct cmd_ctrl_node *cur_cmd; 111 struct cmd_ctrl_node *cur_cmd;
109 struct list_head cmdfreeq; /* free command buffers */ 112 struct list_head cmdfreeq; /* free command buffers */
110 struct list_head cmdpendingq; /* pending command buffers */ 113 struct list_head cmdpendingq; /* pending command buffers */
111 wait_queue_head_t cmd_pending;
112 struct timer_list command_timer; 114 struct timer_list command_timer;
113 int cmd_timed_out; 115 int cmd_timed_out;
114 116
115 /* Command responses sent from the hardware to the driver */ 117 /* Command responses sent from the hardware to the driver */
116 int cur_cmd_retcode;
117 u8 resp_idx; 118 u8 resp_idx;
118 u8 resp_buf[2][LBS_UPLD_SIZE]; 119 u8 resp_buf[2][LBS_UPLD_SIZE];
119 u32 resp_len[2]; 120 u32 resp_len[2];
@@ -127,14 +128,10 @@ struct lbs_private {
127 struct workqueue_struct *work_thread; 128 struct workqueue_struct *work_thread;
128 129
129 /** Encryption stuff */ 130 /** Encryption stuff */
130 struct lbs_802_11_security secinfo;
131 struct enc_key wpa_mcast_key;
132 struct enc_key wpa_unicast_key;
133 u8 wpa_ie[MAX_WPA_IE_LEN];
134 u8 wpa_ie_len;
135 u16 wep_tx_keyidx;
136 struct enc_key wep_keys[4];
137 u8 authtype_auto; 131 u8 authtype_auto;
132 u8 wep_tx_key;
133 u8 wep_key[4][WLAN_KEY_LEN_WEP104];
134 u8 wep_key_len[4];
138 135
139 /* Wake On LAN */ 136 /* Wake On LAN */
140 uint32_t wol_criteria; 137 uint32_t wol_criteria;
@@ -155,6 +152,7 @@ struct lbs_private {
155 /* NIC/link operation characteristics */ 152 /* NIC/link operation characteristics */
156 u16 mac_control; 153 u16 mac_control;
157 u8 radio_on; 154 u8 radio_on;
155 u8 cur_rate;
158 u8 channel; 156 u8 channel;
159 s16 txpower_cur; 157 s16 txpower_cur;
160 s16 txpower_min; 158 s16 txpower_min;
@@ -163,42 +161,6 @@ struct lbs_private {
163 /** Scanning */ 161 /** Scanning */
164 struct delayed_work scan_work; 162 struct delayed_work scan_work;
165 int scan_channel; 163 int scan_channel;
166 /* remember which channel was scanned last, != 0 if currently scanning */
167 u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
168 u8 scan_ssid_len;
169
170 /* Associating */
171 struct delayed_work assoc_work;
172 struct current_bss_params curbssparams;
173 u8 mode;
174 struct list_head network_list;
175 struct list_head network_free_list;
176 struct bss_descriptor *networks;
177 struct assoc_request * pending_assoc_req;
178 struct assoc_request * in_progress_assoc_req;
179 uint16_t enablehwauto;
180
181 /* ADHOC */
182 u16 beacon_period;
183 u8 beacon_enable;
184 u8 adhoccreate;
185
186 /* WEXT */
187 char name[DEV_NAME_LEN];
188 u8 nodename[16];
189 struct iw_statistics wstats;
190 u8 cur_rate;
191#define MAX_REGION_CHANNEL_NUM 2
192 struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
193
194 /** Requested Signal Strength*/
195 u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
196 u16 NF[MAX_TYPE_B][MAX_TYPE_AVG];
197 u8 RSSI[MAX_TYPE_B][MAX_TYPE_AVG];
198 u8 rawSNR[DEFAULT_DATA_AVG_FACTOR];
199 u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
200 u16 nextSNRNF;
201 u16 numSNRNF;
202}; 164};
203 165
204extern struct cmd_confirm_sleep confirm_sleep; 166extern struct cmd_confirm_sleep confirm_sleep;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 3804a58d7f4e..50193aac679e 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -2,13 +2,8 @@
2#include <linux/ethtool.h> 2#include <linux/ethtool.h>
3#include <linux/delay.h> 3#include <linux/delay.h>
4 4
5#include "host.h"
6#include "decl.h" 5#include "decl.h"
7#include "defs.h"
8#include "dev.h"
9#include "wext.h"
10#include "cmd.h" 6#include "cmd.h"
11#include "mesh.h"
12 7
13 8
14static void lbs_ethtool_get_drvinfo(struct net_device *dev, 9static void lbs_ethtool_get_drvinfo(struct net_device *dev,
@@ -69,14 +64,11 @@ static void lbs_ethtool_get_wol(struct net_device *dev,
69{ 64{
70 struct lbs_private *priv = dev->ml_priv; 65 struct lbs_private *priv = dev->ml_priv;
71 66
72 if (priv->wol_criteria == 0xffffffff) {
73 /* Interface driver didn't configure wake */
74 wol->supported = wol->wolopts = 0;
75 return;
76 }
77
78 wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY; 67 wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY;
79 68
69 if (priv->wol_criteria == EHS_REMOVE_WAKEUP)
70 return;
71
80 if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA) 72 if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA)
81 wol->wolopts |= WAKE_UCAST; 73 wol->wolopts |= WAKE_UCAST;
82 if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA) 74 if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA)
@@ -91,23 +83,22 @@ static int lbs_ethtool_set_wol(struct net_device *dev,
91 struct ethtool_wolinfo *wol) 83 struct ethtool_wolinfo *wol)
92{ 84{
93 struct lbs_private *priv = dev->ml_priv; 85 struct lbs_private *priv = dev->ml_priv;
94 uint32_t criteria = 0;
95 86
96 if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY)) 87 if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
97 return -EOPNOTSUPP; 88 return -EOPNOTSUPP;
98 89
90 priv->wol_criteria = 0;
99 if (wol->wolopts & WAKE_UCAST) 91 if (wol->wolopts & WAKE_UCAST)
100 criteria |= EHS_WAKE_ON_UNICAST_DATA; 92 priv->wol_criteria |= EHS_WAKE_ON_UNICAST_DATA;
101 if (wol->wolopts & WAKE_MCAST) 93 if (wol->wolopts & WAKE_MCAST)
102 criteria |= EHS_WAKE_ON_MULTICAST_DATA; 94 priv->wol_criteria |= EHS_WAKE_ON_MULTICAST_DATA;
103 if (wol->wolopts & WAKE_BCAST) 95 if (wol->wolopts & WAKE_BCAST)
104 criteria |= EHS_WAKE_ON_BROADCAST_DATA; 96 priv->wol_criteria |= EHS_WAKE_ON_BROADCAST_DATA;
105 if (wol->wolopts & WAKE_PHY) 97 if (wol->wolopts & WAKE_PHY)
106 criteria |= EHS_WAKE_ON_MAC_EVENT; 98 priv->wol_criteria |= EHS_WAKE_ON_MAC_EVENT;
107 if (wol->wolopts == 0) 99 if (wol->wolopts == 0)
108 criteria |= EHS_REMOVE_WAKEUP; 100 priv->wol_criteria |= EHS_REMOVE_WAKEUP;
109 101 return 0;
110 return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
111} 102}
112 103
113const struct ethtool_ops lbs_ethtool_ops = { 104const struct ethtool_ops lbs_ethtool_ops = {
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 3809c0b49464..5eac1351a021 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -94,11 +94,9 @@
94#define CMD_802_11_BEACON_CTRL 0x00b0 94#define CMD_802_11_BEACON_CTRL 0x00b0
95 95
96/* For the IEEE Power Save */ 96/* For the IEEE Power Save */
97#define CMD_SUBCMD_ENTER_PS 0x0030 97#define PS_MODE_ACTION_ENTER_PS 0x0030
98#define CMD_SUBCMD_EXIT_PS 0x0031 98#define PS_MODE_ACTION_EXIT_PS 0x0031
99#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 99#define PS_MODE_ACTION_SLEEP_CONFIRMED 0x0034
100#define CMD_SUBCMD_FULL_POWERDOWN 0x0035
101#define CMD_SUBCMD_FULL_POWERUP 0x0036
102 100
103#define CMD_ENABLE_RSN 0x0001 101#define CMD_ENABLE_RSN 0x0001
104#define CMD_DISABLE_RSN 0x0000 102#define CMD_DISABLE_RSN 0x0000
@@ -163,11 +161,6 @@
163#define CMD_ACT_SET_TX_FIX_RATE 0x0001 161#define CMD_ACT_SET_TX_FIX_RATE 0x0001
164#define CMD_ACT_GET_TX_RATE 0x0002 162#define CMD_ACT_GET_TX_RATE 0x0002
165 163
166/* Define action or option for CMD_802_11_PS_MODE */
167#define CMD_TYPE_CAM 0x0000
168#define CMD_TYPE_MAX_PSP 0x0001
169#define CMD_TYPE_FAST_PSP 0x0002
170
171/* Options for CMD_802_11_FW_WAKE_METHOD */ 164/* Options for CMD_802_11_FW_WAKE_METHOD */
172#define CMD_WAKE_METHOD_UNCHANGED 0x0000 165#define CMD_WAKE_METHOD_UNCHANGED 0x0000
173#define CMD_WAKE_METHOD_COMMAND_INT 0x0001 166#define CMD_WAKE_METHOD_COMMAND_INT 0x0001
@@ -326,7 +319,7 @@ struct txpd {
326 u8 pktdelay_2ms; 319 u8 pktdelay_2ms;
327 /* reserved */ 320 /* reserved */
328 u8 reserved1; 321 u8 reserved1;
329} __attribute__ ((packed)); 322} __packed;
330 323
331/* RxPD Descriptor */ 324/* RxPD Descriptor */
332struct rxpd { 325struct rxpd {
@@ -339,8 +332,8 @@ struct rxpd {
339 u8 bss_type; 332 u8 bss_type;
340 /* BSS number */ 333 /* BSS number */
341 u8 bss_num; 334 u8 bss_num;
342 } __attribute__ ((packed)) bss; 335 } __packed bss;
343 } __attribute__ ((packed)) u; 336 } __packed u;
344 337
345 /* SNR */ 338 /* SNR */
346 u8 snr; 339 u8 snr;
@@ -366,14 +359,14 @@ struct rxpd {
366 /* Pkt Priority */ 359 /* Pkt Priority */
367 u8 priority; 360 u8 priority;
368 u8 reserved[3]; 361 u8 reserved[3];
369} __attribute__ ((packed)); 362} __packed;
370 363
371struct cmd_header { 364struct cmd_header {
372 __le16 command; 365 __le16 command;
373 __le16 size; 366 __le16 size;
374 __le16 seqnum; 367 __le16 seqnum;
375 __le16 result; 368 __le16 result;
376} __attribute__ ((packed)); 369} __packed;
377 370
378/* Generic structure to hold all key types. */ 371/* Generic structure to hold all key types. */
379struct enc_key { 372struct enc_key {
@@ -387,7 +380,23 @@ struct enc_key {
387struct lbs_offset_value { 380struct lbs_offset_value {
388 u32 offset; 381 u32 offset;
389 u32 value; 382 u32 value;
390} __attribute__ ((packed)); 383} __packed;
384
385#define MAX_11D_TRIPLETS 83
386
387struct mrvl_ie_domain_param_set {
388 struct mrvl_ie_header header;
389
390 u8 country_code[3];
391 struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS];
392} __packed;
393
394struct cmd_ds_802_11d_domain_info {
395 struct cmd_header hdr;
396
397 __le16 action;
398 struct mrvl_ie_domain_param_set domain;
399} __packed;
391 400
392/* 401/*
393 * Define data structure for CMD_GET_HW_SPEC 402 * Define data structure for CMD_GET_HW_SPEC
@@ -426,7 +435,7 @@ struct cmd_ds_get_hw_spec {
426 435
427 /*FW/HW capability */ 436 /*FW/HW capability */
428 __le32 fwcapinfo; 437 __le32 fwcapinfo;
429} __attribute__ ((packed)); 438} __packed;
430 439
431struct cmd_ds_802_11_subscribe_event { 440struct cmd_ds_802_11_subscribe_event {
432 struct cmd_header hdr; 441 struct cmd_header hdr;
@@ -440,7 +449,7 @@ struct cmd_ds_802_11_subscribe_event {
440 * bump this up a bit. 449 * bump this up a bit.
441 */ 450 */
442 uint8_t tlv[128]; 451 uint8_t tlv[128];
443} __attribute__ ((packed)); 452} __packed;
444 453
445/* 454/*
446 * This scan handle Country Information IE(802.11d compliant) 455 * This scan handle Country Information IE(802.11d compliant)
@@ -452,7 +461,7 @@ struct cmd_ds_802_11_scan {
452 uint8_t bsstype; 461 uint8_t bsstype;
453 uint8_t bssid[ETH_ALEN]; 462 uint8_t bssid[ETH_ALEN];
454 uint8_t tlvbuffer[0]; 463 uint8_t tlvbuffer[0];
455} __attribute__ ((packed)); 464} __packed;
456 465
457struct cmd_ds_802_11_scan_rsp { 466struct cmd_ds_802_11_scan_rsp {
458 struct cmd_header hdr; 467 struct cmd_header hdr;
@@ -460,7 +469,7 @@ struct cmd_ds_802_11_scan_rsp {
460 __le16 bssdescriptsize; 469 __le16 bssdescriptsize;
461 uint8_t nr_sets; 470 uint8_t nr_sets;
462 uint8_t bssdesc_and_tlvbuffer[0]; 471 uint8_t bssdesc_and_tlvbuffer[0];
463} __attribute__ ((packed)); 472} __packed;
464 473
465struct cmd_ds_802_11_get_log { 474struct cmd_ds_802_11_get_log {
466 struct cmd_header hdr; 475 struct cmd_header hdr;
@@ -478,20 +487,20 @@ struct cmd_ds_802_11_get_log {
478 __le32 fcserror; 487 __le32 fcserror;
479 __le32 txframe; 488 __le32 txframe;
480 __le32 wepundecryptable; 489 __le32 wepundecryptable;
481} __attribute__ ((packed)); 490} __packed;
482 491
483struct cmd_ds_mac_control { 492struct cmd_ds_mac_control {
484 struct cmd_header hdr; 493 struct cmd_header hdr;
485 __le16 action; 494 __le16 action;
486 u16 reserved; 495 u16 reserved;
487} __attribute__ ((packed)); 496} __packed;
488 497
489struct cmd_ds_mac_multicast_adr { 498struct cmd_ds_mac_multicast_adr {
490 struct cmd_header hdr; 499 struct cmd_header hdr;
491 __le16 action; 500 __le16 action;
492 __le16 nr_of_adrs; 501 __le16 nr_of_adrs;
493 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; 502 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
494} __attribute__ ((packed)); 503} __packed;
495 504
496struct cmd_ds_802_11_authenticate { 505struct cmd_ds_802_11_authenticate {
497 struct cmd_header hdr; 506 struct cmd_header hdr;
@@ -499,14 +508,14 @@ struct cmd_ds_802_11_authenticate {
499 u8 bssid[ETH_ALEN]; 508 u8 bssid[ETH_ALEN];
500 u8 authtype; 509 u8 authtype;
501 u8 reserved[10]; 510 u8 reserved[10];
502} __attribute__ ((packed)); 511} __packed;
503 512
504struct cmd_ds_802_11_deauthenticate { 513struct cmd_ds_802_11_deauthenticate {
505 struct cmd_header hdr; 514 struct cmd_header hdr;
506 515
507 u8 macaddr[ETH_ALEN]; 516 u8 macaddr[ETH_ALEN];
508 __le16 reasoncode; 517 __le16 reasoncode;
509} __attribute__ ((packed)); 518} __packed;
510 519
511struct cmd_ds_802_11_associate { 520struct cmd_ds_802_11_associate {
512 struct cmd_header hdr; 521 struct cmd_header hdr;
@@ -517,7 +526,7 @@ struct cmd_ds_802_11_associate {
517 __le16 bcnperiod; 526 __le16 bcnperiod;
518 u8 dtimperiod; 527 u8 dtimperiod;
519 u8 iebuf[512]; /* Enough for required and most optional IEs */ 528 u8 iebuf[512]; /* Enough for required and most optional IEs */
520} __attribute__ ((packed)); 529} __packed;
521 530
522struct cmd_ds_802_11_associate_response { 531struct cmd_ds_802_11_associate_response {
523 struct cmd_header hdr; 532 struct cmd_header hdr;
@@ -526,7 +535,7 @@ struct cmd_ds_802_11_associate_response {
526 __le16 statuscode; 535 __le16 statuscode;
527 __le16 aid; 536 __le16 aid;
528 u8 iebuf[512]; 537 u8 iebuf[512];
529} __attribute__ ((packed)); 538} __packed;
530 539
531struct cmd_ds_802_11_set_wep { 540struct cmd_ds_802_11_set_wep {
532 struct cmd_header hdr; 541 struct cmd_header hdr;
@@ -540,7 +549,7 @@ struct cmd_ds_802_11_set_wep {
540 /* 40, 128bit or TXWEP */ 549 /* 40, 128bit or TXWEP */
541 uint8_t keytype[4]; 550 uint8_t keytype[4];
542 uint8_t keymaterial[4][16]; 551 uint8_t keymaterial[4][16];
543} __attribute__ ((packed)); 552} __packed;
544 553
545struct cmd_ds_802_11_snmp_mib { 554struct cmd_ds_802_11_snmp_mib {
546 struct cmd_header hdr; 555 struct cmd_header hdr;
@@ -549,40 +558,33 @@ struct cmd_ds_802_11_snmp_mib {
549 __le16 oid; 558 __le16 oid;
550 __le16 bufsize; 559 __le16 bufsize;
551 u8 value[128]; 560 u8 value[128];
552} __attribute__ ((packed)); 561} __packed;
553
554struct cmd_ds_mac_reg_access {
555 __le16 action;
556 __le16 offset;
557 __le32 value;
558} __attribute__ ((packed));
559 562
560struct cmd_ds_bbp_reg_access { 563struct cmd_ds_reg_access {
561 __le16 action; 564 struct cmd_header hdr;
562 __le16 offset;
563 u8 value;
564 u8 reserved[3];
565} __attribute__ ((packed));
566 565
567struct cmd_ds_rf_reg_access {
568 __le16 action; 566 __le16 action;
569 __le16 offset; 567 __le16 offset;
570 u8 value; 568 union {
571 u8 reserved[3]; 569 u8 bbp_rf; /* for BBP and RF registers */
572} __attribute__ ((packed)); 570 __le32 mac; /* for MAC registers */
571 } value;
572} __packed;
573 573
574struct cmd_ds_802_11_radio_control { 574struct cmd_ds_802_11_radio_control {
575 struct cmd_header hdr; 575 struct cmd_header hdr;
576 576
577 __le16 action; 577 __le16 action;
578 __le16 control; 578 __le16 control;
579} __attribute__ ((packed)); 579} __packed;
580 580
581struct cmd_ds_802_11_beacon_control { 581struct cmd_ds_802_11_beacon_control {
582 struct cmd_header hdr;
583
582 __le16 action; 584 __le16 action;
583 __le16 beacon_enable; 585 __le16 beacon_enable;
584 __le16 beacon_period; 586 __le16 beacon_period;
585} __attribute__ ((packed)); 587} __packed;
586 588
587struct cmd_ds_802_11_sleep_params { 589struct cmd_ds_802_11_sleep_params {
588 struct cmd_header hdr; 590 struct cmd_header hdr;
@@ -607,7 +609,7 @@ struct cmd_ds_802_11_sleep_params {
607 609
608 /* reserved field, should be set to zero */ 610 /* reserved field, should be set to zero */
609 __le16 reserved; 611 __le16 reserved;
610} __attribute__ ((packed)); 612} __packed;
611 613
612struct cmd_ds_802_11_rf_channel { 614struct cmd_ds_802_11_rf_channel {
613 struct cmd_header hdr; 615 struct cmd_header hdr;
@@ -617,30 +619,30 @@ struct cmd_ds_802_11_rf_channel {
617 __le16 rftype; /* unused */ 619 __le16 rftype; /* unused */
618 __le16 reserved; /* unused */ 620 __le16 reserved; /* unused */
619 u8 channellist[32]; /* unused */ 621 u8 channellist[32]; /* unused */
620} __attribute__ ((packed)); 622} __packed;
621 623
622struct cmd_ds_802_11_rssi { 624struct cmd_ds_802_11_rssi {
623 /* weighting factor */ 625 struct cmd_header hdr;
624 __le16 N;
625 626
626 __le16 reserved_0; 627 /* request: number of beacons (N) to average the SNR and NF over
627 __le16 reserved_1; 628 * response: SNR of most recent beacon
628 __le16 reserved_2; 629 */
629} __attribute__ ((packed)); 630 __le16 n_or_snr;
630 631
631struct cmd_ds_802_11_rssi_rsp { 632 /* The following fields are only set in the response.
632 __le16 SNR; 633 * In the request these are reserved and should be set to 0.
633 __le16 noisefloor; 634 */
634 __le16 avgSNR; 635 __le16 nf; /* most recent beacon noise floor */
635 __le16 avgnoisefloor; 636 __le16 avg_snr; /* average SNR weighted by N from request */
636} __attribute__ ((packed)); 637 __le16 avg_nf; /* average noise floor weighted by N from request */
638} __packed;
637 639
638struct cmd_ds_802_11_mac_address { 640struct cmd_ds_802_11_mac_address {
639 struct cmd_header hdr; 641 struct cmd_header hdr;
640 642
641 __le16 action; 643 __le16 action;
642 u8 macadd[ETH_ALEN]; 644 u8 macadd[ETH_ALEN];
643} __attribute__ ((packed)); 645} __packed;
644 646
645struct cmd_ds_802_11_rf_tx_power { 647struct cmd_ds_802_11_rf_tx_power {
646 struct cmd_header hdr; 648 struct cmd_header hdr;
@@ -649,34 +651,61 @@ struct cmd_ds_802_11_rf_tx_power {
649 __le16 curlevel; 651 __le16 curlevel;
650 s8 maxlevel; 652 s8 maxlevel;
651 s8 minlevel; 653 s8 minlevel;
652} __attribute__ ((packed)); 654} __packed;
653 655
656/* MONITOR_MODE only exists in OLPC v5 firmware */
654struct cmd_ds_802_11_monitor_mode { 657struct cmd_ds_802_11_monitor_mode {
658 struct cmd_header hdr;
659
655 __le16 action; 660 __le16 action;
656 __le16 mode; 661 __le16 mode;
657} __attribute__ ((packed)); 662} __packed;
658 663
659struct cmd_ds_set_boot2_ver { 664struct cmd_ds_set_boot2_ver {
660 struct cmd_header hdr; 665 struct cmd_header hdr;
661 666
662 __le16 action; 667 __le16 action;
663 __le16 version; 668 __le16 version;
664} __attribute__ ((packed)); 669} __packed;
665 670
666struct cmd_ds_802_11_fw_wake_method { 671struct cmd_ds_802_11_fw_wake_method {
667 struct cmd_header hdr; 672 struct cmd_header hdr;
668 673
669 __le16 action; 674 __le16 action;
670 __le16 method; 675 __le16 method;
671} __attribute__ ((packed)); 676} __packed;
672 677
673struct cmd_ds_802_11_ps_mode { 678struct cmd_ds_802_11_ps_mode {
679 struct cmd_header hdr;
680
674 __le16 action; 681 __le16 action;
682
683 /* Interval for keepalive in PS mode:
684 * 0x0000 = don't change
685 * 0x001E = firmware default
686 * 0xFFFF = disable
687 */
675 __le16 nullpktinterval; 688 __le16 nullpktinterval;
689
690 /* Number of DTIM intervals to wake up for:
691 * 0 = don't change
692 * 1 = firmware default
693 * 5 = max
694 */
676 __le16 multipledtim; 695 __le16 multipledtim;
696
677 __le16 reserved; 697 __le16 reserved;
678 __le16 locallisteninterval; 698 __le16 locallisteninterval;
679} __attribute__ ((packed)); 699
700 /* AdHoc awake period (FW v9+ only):
701 * 0 = don't change
702 * 1 = always awake (IEEE standard behavior)
703 * 2 - 31 = sleep for (n - 1) periods and awake for 1 period
704 * 32 - 254 = invalid
705 * 255 = sleep at each ATIM
706 */
707 __le16 adhoc_awake_period;
708} __packed;
680 709
681struct cmd_confirm_sleep { 710struct cmd_confirm_sleep {
682 struct cmd_header hdr; 711 struct cmd_header hdr;
@@ -686,7 +715,7 @@ struct cmd_confirm_sleep {
686 __le16 multipledtim; 715 __le16 multipledtim;
687 __le16 reserved; 716 __le16 reserved;
688 __le16 locallisteninterval; 717 __le16 locallisteninterval;
689} __attribute__ ((packed)); 718} __packed;
690 719
691struct cmd_ds_802_11_data_rate { 720struct cmd_ds_802_11_data_rate {
692 struct cmd_header hdr; 721 struct cmd_header hdr;
@@ -694,14 +723,14 @@ struct cmd_ds_802_11_data_rate {
694 __le16 action; 723 __le16 action;
695 __le16 reserved; 724 __le16 reserved;
696 u8 rates[MAX_RATES]; 725 u8 rates[MAX_RATES];
697} __attribute__ ((packed)); 726} __packed;
698 727
699struct cmd_ds_802_11_rate_adapt_rateset { 728struct cmd_ds_802_11_rate_adapt_rateset {
700 struct cmd_header hdr; 729 struct cmd_header hdr;
701 __le16 action; 730 __le16 action;
702 __le16 enablehwauto; 731 __le16 enablehwauto;
703 __le16 bitmap; 732 __le16 bitmap;
704} __attribute__ ((packed)); 733} __packed;
705 734
706struct cmd_ds_802_11_ad_hoc_start { 735struct cmd_ds_802_11_ad_hoc_start {
707 struct cmd_header hdr; 736 struct cmd_header hdr;
@@ -718,14 +747,14 @@ struct cmd_ds_802_11_ad_hoc_start {
718 __le16 capability; 747 __le16 capability;
719 u8 rates[MAX_RATES]; 748 u8 rates[MAX_RATES];
720 u8 tlv_memory_size_pad[100]; 749 u8 tlv_memory_size_pad[100];
721} __attribute__ ((packed)); 750} __packed;
722 751
723struct cmd_ds_802_11_ad_hoc_result { 752struct cmd_ds_802_11_ad_hoc_result {
724 struct cmd_header hdr; 753 struct cmd_header hdr;
725 754
726 u8 pad[3]; 755 u8 pad[3];
727 u8 bssid[ETH_ALEN]; 756 u8 bssid[ETH_ALEN];
728} __attribute__ ((packed)); 757} __packed;
729 758
730struct adhoc_bssdesc { 759struct adhoc_bssdesc {
731 u8 bssid[ETH_ALEN]; 760 u8 bssid[ETH_ALEN];
@@ -746,7 +775,7 @@ struct adhoc_bssdesc {
746 * Adhoc join command and will cause a binary layout mismatch with 775 * Adhoc join command and will cause a binary layout mismatch with
747 * the firmware 776 * the firmware
748 */ 777 */
749} __attribute__ ((packed)); 778} __packed;
750 779
751struct cmd_ds_802_11_ad_hoc_join { 780struct cmd_ds_802_11_ad_hoc_join {
752 struct cmd_header hdr; 781 struct cmd_header hdr;
@@ -754,18 +783,18 @@ struct cmd_ds_802_11_ad_hoc_join {
754 struct adhoc_bssdesc bss; 783 struct adhoc_bssdesc bss;
755 __le16 failtimeout; /* Reserved on v9 and later */ 784 __le16 failtimeout; /* Reserved on v9 and later */
756 __le16 probedelay; /* Reserved on v9 and later */ 785 __le16 probedelay; /* Reserved on v9 and later */
757} __attribute__ ((packed)); 786} __packed;
758 787
759struct cmd_ds_802_11_ad_hoc_stop { 788struct cmd_ds_802_11_ad_hoc_stop {
760 struct cmd_header hdr; 789 struct cmd_header hdr;
761} __attribute__ ((packed)); 790} __packed;
762 791
763struct cmd_ds_802_11_enable_rsn { 792struct cmd_ds_802_11_enable_rsn {
764 struct cmd_header hdr; 793 struct cmd_header hdr;
765 794
766 __le16 action; 795 __le16 action;
767 __le16 enable; 796 __le16 enable;
768} __attribute__ ((packed)); 797} __packed;
769 798
770struct MrvlIEtype_keyParamSet { 799struct MrvlIEtype_keyParamSet {
771 /* type ID */ 800 /* type ID */
@@ -785,7 +814,7 @@ struct MrvlIEtype_keyParamSet {
785 814
786 /* key material of size keylen */ 815 /* key material of size keylen */
787 u8 key[32]; 816 u8 key[32];
788} __attribute__ ((packed)); 817} __packed;
789 818
790#define MAX_WOL_RULES 16 819#define MAX_WOL_RULES 16
791 820
@@ -797,7 +826,7 @@ struct host_wol_rule {
797 __le16 reserve; 826 __le16 reserve;
798 __be32 sig_mask; 827 __be32 sig_mask;
799 __be32 signature; 828 __be32 signature;
800} __attribute__ ((packed)); 829} __packed;
801 830
802struct wol_config { 831struct wol_config {
803 uint8_t action; 832 uint8_t action;
@@ -805,7 +834,7 @@ struct wol_config {
805 uint8_t no_rules_in_cmd; 834 uint8_t no_rules_in_cmd;
806 uint8_t result; 835 uint8_t result;
807 struct host_wol_rule rule[MAX_WOL_RULES]; 836 struct host_wol_rule rule[MAX_WOL_RULES];
808} __attribute__ ((packed)); 837} __packed;
809 838
810struct cmd_ds_host_sleep { 839struct cmd_ds_host_sleep {
811 struct cmd_header hdr; 840 struct cmd_header hdr;
@@ -813,7 +842,7 @@ struct cmd_ds_host_sleep {
813 uint8_t gpio; 842 uint8_t gpio;
814 uint16_t gap; 843 uint16_t gap;
815 struct wol_config wol_conf; 844 struct wol_config wol_conf;
816} __attribute__ ((packed)); 845} __packed;
817 846
818 847
819 848
@@ -822,7 +851,7 @@ struct cmd_ds_802_11_key_material {
822 851
823 __le16 action; 852 __le16 action;
824 struct MrvlIEtype_keyParamSet keyParamSet[2]; 853 struct MrvlIEtype_keyParamSet keyParamSet[2];
825} __attribute__ ((packed)); 854} __packed;
826 855
827struct cmd_ds_802_11_eeprom_access { 856struct cmd_ds_802_11_eeprom_access {
828 struct cmd_header hdr; 857 struct cmd_header hdr;
@@ -832,7 +861,7 @@ struct cmd_ds_802_11_eeprom_access {
832 /* firmware says it returns a maximum of 20 bytes */ 861 /* firmware says it returns a maximum of 20 bytes */
833#define LBS_EEPROM_READ_LEN 20 862#define LBS_EEPROM_READ_LEN 20
834 u8 value[LBS_EEPROM_READ_LEN]; 863 u8 value[LBS_EEPROM_READ_LEN];
835} __attribute__ ((packed)); 864} __packed;
836 865
837struct cmd_ds_802_11_tpc_cfg { 866struct cmd_ds_802_11_tpc_cfg {
838 struct cmd_header hdr; 867 struct cmd_header hdr;
@@ -843,7 +872,7 @@ struct cmd_ds_802_11_tpc_cfg {
843 int8_t P1; 872 int8_t P1;
844 int8_t P2; 873 int8_t P2;
845 uint8_t usesnr; 874 uint8_t usesnr;
846} __attribute__ ((packed)); 875} __packed;
847 876
848 877
849struct cmd_ds_802_11_pa_cfg { 878struct cmd_ds_802_11_pa_cfg {
@@ -854,16 +883,21 @@ struct cmd_ds_802_11_pa_cfg {
854 int8_t P0; 883 int8_t P0;
855 int8_t P1; 884 int8_t P1;
856 int8_t P2; 885 int8_t P2;
857} __attribute__ ((packed)); 886} __packed;
858 887
859 888
860struct cmd_ds_802_11_led_ctrl { 889struct cmd_ds_802_11_led_ctrl {
890 struct cmd_header hdr;
891
861 __le16 action; 892 __le16 action;
862 __le16 numled; 893 __le16 numled;
863 u8 data[256]; 894 u8 data[256];
864} __attribute__ ((packed)); 895} __packed;
865 896
897/* Automatic Frequency Control */
866struct cmd_ds_802_11_afc { 898struct cmd_ds_802_11_afc {
899 struct cmd_header hdr;
900
867 __le16 afc_auto; 901 __le16 afc_auto;
868 union { 902 union {
869 struct { 903 struct {
@@ -875,24 +909,28 @@ struct cmd_ds_802_11_afc {
875 __le16 carrier_offset; /* signed */ 909 __le16 carrier_offset; /* signed */
876 }; 910 };
877 }; 911 };
878} __attribute__ ((packed)); 912} __packed;
879 913
880struct cmd_tx_rate_query { 914struct cmd_tx_rate_query {
881 __le16 txrate; 915 __le16 txrate;
882} __attribute__ ((packed)); 916} __packed;
883 917
884struct cmd_ds_get_tsf { 918struct cmd_ds_get_tsf {
885 __le64 tsfvalue; 919 __le64 tsfvalue;
886} __attribute__ ((packed)); 920} __packed;
887 921
888struct cmd_ds_bt_access { 922struct cmd_ds_bt_access {
923 struct cmd_header hdr;
924
889 __le16 action; 925 __le16 action;
890 __le32 id; 926 __le32 id;
891 u8 addr1[ETH_ALEN]; 927 u8 addr1[ETH_ALEN];
892 u8 addr2[ETH_ALEN]; 928 u8 addr2[ETH_ALEN];
893} __attribute__ ((packed)); 929} __packed;
894 930
895struct cmd_ds_fwt_access { 931struct cmd_ds_fwt_access {
932 struct cmd_header hdr;
933
896 __le16 action; 934 __le16 action;
897 __le32 id; 935 __le32 id;
898 u8 valid; 936 u8 valid;
@@ -910,7 +948,7 @@ struct cmd_ds_fwt_access {
910 __le32 snr; 948 __le32 snr;
911 __le32 references; 949 __le32 references;
912 u8 prec[ETH_ALEN]; 950 u8 prec[ETH_ALEN];
913} __attribute__ ((packed)); 951} __packed;
914 952
915struct cmd_ds_mesh_config { 953struct cmd_ds_mesh_config {
916 struct cmd_header hdr; 954 struct cmd_header hdr;
@@ -920,43 +958,15 @@ struct cmd_ds_mesh_config {
920 __le16 type; 958 __le16 type;
921 __le16 length; 959 __le16 length;
922 u8 data[128]; /* last position reserved */ 960 u8 data[128]; /* last position reserved */
923} __attribute__ ((packed)); 961} __packed;
924 962
925struct cmd_ds_mesh_access { 963struct cmd_ds_mesh_access {
926 struct cmd_header hdr; 964 struct cmd_header hdr;
927 965
928 __le16 action; 966 __le16 action;
929 __le32 data[32]; /* last position reserved */ 967 __le32 data[32]; /* last position reserved */
930} __attribute__ ((packed)); 968} __packed;
931 969
932/* Number of stats counters returned by the firmware */ 970/* Number of stats counters returned by the firmware */
933#define MESH_STATS_NUM 8 971#define MESH_STATS_NUM 8
934
935struct cmd_ds_command {
936 /* command header */
937 __le16 command;
938 __le16 size;
939 __le16 seqnum;
940 __le16 result;
941
942 /* command Body */
943 union {
944 struct cmd_ds_802_11_ps_mode psmode;
945 struct cmd_ds_802_11_monitor_mode monitor;
946 struct cmd_ds_802_11_rssi rssi;
947 struct cmd_ds_802_11_rssi_rsp rssirsp;
948 struct cmd_ds_mac_reg_access macreg;
949 struct cmd_ds_bbp_reg_access bbpreg;
950 struct cmd_ds_rf_reg_access rfreg;
951
952 struct cmd_ds_802_11_tpc_cfg tpccfg;
953 struct cmd_ds_802_11_afc afc;
954 struct cmd_ds_802_11_led_ctrl ledgpio;
955
956 struct cmd_ds_bt_access bt;
957 struct cmd_ds_fwt_access fwt;
958 struct cmd_ds_802_11_beacon_control bcn_ctrl;
959 } params;
960} __attribute__ ((packed));
961
962#endif 972#endif
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 64dd345d30f5..6e71346a7550 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1182,11 +1182,69 @@ static void if_sdio_remove(struct sdio_func *func)
1182 lbs_deb_leave(LBS_DEB_SDIO); 1182 lbs_deb_leave(LBS_DEB_SDIO);
1183} 1183}
1184 1184
1185static int if_sdio_suspend(struct device *dev)
1186{
1187 struct sdio_func *func = dev_to_sdio_func(dev);
1188 int ret;
1189 struct if_sdio_card *card = sdio_get_drvdata(func);
1190
1191 mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
1192
1193 lbs_pr_info("%s: suspend: PM flags = 0x%x\n",
1194 sdio_func_id(func), flags);
1195
1196 /* If we aren't being asked to wake on anything, we should bail out
1197 * and let the SD stack power down the card.
1198 */
1199 if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1200 lbs_pr_info("Suspend without wake params -- "
1201 "powering down card.");
1202 return -ENOSYS;
1203 }
1204
1205 if (!(flags & MMC_PM_KEEP_POWER)) {
1206 lbs_pr_err("%s: cannot remain alive while host is suspended\n",
1207 sdio_func_id(func));
1208 return -ENOSYS;
1209 }
1210
1211 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
1212 if (ret)
1213 return ret;
1214
1215 ret = lbs_suspend(card->priv);
1216 if (ret)
1217 return ret;
1218
1219 return sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
1220}
1221
1222static int if_sdio_resume(struct device *dev)
1223{
1224 struct sdio_func *func = dev_to_sdio_func(dev);
1225 struct if_sdio_card *card = sdio_get_drvdata(func);
1226 int ret;
1227
1228 lbs_pr_info("%s: resume: we're back\n", sdio_func_id(func));
1229
1230 ret = lbs_resume(card->priv);
1231
1232 return ret;
1233}
1234
1235static const struct dev_pm_ops if_sdio_pm_ops = {
1236 .suspend = if_sdio_suspend,
1237 .resume = if_sdio_resume,
1238};
1239
1185static struct sdio_driver if_sdio_driver = { 1240static struct sdio_driver if_sdio_driver = {
1186 .name = "libertas_sdio", 1241 .name = "libertas_sdio",
1187 .id_table = if_sdio_ids, 1242 .id_table = if_sdio_ids,
1188 .probe = if_sdio_probe, 1243 .probe = if_sdio_probe,
1189 .remove = if_sdio_remove, 1244 .remove = if_sdio_remove,
1245 .drv = {
1246 .pm = &if_sdio_pm_ops,
1247 },
1190}; 1248};
1191 1249
1192/*******************************************************************/ 1250/*******************************************************************/
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index f41594c7ac16..07ece9d26c63 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -433,7 +433,7 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
433 433
434static int if_usb_reset_device(struct if_usb_card *cardp) 434static int if_usb_reset_device(struct if_usb_card *cardp)
435{ 435{
436 struct cmd_ds_command *cmd = cardp->ep_out_buf + 4; 436 struct cmd_header *cmd = cardp->ep_out_buf + 4;
437 int ret; 437 int ret;
438 438
439 lbs_deb_enter(LBS_DEB_USB); 439 lbs_deb_enter(LBS_DEB_USB);
@@ -441,7 +441,7 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
441 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); 441 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
442 442
443 cmd->command = cpu_to_le16(CMD_802_11_RESET); 443 cmd->command = cpu_to_le16(CMD_802_11_RESET);
444 cmd->size = cpu_to_le16(sizeof(struct cmd_header)); 444 cmd->size = cpu_to_le16(sizeof(cmd));
445 cmd->result = cpu_to_le16(0); 445 cmd->result = cpu_to_le16(0);
446 cmd->seqnum = cpu_to_le16(0x5a5a); 446 cmd->seqnum = cpu_to_le16(0x5a5a);
447 usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_header)); 447 usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_header));
@@ -613,16 +613,14 @@ static void if_usb_receive_fwload(struct urb *urb)
613 return; 613 return;
614 } 614 }
615 615
616 syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC); 616 syncfwheader = kmemdup(skb->data + IPFIELD_ALIGN_OFFSET,
617 sizeof(struct fwsyncheader), GFP_ATOMIC);
617 if (!syncfwheader) { 618 if (!syncfwheader) {
618 lbs_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n"); 619 lbs_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
619 kfree_skb(skb); 620 kfree_skb(skb);
620 return; 621 return;
621 } 622 }
622 623
623 memcpy(syncfwheader, skb->data + IPFIELD_ALIGN_OFFSET,
624 sizeof(struct fwsyncheader));
625
626 if (!syncfwheader->cmd) { 624 if (!syncfwheader->cmd) {
627 lbs_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n"); 625 lbs_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
628 lbs_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n", 626 lbs_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
@@ -1043,6 +1041,12 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
1043 if (priv->psstate != PS_STATE_FULL_POWER) 1041 if (priv->psstate != PS_STATE_FULL_POWER)
1044 return -1; 1042 return -1;
1045 1043
1044 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1045 lbs_pr_info("Suspend attempt without "
1046 "configuring wake params!\n");
1047 return -ENOSYS;
1048 }
1049
1046 ret = lbs_suspend(priv); 1050 ret = lbs_suspend(priv);
1047 if (ret) 1051 if (ret)
1048 goto out; 1052 goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index d9b8ee130c45..258967144b96 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -11,20 +11,14 @@
11#include <linux/if_arp.h> 11#include <linux/if_arp.h>
12#include <linux/kthread.h> 12#include <linux/kthread.h>
13#include <linux/kfifo.h> 13#include <linux/kfifo.h>
14#include <linux/stddef.h>
15#include <linux/ieee80211.h>
16#include <linux/slab.h> 14#include <linux/slab.h>
17#include <net/iw_handler.h>
18#include <net/cfg80211.h> 15#include <net/cfg80211.h>
19 16
20#include "host.h" 17#include "host.h"
21#include "decl.h" 18#include "decl.h"
22#include "dev.h" 19#include "dev.h"
23#include "wext.h"
24#include "cfg.h" 20#include "cfg.h"
25#include "debugfs.h" 21#include "debugfs.h"
26#include "scan.h"
27#include "assoc.h"
28#include "cmd.h" 22#include "cmd.h"
29 23
30#define DRIVER_RELEASE_VERSION "323.p0" 24#define DRIVER_RELEASE_VERSION "323.p0"
@@ -96,72 +90,6 @@ u8 lbs_data_rate_to_fw_index(u32 rate)
96} 90}
97 91
98 92
99static int lbs_add_rtap(struct lbs_private *priv);
100static void lbs_remove_rtap(struct lbs_private *priv);
101
102
103/**
104 * Get function for sysfs attribute rtap
105 */
106static ssize_t lbs_rtap_get(struct device *dev,
107 struct device_attribute *attr, char * buf)
108{
109 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
110 return snprintf(buf, 5, "0x%X\n", priv->monitormode);
111}
112
113/**
114 * Set function for sysfs attribute rtap
115 */
116static ssize_t lbs_rtap_set(struct device *dev,
117 struct device_attribute *attr, const char * buf, size_t count)
118{
119 int monitor_mode;
120 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
121
122 sscanf(buf, "%x", &monitor_mode);
123 if (monitor_mode) {
124 if (priv->monitormode == monitor_mode)
125 return strlen(buf);
126 if (!priv->monitormode) {
127 if (priv->infra_open || lbs_mesh_open(priv))
128 return -EBUSY;
129 if (priv->mode == IW_MODE_INFRA)
130 lbs_cmd_80211_deauthenticate(priv,
131 priv->curbssparams.bssid,
132 WLAN_REASON_DEAUTH_LEAVING);
133 else if (priv->mode == IW_MODE_ADHOC)
134 lbs_adhoc_stop(priv);
135 lbs_add_rtap(priv);
136 }
137 priv->monitormode = monitor_mode;
138 } else {
139 if (!priv->monitormode)
140 return strlen(buf);
141 priv->monitormode = 0;
142 lbs_remove_rtap(priv);
143
144 if (priv->currenttxskb) {
145 dev_kfree_skb_any(priv->currenttxskb);
146 priv->currenttxskb = NULL;
147 }
148
149 /* Wake queues, command thread, etc. */
150 lbs_host_to_card_done(priv);
151 }
152
153 lbs_prepare_and_send_command(priv,
154 CMD_802_11_MONITOR_MODE, CMD_ACT_SET,
155 CMD_OPTION_WAITFORRSP, 0, &priv->monitormode);
156 return strlen(buf);
157}
158
159/**
160 * lbs_rtap attribute to be exported per ethX interface
161 * through sysfs (/sys/class/net/ethX/lbs_rtap)
162 */
163static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set );
164
165/** 93/**
166 * @brief This function opens the ethX interface 94 * @brief This function opens the ethX interface
167 * 95 *
@@ -177,13 +105,6 @@ static int lbs_dev_open(struct net_device *dev)
177 105
178 spin_lock_irq(&priv->driver_lock); 106 spin_lock_irq(&priv->driver_lock);
179 107
180 if (priv->monitormode) {
181 ret = -EBUSY;
182 goto out;
183 }
184
185 priv->infra_open = 1;
186
187 if (priv->connect_status == LBS_CONNECTED) 108 if (priv->connect_status == LBS_CONNECTED)
188 netif_carrier_on(dev); 109 netif_carrier_on(dev);
189 else 110 else
@@ -191,7 +112,6 @@ static int lbs_dev_open(struct net_device *dev)
191 112
192 if (!priv->tx_pending_len) 113 if (!priv->tx_pending_len)
193 netif_wake_queue(dev); 114 netif_wake_queue(dev);
194 out:
195 115
196 spin_unlock_irq(&priv->driver_lock); 116 spin_unlock_irq(&priv->driver_lock);
197 lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); 117 lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
@@ -211,7 +131,6 @@ static int lbs_eth_stop(struct net_device *dev)
211 lbs_deb_enter(LBS_DEB_NET); 131 lbs_deb_enter(LBS_DEB_NET);
212 132
213 spin_lock_irq(&priv->driver_lock); 133 spin_lock_irq(&priv->driver_lock);
214 priv->infra_open = 0;
215 netif_stop_queue(dev); 134 netif_stop_queue(dev);
216 spin_unlock_irq(&priv->driver_lock); 135 spin_unlock_irq(&priv->driver_lock);
217 136
@@ -238,12 +157,7 @@ static void lbs_tx_timeout(struct net_device *dev)
238 to kick it somehow? */ 157 to kick it somehow? */
239 lbs_host_to_card_done(priv); 158 lbs_host_to_card_done(priv);
240 159
241 /* More often than not, this actually happens because the 160 /* FIXME: reset the card */
242 firmware has crapped itself -- rather than just a very
243 busy medium. So send a harmless command, and if/when
244 _that_ times out, we'll kick it in the head. */
245 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
246 0, 0, NULL);
247 161
248 lbs_deb_leave(LBS_DEB_TX); 162 lbs_deb_leave(LBS_DEB_TX);
249} 163}
@@ -588,12 +502,6 @@ static int lbs_thread(void *data)
588 if (!priv->dnld_sent && !priv->cur_cmd) 502 if (!priv->dnld_sent && !priv->cur_cmd)
589 lbs_execute_next_command(priv); 503 lbs_execute_next_command(priv);
590 504
591 /* Wake-up command waiters which can't sleep in
592 * lbs_prepare_and_send_command
593 */
594 if (!list_empty(&priv->cmdpendingq))
595 wake_up_all(&priv->cmd_pending);
596
597 spin_lock_irq(&priv->driver_lock); 505 spin_lock_irq(&priv->driver_lock);
598 if (!priv->dnld_sent && priv->tx_pending_len > 0) { 506 if (!priv->dnld_sent && priv->tx_pending_len > 0) {
599 int ret = priv->hw_host_to_card(priv, MVMS_DAT, 507 int ret = priv->hw_host_to_card(priv, MVMS_DAT,
@@ -619,66 +527,58 @@ static int lbs_thread(void *data)
619 527
620 del_timer(&priv->command_timer); 528 del_timer(&priv->command_timer);
621 del_timer(&priv->auto_deepsleep_timer); 529 del_timer(&priv->auto_deepsleep_timer);
622 wake_up_all(&priv->cmd_pending);
623 530
624 lbs_deb_leave(LBS_DEB_THREAD); 531 lbs_deb_leave(LBS_DEB_THREAD);
625 return 0; 532 return 0;
626} 533}
627 534
628static int lbs_suspend_callback(struct lbs_private *priv, unsigned long dummy,
629 struct cmd_header *cmd)
630{
631 lbs_deb_enter(LBS_DEB_FW);
632
633 netif_device_detach(priv->dev);
634 if (priv->mesh_dev)
635 netif_device_detach(priv->mesh_dev);
636
637 priv->fw_ready = 0;
638 lbs_deb_leave(LBS_DEB_FW);
639 return 0;
640}
641
642int lbs_suspend(struct lbs_private *priv) 535int lbs_suspend(struct lbs_private *priv)
643{ 536{
644 struct cmd_header cmd;
645 int ret; 537 int ret;
646 538
647 lbs_deb_enter(LBS_DEB_FW); 539 lbs_deb_enter(LBS_DEB_FW);
648 540
649 if (priv->wol_criteria == 0xffffffff) { 541 if (priv->is_deep_sleep) {
650 lbs_pr_info("Suspend attempt without configuring wake params!\n"); 542 ret = lbs_set_deep_sleep(priv, 0);
651 return -EINVAL; 543 if (ret) {
544 lbs_pr_err("deep sleep cancellation failed: %d\n", ret);
545 return ret;
546 }
547 priv->deep_sleep_required = 1;
652 } 548 }
653 549
654 memset(&cmd, 0, sizeof(cmd)); 550 ret = lbs_set_host_sleep(priv, 1);
655 551
656 ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_ACTIVATE, &cmd, 552 netif_device_detach(priv->dev);
657 sizeof(cmd), lbs_suspend_callback, 0); 553 if (priv->mesh_dev)
658 if (ret) 554 netif_device_detach(priv->mesh_dev);
659 lbs_pr_info("HOST_SLEEP_ACTIVATE failed: %d\n", ret);
660 555
661 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 556 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
662 return ret; 557 return ret;
663} 558}
664EXPORT_SYMBOL_GPL(lbs_suspend); 559EXPORT_SYMBOL_GPL(lbs_suspend);
665 560
666void lbs_resume(struct lbs_private *priv) 561int lbs_resume(struct lbs_private *priv)
667{ 562{
668 lbs_deb_enter(LBS_DEB_FW); 563 int ret;
669 564
670 priv->fw_ready = 1; 565 lbs_deb_enter(LBS_DEB_FW);
671 566
672 /* Firmware doesn't seem to give us RX packets any more 567 ret = lbs_set_host_sleep(priv, 0);
673 until we send it some command. Might as well update */
674 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
675 0, 0, NULL);
676 568
677 netif_device_attach(priv->dev); 569 netif_device_attach(priv->dev);
678 if (priv->mesh_dev) 570 if (priv->mesh_dev)
679 netif_device_attach(priv->mesh_dev); 571 netif_device_attach(priv->mesh_dev);
680 572
681 lbs_deb_leave(LBS_DEB_FW); 573 if (priv->deep_sleep_required) {
574 priv->deep_sleep_required = 0;
575 ret = lbs_set_deep_sleep(priv, 1);
576 if (ret)
577 lbs_pr_err("deep sleep activation failed: %d\n", ret);
578 }
579
580 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
581 return ret;
682} 582}
683EXPORT_SYMBOL_GPL(lbs_resume); 583EXPORT_SYMBOL_GPL(lbs_resume);
684 584
@@ -710,6 +610,9 @@ static int lbs_setup_firmware(struct lbs_private *priv)
710 priv->txpower_max = maxlevel; 610 priv->txpower_max = maxlevel;
711 } 611 }
712 612
613 /* Send cmd to FW to enable 11D function */
614 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
615
713 lbs_set_mac_control(priv); 616 lbs_set_mac_control(priv);
714done: 617done:
715 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 618 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
@@ -748,7 +651,6 @@ out:
748static void auto_deepsleep_timer_fn(unsigned long data) 651static void auto_deepsleep_timer_fn(unsigned long data)
749{ 652{
750 struct lbs_private *priv = (struct lbs_private *)data; 653 struct lbs_private *priv = (struct lbs_private *)data;
751 int ret;
752 654
753 lbs_deb_enter(LBS_DEB_CMD); 655 lbs_deb_enter(LBS_DEB_CMD);
754 656
@@ -756,14 +658,15 @@ static void auto_deepsleep_timer_fn(unsigned long data)
756 priv->is_activity_detected = 0; 658 priv->is_activity_detected = 0;
757 } else { 659 } else {
758 if (priv->is_auto_deep_sleep_enabled && 660 if (priv->is_auto_deep_sleep_enabled &&
759 (!priv->wakeup_dev_required) && 661 (!priv->wakeup_dev_required) &&
760 (priv->connect_status != LBS_CONNECTED)) { 662 (priv->connect_status != LBS_CONNECTED)) {
663 struct cmd_header cmd;
664
761 lbs_deb_main("Entering auto deep sleep mode...\n"); 665 lbs_deb_main("Entering auto deep sleep mode...\n");
762 ret = lbs_prepare_and_send_command(priv, 666 memset(&cmd, 0, sizeof(cmd));
763 CMD_802_11_DEEP_SLEEP, 0, 667 cmd.size = cpu_to_le16(sizeof(cmd));
764 0, 0, NULL); 668 lbs_cmd_async(priv, CMD_802_11_DEEP_SLEEP, &cmd,
765 if (ret) 669 sizeof(cmd));
766 lbs_pr_err("Enter Deep Sleep command failed\n");
767 } 670 }
768 } 671 }
769 mod_timer(&priv->auto_deepsleep_timer , jiffies + 672 mod_timer(&priv->auto_deepsleep_timer , jiffies +
@@ -799,45 +702,27 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
799 702
800static int lbs_init_adapter(struct lbs_private *priv) 703static int lbs_init_adapter(struct lbs_private *priv)
801{ 704{
802 size_t bufsize; 705 int ret;
803 int i, ret = 0;
804 706
805 lbs_deb_enter(LBS_DEB_MAIN); 707 lbs_deb_enter(LBS_DEB_MAIN);
806 708
807 /* Allocate buffer to store the BSSID list */
808 bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor);
809 priv->networks = kzalloc(bufsize, GFP_KERNEL);
810 if (!priv->networks) {
811 lbs_pr_err("Out of memory allocating beacons\n");
812 ret = -1;
813 goto out;
814 }
815
816 /* Initialize scan result lists */
817 INIT_LIST_HEAD(&priv->network_free_list);
818 INIT_LIST_HEAD(&priv->network_list);
819 for (i = 0; i < MAX_NETWORK_COUNT; i++) {
820 list_add_tail(&priv->networks[i].list,
821 &priv->network_free_list);
822 }
823
824 memset(priv->current_addr, 0xff, ETH_ALEN); 709 memset(priv->current_addr, 0xff, ETH_ALEN);
825 710
826 priv->connect_status = LBS_DISCONNECTED; 711 priv->connect_status = LBS_DISCONNECTED;
827 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
828 priv->mode = IW_MODE_INFRA;
829 priv->channel = DEFAULT_AD_HOC_CHANNEL; 712 priv->channel = DEFAULT_AD_HOC_CHANNEL;
830 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 713 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
831 priv->radio_on = 1; 714 priv->radio_on = 1;
832 priv->enablehwauto = 1;
833 priv->psmode = LBS802_11POWERMODECAM; 715 priv->psmode = LBS802_11POWERMODECAM;
834 priv->psstate = PS_STATE_FULL_POWER; 716 priv->psstate = PS_STATE_FULL_POWER;
835 priv->is_deep_sleep = 0; 717 priv->is_deep_sleep = 0;
836 priv->is_auto_deep_sleep_enabled = 0; 718 priv->is_auto_deep_sleep_enabled = 0;
719 priv->deep_sleep_required = 0;
837 priv->wakeup_dev_required = 0; 720 priv->wakeup_dev_required = 0;
838 init_waitqueue_head(&priv->ds_awake_q); 721 init_waitqueue_head(&priv->ds_awake_q);
839 priv->authtype_auto = 1; 722 priv->authtype_auto = 1;
840 723 priv->is_host_sleep_configured = 0;
724 priv->is_host_sleep_activated = 0;
725 init_waitqueue_head(&priv->host_sleep_q);
841 mutex_init(&priv->lock); 726 mutex_init(&priv->lock);
842 727
843 setup_timer(&priv->command_timer, lbs_cmd_timeout_handler, 728 setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
@@ -849,7 +734,6 @@ static int lbs_init_adapter(struct lbs_private *priv)
849 INIT_LIST_HEAD(&priv->cmdpendingq); 734 INIT_LIST_HEAD(&priv->cmdpendingq);
850 735
851 spin_lock_init(&priv->driver_lock); 736 spin_lock_init(&priv->driver_lock);
852 init_waitqueue_head(&priv->cmd_pending);
853 737
854 /* Allocate the command buffers */ 738 /* Allocate the command buffers */
855 if (lbs_allocate_cmd_buffer(priv)) { 739 if (lbs_allocate_cmd_buffer(priv)) {
@@ -881,8 +765,6 @@ static void lbs_free_adapter(struct lbs_private *priv)
881 kfifo_free(&priv->event_fifo); 765 kfifo_free(&priv->event_fifo);
882 del_timer(&priv->command_timer); 766 del_timer(&priv->command_timer);
883 del_timer(&priv->auto_deepsleep_timer); 767 del_timer(&priv->auto_deepsleep_timer);
884 kfree(priv->networks);
885 priv->networks = NULL;
886 768
887 lbs_deb_leave(LBS_DEB_MAIN); 769 lbs_deb_leave(LBS_DEB_MAIN);
888} 770}
@@ -919,7 +801,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
919 lbs_pr_err("cfg80211 init failed\n"); 801 lbs_pr_err("cfg80211 init failed\n");
920 goto done; 802 goto done;
921 } 803 }
922 /* TODO? */ 804
923 wdev->iftype = NL80211_IFTYPE_STATION; 805 wdev->iftype = NL80211_IFTYPE_STATION;
924 priv = wdev_priv(wdev); 806 priv = wdev_priv(wdev);
925 priv->wdev = wdev; 807 priv->wdev = wdev;
@@ -929,7 +811,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
929 goto err_wdev; 811 goto err_wdev;
930 } 812 }
931 813
932 //TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
933 dev = alloc_netdev(0, "wlan%d", ether_setup); 814 dev = alloc_netdev(0, "wlan%d", ether_setup);
934 if (!dev) { 815 if (!dev) {
935 dev_err(dmdev, "no memory for network device instance\n"); 816 dev_err(dmdev, "no memory for network device instance\n");
@@ -945,20 +826,10 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
945 dev->netdev_ops = &lbs_netdev_ops; 826 dev->netdev_ops = &lbs_netdev_ops;
946 dev->watchdog_timeo = 5 * HZ; 827 dev->watchdog_timeo = 5 * HZ;
947 dev->ethtool_ops = &lbs_ethtool_ops; 828 dev->ethtool_ops = &lbs_ethtool_ops;
948#ifdef WIRELESS_EXT
949 dev->wireless_handlers = &lbs_handler_def;
950#endif
951 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 829 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
952 830
953
954 // TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ??
955
956
957 priv->card = card; 831 priv->card = card;
958 priv->infra_open = 0;
959 832
960
961 priv->rtap_net_dev = NULL;
962 strcpy(dev->name, "wlan%d"); 833 strcpy(dev->name, "wlan%d");
963 834
964 lbs_deb_thread("Starting main thread...\n"); 835 lbs_deb_thread("Starting main thread...\n");
@@ -970,12 +841,11 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
970 } 841 }
971 842
972 priv->work_thread = create_singlethread_workqueue("lbs_worker"); 843 priv->work_thread = create_singlethread_workqueue("lbs_worker");
973 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
974 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
975 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); 844 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
976 845
977 priv->wol_criteria = 0xffffffff; 846 priv->wol_criteria = 0xffffffff;
978 priv->wol_gpio = 0xff; 847 priv->wol_gpio = 0xff;
848 priv->wol_gap = 20;
979 849
980 goto done; 850 goto done;
981 851
@@ -1004,12 +874,10 @@ void lbs_remove_card(struct lbs_private *priv)
1004 lbs_deb_enter(LBS_DEB_MAIN); 874 lbs_deb_enter(LBS_DEB_MAIN);
1005 875
1006 lbs_remove_mesh(priv); 876 lbs_remove_mesh(priv);
1007 lbs_remove_rtap(priv); 877 lbs_scan_deinit(priv);
1008 878
1009 dev = priv->dev; 879 dev = priv->dev;
1010 880
1011 cancel_delayed_work_sync(&priv->scan_work);
1012 cancel_delayed_work_sync(&priv->assoc_work);
1013 cancel_work_sync(&priv->mcast_work); 881 cancel_work_sync(&priv->mcast_work);
1014 882
1015 /* worker thread destruction blocks on the in-flight command which 883 /* worker thread destruction blocks on the in-flight command which
@@ -1021,16 +889,18 @@ void lbs_remove_card(struct lbs_private *priv)
1021 889
1022 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { 890 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
1023 priv->psmode = LBS802_11POWERMODECAM; 891 priv->psmode = LBS802_11POWERMODECAM;
1024 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); 892 lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true);
1025 } 893 }
1026 894
1027 lbs_send_disconnect_notification(priv);
1028
1029 if (priv->is_deep_sleep) { 895 if (priv->is_deep_sleep) {
1030 priv->is_deep_sleep = 0; 896 priv->is_deep_sleep = 0;
1031 wake_up_interruptible(&priv->ds_awake_q); 897 wake_up_interruptible(&priv->ds_awake_q);
1032 } 898 }
1033 899
900 priv->is_host_sleep_configured = 0;
901 priv->is_host_sleep_activated = 0;
902 wake_up_interruptible(&priv->host_sleep_q);
903
1034 /* Stop the thread servicing the interrupts */ 904 /* Stop the thread servicing the interrupts */
1035 priv->surpriseremoved = 1; 905 priv->surpriseremoved = 1;
1036 kthread_stop(priv->main_thread); 906 kthread_stop(priv->main_thread);
@@ -1046,7 +916,7 @@ void lbs_remove_card(struct lbs_private *priv)
1046EXPORT_SYMBOL_GPL(lbs_remove_card); 916EXPORT_SYMBOL_GPL(lbs_remove_card);
1047 917
1048 918
1049static int lbs_rtap_supported(struct lbs_private *priv) 919int lbs_rtap_supported(struct lbs_private *priv)
1050{ 920{
1051 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) 921 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
1052 return 1; 922 return 1;
@@ -1078,16 +948,6 @@ int lbs_start_card(struct lbs_private *priv)
1078 948
1079 lbs_init_mesh(priv); 949 lbs_init_mesh(priv);
1080 950
1081 /*
1082 * While rtap isn't related to mesh, only mesh-enabled
1083 * firmware implements the rtap functionality via
1084 * CMD_802_11_MONITOR_MODE.
1085 */
1086 if (lbs_rtap_supported(priv)) {
1087 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1088 lbs_pr_err("cannot register lbs_rtap attribute\n");
1089 }
1090
1091 lbs_debugfs_init_one(priv, dev); 951 lbs_debugfs_init_one(priv, dev);
1092 952
1093 lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); 953 lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name);
@@ -1119,9 +979,6 @@ void lbs_stop_card(struct lbs_private *priv)
1119 lbs_debugfs_remove_one(priv); 979 lbs_debugfs_remove_one(priv);
1120 lbs_deinit_mesh(priv); 980 lbs_deinit_mesh(priv);
1121 981
1122 if (lbs_rtap_supported(priv))
1123 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1124
1125 /* Delete the timeout of the currently processing command */ 982 /* Delete the timeout of the currently processing command */
1126 del_timer_sync(&priv->command_timer); 983 del_timer_sync(&priv->command_timer);
1127 del_timer_sync(&priv->auto_deepsleep_timer); 984 del_timer_sync(&priv->auto_deepsleep_timer);
@@ -1195,7 +1052,7 @@ static int __init lbs_init_module(void)
1195 memset(&confirm_sleep, 0, sizeof(confirm_sleep)); 1052 memset(&confirm_sleep, 0, sizeof(confirm_sleep));
1196 confirm_sleep.hdr.command = cpu_to_le16(CMD_802_11_PS_MODE); 1053 confirm_sleep.hdr.command = cpu_to_le16(CMD_802_11_PS_MODE);
1197 confirm_sleep.hdr.size = cpu_to_le16(sizeof(confirm_sleep)); 1054 confirm_sleep.hdr.size = cpu_to_le16(sizeof(confirm_sleep));
1198 confirm_sleep.action = cpu_to_le16(CMD_SUBCMD_SLEEP_CONFIRMED); 1055 confirm_sleep.action = cpu_to_le16(PS_MODE_ACTION_SLEEP_CONFIRMED);
1199 lbs_debugfs_init(); 1056 lbs_debugfs_init();
1200 lbs_deb_leave(LBS_DEB_MAIN); 1057 lbs_deb_leave(LBS_DEB_MAIN);
1201 return 0; 1058 return 0;
@@ -1208,87 +1065,6 @@ static void __exit lbs_exit_module(void)
1208 lbs_deb_leave(LBS_DEB_MAIN); 1065 lbs_deb_leave(LBS_DEB_MAIN);
1209} 1066}
1210 1067
1211/*
1212 * rtap interface support fuctions
1213 */
1214
1215static int lbs_rtap_open(struct net_device *dev)
1216{
1217 /* Yes, _stop_ the queue. Because we don't support injection */
1218 lbs_deb_enter(LBS_DEB_MAIN);
1219 netif_carrier_off(dev);
1220 netif_stop_queue(dev);
1221 lbs_deb_leave(LBS_DEB_LEAVE);
1222 return 0;
1223}
1224
1225static int lbs_rtap_stop(struct net_device *dev)
1226{
1227 lbs_deb_enter(LBS_DEB_MAIN);
1228 lbs_deb_leave(LBS_DEB_MAIN);
1229 return 0;
1230}
1231
1232static netdev_tx_t lbs_rtap_hard_start_xmit(struct sk_buff *skb,
1233 struct net_device *dev)
1234{
1235 netif_stop_queue(dev);
1236 return NETDEV_TX_BUSY;
1237}
1238
1239static void lbs_remove_rtap(struct lbs_private *priv)
1240{
1241 lbs_deb_enter(LBS_DEB_MAIN);
1242 if (priv->rtap_net_dev == NULL)
1243 goto out;
1244 unregister_netdev(priv->rtap_net_dev);
1245 free_netdev(priv->rtap_net_dev);
1246 priv->rtap_net_dev = NULL;
1247out:
1248 lbs_deb_leave(LBS_DEB_MAIN);
1249}
1250
1251static const struct net_device_ops rtap_netdev_ops = {
1252 .ndo_open = lbs_rtap_open,
1253 .ndo_stop = lbs_rtap_stop,
1254 .ndo_start_xmit = lbs_rtap_hard_start_xmit,
1255};
1256
1257static int lbs_add_rtap(struct lbs_private *priv)
1258{
1259 int ret = 0;
1260 struct net_device *rtap_dev;
1261
1262 lbs_deb_enter(LBS_DEB_MAIN);
1263 if (priv->rtap_net_dev) {
1264 ret = -EPERM;
1265 goto out;
1266 }
1267
1268 rtap_dev = alloc_netdev(0, "rtap%d", ether_setup);
1269 if (rtap_dev == NULL) {
1270 ret = -ENOMEM;
1271 goto out;
1272 }
1273
1274 memcpy(rtap_dev->dev_addr, priv->current_addr, ETH_ALEN);
1275 rtap_dev->type = ARPHRD_IEEE80211_RADIOTAP;
1276 rtap_dev->netdev_ops = &rtap_netdev_ops;
1277 rtap_dev->ml_priv = priv;
1278 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
1279
1280 ret = register_netdev(rtap_dev);
1281 if (ret) {
1282 free_netdev(rtap_dev);
1283 goto out;
1284 }
1285 priv->rtap_net_dev = rtap_dev;
1286
1287out:
1288 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
1289 return ret;
1290}
1291
1292module_init(lbs_init_module); 1068module_init(lbs_init_module);
1293module_exit(lbs_exit_module); 1069module_exit(lbs_exit_module);
1294 1070
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index e385af1f4583..194762ab0142 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -5,6 +5,7 @@
5#include <linux/if_arp.h> 5#include <linux/if_arp.h>
6#include <linux/kthread.h> 6#include <linux/kthread.h>
7#include <linux/kfifo.h> 7#include <linux/kfifo.h>
8#include <net/cfg80211.h>
8 9
9#include "mesh.h" 10#include "mesh.h"
10#include "decl.h" 11#include "decl.h"
@@ -314,7 +315,7 @@ static int lbs_mesh_dev_open(struct net_device *dev)
314 315
315 spin_lock_irq(&priv->driver_lock); 316 spin_lock_irq(&priv->driver_lock);
316 317
317 if (priv->monitormode) { 318 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
318 ret = -EBUSY; 319 ret = -EBUSY;
319 goto out; 320 goto out;
320 } 321 }
@@ -369,9 +370,6 @@ int lbs_add_mesh(struct lbs_private *priv)
369 370
370 SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); 371 SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
371 372
372#ifdef WIRELESS_EXT
373 mesh_dev->wireless_handlers = &mesh_handler_def;
374#endif
375 mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 373 mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
376 /* Register virtual mesh interface */ 374 /* Register virtual mesh interface */
377 ret = register_netdev(mesh_dev); 375 ret = register_netdev(mesh_dev);
@@ -457,65 +455,189 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
457 * Mesh command handling 455 * Mesh command handling
458 */ 456 */
459 457
460int lbs_cmd_bt_access(struct cmd_ds_command *cmd, 458/**
461 u16 cmd_action, void *pdata_buf) 459 * @brief Add or delete Mesh Blinding Table entries
460 *
461 * @param priv A pointer to struct lbs_private structure
462 * @param add TRUE to add the entry, FALSE to delete it
463 * @param addr1 Destination address to blind or unblind
464 *
465 * @return 0 on success, error on failure
466 */
467int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1)
462{ 468{
463 struct cmd_ds_bt_access *bt_access = &cmd->params.bt; 469 struct cmd_ds_bt_access cmd;
464 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 470 int ret = 0;
465 471
466 cmd->command = cpu_to_le16(CMD_BT_ACCESS); 472 lbs_deb_enter(LBS_DEB_CMD);
467 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) +
468 sizeof(struct cmd_header));
469 cmd->result = 0;
470 bt_access->action = cpu_to_le16(cmd_action);
471 473
472 switch (cmd_action) { 474 BUG_ON(addr1 == NULL);
473 case CMD_ACT_BT_ACCESS_ADD: 475
474 memcpy(bt_access->addr1, pdata_buf, 2 * ETH_ALEN); 476 memset(&cmd, 0, sizeof(cmd));
477 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
478 memcpy(cmd.addr1, addr1, ETH_ALEN);
479 if (add) {
480 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_ADD);
475 lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr", 481 lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr",
476 bt_access->addr1, 6); 482 addr1, ETH_ALEN);
477 break; 483 } else {
478 case CMD_ACT_BT_ACCESS_DEL: 484 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_DEL);
479 memcpy(bt_access->addr1, pdata_buf, 1 * ETH_ALEN);
480 lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr", 485 lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr",
481 bt_access->addr1, 6); 486 addr1, ETH_ALEN);
482 break;
483 case CMD_ACT_BT_ACCESS_LIST:
484 bt_access->id = cpu_to_le32(*(u32 *) pdata_buf);
485 break;
486 case CMD_ACT_BT_ACCESS_RESET:
487 break;
488 case CMD_ACT_BT_ACCESS_SET_INVERT:
489 bt_access->id = cpu_to_le32(*(u32 *) pdata_buf);
490 break;
491 case CMD_ACT_BT_ACCESS_GET_INVERT:
492 break;
493 default:
494 break;
495 } 487 }
496 lbs_deb_leave(LBS_DEB_CMD); 488
497 return 0; 489 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
490
491 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
492 return ret;
498} 493}
499 494
500int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, 495/**
501 u16 cmd_action, void *pdata_buf) 496 * @brief Reset/clear the mesh blinding table
497 *
498 * @param priv A pointer to struct lbs_private structure
499 *
500 * @return 0 on success, error on failure
501 */
502int lbs_mesh_bt_reset(struct lbs_private *priv)
502{ 503{
503 struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt; 504 struct cmd_ds_bt_access cmd;
504 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 505 int ret = 0;
505 506
506 cmd->command = cpu_to_le16(CMD_FWT_ACCESS); 507 lbs_deb_enter(LBS_DEB_CMD);
507 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) +
508 sizeof(struct cmd_header));
509 cmd->result = 0;
510 508
511 if (pdata_buf) 509 memset(&cmd, 0, sizeof(cmd));
512 memcpy(fwt_access, pdata_buf, sizeof(*fwt_access)); 510 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
513 else 511 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_RESET);
514 memset(fwt_access, 0, sizeof(*fwt_access));
515 512
516 fwt_access->action = cpu_to_le16(cmd_action); 513 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
517 514
518 lbs_deb_leave(LBS_DEB_CMD); 515 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
516 return ret;
517}
518
519/**
520 * @brief Gets the inverted status of the mesh blinding table
521 *
522 * Normally the firmware "blinds" or ignores traffic from mesh nodes in the
523 * table, but an inverted table allows *only* traffic from nodes listed in
524 * the table.
525 *
526 * @param priv A pointer to struct lbs_private structure
527 * @param invert On success, TRUE if the blinding table is inverted,
528 * FALSE if it is not inverted
529 *
530 * @return 0 on success, error on failure
531 */
532int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted)
533{
534 struct cmd_ds_bt_access cmd;
535 int ret = 0;
536
537 lbs_deb_enter(LBS_DEB_CMD);
538
539 BUG_ON(inverted == NULL);
540
541 memset(&cmd, 0, sizeof(cmd));
542 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
543 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_GET_INVERT);
544
545 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
546 if (ret == 0)
547 *inverted = !!cmd.id;
548
549 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
550 return ret;
551}
552
553/**
554 * @brief Sets the inverted status of the mesh blinding table
555 *
556 * Normally the firmware "blinds" or ignores traffic from mesh nodes in the
557 * table, but an inverted table allows *only* traffic from nodes listed in
558 * the table.
559 *
560 * @param priv A pointer to struct lbs_private structure
561 * @param invert TRUE to invert the blinding table (only traffic from
562 * listed nodes allowed), FALSE to return it
563 * to normal state (listed nodes ignored)
564 *
565 * @return 0 on success, error on failure
566 */
567int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted)
568{
569 struct cmd_ds_bt_access cmd;
570 int ret = 0;
571
572 lbs_deb_enter(LBS_DEB_CMD);
573
574 memset(&cmd, 0, sizeof(cmd));
575 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
576 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT);
577 cmd.id = !!inverted;
578
579 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
580
581 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
582 return ret;
583}
584
585/**
586 * @brief List an entry in the mesh blinding table
587 *
588 * @param priv A pointer to struct lbs_private structure
589 * @param id The ID of the entry to list
590 * @param addr1 MAC address associated with the table entry
591 *
592 * @return 0 on success, error on failure
593 */
594int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1)
595{
596 struct cmd_ds_bt_access cmd;
597 int ret = 0;
598
599 lbs_deb_enter(LBS_DEB_CMD);
600
601 BUG_ON(addr1 == NULL);
602
603 memset(&cmd, 0, sizeof(cmd));
604 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
605 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT);
606 cmd.id = cpu_to_le32(id);
607
608 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
609 if (ret == 0)
610 memcpy(addr1, cmd.addr1, sizeof(cmd.addr1));
611
612 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
613 return ret;
614}
615
616/**
617 * @brief Access the mesh forwarding table
618 *
619 * @param priv A pointer to struct lbs_private structure
620 * @param cmd_action The forwarding table action to perform
621 * @param cmd The pre-filled FWT_ACCESS command
622 *
623 * @return 0 on success and 'cmd' will be filled with the
624 * firmware's response
625 */
626int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action,
627 struct cmd_ds_fwt_access *cmd)
628{
629 int ret;
630
631 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
632
633 cmd->hdr.command = cpu_to_le16(CMD_FWT_ACCESS);
634 cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access));
635 cmd->hdr.result = 0;
636 cmd->action = cpu_to_le16(cmd_action);
637
638 ret = lbs_cmd_with_response(priv, CMD_FWT_ACCESS, cmd);
639
640 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
519 return 0; 641 return 0;
520} 642}
521 643
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index e2573303a328..afb2e8dead3f 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -8,6 +8,7 @@
8#include <net/iw_handler.h> 8#include <net/iw_handler.h>
9#include <net/lib80211.h> 9#include <net/lib80211.h>
10 10
11#include "host.h"
11 12
12#ifdef CONFIG_LIBERTAS_MESH 13#ifdef CONFIG_LIBERTAS_MESH
13 14
@@ -51,10 +52,15 @@ struct cmd_ds_command;
51struct cmd_ds_mesh_access; 52struct cmd_ds_mesh_access;
52struct cmd_ds_mesh_config; 53struct cmd_ds_mesh_config;
53 54
54int lbs_cmd_bt_access(struct cmd_ds_command *cmd, 55int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1);
55 u16 cmd_action, void *pdata_buf); 56int lbs_mesh_bt_reset(struct lbs_private *priv);
56int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, 57int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted);
57 u16 cmd_action, void *pdata_buf); 58int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted);
59int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1);
60
61int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action,
62 struct cmd_ds_fwt_access *cmd);
63
58int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, 64int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
59 struct cmd_ds_mesh_access *cmd); 65 struct cmd_ds_mesh_access *cmd);
60int lbs_mesh_config_send(struct lbs_private *priv, 66int lbs_mesh_config_send(struct lbs_private *priv,
@@ -70,11 +76,6 @@ void lbs_persist_config_init(struct net_device *net);
70void lbs_persist_config_remove(struct net_device *net); 76void lbs_persist_config_remove(struct net_device *net);
71 77
72 78
73/* WEXT handler */
74
75extern struct iw_handler_def mesh_handler_def;
76
77
78/* Ethtool statistics */ 79/* Ethtool statistics */
79 80
80struct ethtool_stats; 81struct ethtool_stats;
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index d16b26416e82..b3c8ea6d610e 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -6,7 +6,7 @@ struct tx_radiotap_hdr {
6 u8 txpower; 6 u8 txpower;
7 u8 rts_retries; 7 u8 rts_retries;
8 u8 data_retries; 8 u8 data_retries;
9} __attribute__ ((packed)); 9} __packed;
10 10
11#define TX_RADIOTAP_PRESENT ( \ 11#define TX_RADIOTAP_PRESENT ( \
12 (1 << IEEE80211_RADIOTAP_RATE) | \ 12 (1 << IEEE80211_RADIOTAP_RATE) | \
@@ -34,7 +34,7 @@ struct rx_radiotap_hdr {
34 u8 flags; 34 u8 flags;
35 u8 rate; 35 u8 rate;
36 u8 antsignal; 36 u8 antsignal;
37} __attribute__ ((packed)); 37} __packed;
38 38
39#define RX_RADIOTAP_PRESENT ( \ 39#define RX_RADIOTAP_PRESENT ( \
40 (1 << IEEE80211_RADIOTAP_FLAGS) | \ 40 (1 << IEEE80211_RADIOTAP_FLAGS) | \
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 7a377f5b7662..a4d0bca9ef2c 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,18 +4,19 @@
4#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <net/cfg80211.h>
7 8
9#include "defs.h"
8#include "host.h" 10#include "host.h"
9#include "radiotap.h" 11#include "radiotap.h"
10#include "decl.h" 12#include "decl.h"
11#include "dev.h" 13#include "dev.h"
12#include "wext.h"
13 14
14struct eth803hdr { 15struct eth803hdr {
15 u8 dest_addr[6]; 16 u8 dest_addr[6];
16 u8 src_addr[6]; 17 u8 src_addr[6];
17 u16 h803_len; 18 u16 h803_len;
18} __attribute__ ((packed)); 19} __packed;
19 20
20struct rfc1042hdr { 21struct rfc1042hdr {
21 u8 llc_dsap; 22 u8 llc_dsap;
@@ -23,114 +24,22 @@ struct rfc1042hdr {
23 u8 llc_ctrl; 24 u8 llc_ctrl;
24 u8 snap_oui[3]; 25 u8 snap_oui[3];
25 u16 snap_type; 26 u16 snap_type;
26} __attribute__ ((packed)); 27} __packed;
27 28
28struct rxpackethdr { 29struct rxpackethdr {
29 struct eth803hdr eth803_hdr; 30 struct eth803hdr eth803_hdr;
30 struct rfc1042hdr rfc1042_hdr; 31 struct rfc1042hdr rfc1042_hdr;
31} __attribute__ ((packed)); 32} __packed;
32 33
33struct rx80211packethdr { 34struct rx80211packethdr {
34 struct rxpd rx_pd; 35 struct rxpd rx_pd;
35 void *eth80211_hdr; 36 void *eth80211_hdr;
36} __attribute__ ((packed)); 37} __packed;
37 38
38static int process_rxed_802_11_packet(struct lbs_private *priv, 39static int process_rxed_802_11_packet(struct lbs_private *priv,
39 struct sk_buff *skb); 40 struct sk_buff *skb);
40 41
41/** 42/**
42 * @brief This function computes the avgSNR .
43 *
44 * @param priv A pointer to struct lbs_private structure
45 * @return avgSNR
46 */
47static u8 lbs_getavgsnr(struct lbs_private *priv)
48{
49 u8 i;
50 u16 temp = 0;
51 if (priv->numSNRNF == 0)
52 return 0;
53 for (i = 0; i < priv->numSNRNF; i++)
54 temp += priv->rawSNR[i];
55 return (u8) (temp / priv->numSNRNF);
56
57}
58
59/**
60 * @brief This function computes the AvgNF
61 *
62 * @param priv A pointer to struct lbs_private structure
63 * @return AvgNF
64 */
65static u8 lbs_getavgnf(struct lbs_private *priv)
66{
67 u8 i;
68 u16 temp = 0;
69 if (priv->numSNRNF == 0)
70 return 0;
71 for (i = 0; i < priv->numSNRNF; i++)
72 temp += priv->rawNF[i];
73 return (u8) (temp / priv->numSNRNF);
74
75}
76
77/**
78 * @brief This function save the raw SNR/NF to our internel buffer
79 *
80 * @param priv A pointer to struct lbs_private structure
81 * @param prxpd A pointer to rxpd structure of received packet
82 * @return n/a
83 */
84static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
85{
86 if (priv->numSNRNF < DEFAULT_DATA_AVG_FACTOR)
87 priv->numSNRNF++;
88 priv->rawSNR[priv->nextSNRNF] = p_rx_pd->snr;
89 priv->rawNF[priv->nextSNRNF] = p_rx_pd->nf;
90 priv->nextSNRNF++;
91 if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR)
92 priv->nextSNRNF = 0;
93}
94
95/**
96 * @brief This function computes the RSSI in received packet.
97 *
98 * @param priv A pointer to struct lbs_private structure
99 * @param prxpd A pointer to rxpd structure of received packet
100 * @return n/a
101 */
102static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
103{
104
105 lbs_deb_enter(LBS_DEB_RX);
106
107 lbs_deb_rx("rxpd: SNR %d, NF %d\n", p_rx_pd->snr, p_rx_pd->nf);
108 lbs_deb_rx("before computing SNR: SNR-avg = %d, NF-avg = %d\n",
109 priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
110 priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
111
112 priv->SNR[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->snr;
113 priv->NF[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->nf;
114 lbs_save_rawSNRNF(priv, p_rx_pd);
115
116 priv->SNR[TYPE_RXPD][TYPE_AVG] = lbs_getavgsnr(priv) * AVG_SCALE;
117 priv->NF[TYPE_RXPD][TYPE_AVG] = lbs_getavgnf(priv) * AVG_SCALE;
118 lbs_deb_rx("after computing SNR: SNR-avg = %d, NF-avg = %d\n",
119 priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
120 priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
121
122 priv->RSSI[TYPE_RXPD][TYPE_NOAVG] =
123 CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_NOAVG],
124 priv->NF[TYPE_RXPD][TYPE_NOAVG]);
125
126 priv->RSSI[TYPE_RXPD][TYPE_AVG] =
127 CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
128 priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
129
130 lbs_deb_leave(LBS_DEB_RX);
131}
132
133/**
134 * @brief This function processes received packet and forwards it 43 * @brief This function processes received packet and forwards it
135 * to kernel/upper layer 44 * to kernel/upper layer
136 * 45 *
@@ -154,7 +63,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
154 63
155 skb->ip_summed = CHECKSUM_NONE; 64 skb->ip_summed = CHECKSUM_NONE;
156 65
157 if (priv->monitormode) 66 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
158 return process_rxed_802_11_packet(priv, skb); 67 return process_rxed_802_11_packet(priv, skb);
159 68
160 p_rx_pd = (struct rxpd *) skb->data; 69 p_rx_pd = (struct rxpd *) skb->data;
@@ -225,13 +134,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
225 */ 134 */
226 skb_pull(skb, hdrchop); 135 skb_pull(skb, hdrchop);
227 136
228 /* Take the data rate from the rxpd structure 137 priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
229 * only if the rate is auto
230 */
231 if (priv->enablehwauto)
232 priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
233
234 lbs_compute_rssi(priv, p_rx_pd);
235 138
236 lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); 139 lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
237 dev->stats.rx_bytes += skb->len; 140 dev->stats.rx_bytes += skb->len;
@@ -352,20 +255,18 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
352 pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr)); 255 pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr));
353 memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr)); 256 memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr));
354 257
355 /* Take the data rate from the rxpd structure 258 priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
356 * only if the rate is auto
357 */
358 if (priv->enablehwauto)
359 priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
360
361 lbs_compute_rssi(priv, prxpd);
362 259
363 lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); 260 lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
364 dev->stats.rx_bytes += skb->len; 261 dev->stats.rx_bytes += skb->len;
365 dev->stats.rx_packets++; 262 dev->stats.rx_packets++;
366 263
367 skb->protocol = eth_type_trans(skb, priv->rtap_net_dev); 264 skb->protocol = eth_type_trans(skb, priv->dev);
368 netif_rx(skb); 265
266 if (in_interrupt())
267 netif_rx(skb);
268 else
269 netif_rx_ni(skb);
369 270
370 ret = 0; 271 ret = 0;
371 272
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
deleted file mode 100644
index 24cd54b3a806..000000000000
--- a/drivers/net/wireless/libertas/scan.c
+++ /dev/null
@@ -1,1354 +0,0 @@
1/**
2 * Functions implementing wlan scan IOCTL and firmware command APIs
3 *
4 * IOCTL handlers as well as command preperation and response routines
5 * for sending scan commands to the firmware.
6 */
7#include <linux/slab.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/etherdevice.h>
11#include <linux/if_arp.h>
12#include <asm/unaligned.h>
13#include <net/lib80211.h>
14
15#include "host.h"
16#include "dev.h"
17#include "scan.h"
18#include "assoc.h"
19#include "wext.h"
20#include "cmd.h"
21
22//! Approximate amount of data needed to pass a scan result back to iwlist
23#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \
24 + IEEE80211_MAX_SSID_LEN \
25 + IW_EV_UINT_LEN \
26 + IW_EV_FREQ_LEN \
27 + IW_EV_QUAL_LEN \
28 + IEEE80211_MAX_SSID_LEN \
29 + IW_EV_PARAM_LEN \
30 + 40) /* 40 for WPAIE */
31
32//! Memory needed to store a max sized channel List TLV for a firmware scan
33#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvl_ie_header) \
34 + (MRVDRV_MAX_CHANNELS_PER_SCAN \
35 * sizeof(struct chanscanparamset)))
36
37//! Memory needed to store a max number/size SSID TLV for a firmware scan
38#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvl_ie_ssid_param_set))
39
40//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max
41#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \
42 + CHAN_TLV_MAX_SIZE + SSID_TLV_MAX_SIZE)
43
44//! The maximum number of channels the firmware can scan per command
45#define MRVDRV_MAX_CHANNELS_PER_SCAN 14
46
47/**
48 * @brief Number of channels to scan per firmware scan command issuance.
49 *
50 * Number restricted to prevent hitting the limit on the amount of scan data
51 * returned in a single firmware scan command.
52 */
53#define MRVDRV_CHANNELS_PER_SCAN_CMD 4
54
55//! Scan time specified in the channel TLV for each channel for passive scans
56#define MRVDRV_PASSIVE_SCAN_CHAN_TIME 100
57
58//! Scan time specified in the channel TLV for each channel for active scans
59#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100
60
61#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
62
63static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
64 struct cmd_header *resp);
65
66/*********************************************************************/
67/* */
68/* Misc helper functions */
69/* */
70/*********************************************************************/
71
72/**
73 * @brief Unsets the MSB on basic rates
74 *
75 * Scan through an array and unset the MSB for basic data rates.
76 *
77 * @param rates buffer of data rates
78 * @param len size of buffer
79 */
80static void lbs_unset_basic_rate_flags(u8 *rates, size_t len)
81{
82 int i;
83
84 for (i = 0; i < len; i++)
85 rates[i] &= 0x7f;
86}
87
88
89static inline void clear_bss_descriptor(struct bss_descriptor *bss)
90{
91 /* Don't blow away ->list, just BSS data */
92 memset(bss, 0, offsetof(struct bss_descriptor, list));
93}
94
95/**
96 * @brief Compare two SSIDs
97 *
98 * @param ssid1 A pointer to ssid to compare
99 * @param ssid2 A pointer to ssid to compare
100 *
101 * @return 0: ssid is same, otherwise is different
102 */
103int lbs_ssid_cmp(uint8_t *ssid1, uint8_t ssid1_len, uint8_t *ssid2,
104 uint8_t ssid2_len)
105{
106 if (ssid1_len != ssid2_len)
107 return -1;
108
109 return memcmp(ssid1, ssid2, ssid1_len);
110}
111
112static inline int is_same_network(struct bss_descriptor *src,
113 struct bss_descriptor *dst)
114{
115 /* A network is only a duplicate if the channel, BSSID, and ESSID
116 * all match. We treat all <hidden> with the same BSSID and channel
117 * as one network */
118 return ((src->ssid_len == dst->ssid_len) &&
119 (src->channel == dst->channel) &&
120 !compare_ether_addr(src->bssid, dst->bssid) &&
121 !memcmp(src->ssid, dst->ssid, src->ssid_len));
122}
123
124
125
126/*********************************************************************/
127/* */
128/* Region channel support */
129/* */
130/*********************************************************************/
131
132#define LBS_TX_PWR_DEFAULT 20 /*100mW */
133#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
134#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
135#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
136#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
137
138/* Format { channel, frequency (MHz), maxtxpower } */
139/* band: 'B/G', region: USA FCC/Canada IC */
140static struct chan_freq_power channel_freq_power_US_BG[] = {
141 {1, 2412, LBS_TX_PWR_US_DEFAULT},
142 {2, 2417, LBS_TX_PWR_US_DEFAULT},
143 {3, 2422, LBS_TX_PWR_US_DEFAULT},
144 {4, 2427, LBS_TX_PWR_US_DEFAULT},
145 {5, 2432, LBS_TX_PWR_US_DEFAULT},
146 {6, 2437, LBS_TX_PWR_US_DEFAULT},
147 {7, 2442, LBS_TX_PWR_US_DEFAULT},
148 {8, 2447, LBS_TX_PWR_US_DEFAULT},
149 {9, 2452, LBS_TX_PWR_US_DEFAULT},
150 {10, 2457, LBS_TX_PWR_US_DEFAULT},
151 {11, 2462, LBS_TX_PWR_US_DEFAULT}
152};
153
154/* band: 'B/G', region: Europe ETSI */
155static struct chan_freq_power channel_freq_power_EU_BG[] = {
156 {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
157 {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
158 {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
159 {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
160 {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
161 {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
162 {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
163 {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
164 {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
165 {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
166 {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
167 {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
168 {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
169};
170
171/* band: 'B/G', region: Spain */
172static struct chan_freq_power channel_freq_power_SPN_BG[] = {
173 {10, 2457, LBS_TX_PWR_DEFAULT},
174 {11, 2462, LBS_TX_PWR_DEFAULT}
175};
176
177/* band: 'B/G', region: France */
178static struct chan_freq_power channel_freq_power_FR_BG[] = {
179 {10, 2457, LBS_TX_PWR_FR_DEFAULT},
180 {11, 2462, LBS_TX_PWR_FR_DEFAULT},
181 {12, 2467, LBS_TX_PWR_FR_DEFAULT},
182 {13, 2472, LBS_TX_PWR_FR_DEFAULT}
183};
184
185/* band: 'B/G', region: Japan */
186static struct chan_freq_power channel_freq_power_JPN_BG[] = {
187 {1, 2412, LBS_TX_PWR_JP_DEFAULT},
188 {2, 2417, LBS_TX_PWR_JP_DEFAULT},
189 {3, 2422, LBS_TX_PWR_JP_DEFAULT},
190 {4, 2427, LBS_TX_PWR_JP_DEFAULT},
191 {5, 2432, LBS_TX_PWR_JP_DEFAULT},
192 {6, 2437, LBS_TX_PWR_JP_DEFAULT},
193 {7, 2442, LBS_TX_PWR_JP_DEFAULT},
194 {8, 2447, LBS_TX_PWR_JP_DEFAULT},
195 {9, 2452, LBS_TX_PWR_JP_DEFAULT},
196 {10, 2457, LBS_TX_PWR_JP_DEFAULT},
197 {11, 2462, LBS_TX_PWR_JP_DEFAULT},
198 {12, 2467, LBS_TX_PWR_JP_DEFAULT},
199 {13, 2472, LBS_TX_PWR_JP_DEFAULT},
200 {14, 2484, LBS_TX_PWR_JP_DEFAULT}
201};
202
203/**
204 * the structure for channel, frequency and power
205 */
206struct region_cfp_table {
207 u8 region;
208 struct chan_freq_power *cfp_BG;
209 int cfp_no_BG;
210};
211
212/**
213 * the structure for the mapping between region and CFP
214 */
215static struct region_cfp_table region_cfp_table[] = {
216 {0x10, /*US FCC */
217 channel_freq_power_US_BG,
218 ARRAY_SIZE(channel_freq_power_US_BG),
219 }
220 ,
221 {0x20, /*CANADA IC */
222 channel_freq_power_US_BG,
223 ARRAY_SIZE(channel_freq_power_US_BG),
224 }
225 ,
226 {0x30, /*EU*/ channel_freq_power_EU_BG,
227 ARRAY_SIZE(channel_freq_power_EU_BG),
228 }
229 ,
230 {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
231 ARRAY_SIZE(channel_freq_power_SPN_BG),
232 }
233 ,
234 {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
235 ARRAY_SIZE(channel_freq_power_FR_BG),
236 }
237 ,
238 {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
239 ARRAY_SIZE(channel_freq_power_JPN_BG),
240 }
241 ,
242/*Add new region here */
243};
244
245/**
246 * @brief This function finds the CFP in
247 * region_cfp_table based on region and band parameter.
248 *
249 * @param region The region code
250 * @param band The band
251 * @param cfp_no A pointer to CFP number
252 * @return A pointer to CFP
253 */
254static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
255{
256 int i, end;
257
258 lbs_deb_enter(LBS_DEB_MAIN);
259
260 end = ARRAY_SIZE(region_cfp_table);
261
262 for (i = 0; i < end ; i++) {
263 lbs_deb_main("region_cfp_table[i].region=%d\n",
264 region_cfp_table[i].region);
265 if (region_cfp_table[i].region == region) {
266 *cfp_no = region_cfp_table[i].cfp_no_BG;
267 lbs_deb_leave(LBS_DEB_MAIN);
268 return region_cfp_table[i].cfp_BG;
269 }
270 }
271
272 lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
273 return NULL;
274}
275
276int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
277{
278 int ret = 0;
279 int i = 0;
280
281 struct chan_freq_power *cfp;
282 int cfp_no;
283
284 lbs_deb_enter(LBS_DEB_MAIN);
285
286 memset(priv->region_channel, 0, sizeof(priv->region_channel));
287
288 cfp = lbs_get_region_cfp_table(region, &cfp_no);
289 if (cfp != NULL) {
290 priv->region_channel[i].nrcfp = cfp_no;
291 priv->region_channel[i].CFP = cfp;
292 } else {
293 lbs_deb_main("wrong region code %#x in band B/G\n",
294 region);
295 ret = -1;
296 goto out;
297 }
298 priv->region_channel[i].valid = 1;
299 priv->region_channel[i].region = region;
300 priv->region_channel[i].band = band;
301 i++;
302out:
303 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
304 return ret;
305}
306
307
308
309
310/*********************************************************************/
311/* */
312/* Main scanning support */
313/* */
314/*********************************************************************/
315
316/**
317 * @brief Create a channel list for the driver to scan based on region info
318 *
319 * Only used from lbs_scan_setup_scan_config()
320 *
321 * Use the driver region/band information to construct a comprehensive list
322 * of channels to scan. This routine is used for any scan that is not
323 * provided a specific channel list to scan.
324 *
325 * @param priv A pointer to struct lbs_private structure
326 * @param scanchanlist Output parameter: resulting channel list to scan
327 *
328 * @return void
329 */
330static int lbs_scan_create_channel_list(struct lbs_private *priv,
331 struct chanscanparamset *scanchanlist)
332{
333 struct region_channel *scanregion;
334 struct chan_freq_power *cfp;
335 int rgnidx;
336 int chanidx;
337 int nextchan;
338 uint8_t scantype;
339
340 chanidx = 0;
341
342 /* Set the default scan type to the user specified type, will later
343 * be changed to passive on a per channel basis if restricted by
344 * regulatory requirements (11d or 11h)
345 */
346 scantype = CMD_SCAN_TYPE_ACTIVE;
347
348 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
349 if (!priv->region_channel[rgnidx].valid)
350 continue;
351 scanregion = &priv->region_channel[rgnidx];
352
353 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
354 struct chanscanparamset *chan = &scanchanlist[chanidx];
355
356 cfp = scanregion->CFP + nextchan;
357
358 if (scanregion->band == BAND_B || scanregion->band == BAND_G)
359 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
360
361 if (scantype == CMD_SCAN_TYPE_PASSIVE) {
362 chan->maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME);
363 chan->chanscanmode.passivescan = 1;
364 } else {
365 chan->maxscantime = cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME);
366 chan->chanscanmode.passivescan = 0;
367 }
368
369 chan->channumber = cfp->channel;
370 }
371 }
372 return chanidx;
373}
374
375/*
376 * Add SSID TLV of the form:
377 *
378 * TLV-ID SSID 00 00
379 * length 06 00
380 * ssid 4d 4e 54 45 53 54
381 */
382static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv)
383{
384 struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
385
386 ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
387 ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len);
388 memcpy(ssid_tlv->ssid, priv->scan_ssid, priv->scan_ssid_len);
389 return sizeof(ssid_tlv->header) + priv->scan_ssid_len;
390}
391
392/*
393 * Add CHANLIST TLV of the form
394 *
395 * TLV-ID CHANLIST 01 01
396 * length 5b 00
397 * channel 1 00 01 00 00 00 64 00
398 * radio type 00
399 * channel 01
400 * scan type 00
401 * min scan time 00 00
402 * max scan time 64 00
403 * channel 2 00 02 00 00 00 64 00
404 * channel 3 00 03 00 00 00 64 00
405 * channel 4 00 04 00 00 00 64 00
406 * channel 5 00 05 00 00 00 64 00
407 * channel 6 00 06 00 00 00 64 00
408 * channel 7 00 07 00 00 00 64 00
409 * channel 8 00 08 00 00 00 64 00
410 * channel 9 00 09 00 00 00 64 00
411 * channel 10 00 0a 00 00 00 64 00
412 * channel 11 00 0b 00 00 00 64 00
413 * channel 12 00 0c 00 00 00 64 00
414 * channel 13 00 0d 00 00 00 64 00
415 *
416 */
417static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
418 struct chanscanparamset *chan_list,
419 int chan_count)
420{
421 size_t size = sizeof(struct chanscanparamset) *chan_count;
422 struct mrvl_ie_chanlist_param_set *chan_tlv = (void *)tlv;
423
424 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
425 memcpy(chan_tlv->chanscanparam, chan_list, size);
426 chan_tlv->header.len = cpu_to_le16(size);
427 return sizeof(chan_tlv->header) + size;
428}
429
430/*
431 * Add RATES TLV of the form
432 *
433 * TLV-ID RATES 01 00
434 * length 0e 00
435 * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c
436 *
437 * The rates are in lbs_bg_rates[], but for the 802.11b
438 * rates the high bit isn't set.
439 */
440static int lbs_scan_add_rates_tlv(uint8_t *tlv)
441{
442 int i;
443 struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
444
445 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
446 tlv += sizeof(rate_tlv->header);
447 for (i = 0; i < MAX_RATES; i++) {
448 *tlv = lbs_bg_rates[i];
449 if (*tlv == 0)
450 break;
451 /* This code makes sure that the 802.11b rates (1 MBit/s, 2
452 MBit/s, 5.5 MBit/s and 11 MBit/s get's the high bit set.
453 Note that the values are MBit/s * 2, to mark them as
454 basic rates so that the firmware likes it better */
455 if (*tlv == 0x02 || *tlv == 0x04 ||
456 *tlv == 0x0b || *tlv == 0x16)
457 *tlv |= 0x80;
458 tlv++;
459 }
460 rate_tlv->header.len = cpu_to_le16(i);
461 return sizeof(rate_tlv->header) + i;
462}
463
464/*
465 * Generate the CMD_802_11_SCAN command with the proper tlv
466 * for a bunch of channels.
467 */
468static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype,
469 struct chanscanparamset *chan_list, int chan_count)
470{
471 int ret = -ENOMEM;
472 struct cmd_ds_802_11_scan *scan_cmd;
473 uint8_t *tlv; /* pointer into our current, growing TLV storage area */
474
475 lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d",
476 bsstype, chan_list ? chan_list[0].channumber : -1,
477 chan_count);
478
479 /* create the fixed part for scan command */
480 scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL);
481 if (scan_cmd == NULL)
482 goto out;
483
484 tlv = scan_cmd->tlvbuffer;
485 /* TODO: do we need to scan for a specific BSSID?
486 memcpy(scan_cmd->bssid, priv->scan_bssid, ETH_ALEN); */
487 scan_cmd->bsstype = bsstype;
488
489 /* add TLVs */
490 if (priv->scan_ssid_len)
491 tlv += lbs_scan_add_ssid_tlv(priv, tlv);
492 if (chan_list && chan_count)
493 tlv += lbs_scan_add_chanlist_tlv(tlv, chan_list, chan_count);
494 tlv += lbs_scan_add_rates_tlv(tlv);
495
496 /* This is the final data we are about to send */
497 scan_cmd->hdr.size = cpu_to_le16(tlv - (uint8_t *)scan_cmd);
498 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd,
499 sizeof(*scan_cmd));
500 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer,
501 tlv - scan_cmd->tlvbuffer);
502
503 ret = __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr,
504 le16_to_cpu(scan_cmd->hdr.size),
505 lbs_ret_80211_scan, 0);
506
507out:
508 kfree(scan_cmd);
509 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
510 return ret;
511}
512
/**
 * @brief Internal function used to start a scan based on an input config
 *
 * Use the input user scan configuration information when provided in
 * order to send the appropriate scan commands to firmware to populate or
 * update the internal driver scan table.
 *
 * A scan may be split across several firmware commands of at most
 * MRVDRV_CHANNELS_PER_SCAN_CMD channels each.  In the non-blocking case
 * the remainder is rescheduled onto priv->scan_work and
 * priv->scan_channel records how many channels were already scanned.
 *
 * @param priv      A pointer to struct lbs_private structure
 * @param full_scan Do a full scan (blocking: all channels in this call)
 *
 * @return 0 or < 0 if error
 */
int lbs_scan_networks(struct lbs_private *priv, int full_scan)
{
	int ret = -ENOMEM;
	struct chanscanparamset *chan_list;
	struct chanscanparamset *curr_chans;
	int chan_count;
	uint8_t bsstype = CMD_BSS_TYPE_ANY;
	int numchannels = MRVDRV_CHANNELS_PER_SCAN_CMD;
	union iwreq_data wrqu;
#ifdef CONFIG_LIBERTAS_DEBUG
	struct bss_descriptor *iter;
	int i = 0;
	DECLARE_SSID_BUF(ssid);
#endif

	lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan);

	/* Cancel any outstanding partial scan if this scan
	 * is a full scan.
	 */
	if (full_scan && delayed_work_pending(&priv->scan_work))
		cancel_delayed_work(&priv->scan_work);

	/* User-specified bsstype or channel list
	   TODO: this can be implemented if some user-space application
	   need the feature. Formerly, it was accessible from debugfs,
	   but then nowhere used.
	   if (user_cfg) {
		if (user_cfg->bsstype)
		bsstype = user_cfg->bsstype;
	   } */

	lbs_deb_scan("numchannels %d, bsstype %d\n", numchannels, bsstype);

	/* Create list of channels to scan */
	chan_list = kzalloc(sizeof(struct chanscanparamset) *
			    LBS_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL);
	if (!chan_list) {
		lbs_pr_alert("SCAN: chan_list empty\n");
		goto out;
	}

	/* We want to scan all channels */
	chan_count = lbs_scan_create_channel_list(priv, chan_list);

	/* Pause TX while the firmware is busy scanning */
	netif_stop_queue(priv->dev);
	if (priv->mesh_dev)
		netif_stop_queue(priv->mesh_dev);

	/* Prepare to continue an interrupted scan */
	lbs_deb_scan("chan_count %d, scan_channel %d\n",
		     chan_count, priv->scan_channel);
	curr_chans = chan_list;
	/* advance channel list by already-scanned-channels */
	if (priv->scan_channel > 0) {
		curr_chans += priv->scan_channel;
		chan_count -= priv->scan_channel;
	}

	/* Send scan command(s)
	 * numchannels contains the number of channels we should maximally scan
	 * chan_count is the total number of channels to scan
	 */

	while (chan_count) {
		int to_scan = min(numchannels, chan_count);
		lbs_deb_scan("scanning %d of %d channels\n",
			     to_scan, chan_count);
		ret = lbs_do_scan(priv, bsstype, curr_chans,
				  to_scan);
		if (ret) {
			lbs_pr_err("SCAN_CMD failed\n");
			goto out2;
		}
		curr_chans += to_scan;
		chan_count -= to_scan;

		/* somehow schedule the next part of the scan */
		if (chan_count && !full_scan &&
		    !priv->surpriseremoved) {
			/* -1 marks just that we're currently scanning */
			if (priv->scan_channel < 0)
				priv->scan_channel = to_scan;
			else
				priv->scan_channel += to_scan;
			cancel_delayed_work(&priv->scan_work);
			queue_delayed_work(priv->work_thread, &priv->scan_work,
					   msecs_to_jiffies(300));
			/* skip over GIWSCAN event */
			goto out;
		}

	}
	/* Whole scan done: notify user space that results are available */
	memset(&wrqu, 0, sizeof(union iwreq_data));
	wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);

#ifdef CONFIG_LIBERTAS_DEBUG
	/* Dump the scan table */
	mutex_lock(&priv->lock);
	lbs_deb_scan("scan table:\n");
	list_for_each_entry(iter, &priv->network_list, list)
		lbs_deb_scan("%02d: BSSID %pM, RSSI %d, SSID '%s'\n",
			     i++, iter->bssid, iter->rssi,
			     print_ssid(ssid, iter->ssid, iter->ssid_len));
	mutex_unlock(&priv->lock);
#endif

out2:
	/* Scan finished (or failed): reset the resume marker */
	priv->scan_channel = 0;

out:
	/* Re-enable TX only when associated and no TX is pending */
	if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
		netif_wake_queue(priv->dev);

	if (priv->mesh_dev && lbs_mesh_connected(priv) &&
	    !priv->tx_pending_len)
		netif_wake_queue(priv->mesh_dev);

	kfree(chan_list);

	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
	return ret;
}
648
649void lbs_scan_worker(struct work_struct *work)
650{
651 struct lbs_private *priv =
652 container_of(work, struct lbs_private, scan_work.work);
653
654 lbs_deb_enter(LBS_DEB_SCAN);
655 lbs_scan_networks(priv, 0);
656 lbs_deb_leave(LBS_DEB_SCAN);
657}
658
659
660/*********************************************************************/
661/* */
662/* Result interpretation */
663/* */
664/*********************************************************************/
665
666/**
667 * @brief Interpret a BSS scan response returned from the firmware
668 *
669 * Parse the various fixed fields and IEs passed back for a a BSS probe
670 * response or beacon from the scan command. Record information as needed
671 * in the scan table struct bss_descriptor for that entry.
672 *
673 * @param bss Output parameter: Pointer to the BSS Entry
674 *
675 * @return 0 or -1
676 */
677static int lbs_process_bss(struct bss_descriptor *bss,
678 uint8_t **pbeaconinfo, int *bytesleft)
679{
680 struct ieee_ie_fh_param_set *fh;
681 struct ieee_ie_ds_param_set *ds;
682 struct ieee_ie_cf_param_set *cf;
683 struct ieee_ie_ibss_param_set *ibss;
684 DECLARE_SSID_BUF(ssid);
685 uint8_t *pos, *end, *p;
686 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
687 uint16_t beaconsize = 0;
688 int ret;
689
690 lbs_deb_enter(LBS_DEB_SCAN);
691
692 if (*bytesleft >= sizeof(beaconsize)) {
693 /* Extract & convert beacon size from the command buffer */
694 beaconsize = get_unaligned_le16(*pbeaconinfo);
695 *bytesleft -= sizeof(beaconsize);
696 *pbeaconinfo += sizeof(beaconsize);
697 }
698
699 if (beaconsize == 0 || beaconsize > *bytesleft) {
700 *pbeaconinfo += *bytesleft;
701 *bytesleft = 0;
702 ret = -1;
703 goto done;
704 }
705
706 /* Initialize the current working beacon pointer for this BSS iteration */
707 pos = *pbeaconinfo;
708 end = pos + beaconsize;
709
710 /* Advance the return beacon pointer past the current beacon */
711 *pbeaconinfo += beaconsize;
712 *bytesleft -= beaconsize;
713
714 memcpy(bss->bssid, pos, ETH_ALEN);
715 lbs_deb_scan("process_bss: BSSID %pM\n", bss->bssid);
716 pos += ETH_ALEN;
717
718 if ((end - pos) < 12) {
719 lbs_deb_scan("process_bss: Not enough bytes left\n");
720 ret = -1;
721 goto done;
722 }
723
724 /*
725 * next 4 fields are RSSI, time stamp, beacon interval,
726 * and capability information
727 */
728
729 /* RSSI is 1 byte long */
730 bss->rssi = *pos;
731 lbs_deb_scan("process_bss: RSSI %d\n", *pos);
732 pos++;
733
734 /* time stamp is 8 bytes long */
735 pos += 8;
736
737 /* beacon interval is 2 bytes long */
738 bss->beaconperiod = get_unaligned_le16(pos);
739 pos += 2;
740
741 /* capability information is 2 bytes long */
742 bss->capability = get_unaligned_le16(pos);
743 lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability);
744 pos += 2;
745
746 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
747 lbs_deb_scan("process_bss: WEP enabled\n");
748 if (bss->capability & WLAN_CAPABILITY_IBSS)
749 bss->mode = IW_MODE_ADHOC;
750 else
751 bss->mode = IW_MODE_INFRA;
752
753 /* rest of the current buffer are IE's */
754 lbs_deb_scan("process_bss: IE len %zd\n", end - pos);
755 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos);
756
757 /* process variable IE */
758 while (pos <= end - 2) {
759 if (pos + pos[1] > end) {
760 lbs_deb_scan("process_bss: error in processing IE, "
761 "bytes left < IE length\n");
762 break;
763 }
764
765 switch (pos[0]) {
766 case WLAN_EID_SSID:
767 bss->ssid_len = min_t(int, IEEE80211_MAX_SSID_LEN, pos[1]);
768 memcpy(bss->ssid, pos + 2, bss->ssid_len);
769 lbs_deb_scan("got SSID IE: '%s', len %u\n",
770 print_ssid(ssid, bss->ssid, bss->ssid_len),
771 bss->ssid_len);
772 break;
773
774 case WLAN_EID_SUPP_RATES:
775 n_basic_rates = min_t(uint8_t, MAX_RATES, pos[1]);
776 memcpy(bss->rates, pos + 2, n_basic_rates);
777 got_basic_rates = 1;
778 lbs_deb_scan("got RATES IE\n");
779 break;
780
781 case WLAN_EID_FH_PARAMS:
782 fh = (struct ieee_ie_fh_param_set *) pos;
783 memcpy(&bss->phy.fh, fh, sizeof(*fh));
784 lbs_deb_scan("got FH IE\n");
785 break;
786
787 case WLAN_EID_DS_PARAMS:
788 ds = (struct ieee_ie_ds_param_set *) pos;
789 bss->channel = ds->channel;
790 memcpy(&bss->phy.ds, ds, sizeof(*ds));
791 lbs_deb_scan("got DS IE, channel %d\n", bss->channel);
792 break;
793
794 case WLAN_EID_CF_PARAMS:
795 cf = (struct ieee_ie_cf_param_set *) pos;
796 memcpy(&bss->ss.cf, cf, sizeof(*cf));
797 lbs_deb_scan("got CF IE\n");
798 break;
799
800 case WLAN_EID_IBSS_PARAMS:
801 ibss = (struct ieee_ie_ibss_param_set *) pos;
802 bss->atimwindow = ibss->atimwindow;
803 memcpy(&bss->ss.ibss, ibss, sizeof(*ibss));
804 lbs_deb_scan("got IBSS IE\n");
805 break;
806
807 case WLAN_EID_EXT_SUPP_RATES:
808 /* only process extended supported rate if data rate is
809 * already found. Data rate IE should come before
810 * extended supported rate IE
811 */
812 lbs_deb_scan("got RATESEX IE\n");
813 if (!got_basic_rates) {
814 lbs_deb_scan("... but ignoring it\n");
815 break;
816 }
817
818 n_ex_rates = pos[1];
819 if (n_basic_rates + n_ex_rates > MAX_RATES)
820 n_ex_rates = MAX_RATES - n_basic_rates;
821
822 p = bss->rates + n_basic_rates;
823 memcpy(p, pos + 2, n_ex_rates);
824 break;
825
826 case WLAN_EID_GENERIC:
827 if (pos[1] >= 4 &&
828 pos[2] == 0x00 && pos[3] == 0x50 &&
829 pos[4] == 0xf2 && pos[5] == 0x01) {
830 bss->wpa_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
831 memcpy(bss->wpa_ie, pos, bss->wpa_ie_len);
832 lbs_deb_scan("got WPA IE\n");
833 lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie,
834 bss->wpa_ie_len);
835 } else if (pos[1] >= MARVELL_MESH_IE_LENGTH &&
836 pos[2] == 0x00 && pos[3] == 0x50 &&
837 pos[4] == 0x43 && pos[5] == 0x04) {
838 lbs_deb_scan("got mesh IE\n");
839 bss->mesh = 1;
840 } else {
841 lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n",
842 pos[2], pos[3],
843 pos[4], pos[5],
844 pos[1]);
845 }
846 break;
847
848 case WLAN_EID_RSN:
849 lbs_deb_scan("got RSN IE\n");
850 bss->rsn_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
851 memcpy(bss->rsn_ie, pos, bss->rsn_ie_len);
852 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE",
853 bss->rsn_ie, bss->rsn_ie_len);
854 break;
855
856 default:
857 lbs_deb_scan("got IE 0x%04x, len %d\n",
858 pos[0], pos[1]);
859 break;
860 }
861
862 pos += pos[1] + 2;
863 }
864
865 /* Timestamp */
866 bss->last_scanned = jiffies;
867 lbs_unset_basic_rate_flags(bss->rates, sizeof(bss->rates));
868
869 ret = 0;
870
871done:
872 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
873 return ret;
874}
875
876/**
877 * @brief Send a scan command for all available channels filtered on a spec
878 *
879 * Used in association code and from debugfs
880 *
881 * @param priv A pointer to struct lbs_private structure
882 * @param ssid A pointer to the SSID to scan for
883 * @param ssid_len Length of the SSID
884 *
885 * @return 0-success, otherwise fail
886 */
887int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid,
888 uint8_t ssid_len)
889{
890 DECLARE_SSID_BUF(ssid_buf);
891 int ret = 0;
892
893 lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n",
894 print_ssid(ssid_buf, ssid, ssid_len));
895
896 if (!ssid_len)
897 goto out;
898
899 memcpy(priv->scan_ssid, ssid, ssid_len);
900 priv->scan_ssid_len = ssid_len;
901
902 lbs_scan_networks(priv, 1);
903 if (priv->surpriseremoved) {
904 ret = -1;
905 goto out;
906 }
907
908out:
909 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
910 return ret;
911}
912
913
914
915
916/*********************************************************************/
917/* */
918/* Support for Wireless Extensions */
919/* */
920/*********************************************************************/
921
922
923#define MAX_CUSTOM_LEN 64
924
/*
 * Translate one scan-table entry (struct bss_descriptor) into the
 * wireless-extensions event stream consumed by SIOCGIWSCAN.
 *
 * Events are appended in the order wext requires: BSSID first, then
 * SSID, mode, frequency, quality, encryption, bit rates and IEs.
 *
 * @param priv  A pointer to struct lbs_private structure
 * @param info  wext request info (stream encoding context)
 * @param start current write position in the event buffer
 * @param stop  end of the event buffer
 * @param bss   the scan-table entry to translate
 *
 * @return new write position, or NULL if the entry's channel is invalid
 */
static inline char *lbs_translate_scan(struct lbs_private *priv,
				       struct iw_request_info *info,
				       char *start, char *stop,
				       struct bss_descriptor *bss)
{
	struct chan_freq_power *cfp;
	char *current_val;	/* For rates */
	struct iw_event iwe;	/* Temporary buffer */
	int j;
#define PERFECT_RSSI ((uint8_t)50)
#define WORST_RSSI ((uint8_t)0)
#define RSSI_DIFF ((uint8_t)(PERFECT_RSSI - WORST_RSSI))
	uint8_t rssi;

	lbs_deb_enter(LBS_DEB_SCAN);

	/* Reject entries whose channel has no frequency/power mapping */
	cfp = lbs_find_cfp_by_band_and_channel(priv, 0, bss->channel);
	if (!cfp) {
		lbs_deb_scan("Invalid channel number %d\n", bss->channel);
		start = NULL;
		goto out;
	}

	/* First entry *MUST* be the BSSID */
	iwe.cmd = SIOCGIWAP;
	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(iwe.u.ap_addr.sa_data, &bss->bssid, ETH_ALEN);
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);

	/* SSID */
	iwe.cmd = SIOCGIWESSID;
	iwe.u.data.flags = 1;
	iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
	start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);

	/* Mode (ad-hoc or infrastructure) */
	iwe.cmd = SIOCGIWMODE;
	iwe.u.mode = bss->mode;
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);

	/* Frequency (cfp->freq is in MHz; report as freq * 10^1 * 100000) */
	iwe.cmd = SIOCGIWFREQ;
	iwe.u.freq.m = (long)cfp->freq * 100000;
	iwe.u.freq.e = 1;
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);

	/* Add quality statistics */
	iwe.cmd = IWEVQUAL;
	iwe.u.qual.updated = IW_QUAL_ALL_UPDATED;
	iwe.u.qual.level = SCAN_RSSI(bss->rssi);

	/* Map the signal level onto a 0..100 quality scale */
	rssi = iwe.u.qual.level - MRVDRV_NF_DEFAULT_SCAN_VALUE;
	iwe.u.qual.qual =
		(100 * RSSI_DIFF * RSSI_DIFF - (PERFECT_RSSI - rssi) *
		 (15 * (RSSI_DIFF) + 62 * (PERFECT_RSSI - rssi))) /
		(RSSI_DIFF * RSSI_DIFF);
	if (iwe.u.qual.qual > 100)
		iwe.u.qual.qual = 100;

	/* Fall back to a default noise floor when none was measured */
	if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) {
		iwe.u.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE;
	} else {
		iwe.u.qual.noise = CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
	}

	/* Locally created ad-hoc BSSs won't have beacons if this is the
	 * only station in the adhoc network; so get signal strength
	 * from receive statistics.
	 */
	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate
	    && !lbs_ssid_cmp(priv->curbssparams.ssid,
			     priv->curbssparams.ssid_len,
			     bss->ssid, bss->ssid_len)) {
		int snr, nf;
		snr = priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
		nf = priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
		iwe.u.qual.level = CAL_RSSI(snr, nf);
	}
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);

	/* Add encryption capability (flag only, never the key itself) */
	iwe.cmd = SIOCGIWENCODE;
	if (bss->capability & WLAN_CAPABILITY_PRIVACY) {
		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
	} else {
		iwe.u.data.flags = IW_ENCODE_DISABLED;
	}
	iwe.u.data.length = 0;
	start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);

	/* Bit rates are packed as multiple values after one event header */
	current_val = start + iwe_stream_lcp_len(info);

	iwe.cmd = SIOCGIWRATE;
	iwe.u.bitrate.fixed = 0;
	iwe.u.bitrate.disabled = 0;
	iwe.u.bitrate.value = 0;

	for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) {
		/* Bit rate given in 500 kb/s units */
		iwe.u.bitrate.value = bss->rates[j] * 500000;
		current_val = iwe_stream_add_value(info, start, current_val,
						   stop, &iwe, IW_EV_PARAM_LEN);
	}
	/* Our own ad-hoc network: additionally report 11 MBit/s
	 * (22 * 500 kb/s) as supported */
	if ((bss->mode == IW_MODE_ADHOC) && priv->adhoccreate
	    && !lbs_ssid_cmp(priv->curbssparams.ssid,
			     priv->curbssparams.ssid_len,
			     bss->ssid, bss->ssid_len)) {
		iwe.u.bitrate.value = 22 * 500000;
		current_val = iwe_stream_add_value(info, start, current_val,
						   stop, &iwe, IW_EV_PARAM_LEN);
	}
	/* Check if we added any event */
	if ((current_val - start) > iwe_stream_lcp_len(info))
		start = current_val;

	/* WPA IE, passed through verbatim as a generic IE event */
	memset(&iwe, 0, sizeof(iwe));
	if (bss->wpa_ie_len) {
		char buf[MAX_WPA_IE_LEN];
		memcpy(buf, bss->wpa_ie, bss->wpa_ie_len);
		iwe.cmd = IWEVGENIE;
		iwe.u.data.length = bss->wpa_ie_len;
		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
	}

	/* RSN IE, likewise */
	memset(&iwe, 0, sizeof(iwe));
	if (bss->rsn_ie_len) {
		char buf[MAX_WPA_IE_LEN];
		memcpy(buf, bss->rsn_ie, bss->rsn_ie_len);
		iwe.cmd = IWEVGENIE;
		iwe.u.data.length = bss->rsn_ie_len;
		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
	}

	/* Tag OLPC mesh networks with a custom event */
	if (bss->mesh) {
		char custom[MAX_CUSTOM_LEN];
		char *p = custom;

		iwe.cmd = IWEVCUSTOM;
		p += snprintf(p, MAX_CUSTOM_LEN, "mesh-type: olpc");
		iwe.u.data.length = p - custom;
		if (iwe.u.data.length)
			start = iwe_stream_add_point(info, start, stop,
						     &iwe, custom);
	}

out:
	lbs_deb_leave_args(LBS_DEB_SCAN, "start %p", start);
	return start;
}
1074
1075
1076/**
1077 * @brief Handle Scan Network ioctl
1078 *
1079 * @param dev A pointer to net_device structure
1080 * @param info A pointer to iw_request_info structure
1081 * @param vwrq A pointer to iw_param structure
1082 * @param extra A pointer to extra data buf
1083 *
1084 * @return 0 --success, otherwise fail
1085 */
1086int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
1087 union iwreq_data *wrqu, char *extra)
1088{
1089 DECLARE_SSID_BUF(ssid);
1090 struct lbs_private *priv = dev->ml_priv;
1091 int ret = 0;
1092
1093 lbs_deb_enter(LBS_DEB_WEXT);
1094
1095 if (!priv->radio_on) {
1096 ret = -EINVAL;
1097 goto out;
1098 }
1099
1100 if (!netif_running(dev)) {
1101 ret = -ENETDOWN;
1102 goto out;
1103 }
1104
1105 /* mac80211 does this:
1106 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1107 if (sdata->type != IEEE80211_IF_TYPE_xxx) {
1108 ret = -EOPNOTSUPP;
1109 goto out;
1110 }
1111 */
1112
1113 if (wrqu->data.length == sizeof(struct iw_scan_req) &&
1114 wrqu->data.flags & IW_SCAN_THIS_ESSID) {
1115 struct iw_scan_req *req = (struct iw_scan_req *)extra;
1116 priv->scan_ssid_len = req->essid_len;
1117 memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
1118 lbs_deb_wext("set_scan, essid '%s'\n",
1119 print_ssid(ssid, priv->scan_ssid, priv->scan_ssid_len));
1120 } else {
1121 priv->scan_ssid_len = 0;
1122 }
1123
1124 if (!delayed_work_pending(&priv->scan_work))
1125 queue_delayed_work(priv->work_thread, &priv->scan_work,
1126 msecs_to_jiffies(50));
1127 /* set marker that currently a scan is taking place */
1128 priv->scan_channel = -1;
1129
1130 if (priv->surpriseremoved)
1131 ret = -EIO;
1132
1133out:
1134 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1135 return ret;
1136}
1137
1138
/**
 * @brief Handle Retrieve scan table ioctl (SIOCGIWSCAN)
 *
 * Walks the driver scan table under priv->lock, pruning stale entries
 * and translating the rest into the wext event stream in @extra.
 *
 * @param dev   A pointer to net_device structure
 * @param info  A pointer to iw_request_info structure
 * @param dwrq  A pointer to iw_point structure (buffer length in/out)
 * @param extra A pointer to extra data buf
 *
 * @return 0 --success, otherwise fail
 */
int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
		 struct iw_point *dwrq, char *extra)
{
/* minimum buffer space assumed per translated entry */
#define SCAN_ITEM_SIZE 128
	struct lbs_private *priv = dev->ml_priv;
	int err = 0;
	char *ev = extra;
	char *stop = ev + dwrq->length;
	struct bss_descriptor *iter_bss;
	struct bss_descriptor *safe;

	lbs_deb_enter(LBS_DEB_WEXT);

	/* iwlist should wait until the current scan is finished */
	if (priv->scan_channel)
		return -EAGAIN;

	/* Update RSSI if current BSS is a locally created ad-hoc BSS */
	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
		err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
						   CMD_OPTION_WAITFORRSP, 0, NULL);
		if (err)
			goto out;
	}

	mutex_lock(&priv->lock);
	list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
		char *next_ev;
		unsigned long stale_time;

		/* Stop when the output buffer is (almost) full */
		if (stop - ev < SCAN_ITEM_SIZE) {
			err = -E2BIG;
			break;
		}

		/* For mesh device, list only mesh networks */
		if (dev == priv->mesh_dev && !iter_bss->mesh)
			continue;

		/* Prune an old scan result */
		stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE;
		if (time_after(jiffies, stale_time)) {
			list_move_tail(&iter_bss->list, &priv->network_free_list);
			clear_bss_descriptor(iter_bss);
			continue;
		}

		/* Translate to WE format this entry; NULL means the
		 * entry was invalid (e.g. bad channel) and is skipped */
		next_ev = lbs_translate_scan(priv, info, ev, stop, iter_bss);
		if (next_ev == NULL)
			continue;
		ev = next_ev;
	}
	mutex_unlock(&priv->lock);

	/* Report how many bytes of event data were produced */
	dwrq->length = (ev - extra);
	dwrq->flags = 0;
out:
	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
	return err;
}
1210
1211
1212
1213
1214/*********************************************************************/
1215/* */
1216/* Command execution */
1217/* */
1218/*********************************************************************/
1219
1220
/**
 * @brief This function handles the command response of scan
 *
 * Called from handle_cmd_response() in cmdrespc.
 *
 * The response buffer for the scan command has the following
 * memory layout:
 *
 *   .-----------------------------------------------------------.
 *   |  header (4 * sizeof(u16)):  Standard command response hdr |
 *   .-----------------------------------------------------------.
 *   |  bufsize (u16) : sizeof the BSS Description data          |
 *   .-----------------------------------------------------------.
 *   |  NumOfSet (u8) : Number of BSS Descs returned             |
 *   .-----------------------------------------------------------.
 *   |  BSSDescription data (variable, size given in bufsize)    |
 *   .-----------------------------------------------------------.
 *   |  TLV data (variable, size calculated using header->size,  |
 *   |  bufsize and sizeof the fixed fields above)               |
 *   .-----------------------------------------------------------.
 *
 * @param priv  A pointer to struct lbs_private structure
 * @param dummy unused callback argument
 * @param resp  A pointer to the response command header
 *
 * @return 0 or -1
 */
static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
			      struct cmd_header *resp)
{
	struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
	struct bss_descriptor *iter_bss;
	struct bss_descriptor *safe;
	uint8_t *bssinfo;
	uint16_t scanrespsize;
	int bytesleft;
	int idx;
	int tlvbufsize;
	int ret;

	lbs_deb_enter(LBS_DEB_SCAN);

	/* Prune old entries from scan table */
	list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
		unsigned long stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE;
		if (time_before(jiffies, stale_time))
			continue;
		list_move_tail (&iter_bss->list, &priv->network_free_list);
		clear_bss_descriptor(iter_bss);
	}

	/* Sanity-check the firmware-reported result count */
	if (scanresp->nr_sets > MAX_NETWORK_COUNT) {
		lbs_deb_scan("SCAN_RESP: too many scan results (%d, max %d)\n",
			     scanresp->nr_sets, MAX_NETWORK_COUNT);
		ret = -1;
		goto done;
	}

	bytesleft = get_unaligned_le16(&scanresp->bssdescriptsize);
	lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft);

	scanrespsize = le16_to_cpu(resp->size);
	lbs_deb_scan("SCAN_RESP: scan results %d\n", scanresp->nr_sets);

	bssinfo = scanresp->bssdesc_and_tlvbuffer;

	/* The size of the TLV buffer is equal to the entire command response
	 * size (scanrespsize) minus the fixed fields (sizeof()'s), the
	 * BSS Descriptions (bssdescriptsize as bytesLef) and the command
	 * response header (sizeof(struct cmd_header)).
	 * NOTE(review): tlvbufsize is computed but not used anywhere in
	 * this function as visible here — confirm whether the TLV area
	 * was meant to be parsed. */
	tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
				     + sizeof(scanresp->nr_sets)
				     + sizeof(struct cmd_header));

	/*
	 * Process each scan response returned (scanresp->nr_sets). Save
	 * the information in the newbssentry and then insert into the
	 * driver scan table either as an update to an existing entry
	 * or as an addition at the end of the table
	 */
	for (idx = 0; idx < scanresp->nr_sets && bytesleft; idx++) {
		struct bss_descriptor new;
		struct bss_descriptor *found = NULL;
		struct bss_descriptor *oldest = NULL;

		/* Process the data fields and IEs returned for this BSS */
		memset(&new, 0, sizeof (struct bss_descriptor));
		if (lbs_process_bss(&new, &bssinfo, &bytesleft) != 0) {
			/* error parsing the scan response, skipped */
			lbs_deb_scan("SCAN_RESP: process_bss returned ERROR\n");
			continue;
		}

		/* Try to find this bss in the scan table; remember the
		 * oldest entry in case the table is full */
		list_for_each_entry (iter_bss, &priv->network_list, list) {
			if (is_same_network(iter_bss, &new)) {
				found = iter_bss;
				break;
			}

			if ((oldest == NULL) ||
			    (iter_bss->last_scanned < oldest->last_scanned))
				oldest = iter_bss;
		}

		if (found) {
			/* found, clear it */
			clear_bss_descriptor(found);
		} else if (!list_empty(&priv->network_free_list)) {
			/* Pull one from the free list */
			found = list_entry(priv->network_free_list.next,
					   struct bss_descriptor, list);
			list_move_tail(&found->list, &priv->network_list);
		} else if (oldest) {
			/* If there are no more slots, expire the oldest */
			found = oldest;
			clear_bss_descriptor(found);
			list_move_tail(&found->list, &priv->network_list);
		} else {
			continue;
		}

		lbs_deb_scan("SCAN_RESP: BSSID %pM\n", new.bssid);

		/* Copy the locally created newbssentry to the scan table;
		 * everything except the embedded list_head is copied */
		memcpy(found, &new, offsetof(struct bss_descriptor, list));
	}

	ret = 0;

done:
	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
	return ret;
}
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
deleted file mode 100644
index 8fb1706d7526..000000000000
--- a/drivers/net/wireless/libertas/scan.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/**
2 * Interface for the wlan network scan routines
3 *
4 * Driver interface functions and type declarations for the scan module
5 * implemented in scan.c.
6 */
7#ifndef _LBS_SCAN_H
8#define _LBS_SCAN_H
9
10#include <net/iw_handler.h>
11
12struct lbs_private;
13
14#define MAX_NETWORK_COUNT 128
15
16/** Chan-freq-TxPower mapping table*/
17struct chan_freq_power {
18 /** channel Number */
19 u16 channel;
20 /** frequency of this channel */
21 u32 freq;
22 /** Max allowed Tx power level */
23 u16 maxtxpower;
24 /** TRUE:channel unsupported; FLASE:supported*/
25 u8 unsupported;
26};
27
28/** region-band mapping table*/
29struct region_channel {
30 /** TRUE if this entry is valid */
31 u8 valid;
32 /** region code for US, Japan ... */
33 u8 region;
34 /** band B/G/A, used for BAND_CONFIG cmd */
35 u8 band;
36 /** Actual No. of elements in the array below */
37 u8 nrcfp;
38 /** chan-freq-txpower mapping table*/
39 struct chan_freq_power *CFP;
40};
41
42/**
43 * @brief Maximum number of channels that can be sent in a setuserscan ioctl
44 */
45#define LBS_IOCTL_USER_SCAN_CHAN_MAX 50
46
47int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
48
49int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
50
51int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
52 u8 ssid_len);
53
54int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
55 struct iw_point *dwrq, char *extra);
56int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
57 union iwreq_data *wrqu, char *extra);
58
59int lbs_scan_networks(struct lbs_private *priv, int full_scan);
60
61void lbs_scan_worker(struct work_struct *work);
62
63#endif
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index a9bf658659eb..8000ca6165d0 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -4,13 +4,13 @@
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <net/cfg80211.h>
7 8
8#include "host.h" 9#include "host.h"
9#include "radiotap.h" 10#include "radiotap.h"
10#include "decl.h" 11#include "decl.h"
11#include "defs.h" 12#include "defs.h"
12#include "dev.h" 13#include "dev.h"
13#include "wext.h"
14 14
15/** 15/**
16 * @brief This function converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE 16 * @brief This function converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE
@@ -111,7 +111,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
111 p802x_hdr = skb->data; 111 p802x_hdr = skb->data;
112 pkt_len = skb->len; 112 pkt_len = skb->len;
113 113
114 if (dev == priv->rtap_net_dev) { 114 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
115 struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; 115 struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;
116 116
117 /* set txpd fields from the radiotap header */ 117 /* set txpd fields from the radiotap header */
@@ -147,7 +147,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
147 dev->stats.tx_packets++; 147 dev->stats.tx_packets++;
148 dev->stats.tx_bytes += skb->len; 148 dev->stats.tx_bytes += skb->len;
149 149
150 if (priv->monitormode) { 150 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
151 /* Keep the skb to echo it back once Tx feedback is 151 /* Keep the skb to echo it back once Tx feedback is
152 received from FW */ 152 received from FW */
153 skb_orphan(skb); 153 skb_orphan(skb);
@@ -158,6 +158,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
158 free: 158 free:
159 dev_kfree_skb_any(skb); 159 dev_kfree_skb_any(skb);
160 } 160 }
161
161 unlock: 162 unlock:
162 spin_unlock_irqrestore(&priv->driver_lock, flags); 163 spin_unlock_irqrestore(&priv->driver_lock, flags);
163 wake_up(&priv->waitq); 164 wake_up(&priv->waitq);
@@ -179,7 +180,8 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
179{ 180{
180 struct tx_radiotap_hdr *radiotap_hdr; 181 struct tx_radiotap_hdr *radiotap_hdr;
181 182
182 if (!priv->monitormode || priv->currenttxskb == NULL) 183 if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR ||
184 priv->currenttxskb == NULL)
183 return; 185 return;
184 186
185 radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data; 187 radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data;
@@ -188,7 +190,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
188 (1 + priv->txretrycount - try_count) : 0; 190 (1 + priv->txretrycount - try_count) : 0;
189 191
190 priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb, 192 priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb,
191 priv->rtap_net_dev); 193 priv->dev);
192 netif_rx(priv->currenttxskb); 194 netif_rx(priv->currenttxskb);
193 195
194 priv->currenttxskb = NULL; 196 priv->currenttxskb = NULL;
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 3e72c86ceca8..462fbb4cb743 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -11,7 +11,7 @@
11struct ieee_ie_header { 11struct ieee_ie_header {
12 u8 id; 12 u8 id;
13 u8 len; 13 u8 len;
14} __attribute__ ((packed)); 14} __packed;
15 15
16struct ieee_ie_cf_param_set { 16struct ieee_ie_cf_param_set {
17 struct ieee_ie_header header; 17 struct ieee_ie_header header;
@@ -20,19 +20,19 @@ struct ieee_ie_cf_param_set {
20 u8 cfpperiod; 20 u8 cfpperiod;
21 __le16 cfpmaxduration; 21 __le16 cfpmaxduration;
22 __le16 cfpdurationremaining; 22 __le16 cfpdurationremaining;
23} __attribute__ ((packed)); 23} __packed;
24 24
25 25
26struct ieee_ie_ibss_param_set { 26struct ieee_ie_ibss_param_set {
27 struct ieee_ie_header header; 27 struct ieee_ie_header header;
28 28
29 __le16 atimwindow; 29 __le16 atimwindow;
30} __attribute__ ((packed)); 30} __packed;
31 31
32union ieee_ss_param_set { 32union ieee_ss_param_set {
33 struct ieee_ie_cf_param_set cf; 33 struct ieee_ie_cf_param_set cf;
34 struct ieee_ie_ibss_param_set ibss; 34 struct ieee_ie_ibss_param_set ibss;
35} __attribute__ ((packed)); 35} __packed;
36 36
37struct ieee_ie_fh_param_set { 37struct ieee_ie_fh_param_set {
38 struct ieee_ie_header header; 38 struct ieee_ie_header header;
@@ -41,18 +41,18 @@ struct ieee_ie_fh_param_set {
41 u8 hopset; 41 u8 hopset;
42 u8 hoppattern; 42 u8 hoppattern;
43 u8 hopindex; 43 u8 hopindex;
44} __attribute__ ((packed)); 44} __packed;
45 45
46struct ieee_ie_ds_param_set { 46struct ieee_ie_ds_param_set {
47 struct ieee_ie_header header; 47 struct ieee_ie_header header;
48 48
49 u8 channel; 49 u8 channel;
50} __attribute__ ((packed)); 50} __packed;
51 51
52union ieee_phy_param_set { 52union ieee_phy_param_set {
53 struct ieee_ie_fh_param_set fh; 53 struct ieee_ie_fh_param_set fh;
54 struct ieee_ie_ds_param_set ds; 54 struct ieee_ie_ds_param_set ds;
55} __attribute__ ((packed)); 55} __packed;
56 56
57/** TLV type ID definition */ 57/** TLV type ID definition */
58#define PROPRIETARY_TLV_BASE_ID 0x0100 58#define PROPRIETARY_TLV_BASE_ID 0x0100
@@ -100,28 +100,28 @@ union ieee_phy_param_set {
100struct mrvl_ie_header { 100struct mrvl_ie_header {
101 __le16 type; 101 __le16 type;
102 __le16 len; 102 __le16 len;
103} __attribute__ ((packed)); 103} __packed;
104 104
105struct mrvl_ie_data { 105struct mrvl_ie_data {
106 struct mrvl_ie_header header; 106 struct mrvl_ie_header header;
107 u8 Data[1]; 107 u8 Data[1];
108} __attribute__ ((packed)); 108} __packed;
109 109
110struct mrvl_ie_rates_param_set { 110struct mrvl_ie_rates_param_set {
111 struct mrvl_ie_header header; 111 struct mrvl_ie_header header;
112 u8 rates[1]; 112 u8 rates[1];
113} __attribute__ ((packed)); 113} __packed;
114 114
115struct mrvl_ie_ssid_param_set { 115struct mrvl_ie_ssid_param_set {
116 struct mrvl_ie_header header; 116 struct mrvl_ie_header header;
117 u8 ssid[1]; 117 u8 ssid[1];
118} __attribute__ ((packed)); 118} __packed;
119 119
120struct mrvl_ie_wildcard_ssid_param_set { 120struct mrvl_ie_wildcard_ssid_param_set {
121 struct mrvl_ie_header header; 121 struct mrvl_ie_header header;
122 u8 MaxSsidlength; 122 u8 MaxSsidlength;
123 u8 ssid[1]; 123 u8 ssid[1];
124} __attribute__ ((packed)); 124} __packed;
125 125
126struct chanscanmode { 126struct chanscanmode {
127#ifdef __BIG_ENDIAN_BITFIELD 127#ifdef __BIG_ENDIAN_BITFIELD
@@ -133,7 +133,7 @@ struct chanscanmode {
133 u8 disablechanfilt:1; 133 u8 disablechanfilt:1;
134 u8 reserved_2_7:6; 134 u8 reserved_2_7:6;
135#endif 135#endif
136} __attribute__ ((packed)); 136} __packed;
137 137
138struct chanscanparamset { 138struct chanscanparamset {
139 u8 radiotype; 139 u8 radiotype;
@@ -141,12 +141,12 @@ struct chanscanparamset {
141 struct chanscanmode chanscanmode; 141 struct chanscanmode chanscanmode;
142 __le16 minscantime; 142 __le16 minscantime;
143 __le16 maxscantime; 143 __le16 maxscantime;
144} __attribute__ ((packed)); 144} __packed;
145 145
146struct mrvl_ie_chanlist_param_set { 146struct mrvl_ie_chanlist_param_set {
147 struct mrvl_ie_header header; 147 struct mrvl_ie_header header;
148 struct chanscanparamset chanscanparam[1]; 148 struct chanscanparamset chanscanparam[1];
149} __attribute__ ((packed)); 149} __packed;
150 150
151struct mrvl_ie_cf_param_set { 151struct mrvl_ie_cf_param_set {
152 struct mrvl_ie_header header; 152 struct mrvl_ie_header header;
@@ -154,86 +154,86 @@ struct mrvl_ie_cf_param_set {
154 u8 cfpperiod; 154 u8 cfpperiod;
155 __le16 cfpmaxduration; 155 __le16 cfpmaxduration;
156 __le16 cfpdurationremaining; 156 __le16 cfpdurationremaining;
157} __attribute__ ((packed)); 157} __packed;
158 158
159struct mrvl_ie_ds_param_set { 159struct mrvl_ie_ds_param_set {
160 struct mrvl_ie_header header; 160 struct mrvl_ie_header header;
161 u8 channel; 161 u8 channel;
162} __attribute__ ((packed)); 162} __packed;
163 163
164struct mrvl_ie_rsn_param_set { 164struct mrvl_ie_rsn_param_set {
165 struct mrvl_ie_header header; 165 struct mrvl_ie_header header;
166 u8 rsnie[1]; 166 u8 rsnie[1];
167} __attribute__ ((packed)); 167} __packed;
168 168
169struct mrvl_ie_tsf_timestamp { 169struct mrvl_ie_tsf_timestamp {
170 struct mrvl_ie_header header; 170 struct mrvl_ie_header header;
171 __le64 tsftable[1]; 171 __le64 tsftable[1];
172} __attribute__ ((packed)); 172} __packed;
173 173
174/* v9 and later firmware only */ 174/* v9 and later firmware only */
175struct mrvl_ie_auth_type { 175struct mrvl_ie_auth_type {
176 struct mrvl_ie_header header; 176 struct mrvl_ie_header header;
177 __le16 auth; 177 __le16 auth;
178} __attribute__ ((packed)); 178} __packed;
179 179
180/** Local Power capability */ 180/** Local Power capability */
181struct mrvl_ie_power_capability { 181struct mrvl_ie_power_capability {
182 struct mrvl_ie_header header; 182 struct mrvl_ie_header header;
183 s8 minpower; 183 s8 minpower;
184 s8 maxpower; 184 s8 maxpower;
185} __attribute__ ((packed)); 185} __packed;
186 186
187/* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */ 187/* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */
188struct mrvl_ie_thresholds { 188struct mrvl_ie_thresholds {
189 struct mrvl_ie_header header; 189 struct mrvl_ie_header header;
190 u8 value; 190 u8 value;
191 u8 freq; 191 u8 freq;
192} __attribute__ ((packed)); 192} __packed;
193 193
194struct mrvl_ie_beacons_missed { 194struct mrvl_ie_beacons_missed {
195 struct mrvl_ie_header header; 195 struct mrvl_ie_header header;
196 u8 beaconmissed; 196 u8 beaconmissed;
197 u8 reserved; 197 u8 reserved;
198} __attribute__ ((packed)); 198} __packed;
199 199
200struct mrvl_ie_num_probes { 200struct mrvl_ie_num_probes {
201 struct mrvl_ie_header header; 201 struct mrvl_ie_header header;
202 __le16 numprobes; 202 __le16 numprobes;
203} __attribute__ ((packed)); 203} __packed;
204 204
205struct mrvl_ie_bcast_probe { 205struct mrvl_ie_bcast_probe {
206 struct mrvl_ie_header header; 206 struct mrvl_ie_header header;
207 __le16 bcastprobe; 207 __le16 bcastprobe;
208} __attribute__ ((packed)); 208} __packed;
209 209
210struct mrvl_ie_num_ssid_probe { 210struct mrvl_ie_num_ssid_probe {
211 struct mrvl_ie_header header; 211 struct mrvl_ie_header header;
212 __le16 numssidprobe; 212 __le16 numssidprobe;
213} __attribute__ ((packed)); 213} __packed;
214 214
215struct led_pin { 215struct led_pin {
216 u8 led; 216 u8 led;
217 u8 pin; 217 u8 pin;
218} __attribute__ ((packed)); 218} __packed;
219 219
220struct mrvl_ie_ledgpio { 220struct mrvl_ie_ledgpio {
221 struct mrvl_ie_header header; 221 struct mrvl_ie_header header;
222 struct led_pin ledpin[1]; 222 struct led_pin ledpin[1];
223} __attribute__ ((packed)); 223} __packed;
224 224
225struct led_bhv { 225struct led_bhv {
226 uint8_t firmwarestate; 226 uint8_t firmwarestate;
227 uint8_t led; 227 uint8_t led;
228 uint8_t ledstate; 228 uint8_t ledstate;
229 uint8_t ledarg; 229 uint8_t ledarg;
230} __attribute__ ((packed)); 230} __packed;
231 231
232 232
233struct mrvl_ie_ledbhv { 233struct mrvl_ie_ledbhv {
234 struct mrvl_ie_header header; 234 struct mrvl_ie_header header;
235 struct led_bhv ledbhv[1]; 235 struct led_bhv ledbhv[1];
236} __attribute__ ((packed)); 236} __packed;
237 237
238/* Meant to be packed as the value member of a struct ieee80211_info_element. 238/* Meant to be packed as the value member of a struct ieee80211_info_element.
239 * Note that the len member of the ieee80211_info_element varies depending on 239 * Note that the len member of the ieee80211_info_element varies depending on
@@ -248,12 +248,12 @@ struct mrvl_meshie_val {
248 uint8_t mesh_capability; 248 uint8_t mesh_capability;
249 uint8_t mesh_id_len; 249 uint8_t mesh_id_len;
250 uint8_t mesh_id[IEEE80211_MAX_SSID_LEN]; 250 uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
251} __attribute__ ((packed)); 251} __packed;
252 252
253struct mrvl_meshie { 253struct mrvl_meshie {
254 u8 id, len; 254 u8 id, len;
255 struct mrvl_meshie_val val; 255 struct mrvl_meshie_val val;
256} __attribute__ ((packed)); 256} __packed;
257 257
258struct mrvl_mesh_defaults { 258struct mrvl_mesh_defaults {
259 __le32 bootflag; 259 __le32 bootflag;
@@ -261,6 +261,6 @@ struct mrvl_mesh_defaults {
261 uint8_t reserved; 261 uint8_t reserved;
262 __le16 channel; 262 __le16 channel;
263 struct mrvl_meshie meshie; 263 struct mrvl_meshie meshie;
264} __attribute__ ((packed)); 264} __packed;
265 265
266#endif 266#endif
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
deleted file mode 100644
index f96a96031a50..000000000000
--- a/drivers/net/wireless/libertas/wext.c
+++ /dev/null
@@ -1,2353 +0,0 @@
1/**
2 * This file contains ioctl functions
3 */
4#include <linux/ctype.h>
5#include <linux/slab.h>
6#include <linux/delay.h>
7#include <linux/if.h>
8#include <linux/if_arp.h>
9#include <linux/wireless.h>
10#include <linux/bitops.h>
11
12#include <net/lib80211.h>
13#include <net/iw_handler.h>
14
15#include "host.h"
16#include "radiotap.h"
17#include "decl.h"
18#include "defs.h"
19#include "dev.h"
20#include "wext.h"
21#include "scan.h"
22#include "assoc.h"
23#include "cmd.h"
24
25
26static inline void lbs_postpone_association_work(struct lbs_private *priv)
27{
28 if (priv->surpriseremoved)
29 return;
30 cancel_delayed_work(&priv->assoc_work);
31 queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2);
32}
33
34static inline void lbs_do_association_work(struct lbs_private *priv)
35{
36 if (priv->surpriseremoved)
37 return;
38 cancel_delayed_work(&priv->assoc_work);
39 queue_delayed_work(priv->work_thread, &priv->assoc_work, 0);
40}
41
42static inline void lbs_cancel_association_work(struct lbs_private *priv)
43{
44 cancel_delayed_work(&priv->assoc_work);
45 kfree(priv->pending_assoc_req);
46 priv->pending_assoc_req = NULL;
47}
48
49void lbs_send_disconnect_notification(struct lbs_private *priv)
50{
51 union iwreq_data wrqu;
52
53 memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
54 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
55 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
56}
57
58static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
59{
60 union iwreq_data iwrq;
61 u8 buf[50];
62
63 lbs_deb_enter(LBS_DEB_WEXT);
64
65 memset(&iwrq, 0, sizeof(union iwreq_data));
66 memset(buf, 0, sizeof(buf));
67
68 snprintf(buf, sizeof(buf) - 1, "%s", str);
69
70 iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
71
72 /* Send Event to upper layer */
73 lbs_deb_wext("event indication string %s\n", (char *)buf);
74 lbs_deb_wext("event indication length %d\n", iwrq.data.length);
75 lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
76
77 wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
78
79 lbs_deb_leave(LBS_DEB_WEXT);
80}
81
82/**
83 * @brief This function handles MIC failure event.
84 *
85 * @param priv A pointer to struct lbs_private structure
86 * @para event the event id
87 * @return n/a
88 */
89void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
90{
91 char buf[50];
92
93 lbs_deb_enter(LBS_DEB_CMD);
94 memset(buf, 0, sizeof(buf));
95
96 sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
97
98 if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
99 strcat(buf, "unicast ");
100 else
101 strcat(buf, "multicast ");
102
103 lbs_send_iwevcustom_event(priv, buf);
104 lbs_deb_leave(LBS_DEB_CMD);
105}
106
107/**
108 * @brief Find the channel frequency power info with specific channel
109 *
110 * @param priv A pointer to struct lbs_private structure
111 * @param band it can be BAND_A, BAND_G or BAND_B
112 * @param channel the channel for looking
113 * @return A pointer to struct chan_freq_power structure or NULL if not find.
114 */
115struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
116 struct lbs_private *priv,
117 u8 band,
118 u16 channel)
119{
120 struct chan_freq_power *cfp = NULL;
121 struct region_channel *rc;
122 int i, j;
123
124 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
125 rc = &priv->region_channel[j];
126
127 if (!rc->valid || !rc->CFP)
128 continue;
129 if (rc->band != band)
130 continue;
131 for (i = 0; i < rc->nrcfp; i++) {
132 if (rc->CFP[i].channel == channel) {
133 cfp = &rc->CFP[i];
134 break;
135 }
136 }
137 }
138
139 if (!cfp && channel)
140 lbs_deb_wext("lbs_find_cfp_by_band_and_channel: can't find "
141 "cfp by band %d / channel %d\n", band, channel);
142
143 return cfp;
144}
145
146/**
147 * @brief Find the channel frequency power info with specific frequency
148 *
149 * @param priv A pointer to struct lbs_private structure
150 * @param band it can be BAND_A, BAND_G or BAND_B
151 * @param freq the frequency for looking
152 * @return A pointer to struct chan_freq_power structure or NULL if not find.
153 */
154static struct chan_freq_power *find_cfp_by_band_and_freq(
155 struct lbs_private *priv,
156 u8 band,
157 u32 freq)
158{
159 struct chan_freq_power *cfp = NULL;
160 struct region_channel *rc;
161 int i, j;
162
163 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
164 rc = &priv->region_channel[j];
165
166 if (!rc->valid || !rc->CFP)
167 continue;
168 if (rc->band != band)
169 continue;
170 for (i = 0; i < rc->nrcfp; i++) {
171 if (rc->CFP[i].freq == freq) {
172 cfp = &rc->CFP[i];
173 break;
174 }
175 }
176 }
177
178 if (!cfp && freq)
179 lbs_deb_wext("find_cfp_by_band_and_freql: can't find cfp by "
180 "band %d / freq %d\n", band, freq);
181
182 return cfp;
183}
184
185/**
186 * @brief Copy active data rates based on adapter mode and status
187 *
188 * @param priv A pointer to struct lbs_private structure
189 * @param rate The buf to return the active rates
190 */
191static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
192{
193 lbs_deb_enter(LBS_DEB_WEXT);
194
195 if ((priv->connect_status != LBS_CONNECTED) &&
196 !lbs_mesh_connected(priv))
197 memcpy(rates, lbs_bg_rates, MAX_RATES);
198 else
199 memcpy(rates, priv->curbssparams.rates, MAX_RATES);
200
201 lbs_deb_leave(LBS_DEB_WEXT);
202}
203
204static int lbs_get_name(struct net_device *dev, struct iw_request_info *info,
205 char *cwrq, char *extra)
206{
207
208 lbs_deb_enter(LBS_DEB_WEXT);
209
210 /* We could add support for 802.11n here as needed. Jean II */
211 snprintf(cwrq, IFNAMSIZ, "IEEE 802.11b/g");
212
213 lbs_deb_leave(LBS_DEB_WEXT);
214 return 0;
215}
216
217static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
218 struct iw_freq *fwrq, char *extra)
219{
220 struct lbs_private *priv = dev->ml_priv;
221 struct chan_freq_power *cfp;
222
223 lbs_deb_enter(LBS_DEB_WEXT);
224
225 cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
226 priv->channel);
227
228 if (!cfp) {
229 if (priv->channel)
230 lbs_deb_wext("invalid channel %d\n",
231 priv->channel);
232 return -EINVAL;
233 }
234
235 fwrq->m = (long)cfp->freq * 100000;
236 fwrq->e = 1;
237
238 lbs_deb_wext("freq %u\n", fwrq->m);
239 lbs_deb_leave(LBS_DEB_WEXT);
240 return 0;
241}
242
243static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info,
244 struct sockaddr *awrq, char *extra)
245{
246 struct lbs_private *priv = dev->ml_priv;
247
248 lbs_deb_enter(LBS_DEB_WEXT);
249
250 if (priv->connect_status == LBS_CONNECTED) {
251 memcpy(awrq->sa_data, priv->curbssparams.bssid, ETH_ALEN);
252 } else {
253 memset(awrq->sa_data, 0, ETH_ALEN);
254 }
255 awrq->sa_family = ARPHRD_ETHER;
256
257 lbs_deb_leave(LBS_DEB_WEXT);
258 return 0;
259}
260
261static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info,
262 struct iw_point *dwrq, char *extra)
263{
264 struct lbs_private *priv = dev->ml_priv;
265
266 lbs_deb_enter(LBS_DEB_WEXT);
267
268 /*
269 * Check the size of the string
270 */
271
272 if (dwrq->length > 16) {
273 return -E2BIG;
274 }
275
276 mutex_lock(&priv->lock);
277 memset(priv->nodename, 0, sizeof(priv->nodename));
278 memcpy(priv->nodename, extra, dwrq->length);
279 mutex_unlock(&priv->lock);
280
281 lbs_deb_leave(LBS_DEB_WEXT);
282 return 0;
283}
284
285static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
286 struct iw_point *dwrq, char *extra)
287{
288 struct lbs_private *priv = dev->ml_priv;
289
290 lbs_deb_enter(LBS_DEB_WEXT);
291
292 dwrq->length = strlen(priv->nodename);
293 memcpy(extra, priv->nodename, dwrq->length);
294 extra[dwrq->length] = '\0';
295
296 dwrq->flags = 1; /* active */
297
298 lbs_deb_leave(LBS_DEB_WEXT);
299 return 0;
300}
301
302#ifdef CONFIG_LIBERTAS_MESH
303static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
304 struct iw_point *dwrq, char *extra)
305{
306 struct lbs_private *priv = dev->ml_priv;
307
308 lbs_deb_enter(LBS_DEB_WEXT);
309
310 /* Use nickname to indicate that mesh is on */
311
312 if (lbs_mesh_connected(priv)) {
313 strncpy(extra, "Mesh", 12);
314 extra[12] = '\0';
315 dwrq->length = strlen(extra);
316 }
317
318 else {
319 extra[0] = '\0';
320 dwrq->length = 0;
321 }
322
323 lbs_deb_leave(LBS_DEB_WEXT);
324 return 0;
325}
326#endif
327
328static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
329 struct iw_param *vwrq, char *extra)
330{
331 int ret = 0;
332 struct lbs_private *priv = dev->ml_priv;
333 u32 val = vwrq->value;
334
335 lbs_deb_enter(LBS_DEB_WEXT);
336
337 if (vwrq->disabled)
338 val = MRVDRV_RTS_MAX_VALUE;
339
340 if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */
341 return -EINVAL;
342
343 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val);
344
345 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
346 return ret;
347}
348
349static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
350 struct iw_param *vwrq, char *extra)
351{
352 struct lbs_private *priv = dev->ml_priv;
353 int ret = 0;
354 u16 val = 0;
355
356 lbs_deb_enter(LBS_DEB_WEXT);
357
358 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val);
359 if (ret)
360 goto out;
361
362 vwrq->value = val;
363 vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */
364 vwrq->fixed = 1;
365
366out:
367 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
368 return ret;
369}
370
371static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
372 struct iw_param *vwrq, char *extra)
373{
374 struct lbs_private *priv = dev->ml_priv;
375 int ret = 0;
376 u32 val = vwrq->value;
377
378 lbs_deb_enter(LBS_DEB_WEXT);
379
380 if (vwrq->disabled)
381 val = MRVDRV_FRAG_MAX_VALUE;
382
383 if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE)
384 return -EINVAL;
385
386 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val);
387
388 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
389 return ret;
390}
391
392static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
393 struct iw_param *vwrq, char *extra)
394{
395 struct lbs_private *priv = dev->ml_priv;
396 int ret = 0;
397 u16 val = 0;
398
399 lbs_deb_enter(LBS_DEB_WEXT);
400
401 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val);
402 if (ret)
403 goto out;
404
405 vwrq->value = val;
406 vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE)
407 || (val > MRVDRV_FRAG_MAX_VALUE));
408 vwrq->fixed = 1;
409
410out:
411 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
412 return ret;
413}
414
415static int lbs_get_mode(struct net_device *dev,
416 struct iw_request_info *info, u32 * uwrq, char *extra)
417{
418 struct lbs_private *priv = dev->ml_priv;
419
420 lbs_deb_enter(LBS_DEB_WEXT);
421
422 *uwrq = priv->mode;
423
424 lbs_deb_leave(LBS_DEB_WEXT);
425 return 0;
426}
427
428#ifdef CONFIG_LIBERTAS_MESH
429static int mesh_wlan_get_mode(struct net_device *dev,
430 struct iw_request_info *info, u32 * uwrq,
431 char *extra)
432{
433 lbs_deb_enter(LBS_DEB_WEXT);
434
435 *uwrq = IW_MODE_REPEAT;
436
437 lbs_deb_leave(LBS_DEB_WEXT);
438 return 0;
439}
440#endif
441
442static int lbs_get_txpow(struct net_device *dev,
443 struct iw_request_info *info,
444 struct iw_param *vwrq, char *extra)
445{
446 struct lbs_private *priv = dev->ml_priv;
447 s16 curlevel = 0;
448 int ret = 0;
449
450 lbs_deb_enter(LBS_DEB_WEXT);
451
452 if (!priv->radio_on) {
453 lbs_deb_wext("tx power off\n");
454 vwrq->value = 0;
455 vwrq->disabled = 1;
456 goto out;
457 }
458
459 ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL);
460 if (ret)
461 goto out;
462
463 lbs_deb_wext("tx power level %d dbm\n", curlevel);
464 priv->txpower_cur = curlevel;
465
466 vwrq->value = curlevel;
467 vwrq->fixed = 1;
468 vwrq->disabled = 0;
469 vwrq->flags = IW_TXPOW_DBM;
470
471out:
472 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
473 return ret;
474}
475
476static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
477 struct iw_param *vwrq, char *extra)
478{
479 struct lbs_private *priv = dev->ml_priv;
480 int ret = 0;
481 u16 slimit = 0, llimit = 0;
482
483 lbs_deb_enter(LBS_DEB_WEXT);
484
485 if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
486 return -EOPNOTSUPP;
487
488 /* The MAC has a 4-bit Total_Tx_Count register
489 Total_Tx_Count = 1 + Tx_Retry_Count */
490#define TX_RETRY_MIN 0
491#define TX_RETRY_MAX 14
492 if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX)
493 return -EINVAL;
494
495 /* Add 1 to convert retry count to try count */
496 if (vwrq->flags & IW_RETRY_SHORT)
497 slimit = (u16) (vwrq->value + 1);
498 else if (vwrq->flags & IW_RETRY_LONG)
499 llimit = (u16) (vwrq->value + 1);
500 else
501 slimit = llimit = (u16) (vwrq->value + 1); /* set both */
502
503 if (llimit) {
504 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT,
505 llimit);
506 if (ret)
507 goto out;
508 }
509
510 if (slimit) {
511 /* txretrycount follows the short retry limit */
512 priv->txretrycount = slimit;
513 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT,
514 slimit);
515 if (ret)
516 goto out;
517 }
518
519out:
520 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
521 return ret;
522}
523
524static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info,
525 struct iw_param *vwrq, char *extra)
526{
527 struct lbs_private *priv = dev->ml_priv;
528 int ret = 0;
529 u16 val = 0;
530
531 lbs_deb_enter(LBS_DEB_WEXT);
532
533 vwrq->disabled = 0;
534
535 if (vwrq->flags & IW_RETRY_LONG) {
536 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val);
537 if (ret)
538 goto out;
539
540 /* Subtract 1 to convert try count to retry count */
541 vwrq->value = val - 1;
542 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
543 } else {
544 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val);
545 if (ret)
546 goto out;
547
548 /* txretry count follows the short retry limit */
549 priv->txretrycount = val;
550 /* Subtract 1 to convert try count to retry count */
551 vwrq->value = val - 1;
552 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
553 }
554
555out:
556 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
557 return ret;
558}
559
560static inline void sort_channels(struct iw_freq *freq, int num)
561{
562 int i, j;
563 struct iw_freq temp;
564
565 for (i = 0; i < num; i++)
566 for (j = i + 1; j < num; j++)
567 if (freq[i].i > freq[j].i) {
568 temp.i = freq[i].i;
569 temp.m = freq[i].m;
570
571 freq[i].i = freq[j].i;
572 freq[i].m = freq[j].m;
573
574 freq[j].i = temp.i;
575 freq[j].m = temp.m;
576 }
577}
578
579/* data rate listing
580 MULTI_BANDS:
581 abg a b b/g
582 Infra G(12) A(8) B(4) G(12)
583 Adhoc A+B(12) A(8) B(4) B(4)
584
585 non-MULTI_BANDS:
586 b b/g
587 Infra B(4) G(12)
588 Adhoc B(4) B(4)
589 */
590/**
591 * @brief Get Range Info
592 *
593 * @param dev A pointer to net_device structure
594 * @param info A pointer to iw_request_info structure
595 * @param vwrq A pointer to iw_param structure
596 * @param extra A pointer to extra data buf
597 * @return 0 --success, otherwise fail
598 */
599static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
600 struct iw_point *dwrq, char *extra)
601{
602 int i, j;
603 struct lbs_private *priv = dev->ml_priv;
604 struct iw_range *range = (struct iw_range *)extra;
605 struct chan_freq_power *cfp;
606 u8 rates[MAX_RATES + 1];
607
608 lbs_deb_enter(LBS_DEB_WEXT);
609
610 dwrq->length = sizeof(struct iw_range);
611 memset(range, 0, sizeof(struct iw_range));
612
613 range->min_nwid = 0;
614 range->max_nwid = 0;
615
616 memset(rates, 0, sizeof(rates));
617 copy_active_data_rates(priv, rates);
618 range->num_bitrates = strnlen(rates, IW_MAX_BITRATES);
619 for (i = 0; i < range->num_bitrates; i++)
620 range->bitrate[i] = rates[i] * 500000;
621 range->num_bitrates = i;
622 lbs_deb_wext("IW_MAX_BITRATES %d, num_bitrates %d\n", IW_MAX_BITRATES,
623 range->num_bitrates);
624
625 range->num_frequency = 0;
626
627 range->scan_capa = IW_SCAN_CAPA_ESSID;
628
629 for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
630 && (j < ARRAY_SIZE(priv->region_channel)); j++) {
631 cfp = priv->region_channel[j].CFP;
632 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
633 && priv->region_channel[j].valid
634 && cfp
635 && (i < priv->region_channel[j].nrcfp); i++) {
636 range->freq[range->num_frequency].i =
637 (long)cfp->channel;
638 range->freq[range->num_frequency].m =
639 (long)cfp->freq * 100000;
640 range->freq[range->num_frequency].e = 1;
641 cfp++;
642 range->num_frequency++;
643 }
644 }
645
646 lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
647 IW_MAX_FREQUENCIES, range->num_frequency);
648
649 range->num_channels = range->num_frequency;
650
651 sort_channels(&range->freq[0], range->num_frequency);
652
653 /*
654 * Set an indication of the max TCP throughput in bit/s that we can
655 * expect using this interface
656 */
657 if (i > 2)
658 range->throughput = 5000 * 1000;
659 else
660 range->throughput = 1500 * 1000;
661
662 range->min_rts = MRVDRV_RTS_MIN_VALUE;
663 range->max_rts = MRVDRV_RTS_MAX_VALUE;
664 range->min_frag = MRVDRV_FRAG_MIN_VALUE;
665 range->max_frag = MRVDRV_FRAG_MAX_VALUE;
666
667 range->encoding_size[0] = 5;
668 range->encoding_size[1] = 13;
669 range->num_encoding_sizes = 2;
670 range->max_encoding_tokens = 4;
671
672 /*
673 * Right now we support only "iwconfig ethX power on|off"
674 */
675 range->pm_capa = IW_POWER_ON;
676
677 /*
678 * Minimum version we recommend
679 */
680 range->we_version_source = 15;
681
682 /*
683 * Version we are compiled with
684 */
685 range->we_version_compiled = WIRELESS_EXT;
686
687 range->retry_capa = IW_RETRY_LIMIT;
688 range->retry_flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
689
690 range->min_retry = TX_RETRY_MIN;
691 range->max_retry = TX_RETRY_MAX;
692
693 /*
694 * Set the qual, level and noise range values
695 */
696 range->max_qual.qual = 100;
697 range->max_qual.level = 0;
698 range->max_qual.noise = 0;
699 range->max_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
700
701 range->avg_qual.qual = 70;
702 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
703 range->avg_qual.level = 0;
704 range->avg_qual.noise = 0;
705 range->avg_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
706
707 range->sensitivity = 0;
708
709 /* Setup the supported power level ranges */
710 memset(range->txpower, 0, sizeof(range->txpower));
711 range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE;
712 range->txpower[0] = priv->txpower_min;
713 range->txpower[1] = priv->txpower_max;
714 range->num_txpower = 2;
715
716 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
717 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
718 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
719 range->event_capa[1] = IW_EVENT_CAPA_K_1;
720
721 if (priv->fwcapinfo & FW_CAPINFO_WPA) {
722 range->enc_capa = IW_ENC_CAPA_WPA
723 | IW_ENC_CAPA_WPA2
724 | IW_ENC_CAPA_CIPHER_TKIP
725 | IW_ENC_CAPA_CIPHER_CCMP;
726 }
727
728 lbs_deb_leave(LBS_DEB_WEXT);
729 return 0;
730}
731
732static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
733 struct iw_param *vwrq, char *extra)
734{
735 struct lbs_private *priv = dev->ml_priv;
736 int ret = 0;
737
738 lbs_deb_enter(LBS_DEB_WEXT);
739
740 if (!(priv->fwcapinfo & FW_CAPINFO_PS)) {
741 if (vwrq->disabled)
742 return 0;
743 else
744 return -EINVAL;
745 }
746
747 /* PS is currently supported only in Infrastructure mode
748 * Remove this check if it is to be supported in IBSS mode also
749 */
750
751 if (vwrq->disabled) {
752 priv->psmode = LBS802_11POWERMODECAM;
753 if (priv->psstate != PS_STATE_FULL_POWER) {
754 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
755 }
756
757 return 0;
758 }
759
760 if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
761 lbs_deb_wext(
762 "setting power timeout is not supported\n");
763 return -EINVAL;
764 } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
765 vwrq->value = vwrq->value / 1000;
766 if (!priv->enter_deep_sleep) {
767 lbs_pr_err("deep sleep feature is not implemented "
768 "for this interface driver\n");
769 return -EINVAL;
770 }
771
772 if (priv->connect_status == LBS_CONNECTED) {
773 if ((priv->is_auto_deep_sleep_enabled) &&
774 (vwrq->value == -1000)) {
775 lbs_exit_auto_deep_sleep(priv);
776 return 0;
777 } else {
778 lbs_pr_err("can't use deep sleep cmd in "
779 "connected state\n");
780 return -EINVAL;
781 }
782 }
783
784 if ((vwrq->value < 0) && (vwrq->value != -1000)) {
785 lbs_pr_err("unknown option\n");
786 return -EINVAL;
787 }
788
789 if (vwrq->value > 0) {
790 if (!priv->is_auto_deep_sleep_enabled) {
791 priv->is_activity_detected = 0;
792 priv->auto_deep_sleep_timeout = vwrq->value;
793 lbs_enter_auto_deep_sleep(priv);
794 } else {
795 priv->auto_deep_sleep_timeout = vwrq->value;
796 lbs_deb_debugfs("auto deep sleep: "
797 "already enabled\n");
798 }
799 return 0;
800 } else {
801 if (priv->is_auto_deep_sleep_enabled) {
802 lbs_exit_auto_deep_sleep(priv);
803 /* Try to exit deep sleep if auto */
804 /*deep sleep disabled */
805 ret = lbs_set_deep_sleep(priv, 0);
806 }
807 if (vwrq->value == 0)
808 ret = lbs_set_deep_sleep(priv, 1);
809 else if (vwrq->value == -1000)
810 ret = lbs_set_deep_sleep(priv, 0);
811 return ret;
812 }
813 }
814
815 if (priv->psmode != LBS802_11POWERMODECAM) {
816 return 0;
817 }
818
819 priv->psmode = LBS802_11POWERMODEMAX_PSP;
820
821 if (priv->connect_status == LBS_CONNECTED) {
822 lbs_ps_sleep(priv, CMD_OPTION_WAITFORRSP);
823 }
824
825 lbs_deb_leave(LBS_DEB_WEXT);
826
827 return 0;
828}
829
830static int lbs_get_power(struct net_device *dev, struct iw_request_info *info,
831 struct iw_param *vwrq, char *extra)
832{
833 struct lbs_private *priv = dev->ml_priv;
834
835 lbs_deb_enter(LBS_DEB_WEXT);
836
837 vwrq->value = 0;
838 vwrq->flags = 0;
839 vwrq->disabled = priv->psmode == LBS802_11POWERMODECAM
840 || priv->connect_status == LBS_DISCONNECTED;
841
842 lbs_deb_leave(LBS_DEB_WEXT);
843 return 0;
844}
845
/*
 * Report wireless statistics for /proc/net/wireless and SIOCGIWSTATS.
 * Link quality is derived from two sources -- the beacon RSSI and the
 * firmware TX retry log -- and the worse of the two is reported.  When
 * not associated (and not mesh-connected) all values are flagged invalid.
 */
static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
{
	/* Quality thresholds, in percent */
	enum {
		POOR = 30,
		FAIR = 60,
		GOOD = 80,
		VERY_GOOD = 90,
		EXCELLENT = 95,
		PERFECT = 100
	};
	struct lbs_private *priv = dev->ml_priv;
	u32 rssi_qual;
	u32 tx_qual;
	u32 quality = 0;
	int ret, stats_valid = 0;
	u8 rssi;
	u32 tx_retries;
	struct cmd_ds_802_11_get_log log;

	lbs_deb_enter(LBS_DEB_WEXT);

	priv->wstats.status = priv->mode;

	/* If we're not associated, all quality values are meaningless */
	if ((priv->connect_status != LBS_CONNECTED) &&
	    !lbs_mesh_connected(priv))
		goto out;

	/* Quality by RSSI */
	priv->wstats.qual.level =
	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
	     priv->NF[TYPE_BEACON][TYPE_NOAVG]);

	/* A zero noise-floor reading means "no measurement yet"; fall back
	 * to the default scan value */
	if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) {
		priv->wstats.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE;
	} else {
		priv->wstats.qual.noise =
		    CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
	}

	lbs_deb_wext("signal level %#x\n", priv->wstats.qual.level);
	lbs_deb_wext("noise %#x\n", priv->wstats.qual.noise);

	/* Map the signal-to-noise margin onto a 0-100 quality scale,
	 * piecewise-linearly between the thresholds above */
	rssi = priv->wstats.qual.level - priv->wstats.qual.noise;
	if (rssi < 15)
		rssi_qual = rssi * POOR / 10;
	else if (rssi < 20)
		rssi_qual = (rssi - 15) * (FAIR - POOR) / 5 + POOR;
	else if (rssi < 30)
		rssi_qual = (rssi - 20) * (GOOD - FAIR) / 5 + FAIR;
	else if (rssi < 40)
		rssi_qual = (rssi - 30) * (VERY_GOOD - GOOD) /
		    10 + GOOD;
	else
		rssi_qual = (rssi - 40) * (PERFECT - VERY_GOOD) /
		    10 + VERY_GOOD;
	quality = rssi_qual;

	/* Quality by TX errors */
	priv->wstats.discard.retries = dev->stats.tx_errors;

	memset(&log, 0, sizeof(log));
	log.hdr.size = cpu_to_le16(sizeof(log));
	ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
	if (ret)
		goto out;

	tx_retries = le32_to_cpu(log.retry);

	/* NOTE(review): for tx_retries > 90 the first branch underflows in
	 * u32 -- presumably the firmware never reports more; confirm. */
	if (tx_retries > 75)
		tx_qual = (90 - tx_retries) * POOR / 15;
	else if (tx_retries > 70)
		tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR;
	else if (tx_retries > 65)
		tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR;
	else if (tx_retries > 50)
		tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) /
		    15 + GOOD;
	else
		tx_qual = (50 - tx_retries) *
		    (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
	quality = min(quality, tx_qual);

	priv->wstats.discard.code = le32_to_cpu(log.wepundecryptable);
	priv->wstats.discard.retries = tx_retries;
	priv->wstats.discard.misc = le32_to_cpu(log.ackfailure);

	/* Calculate quality */
	priv->wstats.qual.qual = min_t(u8, quality, 100);
	priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
	stats_valid = 1;

	/* update stats asynchronously for future calls */
	ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
					0, 0, NULL);
	if (ret)
		lbs_pr_err("RSSI command failed\n");
out:
	/* If anything above failed, mark every field invalid */
	if (!stats_valid) {
		priv->wstats.miss.beacon = 0;
		priv->wstats.discard.retries = 0;
		priv->wstats.qual.qual = 0;
		priv->wstats.qual.level = 0;
		priv->wstats.qual.noise = 0;
		priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED;
		priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID |
		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
	}

	lbs_deb_leave(LBS_DEB_WEXT);
	return &priv->wstats;


}
960
/*
 * SIOCSIWFREQ handler: select the channel for the next association.
 * Accepts either a frequency (converted to a channel number) or a
 * channel number directly; the change is recorded in the pending
 * association request and applied by the deferred association worker.
 */
static int lbs_set_freq(struct net_device *dev, struct iw_request_info *info,
		  struct iw_freq *fwrq, char *extra)
{
	int ret = -EINVAL;
	struct lbs_private *priv = dev->ml_priv;
	struct chan_freq_power *cfp;
	struct assoc_request * assoc_req;

	lbs_deb_enter(LBS_DEB_WEXT);

	mutex_lock(&priv->lock);
	assoc_req = lbs_get_association_request(priv);
	if (!assoc_req) {
		ret = -ENOMEM;
		goto out;
	}

	/* If setting by frequency, convert to a channel */
	if (fwrq->e == 1) {
		/* WEXT encodes freq as m * 10^e Hz; with e == 1 this
		 * division yields MHz */
		long f = fwrq->m / 100000;

		cfp = find_cfp_by_band_and_freq(priv, 0, f);
		if (!cfp) {
			lbs_deb_wext("invalid freq %ld\n", f);
			goto out;
		}

		fwrq->e = 0;
		fwrq->m = (int) cfp->channel;
	}

	/* Setting by channel number */
	if (fwrq->m > 1000 || fwrq->e > 0) {
		goto out;
	}

	cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m);
	if (!cfp) {
		goto out;
	}

	assoc_req->channel = fwrq->m;
	ret = 0;

out:
	/* On success schedule the deferred association work; otherwise
	 * drop any pending association request */
	if (ret == 0) {
		set_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags);
		lbs_postpone_association_work(priv);
	} else {
		lbs_cancel_association_work(priv);
	}
	mutex_unlock(&priv->lock);

	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
1017
#ifdef CONFIG_LIBERTAS_MESH
/*
 * SIOCSIWFREQ handler for the mesh interface: switch the mesh channel
 * immediately.  Unlike lbs_set_freq() this takes effect right away and
 * forcibly disconnects any infrastructure/ad-hoc link on the shared
 * radio, since both interfaces must use the same channel.
 */
static int lbs_mesh_set_freq(struct net_device *dev,
			     struct iw_request_info *info,
			     struct iw_freq *fwrq, char *extra)
{
	struct lbs_private *priv = dev->ml_priv;
	struct chan_freq_power *cfp;
	int ret = -EINVAL;

	lbs_deb_enter(LBS_DEB_WEXT);

	/* If setting by frequency, convert to a channel */
	if (fwrq->e == 1) {
		/* WEXT encodes freq as m * 10^e Hz; with e == 1 this
		 * division yields MHz */
		long f = fwrq->m / 100000;

		cfp = find_cfp_by_band_and_freq(priv, 0, f);
		if (!cfp) {
			lbs_deb_wext("invalid freq %ld\n", f);
			goto out;
		}

		fwrq->e = 0;
		fwrq->m = (int) cfp->channel;
	}

	/* Setting by channel number */
	if (fwrq->m > 1000 || fwrq->e > 0) {
		goto out;
	}

	cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m);
	if (!cfp) {
		goto out;
	}

	/* Changing channel invalidates the current association */
	if (fwrq->m != priv->channel) {
		lbs_deb_wext("mesh channel change forces eth disconnect\n");
		if (priv->mode == IW_MODE_INFRA)
			lbs_cmd_80211_deauthenticate(priv,
						     priv->curbssparams.bssid,
						     WLAN_REASON_DEAUTH_LEAVING);
		else if (priv->mode == IW_MODE_ADHOC)
			lbs_adhoc_stop(priv);
	}
	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
	lbs_update_channel(priv);
	ret = 0;

out:
	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
#endif
1071
/*
 * SIOCSIWRATE handler: select a fixed TX bitrate or enable automatic
 * rate selection (value == -1).  vwrq->value is in bits/second; the
 * firmware rate code is in units of 500 kb/s.
 */
static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
		  struct iw_param *vwrq, char *extra)
{
	struct lbs_private *priv = dev->ml_priv;
	u8 new_rate = 0;
	int ret = -EINVAL;
	u8 rates[MAX_RATES + 1];

	lbs_deb_enter(LBS_DEB_WEXT);

	lbs_deb_wext("vwrq->value %d\n", vwrq->value);
	lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);

	/* "fixed" with no specific rate is contradictory */
	if (vwrq->fixed && vwrq->value == -1)
		goto out;

	/* Auto rate? */
	priv->enablehwauto = !vwrq->fixed;

	if (vwrq->value == -1)
		priv->cur_rate = 0;
	else {
		/* Rates are multiples of 500 kb/s; anything else is bogus */
		if (vwrq->value % 100000)
			goto out;

		new_rate = vwrq->value / 500000;
		priv->cur_rate = new_rate;
		/* the rest is only needed for lbs_set_data_rate() */
		memset(rates, 0, sizeof(rates));
		copy_active_data_rates(priv, rates);
		if (!memchr(rates, new_rate, sizeof(rates))) {
			lbs_pr_alert("fixed data rate 0x%X out of range\n",
				     new_rate);
			goto out;
		}
		/* Older firmware uses power-adaptation; newer uses TPC only */
		if (priv->fwrelease < 0x09000000) {
			ret = lbs_set_power_adapt_cfg(priv, 0,
						      POW_ADAPT_DEFAULT_P0,
						      POW_ADAPT_DEFAULT_P1,
						      POW_ADAPT_DEFAULT_P2);
			if (ret)
				goto out;
		}
		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
				      TPC_DEFAULT_P2, 1);
		if (ret)
			goto out;
	}

	/* Try the newer command first (Firmware Spec 5.1 and above) */
	ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET);

	/* Fallback to older version */
	if (ret)
		ret = lbs_set_data_rate(priv, new_rate);

out:
	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
1132
1133static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info,
1134 struct iw_param *vwrq, char *extra)
1135{
1136 struct lbs_private *priv = dev->ml_priv;
1137
1138 lbs_deb_enter(LBS_DEB_WEXT);
1139
1140 if (priv->connect_status == LBS_CONNECTED) {
1141 vwrq->value = priv->cur_rate * 500000;
1142
1143 if (priv->enablehwauto)
1144 vwrq->fixed = 0;
1145 else
1146 vwrq->fixed = 1;
1147
1148 } else {
1149 vwrq->fixed = 0;
1150 vwrq->value = 0;
1151 }
1152
1153 lbs_deb_leave(LBS_DEB_WEXT);
1154 return 0;
1155}
1156
/*
 * SIOCSIWMODE handler: switch between ad-hoc, infrastructure and auto
 * mode.  The change is recorded in the pending association request and
 * applied by the deferred association worker.
 */
static int lbs_set_mode(struct net_device *dev,
		 struct iw_request_info *info, u32 * uwrq, char *extra)
{
	int ret = 0;
	struct lbs_private *priv = dev->ml_priv;
	struct assoc_request * assoc_req;

	lbs_deb_enter(LBS_DEB_WEXT);

	/* Only ad-hoc, infrastructure and auto are supported */
	if ( (*uwrq != IW_MODE_ADHOC)
	     && (*uwrq != IW_MODE_INFRA)
	     && (*uwrq != IW_MODE_AUTO)) {
		lbs_deb_wext("Invalid mode: 0x%x\n", *uwrq);
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&priv->lock);
	assoc_req = lbs_get_association_request(priv);
	if (!assoc_req) {
		ret = -ENOMEM;
		lbs_cancel_association_work(priv);
	} else {
		assoc_req->mode = *uwrq;
		set_bit(ASSOC_FLAG_MODE, &assoc_req->flags);
		lbs_postpone_association_work(priv);
		lbs_deb_wext("Switching to mode: 0x%x\n", *uwrq);
	}
	mutex_unlock(&priv->lock);

out:
	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
1191
1192
1193/**
1194 * @brief Get Encryption key
1195 *
1196 * @param dev A pointer to net_device structure
1197 * @param info A pointer to iw_request_info structure
1198 * @param vwrq A pointer to iw_param structure
1199 * @param extra A pointer to extra data buf
1200 * @return 0 --success, otherwise fail
1201 */
1202static int lbs_get_encode(struct net_device *dev,
1203 struct iw_request_info *info,
1204 struct iw_point *dwrq, u8 * extra)
1205{
1206 struct lbs_private *priv = dev->ml_priv;
1207 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1208
1209 lbs_deb_enter(LBS_DEB_WEXT);
1210
1211 lbs_deb_wext("flags 0x%x, index %d, length %d, wep_tx_keyidx %d\n",
1212 dwrq->flags, index, dwrq->length, priv->wep_tx_keyidx);
1213
1214 dwrq->flags = 0;
1215
1216 /* Authentication method */
1217 switch (priv->secinfo.auth_mode) {
1218 case IW_AUTH_ALG_OPEN_SYSTEM:
1219 dwrq->flags = IW_ENCODE_OPEN;
1220 break;
1221
1222 case IW_AUTH_ALG_SHARED_KEY:
1223 case IW_AUTH_ALG_LEAP:
1224 dwrq->flags = IW_ENCODE_RESTRICTED;
1225 break;
1226 default:
1227 dwrq->flags = IW_ENCODE_DISABLED | IW_ENCODE_OPEN;
1228 break;
1229 }
1230
1231 memset(extra, 0, 16);
1232
1233 mutex_lock(&priv->lock);
1234
1235 /* Default to returning current transmit key */
1236 if (index < 0)
1237 index = priv->wep_tx_keyidx;
1238
1239 if ((priv->wep_keys[index].len) && priv->secinfo.wep_enabled) {
1240 memcpy(extra, priv->wep_keys[index].key,
1241 priv->wep_keys[index].len);
1242 dwrq->length = priv->wep_keys[index].len;
1243
1244 dwrq->flags |= (index + 1);
1245 /* Return WEP enabled */
1246 dwrq->flags &= ~IW_ENCODE_DISABLED;
1247 } else if ((priv->secinfo.WPAenabled)
1248 || (priv->secinfo.WPA2enabled)) {
1249 /* return WPA enabled */
1250 dwrq->flags &= ~IW_ENCODE_DISABLED;
1251 dwrq->flags |= IW_ENCODE_NOKEY;
1252 } else {
1253 dwrq->flags |= IW_ENCODE_DISABLED;
1254 }
1255
1256 mutex_unlock(&priv->lock);
1257
1258 lbs_deb_wext("key: %02x:%02x:%02x:%02x:%02x:%02x, keylen %d\n",
1259 extra[0], extra[1], extra[2],
1260 extra[3], extra[4], extra[5], dwrq->length);
1261
1262 lbs_deb_wext("return flags 0x%x\n", dwrq->flags);
1263
1264 lbs_deb_leave(LBS_DEB_WEXT);
1265 return 0;
1266}
1267
1268/**
1269 * @brief Set Encryption key (internal)
1270 *
1271 * @param priv A pointer to private card structure
1272 * @param key_material A pointer to key material
1273 * @param key_length length of key material
1274 * @param index key index to set
1275 * @param set_tx_key Force set TX key (1 = yes, 0 = no)
1276 * @return 0 --success, otherwise fail
1277 */
1278static int lbs_set_wep_key(struct assoc_request *assoc_req,
1279 const char *key_material,
1280 u16 key_length,
1281 u16 index,
1282 int set_tx_key)
1283{
1284 int ret = 0;
1285 struct enc_key *pkey;
1286
1287 lbs_deb_enter(LBS_DEB_WEXT);
1288
1289 /* Paranoid validation of key index */
1290 if (index > 3) {
1291 ret = -EINVAL;
1292 goto out;
1293 }
1294
1295 /* validate max key length */
1296 if (key_length > KEY_LEN_WEP_104) {
1297 ret = -EINVAL;
1298 goto out;
1299 }
1300
1301 pkey = &assoc_req->wep_keys[index];
1302
1303 if (key_length > 0) {
1304 memset(pkey, 0, sizeof(struct enc_key));
1305 pkey->type = KEY_TYPE_ID_WEP;
1306
1307 /* Standardize the key length */
1308 pkey->len = (key_length > KEY_LEN_WEP_40) ?
1309 KEY_LEN_WEP_104 : KEY_LEN_WEP_40;
1310 memcpy(pkey->key, key_material, key_length);
1311 }
1312
1313 if (set_tx_key) {
1314 /* Ensure the chosen key is valid */
1315 if (!pkey->len) {
1316 lbs_deb_wext("key not set, so cannot enable it\n");
1317 ret = -EINVAL;
1318 goto out;
1319 }
1320 assoc_req->wep_tx_keyidx = index;
1321 }
1322
1323 assoc_req->secinfo.wep_enabled = 1;
1324
1325out:
1326 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1327 return ret;
1328}
1329
1330static int validate_key_index(u16 def_index, u16 raw_index,
1331 u16 *out_index, u16 *is_default)
1332{
1333 if (!out_index || !is_default)
1334 return -EINVAL;
1335
1336 /* Verify index if present, otherwise use default TX key index */
1337 if (raw_index > 0) {
1338 if (raw_index > 4)
1339 return -EINVAL;
1340 *out_index = raw_index - 1;
1341 } else {
1342 *out_index = def_index;
1343 *is_default = 1;
1344 }
1345 return 0;
1346}
1347
1348static void disable_wep(struct assoc_request *assoc_req)
1349{
1350 int i;
1351
1352 lbs_deb_enter(LBS_DEB_WEXT);
1353
1354 /* Set Open System auth mode */
1355 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1356
1357 /* Clear WEP keys and mark WEP as disabled */
1358 assoc_req->secinfo.wep_enabled = 0;
1359 for (i = 0; i < 4; i++)
1360 assoc_req->wep_keys[i].len = 0;
1361
1362 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
1363 set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
1364
1365 lbs_deb_leave(LBS_DEB_WEXT);
1366}
1367
1368static void disable_wpa(struct assoc_request *assoc_req)
1369{
1370 lbs_deb_enter(LBS_DEB_WEXT);
1371
1372 memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct enc_key));
1373 assoc_req->wpa_mcast_key.flags = KEY_INFO_WPA_MCAST;
1374 set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
1375
1376 memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct enc_key));
1377 assoc_req->wpa_unicast_key.flags = KEY_INFO_WPA_UNICAST;
1378 set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
1379
1380 assoc_req->secinfo.WPAenabled = 0;
1381 assoc_req->secinfo.WPA2enabled = 0;
1382 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
1383
1384 lbs_deb_leave(LBS_DEB_WEXT);
1385}
1386
1387/**
1388 * @brief Set Encryption key
1389 *
1390 * @param dev A pointer to net_device structure
1391 * @param info A pointer to iw_request_info structure
1392 * @param vwrq A pointer to iw_param structure
1393 * @param extra A pointer to extra data buf
1394 * @return 0 --success, otherwise fail
1395 */
1396static int lbs_set_encode(struct net_device *dev,
1397 struct iw_request_info *info,
1398 struct iw_point *dwrq, char *extra)
1399{
1400 int ret = 0;
1401 struct lbs_private *priv = dev->ml_priv;
1402 struct assoc_request * assoc_req;
1403 u16 is_default = 0, index = 0, set_tx_key = 0;
1404
1405 lbs_deb_enter(LBS_DEB_WEXT);
1406
1407 mutex_lock(&priv->lock);
1408 assoc_req = lbs_get_association_request(priv);
1409 if (!assoc_req) {
1410 ret = -ENOMEM;
1411 goto out;
1412 }
1413
1414 if (dwrq->flags & IW_ENCODE_DISABLED) {
1415 disable_wep (assoc_req);
1416 disable_wpa (assoc_req);
1417 goto out;
1418 }
1419
1420 ret = validate_key_index(assoc_req->wep_tx_keyidx,
1421 (dwrq->flags & IW_ENCODE_INDEX),
1422 &index, &is_default);
1423 if (ret) {
1424 ret = -EINVAL;
1425 goto out;
1426 }
1427
1428 /* If WEP isn't enabled, or if there is no key data but a valid
1429 * index, set the TX key.
1430 */
1431 if (!assoc_req->secinfo.wep_enabled || (dwrq->length == 0 && !is_default))
1432 set_tx_key = 1;
1433
1434 ret = lbs_set_wep_key(assoc_req, extra, dwrq->length, index, set_tx_key);
1435 if (ret)
1436 goto out;
1437
1438 if (dwrq->length)
1439 set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
1440 if (set_tx_key)
1441 set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
1442
1443 if (dwrq->flags & IW_ENCODE_RESTRICTED) {
1444 priv->authtype_auto = 0;
1445 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
1446 } else if (dwrq->flags & IW_ENCODE_OPEN) {
1447 priv->authtype_auto = 0;
1448 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1449 }
1450
1451out:
1452 if (ret == 0) {
1453 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
1454 lbs_postpone_association_work(priv);
1455 } else {
1456 lbs_cancel_association_work(priv);
1457 }
1458 mutex_unlock(&priv->lock);
1459
1460 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1461 return ret;
1462}
1463
1464/**
1465 * @brief Get Extended Encryption key (WPA/802.1x and WEP)
1466 *
1467 * @param dev A pointer to net_device structure
1468 * @param info A pointer to iw_request_info structure
1469 * @param vwrq A pointer to iw_param structure
1470 * @param extra A pointer to extra data buf
1471 * @return 0 on success, otherwise failure
1472 */
1473static int lbs_get_encodeext(struct net_device *dev,
1474 struct iw_request_info *info,
1475 struct iw_point *dwrq,
1476 char *extra)
1477{
1478 int ret = -EINVAL;
1479 struct lbs_private *priv = dev->ml_priv;
1480 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1481 int index, max_key_len;
1482
1483 lbs_deb_enter(LBS_DEB_WEXT);
1484
1485 max_key_len = dwrq->length - sizeof(*ext);
1486 if (max_key_len < 0)
1487 goto out;
1488
1489 index = dwrq->flags & IW_ENCODE_INDEX;
1490 if (index) {
1491 if (index < 1 || index > 4)
1492 goto out;
1493 index--;
1494 } else {
1495 index = priv->wep_tx_keyidx;
1496 }
1497
1498 if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
1499 ext->alg != IW_ENCODE_ALG_WEP) {
1500 if (index != 0 || priv->mode != IW_MODE_INFRA)
1501 goto out;
1502 }
1503
1504 dwrq->flags = index + 1;
1505 memset(ext, 0, sizeof(*ext));
1506
1507 if ( !priv->secinfo.wep_enabled
1508 && !priv->secinfo.WPAenabled
1509 && !priv->secinfo.WPA2enabled) {
1510 ext->alg = IW_ENCODE_ALG_NONE;
1511 ext->key_len = 0;
1512 dwrq->flags |= IW_ENCODE_DISABLED;
1513 } else {
1514 u8 *key = NULL;
1515
1516 if ( priv->secinfo.wep_enabled
1517 && !priv->secinfo.WPAenabled
1518 && !priv->secinfo.WPA2enabled) {
1519 /* WEP */
1520 ext->alg = IW_ENCODE_ALG_WEP;
1521 ext->key_len = priv->wep_keys[index].len;
1522 key = &priv->wep_keys[index].key[0];
1523 } else if ( !priv->secinfo.wep_enabled
1524 && (priv->secinfo.WPAenabled ||
1525 priv->secinfo.WPA2enabled)) {
1526 /* WPA */
1527 struct enc_key * pkey = NULL;
1528
1529 if ( priv->wpa_mcast_key.len
1530 && (priv->wpa_mcast_key.flags & KEY_INFO_WPA_ENABLED))
1531 pkey = &priv->wpa_mcast_key;
1532 else if ( priv->wpa_unicast_key.len
1533 && (priv->wpa_unicast_key.flags & KEY_INFO_WPA_ENABLED))
1534 pkey = &priv->wpa_unicast_key;
1535
1536 if (pkey) {
1537 if (pkey->type == KEY_TYPE_ID_AES) {
1538 ext->alg = IW_ENCODE_ALG_CCMP;
1539 } else {
1540 ext->alg = IW_ENCODE_ALG_TKIP;
1541 }
1542 ext->key_len = pkey->len;
1543 key = &pkey->key[0];
1544 } else {
1545 ext->alg = IW_ENCODE_ALG_TKIP;
1546 ext->key_len = 0;
1547 }
1548 } else {
1549 goto out;
1550 }
1551
1552 if (ext->key_len > max_key_len) {
1553 ret = -E2BIG;
1554 goto out;
1555 }
1556
1557 if (ext->key_len)
1558 memcpy(ext->key, key, ext->key_len);
1559 else
1560 dwrq->flags |= IW_ENCODE_NOKEY;
1561 dwrq->flags |= IW_ENCODE_ENABLED;
1562 }
1563 ret = 0;
1564
1565out:
1566 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1567 return ret;
1568}
1569
1570/**
1571 * @brief Set Encryption key Extended (WPA/802.1x and WEP)
1572 *
1573 * @param dev A pointer to net_device structure
1574 * @param info A pointer to iw_request_info structure
1575 * @param vwrq A pointer to iw_param structure
1576 * @param extra A pointer to extra data buf
1577 * @return 0 --success, otherwise fail
1578 */
1579static int lbs_set_encodeext(struct net_device *dev,
1580 struct iw_request_info *info,
1581 struct iw_point *dwrq,
1582 char *extra)
1583{
1584 int ret = 0;
1585 struct lbs_private *priv = dev->ml_priv;
1586 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1587 int alg = ext->alg;
1588 struct assoc_request * assoc_req;
1589
1590 lbs_deb_enter(LBS_DEB_WEXT);
1591
1592 mutex_lock(&priv->lock);
1593 assoc_req = lbs_get_association_request(priv);
1594 if (!assoc_req) {
1595 ret = -ENOMEM;
1596 goto out;
1597 }
1598
1599 if ((alg == IW_ENCODE_ALG_NONE) || (dwrq->flags & IW_ENCODE_DISABLED)) {
1600 disable_wep (assoc_req);
1601 disable_wpa (assoc_req);
1602 } else if (alg == IW_ENCODE_ALG_WEP) {
1603 u16 is_default = 0, index, set_tx_key = 0;
1604
1605 ret = validate_key_index(assoc_req->wep_tx_keyidx,
1606 (dwrq->flags & IW_ENCODE_INDEX),
1607 &index, &is_default);
1608 if (ret)
1609 goto out;
1610
1611 /* If WEP isn't enabled, or if there is no key data but a valid
1612 * index, or if the set-TX-key flag was passed, set the TX key.
1613 */
1614 if ( !assoc_req->secinfo.wep_enabled
1615 || (dwrq->length == 0 && !is_default)
1616 || (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY))
1617 set_tx_key = 1;
1618
1619 /* Copy key to driver */
1620 ret = lbs_set_wep_key(assoc_req, ext->key, ext->key_len, index,
1621 set_tx_key);
1622 if (ret)
1623 goto out;
1624
1625 if (dwrq->flags & IW_ENCODE_RESTRICTED) {
1626 priv->authtype_auto = 0;
1627 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
1628 } else if (dwrq->flags & IW_ENCODE_OPEN) {
1629 priv->authtype_auto = 0;
1630 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1631 }
1632
1633 /* Mark the various WEP bits as modified */
1634 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
1635 if (dwrq->length)
1636 set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
1637 if (set_tx_key)
1638 set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
1639 } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) {
1640 struct enc_key * pkey;
1641
1642 /* validate key length */
1643 if (((alg == IW_ENCODE_ALG_TKIP)
1644 && (ext->key_len != KEY_LEN_WPA_TKIP))
1645 || ((alg == IW_ENCODE_ALG_CCMP)
1646 && (ext->key_len != KEY_LEN_WPA_AES))) {
1647 lbs_deb_wext("invalid size %d for key of alg "
1648 "type %d\n",
1649 ext->key_len,
1650 alg);
1651 ret = -EINVAL;
1652 goto out;
1653 }
1654
1655 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
1656 pkey = &assoc_req->wpa_mcast_key;
1657 set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
1658 } else {
1659 pkey = &assoc_req->wpa_unicast_key;
1660 set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
1661 }
1662
1663 memset(pkey, 0, sizeof (struct enc_key));
1664 memcpy(pkey->key, ext->key, ext->key_len);
1665 pkey->len = ext->key_len;
1666 if (pkey->len)
1667 pkey->flags |= KEY_INFO_WPA_ENABLED;
1668
1669 /* Do this after zeroing key structure */
1670 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
1671 pkey->flags |= KEY_INFO_WPA_MCAST;
1672 } else {
1673 pkey->flags |= KEY_INFO_WPA_UNICAST;
1674 }
1675
1676 if (alg == IW_ENCODE_ALG_TKIP) {
1677 pkey->type = KEY_TYPE_ID_TKIP;
1678 } else if (alg == IW_ENCODE_ALG_CCMP) {
1679 pkey->type = KEY_TYPE_ID_AES;
1680 }
1681
1682 /* If WPA isn't enabled yet, do that now */
1683 if ( assoc_req->secinfo.WPAenabled == 0
1684 && assoc_req->secinfo.WPA2enabled == 0) {
1685 assoc_req->secinfo.WPAenabled = 1;
1686 assoc_req->secinfo.WPA2enabled = 1;
1687 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
1688 }
1689
1690 /* Only disable wep if necessary: can't waste time here. */
1691 if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE)
1692 disable_wep(assoc_req);
1693 }
1694
1695out:
1696 if (ret == 0) {
1697 /* 802.1x and WPA rekeying must happen as quickly as possible,
1698 * especially during the 4-way handshake; thus if in
1699 * infrastructure mode, and either (a) 802.1x is enabled or
1700 * (b) WPA is being used, set the key right away.
1701 */
1702 if (assoc_req->mode == IW_MODE_INFRA &&
1703 ((assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_802_1X) ||
1704 (assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_PSK) ||
1705 assoc_req->secinfo.WPAenabled ||
1706 assoc_req->secinfo.WPA2enabled)) {
1707 lbs_do_association_work(priv);
1708 } else
1709 lbs_postpone_association_work(priv);
1710 } else {
1711 lbs_cancel_association_work(priv);
1712 }
1713 mutex_unlock(&priv->lock);
1714
1715 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1716 return ret;
1717}
1718
1719
/*
 * SIOCSIWGENIE handler: store the WPA/RSN information element supplied
 * by userspace (e.g. wpa_supplicant) for use in the next association,
 * or clear the stored IE when a zero-length one is given.
 */
static int lbs_set_genie(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_point *dwrq,
			 char *extra)
{
	struct lbs_private *priv = dev->ml_priv;
	int ret = 0;
	struct assoc_request * assoc_req;

	lbs_deb_enter(LBS_DEB_WEXT);

	mutex_lock(&priv->lock);
	assoc_req = lbs_get_association_request(priv);
	if (!assoc_req) {
		ret = -ENOMEM;
		goto out;
	}

	/* Reject oversized IEs and a non-zero length with no data */
	if (dwrq->length > MAX_WPA_IE_LEN ||
	    (dwrq->length && extra == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (dwrq->length) {
		memcpy(&assoc_req->wpa_ie[0], extra, dwrq->length);
		assoc_req->wpa_ie_len = dwrq->length;
	} else {
		/* Zero length clears the stored IE */
		memset(&assoc_req->wpa_ie[0], 0, sizeof(priv->wpa_ie));
		assoc_req->wpa_ie_len = 0;
	}

out:
	/* On success schedule the deferred association work; otherwise
	 * drop any pending association request */
	if (ret == 0) {
		set_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags);
		lbs_postpone_association_work(priv);
	} else {
		lbs_cancel_association_work(priv);
	}
	mutex_unlock(&priv->lock);

	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
1764
1765static int lbs_get_genie(struct net_device *dev,
1766 struct iw_request_info *info,
1767 struct iw_point *dwrq,
1768 char *extra)
1769{
1770 int ret = 0;
1771 struct lbs_private *priv = dev->ml_priv;
1772
1773 lbs_deb_enter(LBS_DEB_WEXT);
1774
1775 if (priv->wpa_ie_len == 0) {
1776 dwrq->length = 0;
1777 goto out;
1778 }
1779
1780 if (dwrq->length < priv->wpa_ie_len) {
1781 ret = -E2BIG;
1782 goto out;
1783 }
1784
1785 dwrq->length = priv->wpa_ie_len;
1786 memcpy(extra, &priv->wpa_ie[0], priv->wpa_ie_len);
1787
1788out:
1789 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1790 return ret;
1791}
1792
1793
/*
 * SIOCSIWAUTH handler: update the security parameters (key management,
 * WPA version, auth algorithm, WPA enable) of the pending association
 * request.  Changes are applied by the deferred association worker;
 * unsupported parameters return -EOPNOTSUPP without cancelling it.
 */
static int lbs_set_auth(struct net_device *dev,
			struct iw_request_info *info,
			struct iw_param *dwrq,
			char *extra)
{
	struct lbs_private *priv = dev->ml_priv;
	struct assoc_request * assoc_req;
	int ret = 0;
	int updated = 0;

	lbs_deb_enter(LBS_DEB_WEXT);

	mutex_lock(&priv->lock);
	assoc_req = lbs_get_association_request(priv);
	if (!assoc_req) {
		ret = -ENOMEM;
		goto out;
	}

	switch (dwrq->flags & IW_AUTH_INDEX) {
	case IW_AUTH_PRIVACY_INVOKED:
	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
	case IW_AUTH_TKIP_COUNTERMEASURES:
	case IW_AUTH_CIPHER_PAIRWISE:
	case IW_AUTH_CIPHER_GROUP:
	case IW_AUTH_DROP_UNENCRYPTED:
		/*
		 * libertas does not use these parameters
		 */
		break;

	case IW_AUTH_KEY_MGMT:
		assoc_req->secinfo.key_mgmt = dwrq->value;
		updated = 1;
		break;

	case IW_AUTH_WPA_VERSION:
		/* The value is a bitmask; DISABLED clears WPA state,
		 * WPA/WPA2 each enable their version and turn WEP off */
		if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) {
			assoc_req->secinfo.WPAenabled = 0;
			assoc_req->secinfo.WPA2enabled = 0;
			disable_wpa (assoc_req);
		}
		if (dwrq->value & IW_AUTH_WPA_VERSION_WPA) {
			assoc_req->secinfo.WPAenabled = 1;
			assoc_req->secinfo.wep_enabled = 0;
			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
		}
		if (dwrq->value & IW_AUTH_WPA_VERSION_WPA2) {
			assoc_req->secinfo.WPA2enabled = 1;
			assoc_req->secinfo.wep_enabled = 0;
			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
		}
		updated = 1;
		break;

	case IW_AUTH_80211_AUTH_ALG:
		if (dwrq->value & IW_AUTH_ALG_SHARED_KEY) {
			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
		} else if (dwrq->value & IW_AUTH_ALG_OPEN_SYSTEM) {
			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
		} else if (dwrq->value & IW_AUTH_ALG_LEAP) {
			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_LEAP;
		} else {
			ret = -EINVAL;
		}
		updated = 1;
		break;

	case IW_AUTH_WPA_ENABLED:
		if (dwrq->value) {
			/* Enable both versions; the AP's capabilities decide
			 * which is actually used */
			if (!assoc_req->secinfo.WPAenabled &&
			    !assoc_req->secinfo.WPA2enabled) {
				assoc_req->secinfo.WPAenabled = 1;
				assoc_req->secinfo.WPA2enabled = 1;
				assoc_req->secinfo.wep_enabled = 0;
				assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
			}
		} else {
			assoc_req->secinfo.WPAenabled = 0;
			assoc_req->secinfo.WPA2enabled = 0;
			disable_wpa (assoc_req);
		}
		updated = 1;
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

out:
	/* Unsupported parameters (-EOPNOTSUPP) do not cancel the pending
	 * association; real failures do */
	if (ret == 0) {
		if (updated)
			set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
		lbs_postpone_association_work(priv);
	} else if (ret != -EOPNOTSUPP) {
		lbs_cancel_association_work(priv);
	}
	mutex_unlock(&priv->lock);

	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
1897
1898static int lbs_get_auth(struct net_device *dev,
1899 struct iw_request_info *info,
1900 struct iw_param *dwrq,
1901 char *extra)
1902{
1903 int ret = 0;
1904 struct lbs_private *priv = dev->ml_priv;
1905
1906 lbs_deb_enter(LBS_DEB_WEXT);
1907
1908 switch (dwrq->flags & IW_AUTH_INDEX) {
1909 case IW_AUTH_KEY_MGMT:
1910 dwrq->value = priv->secinfo.key_mgmt;
1911 break;
1912
1913 case IW_AUTH_WPA_VERSION:
1914 dwrq->value = 0;
1915 if (priv->secinfo.WPAenabled)
1916 dwrq->value |= IW_AUTH_WPA_VERSION_WPA;
1917 if (priv->secinfo.WPA2enabled)
1918 dwrq->value |= IW_AUTH_WPA_VERSION_WPA2;
1919 if (!dwrq->value)
1920 dwrq->value |= IW_AUTH_WPA_VERSION_DISABLED;
1921 break;
1922
1923 case IW_AUTH_80211_AUTH_ALG:
1924 dwrq->value = priv->secinfo.auth_mode;
1925 break;
1926
1927 case IW_AUTH_WPA_ENABLED:
1928 if (priv->secinfo.WPAenabled && priv->secinfo.WPA2enabled)
1929 dwrq->value = 1;
1930 break;
1931
1932 default:
1933 ret = -EOPNOTSUPP;
1934 }
1935
1936 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1937 return ret;
1938}
1939
1940
/*
 * SIOCSIWTXPOW handler: set the TX power level in dBm, switch to
 * automatic power control, or disable the radio entirely.  Older
 * firmware (< 9.0.0) also needs a power-adaptation config command;
 * newer firmware uses TPC configuration only.
 */
static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info,
		   struct iw_param *vwrq, char *extra)
{
	int ret = 0;
	struct lbs_private *priv = dev->ml_priv;
	s16 dbm = (s16) vwrq->value;

	lbs_deb_enter(LBS_DEB_WEXT);

	/* "txpower off" turns the radio off */
	if (vwrq->disabled) {
		lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0);
		goto out;
	}

	if (vwrq->fixed == 0) {
		/* User requests automatic tx power control, however there are
		 * many auto tx settings. For now use firmware defaults until
		 * we come up with a good way to expose these to the user. */
		if (priv->fwrelease < 0x09000000) {
			ret = lbs_set_power_adapt_cfg(priv, 1,
					POW_ADAPT_DEFAULT_P0,
					POW_ADAPT_DEFAULT_P1,
					POW_ADAPT_DEFAULT_P2);
			if (ret)
				goto out;
		}
		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
				TPC_DEFAULT_P2, 1);
		if (ret)
			goto out;
		dbm = priv->txpower_max;
	} else {
		/* Userspace check in iwrange if it should use dBm or mW,
		 * therefore this should never happen... Jean II */
		if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		/* Validate requested power level against firmware allowed
		 * levels */
		if (priv->txpower_min && (dbm < priv->txpower_min)) {
			ret = -EINVAL;
			goto out;
		}

		if (priv->txpower_max && (dbm > priv->txpower_max)) {
			ret = -EINVAL;
			goto out;
		}
		/* A fixed level disables both auto mechanisms */
		if (priv->fwrelease < 0x09000000) {
			ret = lbs_set_power_adapt_cfg(priv, 0,
					POW_ADAPT_DEFAULT_P0,
					POW_ADAPT_DEFAULT_P1,
					POW_ADAPT_DEFAULT_P2);
			if (ret)
				goto out;
		}
		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
				TPC_DEFAULT_P2, 1);
		if (ret)
			goto out;
	}

	/* If the radio was off, turn it on */
	if (!priv->radio_on) {
		ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1);
		if (ret)
			goto out;
	}

	lbs_deb_wext("txpower set %d dBm\n", dbm);

	ret = lbs_set_tx_power(priv, dbm);

out:
	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
2020
2021static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
2022 struct iw_point *dwrq, char *extra)
2023{
2024 struct lbs_private *priv = dev->ml_priv;
2025
2026 lbs_deb_enter(LBS_DEB_WEXT);
2027
2028 /*
2029 * Note : if dwrq->flags != 0, we should get the relevant SSID from
2030 * the SSID list...
2031 */
2032
2033 /*
2034 * Get the current SSID
2035 */
2036 if (priv->connect_status == LBS_CONNECTED) {
2037 memcpy(extra, priv->curbssparams.ssid,
2038 priv->curbssparams.ssid_len);
2039 } else {
2040 memset(extra, 0, 32);
2041 }
2042 /*
2043 * If none, we may want to get the one that was set
2044 */
2045
2046 dwrq->length = priv->curbssparams.ssid_len;
2047
2048 dwrq->flags = 1; /* active */
2049
2050 lbs_deb_leave(LBS_DEB_WEXT);
2051 return 0;
2052}
2053
/**
 *  @brief Set the SSID to associate with (SIOCSIWESSID handler)
 *
 *  Validates the requested SSID, then records it in the pending association
 *  request and schedules the association worker. An empty SSID (or
 *  dwrq->flags == 0) means "any" and leaves the SSID blank.
 *
 *  @param dev		A pointer to net_device structure
 *  @param info		A pointer to iw_request_info structure
 *  @param dwrq		Carries the SSID length and the any/specific flag
 *  @param extra	The SSID bytes from userspace
 *  @return 		0 on success, -EINVAL if radio off, -E2BIG if the
 *			SSID is too long, -ENOMEM on allocation failure
 */
static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
		 struct iw_point *dwrq, char *extra)
{
	struct lbs_private *priv = dev->ml_priv;
	int ret = 0;
	u8 ssid[IEEE80211_MAX_SSID_LEN];
	u8 ssid_len = 0;
	struct assoc_request * assoc_req;
	int in_ssid_len = dwrq->length;
	DECLARE_SSID_BUF(ssid_buf);

	lbs_deb_enter(LBS_DEB_WEXT);

	if (!priv->radio_on) {
		ret = -EINVAL;
		goto out;
	}

	/* Check the size of the string */
	if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
		ret = -E2BIG;
		goto out;
	}

	memset(&ssid, 0, sizeof(ssid));

	if (!dwrq->flags || !in_ssid_len) {
		/* "any" SSID requested; leave SSID blank */
	} else {
		/* Specific SSID requested */
		memcpy(&ssid, extra, in_ssid_len);
		ssid_len = in_ssid_len;
	}

	if (!ssid_len) {
		lbs_deb_wext("requested any SSID\n");
	} else {
		lbs_deb_wext("requested SSID '%s'\n",
		             print_ssid(ssid_buf, ssid, ssid_len));
	}

out:
	/* NOTE: the "out" label sits *before* the locked section on purpose:
	 * even on a validation error we take priv->lock so that any pending
	 * association work can be cancelled below. */
	mutex_lock(&priv->lock);
	if (ret == 0) {
		/* Get or create the current association request */
		assoc_req = lbs_get_association_request(priv);
		if (!assoc_req) {
			ret = -ENOMEM;
		} else {
			/* Copy the SSID to the association request */
			memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
			assoc_req->ssid_len = ssid_len;
			set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
			lbs_postpone_association_work(priv);
		}
	}

	/* Cancel the association request if there was an error */
	if (ret != 0) {
		lbs_cancel_association_work(priv);
	}

	mutex_unlock(&priv->lock);

	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
	return ret;
}
2121
2122#ifdef CONFIG_LIBERTAS_MESH
2123static int lbs_mesh_get_essid(struct net_device *dev,
2124 struct iw_request_info *info,
2125 struct iw_point *dwrq, char *extra)
2126{
2127 struct lbs_private *priv = dev->ml_priv;
2128
2129 lbs_deb_enter(LBS_DEB_WEXT);
2130
2131 memcpy(extra, priv->mesh_ssid, priv->mesh_ssid_len);
2132
2133 dwrq->length = priv->mesh_ssid_len;
2134
2135 dwrq->flags = 1; /* active */
2136
2137 lbs_deb_leave(LBS_DEB_WEXT);
2138 return 0;
2139}
2140
2141static int lbs_mesh_set_essid(struct net_device *dev,
2142 struct iw_request_info *info,
2143 struct iw_point *dwrq, char *extra)
2144{
2145 struct lbs_private *priv = dev->ml_priv;
2146 int ret = 0;
2147
2148 lbs_deb_enter(LBS_DEB_WEXT);
2149
2150 if (!priv->radio_on) {
2151 ret = -EINVAL;
2152 goto out;
2153 }
2154
2155 /* Check the size of the string */
2156 if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
2157 ret = -E2BIG;
2158 goto out;
2159 }
2160
2161 if (!dwrq->flags || !dwrq->length) {
2162 ret = -EINVAL;
2163 goto out;
2164 } else {
2165 /* Specific SSID requested */
2166 memcpy(priv->mesh_ssid, extra, dwrq->length);
2167 priv->mesh_ssid_len = dwrq->length;
2168 }
2169
2170 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
2171 priv->channel);
2172 out:
2173 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2174 return ret;
2175}
2176#endif
2177
2178/**
2179 * @brief Connect to the AP or Ad-hoc Network with specific bssid
2180 *
2181 * @param dev A pointer to net_device structure
2182 * @param info A pointer to iw_request_info structure
2183 * @param awrq A pointer to iw_param structure
2184 * @param extra A pointer to extra data buf
2185 * @return 0 --success, otherwise fail
2186 */
2187static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
2188 struct sockaddr *awrq, char *extra)
2189{
2190 struct lbs_private *priv = dev->ml_priv;
2191 struct assoc_request * assoc_req;
2192 int ret = 0;
2193
2194 lbs_deb_enter(LBS_DEB_WEXT);
2195
2196 if (!priv->radio_on)
2197 return -EINVAL;
2198
2199 if (awrq->sa_family != ARPHRD_ETHER)
2200 return -EINVAL;
2201
2202 lbs_deb_wext("ASSOC: WAP: sa_data %pM\n", awrq->sa_data);
2203
2204 mutex_lock(&priv->lock);
2205
2206 /* Get or create the current association request */
2207 assoc_req = lbs_get_association_request(priv);
2208 if (!assoc_req) {
2209 lbs_cancel_association_work(priv);
2210 ret = -ENOMEM;
2211 } else {
2212 /* Copy the BSSID to the association request */
2213 memcpy(&assoc_req->bssid, awrq->sa_data, ETH_ALEN);
2214 set_bit(ASSOC_FLAG_BSSID, &assoc_req->flags);
2215 lbs_postpone_association_work(priv);
2216 }
2217
2218 mutex_unlock(&priv->lock);
2219
2220 return ret;
2221}
2222
/*
 * iwconfig settable callbacks
 *
 * NOTE: this table is indexed positionally by the Wireless Extensions
 * core (ioctl number - SIOCIWFIRST); entries must stay in this exact
 * order, with NULL for unimplemented requests.
 */
static const iw_handler lbs_handler[] = {
	(iw_handler) NULL,	/* SIOCSIWCOMMIT */
	(iw_handler) lbs_get_name,	/* SIOCGIWNAME */
	(iw_handler) NULL,	/* SIOCSIWNWID */
	(iw_handler) NULL,	/* SIOCGIWNWID */
	(iw_handler) lbs_set_freq,	/* SIOCSIWFREQ */
	(iw_handler) lbs_get_freq,	/* SIOCGIWFREQ */
	(iw_handler) lbs_set_mode,	/* SIOCSIWMODE */
	(iw_handler) lbs_get_mode,	/* SIOCGIWMODE */
	(iw_handler) NULL,	/* SIOCSIWSENS */
	(iw_handler) NULL,	/* SIOCGIWSENS */
	(iw_handler) NULL,	/* SIOCSIWRANGE */
	(iw_handler) lbs_get_range,	/* SIOCGIWRANGE */
	(iw_handler) NULL,	/* SIOCSIWPRIV */
	(iw_handler) NULL,	/* SIOCGIWPRIV */
	(iw_handler) NULL,	/* SIOCSIWSTATS */
	(iw_handler) NULL,	/* SIOCGIWSTATS */
	iw_handler_set_spy,	/* SIOCSIWSPY */
	iw_handler_get_spy,	/* SIOCGIWSPY */
	iw_handler_set_thrspy,	/* SIOCSIWTHRSPY */
	iw_handler_get_thrspy,	/* SIOCGIWTHRSPY */
	(iw_handler) lbs_set_wap,	/* SIOCSIWAP */
	(iw_handler) lbs_get_wap,	/* SIOCGIWAP */
	(iw_handler) NULL,	/* SIOCSIWMLME */
	(iw_handler) NULL,	/* SIOCGIWAPLIST - deprecated */
	(iw_handler) lbs_set_scan,	/* SIOCSIWSCAN */
	(iw_handler) lbs_get_scan,	/* SIOCGIWSCAN */
	(iw_handler) lbs_set_essid,	/* SIOCSIWESSID */
	(iw_handler) lbs_get_essid,	/* SIOCGIWESSID */
	(iw_handler) lbs_set_nick,	/* SIOCSIWNICKN */
	(iw_handler) lbs_get_nick,	/* SIOCGIWNICKN */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) lbs_set_rate,	/* SIOCSIWRATE */
	(iw_handler) lbs_get_rate,	/* SIOCGIWRATE */
	(iw_handler) lbs_set_rts,	/* SIOCSIWRTS */
	(iw_handler) lbs_get_rts,	/* SIOCGIWRTS */
	(iw_handler) lbs_set_frag,	/* SIOCSIWFRAG */
	(iw_handler) lbs_get_frag,	/* SIOCGIWFRAG */
	(iw_handler) lbs_set_txpow,	/* SIOCSIWTXPOW */
	(iw_handler) lbs_get_txpow,	/* SIOCGIWTXPOW */
	(iw_handler) lbs_set_retry,	/* SIOCSIWRETRY */
	(iw_handler) lbs_get_retry,	/* SIOCGIWRETRY */
	(iw_handler) lbs_set_encode,	/* SIOCSIWENCODE */
	(iw_handler) lbs_get_encode,	/* SIOCGIWENCODE */
	(iw_handler) lbs_set_power,	/* SIOCSIWPOWER */
	(iw_handler) lbs_get_power,	/* SIOCGIWPOWER */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) lbs_set_genie,	/* SIOCSIWGENIE */
	(iw_handler) lbs_get_genie,	/* SIOCGIWGENIE */
	(iw_handler) lbs_set_auth,	/* SIOCSIWAUTH */
	(iw_handler) lbs_get_auth,	/* SIOCGIWAUTH */
	(iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
	(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
	(iw_handler) NULL,	/* SIOCSIWPMKSA */
};
/* Wireless Extensions handler table for the main (STA) interface. */
struct iw_handler_def lbs_handler_def = {
	.num_standard	= ARRAY_SIZE(lbs_handler),
	.standard	= (iw_handler *) lbs_handler,
	.get_wireless_stats = lbs_get_wireless_stats,
};
2288
2289#ifdef CONFIG_LIBERTAS_MESH
/*
 * iwconfig callbacks for the mesh interface.
 *
 * NOTE: indexed positionally by the Wireless Extensions core; keep the
 * entry order exactly in sync with the ioctl numbering. Mesh-specific
 * handlers replace the STA ones for FREQ/MODE/ESSID/NICKN.
 */
static const iw_handler mesh_wlan_handler[] = {
	(iw_handler) NULL,	/* SIOCSIWCOMMIT */
	(iw_handler) lbs_get_name,	/* SIOCGIWNAME */
	(iw_handler) NULL,	/* SIOCSIWNWID */
	(iw_handler) NULL,	/* SIOCGIWNWID */
	(iw_handler) lbs_mesh_set_freq,	/* SIOCSIWFREQ */
	(iw_handler) lbs_get_freq,	/* SIOCGIWFREQ */
	(iw_handler) NULL,	/* SIOCSIWMODE */
	(iw_handler) mesh_wlan_get_mode,	/* SIOCGIWMODE */
	(iw_handler) NULL,	/* SIOCSIWSENS */
	(iw_handler) NULL,	/* SIOCGIWSENS */
	(iw_handler) NULL,	/* SIOCSIWRANGE */
	(iw_handler) lbs_get_range,	/* SIOCGIWRANGE */
	(iw_handler) NULL,	/* SIOCSIWPRIV */
	(iw_handler) NULL,	/* SIOCGIWPRIV */
	(iw_handler) NULL,	/* SIOCSIWSTATS */
	(iw_handler) NULL,	/* SIOCGIWSTATS */
	iw_handler_set_spy,	/* SIOCSIWSPY */
	iw_handler_get_spy,	/* SIOCGIWSPY */
	iw_handler_set_thrspy,	/* SIOCSIWTHRSPY */
	iw_handler_get_thrspy,	/* SIOCGIWTHRSPY */
	(iw_handler) NULL,	/* SIOCSIWAP */
	(iw_handler) NULL,	/* SIOCGIWAP */
	(iw_handler) NULL,	/* SIOCSIWMLME */
	(iw_handler) NULL,	/* SIOCGIWAPLIST - deprecated */
	(iw_handler) lbs_set_scan,	/* SIOCSIWSCAN */
	(iw_handler) lbs_get_scan,	/* SIOCGIWSCAN */
	(iw_handler) lbs_mesh_set_essid,/* SIOCSIWESSID */
	(iw_handler) lbs_mesh_get_essid,/* SIOCGIWESSID */
	(iw_handler) NULL,	/* SIOCSIWNICKN */
	(iw_handler) mesh_get_nick,	/* SIOCGIWNICKN */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) lbs_set_rate,	/* SIOCSIWRATE */
	(iw_handler) lbs_get_rate,	/* SIOCGIWRATE */
	(iw_handler) lbs_set_rts,	/* SIOCSIWRTS */
	(iw_handler) lbs_get_rts,	/* SIOCGIWRTS */
	(iw_handler) lbs_set_frag,	/* SIOCSIWFRAG */
	(iw_handler) lbs_get_frag,	/* SIOCGIWFRAG */
	(iw_handler) lbs_set_txpow,	/* SIOCSIWTXPOW */
	(iw_handler) lbs_get_txpow,	/* SIOCGIWTXPOW */
	(iw_handler) lbs_set_retry,	/* SIOCSIWRETRY */
	(iw_handler) lbs_get_retry,	/* SIOCGIWRETRY */
	(iw_handler) lbs_set_encode,	/* SIOCSIWENCODE */
	(iw_handler) lbs_get_encode,	/* SIOCGIWENCODE */
	(iw_handler) lbs_set_power,	/* SIOCSIWPOWER */
	(iw_handler) lbs_get_power,	/* SIOCGIWPOWER */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) NULL,	/* -- hole -- */
	(iw_handler) lbs_set_genie,	/* SIOCSIWGENIE */
	(iw_handler) lbs_get_genie,	/* SIOCGIWGENIE */
	(iw_handler) lbs_set_auth,	/* SIOCSIWAUTH */
	(iw_handler) lbs_get_auth,	/* SIOCGIWAUTH */
	(iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
	(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
	(iw_handler) NULL,	/* SIOCSIWPMKSA */
};
2347
/* Wireless Extensions handler table for the mesh interface. */
struct iw_handler_def mesh_handler_def = {
	.num_standard	= ARRAY_SIZE(mesh_wlan_handler),
	.standard	= (iw_handler *) mesh_wlan_handler,
	.get_wireless_stats = lbs_get_wireless_stats,
};
2353#endif
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
deleted file mode 100644
index f3f19fe8c6c6..000000000000
--- a/drivers/net/wireless/libertas/wext.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/**
2 * This file contains definition for IOCTL call.
3 */
4#ifndef _LBS_WEXT_H_
5#define _LBS_WEXT_H_
6
7void lbs_send_disconnect_notification(struct lbs_private *priv);
8void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
9
10struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
11 struct lbs_private *priv,
12 u8 band,
13 u16 channel);
14
15extern struct iw_handler_def lbs_handler_def;
16
17#endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index c445500ffc61..b172f5d87a3b 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -538,7 +538,8 @@ static void if_usb_receive_fwload(struct urb *urb)
538 return; 538 return;
539 } 539 }
540 540
541 syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC); 541 syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
542 GFP_ATOMIC);
542 if (!syncfwheader) { 543 if (!syncfwheader) {
543 lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n"); 544 lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
544 kfree_skb(skb); 545 kfree_skb(skb);
@@ -546,8 +547,6 @@ static void if_usb_receive_fwload(struct urb *urb)
546 return; 547 return;
547 } 548 }
548 549
549 memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
550
551 if (!syncfwheader->cmd) { 550 if (!syncfwheader->cmd) {
552 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n"); 551 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
553 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n", 552 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index fbbaaae7a1ae..ad77b92d0b41 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -253,6 +253,9 @@ struct lbtf_private {
253 u8 fw_ready; 253 u8 fw_ready;
254 u8 surpriseremoved; 254 u8 surpriseremoved;
255 struct sk_buff_head bc_ps_buf; 255 struct sk_buff_head bc_ps_buf;
256
257 /* Most recently reported noise in dBm */
258 s8 noise;
256}; 259};
257 260
258/* 802.11-related definitions */ 261/* 802.11-related definitions */
@@ -316,7 +319,7 @@ struct cmd_header {
316 __le16 size; 319 __le16 size;
317 __le16 seqnum; 320 __le16 seqnum;
318 __le16 result; 321 __le16 result;
319} __attribute__ ((packed)); 322} __packed;
320 323
321struct cmd_ctrl_node { 324struct cmd_ctrl_node {
322 struct list_head list; 325 struct list_head list;
@@ -369,7 +372,7 @@ struct cmd_ds_get_hw_spec {
369 372
370 /*FW/HW capability */ 373 /*FW/HW capability */
371 __le32 fwcapinfo; 374 __le32 fwcapinfo;
372} __attribute__ ((packed)); 375} __packed;
373 376
374struct cmd_ds_mac_control { 377struct cmd_ds_mac_control {
375 struct cmd_header hdr; 378 struct cmd_header hdr;
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 817fffc0de4b..9278b3c8ee30 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -525,6 +525,22 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
525 lbtf_deb_leave(LBTF_DEB_MACOPS); 525 lbtf_deb_leave(LBTF_DEB_MACOPS);
526} 526}
527 527
528static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
529 struct survey_info *survey)
530{
531 struct lbtf_private *priv = hw->priv;
532 struct ieee80211_conf *conf = &hw->conf;
533
534 if (idx != 0)
535 return -ENOENT;
536
537 survey->channel = conf->channel;
538 survey->filled = SURVEY_INFO_NOISE_DBM;
539 survey->noise = priv->noise;
540
541 return 0;
542}
543
528static const struct ieee80211_ops lbtf_ops = { 544static const struct ieee80211_ops lbtf_ops = {
529 .tx = lbtf_op_tx, 545 .tx = lbtf_op_tx,
530 .start = lbtf_op_start, 546 .start = lbtf_op_start,
@@ -535,6 +551,7 @@ static const struct ieee80211_ops lbtf_ops = {
535 .prepare_multicast = lbtf_op_prepare_multicast, 551 .prepare_multicast = lbtf_op_prepare_multicast,
536 .configure_filter = lbtf_op_configure_filter, 552 .configure_filter = lbtf_op_configure_filter,
537 .bss_info_changed = lbtf_op_bss_info_changed, 553 .bss_info_changed = lbtf_op_bss_info_changed,
554 .get_survey = lbtf_op_get_survey,
538}; 555};
539 556
540int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) 557int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
@@ -555,6 +572,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
555 stats.freq = priv->cur_freq; 572 stats.freq = priv->cur_freq;
556 stats.band = IEEE80211_BAND_2GHZ; 573 stats.band = IEEE80211_BAND_2GHZ;
557 stats.signal = prxpd->snr; 574 stats.signal = prxpd->snr;
575 priv->noise = prxpd->nf;
558 /* Marvell rate index has a hole at value 4 */ 576 /* Marvell rate index has a hole at value 4 */
559 if (prxpd->rx_rate > 4) 577 if (prxpd->rx_rate > 4)
560 --prxpd->rx_rate; 578 --prxpd->rx_rate;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6f8cb3ee6fed..01ad7f77383a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -317,7 +317,7 @@ struct hwsim_radiotap_hdr {
317 u8 rt_rate; 317 u8 rt_rate;
318 __le16 rt_channel; 318 __le16 rt_channel;
319 __le16 rt_chbitmask; 319 __le16 rt_chbitmask;
320} __attribute__ ((packed)); 320} __packed;
321 321
322 322
323static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, 323static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
@@ -486,8 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
486 struct ieee80211_rx_status rx_status; 486 struct ieee80211_rx_status rx_status;
487 487
488 if (data->idle) { 488 if (data->idle) {
489 printk(KERN_DEBUG "%s: Trying to TX when idle - reject\n", 489 wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n");
490 wiphy_name(hw->wiphy));
491 return false; 490 return false;
492 } 491 }
493 492
@@ -576,7 +575,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
576static int mac80211_hwsim_start(struct ieee80211_hw *hw) 575static int mac80211_hwsim_start(struct ieee80211_hw *hw)
577{ 576{
578 struct mac80211_hwsim_data *data = hw->priv; 577 struct mac80211_hwsim_data *data = hw->priv;
579 printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); 578 wiphy_debug(hw->wiphy, "%s\n", __func__);
580 data->started = 1; 579 data->started = 1;
581 return 0; 580 return 0;
582} 581}
@@ -587,16 +586,15 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
587 struct mac80211_hwsim_data *data = hw->priv; 586 struct mac80211_hwsim_data *data = hw->priv;
588 data->started = 0; 587 data->started = 0;
589 del_timer(&data->beacon_timer); 588 del_timer(&data->beacon_timer);
590 printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); 589 wiphy_debug(hw->wiphy, "%s\n", __func__);
591} 590}
592 591
593 592
594static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, 593static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
595 struct ieee80211_vif *vif) 594 struct ieee80211_vif *vif)
596{ 595{
597 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 596 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
598 wiphy_name(hw->wiphy), __func__, vif->type, 597 __func__, vif->type, vif->addr);
599 vif->addr);
600 hwsim_set_magic(vif); 598 hwsim_set_magic(vif);
601 return 0; 599 return 0;
602} 600}
@@ -605,9 +603,8 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
605static void mac80211_hwsim_remove_interface( 603static void mac80211_hwsim_remove_interface(
606 struct ieee80211_hw *hw, struct ieee80211_vif *vif) 604 struct ieee80211_hw *hw, struct ieee80211_vif *vif)
607{ 605{
608 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 606 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
609 wiphy_name(hw->wiphy), __func__, vif->type, 607 __func__, vif->type, vif->addr);
610 vif->addr);
611 hwsim_check_magic(vif); 608 hwsim_check_magic(vif);
612 hwsim_clear_magic(vif); 609 hwsim_clear_magic(vif);
613} 610}
@@ -670,13 +667,14 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
670 [IEEE80211_SMPS_DYNAMIC] = "dynamic", 667 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
671 }; 668 };
672 669
673 printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n", 670 wiphy_debug(hw->wiphy,
674 wiphy_name(hw->wiphy), __func__, 671 "%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
675 conf->channel->center_freq, 672 __func__,
676 hwsim_chantypes[conf->channel_type], 673 conf->channel->center_freq,
677 !!(conf->flags & IEEE80211_CONF_IDLE), 674 hwsim_chantypes[conf->channel_type],
678 !!(conf->flags & IEEE80211_CONF_PS), 675 !!(conf->flags & IEEE80211_CONF_IDLE),
679 smps_modes[conf->smps_mode]); 676 !!(conf->flags & IEEE80211_CONF_PS),
677 smps_modes[conf->smps_mode]);
680 678
681 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 679 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
682 680
@@ -696,7 +694,7 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
696{ 694{
697 struct mac80211_hwsim_data *data = hw->priv; 695 struct mac80211_hwsim_data *data = hw->priv;
698 696
699 printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); 697 wiphy_debug(hw->wiphy, "%s\n", __func__);
700 698
701 data->rx_filter = 0; 699 data->rx_filter = 0;
702 if (*total_flags & FIF_PROMISC_IN_BSS) 700 if (*total_flags & FIF_PROMISC_IN_BSS)
@@ -717,26 +715,23 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
717 715
718 hwsim_check_magic(vif); 716 hwsim_check_magic(vif);
719 717
720 printk(KERN_DEBUG "%s:%s(changed=0x%x)\n", 718 wiphy_debug(hw->wiphy, "%s(changed=0x%x)\n", __func__, changed);
721 wiphy_name(hw->wiphy), __func__, changed);
722 719
723 if (changed & BSS_CHANGED_BSSID) { 720 if (changed & BSS_CHANGED_BSSID) {
724 printk(KERN_DEBUG "%s:%s: BSSID changed: %pM\n", 721 wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n",
725 wiphy_name(hw->wiphy), __func__, 722 __func__, info->bssid);
726 info->bssid);
727 memcpy(vp->bssid, info->bssid, ETH_ALEN); 723 memcpy(vp->bssid, info->bssid, ETH_ALEN);
728 } 724 }
729 725
730 if (changed & BSS_CHANGED_ASSOC) { 726 if (changed & BSS_CHANGED_ASSOC) {
731 printk(KERN_DEBUG " %s: ASSOC: assoc=%d aid=%d\n", 727 wiphy_debug(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
732 wiphy_name(hw->wiphy), info->assoc, info->aid); 728 info->assoc, info->aid);
733 vp->assoc = info->assoc; 729 vp->assoc = info->assoc;
734 vp->aid = info->aid; 730 vp->aid = info->aid;
735 } 731 }
736 732
737 if (changed & BSS_CHANGED_BEACON_INT) { 733 if (changed & BSS_CHANGED_BEACON_INT) {
738 printk(KERN_DEBUG " %s: BCNINT: %d\n", 734 wiphy_debug(hw->wiphy, " BCNINT: %d\n", info->beacon_int);
739 wiphy_name(hw->wiphy), info->beacon_int);
740 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; 735 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
741 if (WARN_ON(!data->beacon_int)) 736 if (WARN_ON(!data->beacon_int))
742 data->beacon_int = 1; 737 data->beacon_int = 1;
@@ -746,31 +741,28 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
746 } 741 }
747 742
748 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 743 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
749 printk(KERN_DEBUG " %s: ERP_CTS_PROT: %d\n", 744 wiphy_debug(hw->wiphy, " ERP_CTS_PROT: %d\n",
750 wiphy_name(hw->wiphy), info->use_cts_prot); 745 info->use_cts_prot);
751 } 746 }
752 747
753 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 748 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
754 printk(KERN_DEBUG " %s: ERP_PREAMBLE: %d\n", 749 wiphy_debug(hw->wiphy, " ERP_PREAMBLE: %d\n",
755 wiphy_name(hw->wiphy), info->use_short_preamble); 750 info->use_short_preamble);
756 } 751 }
757 752
758 if (changed & BSS_CHANGED_ERP_SLOT) { 753 if (changed & BSS_CHANGED_ERP_SLOT) {
759 printk(KERN_DEBUG " %s: ERP_SLOT: %d\n", 754 wiphy_debug(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot);
760 wiphy_name(hw->wiphy), info->use_short_slot);
761 } 755 }
762 756
763 if (changed & BSS_CHANGED_HT) { 757 if (changed & BSS_CHANGED_HT) {
764 printk(KERN_DEBUG " %s: HT: op_mode=0x%x, chantype=%s\n", 758 wiphy_debug(hw->wiphy, " HT: op_mode=0x%x, chantype=%s\n",
765 wiphy_name(hw->wiphy), 759 info->ht_operation_mode,
766 info->ht_operation_mode, 760 hwsim_chantypes[info->channel_type]);
767 hwsim_chantypes[info->channel_type]);
768 } 761 }
769 762
770 if (changed & BSS_CHANGED_BASIC_RATES) { 763 if (changed & BSS_CHANGED_BASIC_RATES) {
771 printk(KERN_DEBUG " %s: BASIC_RATES: 0x%llx\n", 764 wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n",
772 wiphy_name(hw->wiphy), 765 (unsigned long long) info->basic_rates);
773 (unsigned long long) info->basic_rates);
774 } 766 }
775} 767}
776 768
@@ -824,10 +816,11 @@ static int mac80211_hwsim_conf_tx(
824 struct ieee80211_hw *hw, u16 queue, 816 struct ieee80211_hw *hw, u16 queue,
825 const struct ieee80211_tx_queue_params *params) 817 const struct ieee80211_tx_queue_params *params)
826{ 818{
827 printk(KERN_DEBUG "%s:%s (queue=%d txop=%d cw_min=%d cw_max=%d " 819 wiphy_debug(hw->wiphy,
828 "aifs=%d)\n", 820 "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n",
829 wiphy_name(hw->wiphy), __func__, queue, 821 __func__, queue,
830 params->txop, params->cw_min, params->cw_max, params->aifs); 822 params->txop, params->cw_min,
823 params->cw_max, params->aifs);
831 return 0; 824 return 0;
832} 825}
833 826
@@ -837,8 +830,7 @@ static int mac80211_hwsim_get_survey(
837{ 830{
838 struct ieee80211_conf *conf = &hw->conf; 831 struct ieee80211_conf *conf = &hw->conf;
839 832
840 printk(KERN_DEBUG "%s:%s (idx=%d)\n", 833 wiphy_debug(hw->wiphy, "%s (idx=%d)\n", __func__, idx);
841 wiphy_name(hw->wiphy), __func__, idx);
842 834
843 if (idx != 0) 835 if (idx != 0)
844 return -ENOENT; 836 return -ENOENT;
@@ -1108,8 +1100,9 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1108 if (!vp->assoc) 1100 if (!vp->assoc)
1109 return; 1101 return;
1110 1102
1111 printk(KERN_DEBUG "%s:%s: send PS-Poll to %pM for aid %d\n", 1103 wiphy_debug(data->hw->wiphy,
1112 wiphy_name(data->hw->wiphy), __func__, vp->bssid, vp->aid); 1104 "%s: send PS-Poll to %pM for aid %d\n",
1105 __func__, vp->bssid, vp->aid);
1113 1106
1114 skb = dev_alloc_skb(sizeof(*pspoll)); 1107 skb = dev_alloc_skb(sizeof(*pspoll));
1115 if (!skb) 1108 if (!skb)
@@ -1137,8 +1130,9 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1137 if (!vp->assoc) 1130 if (!vp->assoc)
1138 return; 1131 return;
1139 1132
1140 printk(KERN_DEBUG "%s:%s: send data::nullfunc to %pM ps=%d\n", 1133 wiphy_debug(data->hw->wiphy,
1141 wiphy_name(data->hw->wiphy), __func__, vp->bssid, ps); 1134 "%s: send data::nullfunc to %pM ps=%d\n",
1135 __func__, vp->bssid, ps);
1142 1136
1143 skb = dev_alloc_skb(sizeof(*hdr)); 1137 skb = dev_alloc_skb(sizeof(*hdr));
1144 if (!skb) 1138 if (!skb)
@@ -1291,6 +1285,11 @@ static int __init init_mac80211_hwsim(void)
1291 hw->wiphy->n_addresses = 2; 1285 hw->wiphy->n_addresses = 2;
1292 hw->wiphy->addresses = data->addresses; 1286 hw->wiphy->addresses = data->addresses;
1293 1287
1288 if (fake_hw_scan) {
1289 hw->wiphy->max_scan_ssids = 255;
1290 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
1291 }
1292
1294 hw->channel_change_time = 1; 1293 hw->channel_change_time = 1;
1295 hw->queues = 4; 1294 hw->queues = 4;
1296 hw->wiphy->interface_modes = 1295 hw->wiphy->interface_modes =
@@ -1468,9 +1467,8 @@ static int __init init_mac80211_hwsim(void)
1468 break; 1467 break;
1469 } 1468 }
1470 1469
1471 printk(KERN_DEBUG "%s: hwaddr %pM registered\n", 1470 wiphy_debug(hw->wiphy, "hwaddr %pm registered\n",
1472 wiphy_name(hw->wiphy), 1471 hw->wiphy->perm_addr);
1473 hw->wiphy->perm_addr);
1474 1472
1475 data->debugfs = debugfs_create_dir("hwsim", 1473 data->debugfs = debugfs_create_dir("hwsim",
1476 hw->wiphy->debugfsdir); 1474 hw->wiphy->debugfsdir);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 808adb909095..d761ed2d8af4 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -86,7 +86,7 @@ struct rxd_ops {
86 void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr); 86 void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
87 void (*rxd_refill)(void *rxd, dma_addr_t addr, int len); 87 void (*rxd_refill)(void *rxd, dma_addr_t addr, int len);
88 int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status, 88 int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status,
89 __le16 *qos); 89 __le16 *qos, s8 *noise);
90}; 90};
91 91
92struct mwl8k_device_info { 92struct mwl8k_device_info {
@@ -109,7 +109,7 @@ struct mwl8k_rx_queue {
109 dma_addr_t rxd_dma; 109 dma_addr_t rxd_dma;
110 struct { 110 struct {
111 struct sk_buff *skb; 111 struct sk_buff *skb;
112 DECLARE_PCI_UNMAP_ADDR(dma) 112 DEFINE_DMA_UNMAP_ADDR(dma);
113 } *buf; 113 } *buf;
114}; 114};
115 115
@@ -207,6 +207,9 @@ struct mwl8k_priv {
207 207
208 /* Tasklet to perform RX. */ 208 /* Tasklet to perform RX. */
209 struct tasklet_struct poll_rx_task; 209 struct tasklet_struct poll_rx_task;
210
211 /* Most recently reported noise in dBm */
212 s8 noise;
210}; 213};
211 214
212/* Per interface specific private data */ 215/* Per interface specific private data */
@@ -314,13 +317,15 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
314#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ 317#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
315#define MWL8K_CMD_UPDATE_STADB 0x1123 318#define MWL8K_CMD_UPDATE_STADB 0x1123
316 319
317static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize) 320static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
318{ 321{
322 u16 command = le16_to_cpu(cmd);
323
319#define MWL8K_CMDNAME(x) case MWL8K_CMD_##x: do {\ 324#define MWL8K_CMDNAME(x) case MWL8K_CMD_##x: do {\
320 snprintf(buf, bufsize, "%s", #x);\ 325 snprintf(buf, bufsize, "%s", #x);\
321 return buf;\ 326 return buf;\
322 } while (0) 327 } while (0)
323 switch (cmd & ~0x8000) { 328 switch (command & ~0x8000) {
324 MWL8K_CMDNAME(CODE_DNLD); 329 MWL8K_CMDNAME(CODE_DNLD);
325 MWL8K_CMDNAME(GET_HW_SPEC); 330 MWL8K_CMDNAME(GET_HW_SPEC);
326 MWL8K_CMDNAME(SET_HW_SPEC); 331 MWL8K_CMDNAME(SET_HW_SPEC);
@@ -426,7 +431,7 @@ struct mwl8k_cmd_pkt {
426 __u8 macid; 431 __u8 macid;
427 __le16 result; 432 __le16 result;
428 char payload[0]; 433 char payload[0];
429} __attribute__((packed)); 434} __packed;
430 435
431/* 436/*
432 * Firmware loading. 437 * Firmware loading.
@@ -632,7 +637,7 @@ struct mwl8k_dma_data {
632 __le16 fwlen; 637 __le16 fwlen;
633 struct ieee80211_hdr wh; 638 struct ieee80211_hdr wh;
634 char data[0]; 639 char data[0];
635} __attribute__((packed)); 640} __packed;
636 641
637/* Routines to add/remove DMA header from skb. */ 642/* Routines to add/remove DMA header from skb. */
638static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) 643static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
@@ -711,7 +716,7 @@ struct mwl8k_rxd_8366_ap {
711 __u8 rx_status; 716 __u8 rx_status;
712 __u8 channel; 717 __u8 channel;
713 __u8 rx_ctrl; 718 __u8 rx_ctrl;
714} __attribute__((packed)); 719} __packed;
715 720
716#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 721#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
717#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 722#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
@@ -739,7 +744,7 @@ static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
739 744
740static int 745static int
741mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, 746mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
742 __le16 *qos) 747 __le16 *qos, s8 *noise)
743{ 748{
744 struct mwl8k_rxd_8366_ap *rxd = _rxd; 749 struct mwl8k_rxd_8366_ap *rxd = _rxd;
745 750
@@ -750,6 +755,7 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
750 memset(status, 0, sizeof(*status)); 755 memset(status, 0, sizeof(*status));
751 756
752 status->signal = -rxd->rssi; 757 status->signal = -rxd->rssi;
758 *noise = -rxd->noise_floor;
753 759
754 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { 760 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
755 status->flag |= RX_FLAG_HT; 761 status->flag |= RX_FLAG_HT;
@@ -806,7 +812,7 @@ struct mwl8k_rxd_sta {
806 __u8 rx_ctrl; 812 __u8 rx_ctrl;
807 __u8 rx_status; 813 __u8 rx_status;
808 __u8 pad2[2]; 814 __u8 pad2[2];
809} __attribute__((packed)); 815} __packed;
810 816
811#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000 817#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
812#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) 818#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
@@ -837,7 +843,7 @@ static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
837 843
838static int 844static int
839mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, 845mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
840 __le16 *qos) 846 __le16 *qos, s8 *noise)
841{ 847{
842 struct mwl8k_rxd_sta *rxd = _rxd; 848 struct mwl8k_rxd_sta *rxd = _rxd;
843 u16 rate_info; 849 u16 rate_info;
@@ -851,6 +857,7 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
851 memset(status, 0, sizeof(*status)); 857 memset(status, 0, sizeof(*status));
852 858
853 status->signal = -rxd->rssi; 859 status->signal = -rxd->rssi;
860 *noise = -rxd->noise_level;
854 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info); 861 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
855 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); 862 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
856 863
@@ -903,16 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
903 910
904 rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); 911 rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
905 if (rxq->rxd == NULL) { 912 if (rxq->rxd == NULL) {
906 printk(KERN_ERR "%s: failed to alloc RX descriptors\n", 913 wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n");
907 wiphy_name(hw->wiphy));
908 return -ENOMEM; 914 return -ENOMEM;
909 } 915 }
910 memset(rxq->rxd, 0, size); 916 memset(rxq->rxd, 0, size);
911 917
912 rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); 918 rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
913 if (rxq->buf == NULL) { 919 if (rxq->buf == NULL) {
914 printk(KERN_ERR "%s: failed to alloc RX skbuff list\n", 920 wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n");
915 wiphy_name(hw->wiphy));
916 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); 921 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
917 return -ENOMEM; 922 return -ENOMEM;
918 } 923 }
@@ -963,7 +968,7 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
963 if (rxq->tail == MWL8K_RX_DESCS) 968 if (rxq->tail == MWL8K_RX_DESCS)
964 rxq->tail = 0; 969 rxq->tail = 0;
965 rxq->buf[rx].skb = skb; 970 rxq->buf[rx].skb = skb;
966 pci_unmap_addr_set(&rxq->buf[rx], dma, addr); 971 dma_unmap_addr_set(&rxq->buf[rx], dma, addr);
967 972
968 rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size); 973 rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
969 priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ); 974 priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ);
@@ -984,9 +989,9 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
984 for (i = 0; i < MWL8K_RX_DESCS; i++) { 989 for (i = 0; i < MWL8K_RX_DESCS; i++) {
985 if (rxq->buf[i].skb != NULL) { 990 if (rxq->buf[i].skb != NULL) {
986 pci_unmap_single(priv->pdev, 991 pci_unmap_single(priv->pdev,
987 pci_unmap_addr(&rxq->buf[i], dma), 992 dma_unmap_addr(&rxq->buf[i], dma),
988 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); 993 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
989 pci_unmap_addr_set(&rxq->buf[i], dma, 0); 994 dma_unmap_addr_set(&rxq->buf[i], dma, 0);
990 995
991 kfree_skb(rxq->buf[i].skb); 996 kfree_skb(rxq->buf[i].skb);
992 rxq->buf[i].skb = NULL; 997 rxq->buf[i].skb = NULL;
@@ -1053,16 +1058,17 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1053 1058
1054 rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size); 1059 rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
1055 1060
1056 pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos); 1061 pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos,
1062 &priv->noise);
1057 if (pkt_len < 0) 1063 if (pkt_len < 0)
1058 break; 1064 break;
1059 1065
1060 rxq->buf[rxq->head].skb = NULL; 1066 rxq->buf[rxq->head].skb = NULL;
1061 1067
1062 pci_unmap_single(priv->pdev, 1068 pci_unmap_single(priv->pdev,
1063 pci_unmap_addr(&rxq->buf[rxq->head], dma), 1069 dma_unmap_addr(&rxq->buf[rxq->head], dma),
1064 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); 1070 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
1065 pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); 1071 dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
1066 1072
1067 rxq->head++; 1073 rxq->head++;
1068 if (rxq->head == MWL8K_RX_DESCS) 1074 if (rxq->head == MWL8K_RX_DESCS)
@@ -1120,7 +1126,7 @@ struct mwl8k_tx_desc {
1120 __le16 rate_info; 1126 __le16 rate_info;
1121 __u8 peer_id; 1127 __u8 peer_id;
1122 __u8 tx_frag_cnt; 1128 __u8 tx_frag_cnt;
1123} __attribute__((packed)); 1129} __packed;
1124 1130
1125#define MWL8K_TX_DESCS 128 1131#define MWL8K_TX_DESCS 128
1126 1132
@@ -1139,16 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
1139 1145
1140 txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); 1146 txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
1141 if (txq->txd == NULL) { 1147 if (txq->txd == NULL) {
1142 printk(KERN_ERR "%s: failed to alloc TX descriptors\n", 1148 wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n");
1143 wiphy_name(hw->wiphy));
1144 return -ENOMEM; 1149 return -ENOMEM;
1145 } 1150 }
1146 memset(txq->txd, 0, size); 1151 memset(txq->txd, 0, size);
1147 1152
1148 txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); 1153 txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
1149 if (txq->skb == NULL) { 1154 if (txq->skb == NULL) {
1150 printk(KERN_ERR "%s: failed to alloc TX skbuff list\n", 1155 wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n");
1151 wiphy_name(hw->wiphy));
1152 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); 1156 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
1153 return -ENOMEM; 1157 return -ENOMEM;
1154 } 1158 }
@@ -1204,11 +1208,12 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
1204 unused++; 1208 unused++;
1205 } 1209 }
1206 1210
1207 printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d " 1211 wiphy_err(hw->wiphy,
1208 "fw_owned=%d drv_owned=%d unused=%d\n", 1212 "txq[%d] len=%d head=%d tail=%d "
1209 wiphy_name(hw->wiphy), i, 1213 "fw_owned=%d drv_owned=%d unused=%d\n",
1210 txq->len, txq->head, txq->tail, 1214 i,
1211 fw_owned, drv_owned, unused); 1215 txq->len, txq->head, txq->tail,
1216 fw_owned, drv_owned, unused);
1212 } 1217 }
1213} 1218}
1214 1219
@@ -1252,25 +1257,23 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1252 if (timeout) { 1257 if (timeout) {
1253 WARN_ON(priv->pending_tx_pkts); 1258 WARN_ON(priv->pending_tx_pkts);
1254 if (retry) { 1259 if (retry) {
1255 printk(KERN_NOTICE "%s: tx rings drained\n", 1260 wiphy_notice(hw->wiphy, "tx rings drained\n");
1256 wiphy_name(hw->wiphy));
1257 } 1261 }
1258 break; 1262 break;
1259 } 1263 }
1260 1264
1261 if (priv->pending_tx_pkts < oldcount) { 1265 if (priv->pending_tx_pkts < oldcount) {
1262 printk(KERN_NOTICE "%s: waiting for tx rings " 1266 wiphy_notice(hw->wiphy,
1263 "to drain (%d -> %d pkts)\n", 1267 "waiting for tx rings to drain (%d -> %d pkts)\n",
1264 wiphy_name(hw->wiphy), oldcount, 1268 oldcount, priv->pending_tx_pkts);
1265 priv->pending_tx_pkts);
1266 retry = 1; 1269 retry = 1;
1267 continue; 1270 continue;
1268 } 1271 }
1269 1272
1270 priv->tx_wait = NULL; 1273 priv->tx_wait = NULL;
1271 1274
1272 printk(KERN_ERR "%s: tx rings stuck for %d ms\n", 1275 wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n",
1273 wiphy_name(hw->wiphy), MWL8K_TX_WAIT_TIMEOUT_MS); 1276 MWL8K_TX_WAIT_TIMEOUT_MS);
1274 mwl8k_dump_tx_rings(hw); 1277 mwl8k_dump_tx_rings(hw);
1275 1278
1276 rc = -ETIMEDOUT; 1279 rc = -ETIMEDOUT;
@@ -1421,8 +1424,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1421 skb->len, PCI_DMA_TODEVICE); 1424 skb->len, PCI_DMA_TODEVICE);
1422 1425
1423 if (pci_dma_mapping_error(priv->pdev, dma)) { 1426 if (pci_dma_mapping_error(priv->pdev, dma)) {
1424 printk(KERN_DEBUG "%s: failed to dma map skb, " 1427 wiphy_debug(hw->wiphy,
1425 "dropping TX frame.\n", wiphy_name(hw->wiphy)); 1428 "failed to dma map skb, dropping TX frame.\n");
1426 dev_kfree_skb(skb); 1429 dev_kfree_skb(skb);
1427 return NETDEV_TX_OK; 1430 return NETDEV_TX_OK;
1428 } 1431 }
@@ -1538,7 +1541,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1538 unsigned long timeout = 0; 1541 unsigned long timeout = 0;
1539 u8 buf[32]; 1542 u8 buf[32];
1540 1543
1541 cmd->result = 0xffff; 1544 cmd->result = (__force __le16) 0xffff;
1542 dma_size = le16_to_cpu(cmd->length); 1545 dma_size = le16_to_cpu(cmd->length);
1543 dma_addr = pci_map_single(priv->pdev, cmd, dma_size, 1546 dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
1544 PCI_DMA_BIDIRECTIONAL); 1547 PCI_DMA_BIDIRECTIONAL);
@@ -1570,10 +1573,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1570 PCI_DMA_BIDIRECTIONAL); 1573 PCI_DMA_BIDIRECTIONAL);
1571 1574
1572 if (!timeout) { 1575 if (!timeout) {
1573 printk(KERN_ERR "%s: Command %s timeout after %u ms\n", 1576 wiphy_err(hw->wiphy, "command %s timeout after %u ms\n",
1574 wiphy_name(hw->wiphy), 1577 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1575 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1578 MWL8K_CMD_TIMEOUT_MS);
1576 MWL8K_CMD_TIMEOUT_MS);
1577 rc = -ETIMEDOUT; 1579 rc = -ETIMEDOUT;
1578 } else { 1580 } else {
1579 int ms; 1581 int ms;
@@ -1582,15 +1584,14 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1582 1584
1583 rc = cmd->result ? -EINVAL : 0; 1585 rc = cmd->result ? -EINVAL : 0;
1584 if (rc) 1586 if (rc)
1585 printk(KERN_ERR "%s: Command %s error 0x%x\n", 1587 wiphy_err(hw->wiphy, "command %s error 0x%x\n",
1586 wiphy_name(hw->wiphy), 1588 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1587 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1589 le16_to_cpu(cmd->result));
1588 le16_to_cpu(cmd->result));
1589 else if (ms > 2000) 1590 else if (ms > 2000)
1590 printk(KERN_NOTICE "%s: Command %s took %d ms\n", 1591 wiphy_notice(hw->wiphy, "command %s took %d ms\n",
1591 wiphy_name(hw->wiphy), 1592 mwl8k_cmd_name(cmd->code,
1592 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1593 buf, sizeof(buf)),
1593 ms); 1594 ms);
1594 } 1595 }
1595 1596
1596 return rc; 1597 return rc;
@@ -1666,7 +1667,7 @@ struct mwl8k_cmd_get_hw_spec_sta {
1666 __le32 caps2; 1667 __le32 caps2;
1667 __le32 num_tx_desc_per_queue; 1668 __le32 num_tx_desc_per_queue;
1668 __le32 total_rxd; 1669 __le32 total_rxd;
1669} __attribute__((packed)); 1670} __packed;
1670 1671
1671#define MWL8K_CAP_MAX_AMSDU 0x20000000 1672#define MWL8K_CAP_MAX_AMSDU 0x20000000
1672#define MWL8K_CAP_GREENFIELD 0x08000000 1673#define MWL8K_CAP_GREENFIELD 0x08000000
@@ -1810,7 +1811,7 @@ struct mwl8k_cmd_get_hw_spec_ap {
1810 __le32 wcbbase1; 1811 __le32 wcbbase1;
1811 __le32 wcbbase2; 1812 __le32 wcbbase2;
1812 __le32 wcbbase3; 1813 __le32 wcbbase3;
1813} __attribute__((packed)); 1814} __packed;
1814 1815
1815static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) 1816static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1816{ 1817{
@@ -1842,22 +1843,22 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1842 priv->sta_macids_supported = 0x00000000; 1843 priv->sta_macids_supported = 0x00000000;
1843 1844
1844 off = le32_to_cpu(cmd->wcbbase0) & 0xffff; 1845 off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
1845 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off); 1846 iowrite32(priv->txq[0].txd_dma, priv->sram + off);
1846 1847
1847 off = le32_to_cpu(cmd->rxwrptr) & 0xffff; 1848 off = le32_to_cpu(cmd->rxwrptr) & 0xffff;
1848 iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off); 1849 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
1849 1850
1850 off = le32_to_cpu(cmd->rxrdptr) & 0xffff; 1851 off = le32_to_cpu(cmd->rxrdptr) & 0xffff;
1851 iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off); 1852 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
1852 1853
1853 off = le32_to_cpu(cmd->wcbbase1) & 0xffff; 1854 off = le32_to_cpu(cmd->wcbbase1) & 0xffff;
1854 iowrite32(cpu_to_le32(priv->txq[1].txd_dma), priv->sram + off); 1855 iowrite32(priv->txq[1].txd_dma, priv->sram + off);
1855 1856
1856 off = le32_to_cpu(cmd->wcbbase2) & 0xffff; 1857 off = le32_to_cpu(cmd->wcbbase2) & 0xffff;
1857 iowrite32(cpu_to_le32(priv->txq[2].txd_dma), priv->sram + off); 1858 iowrite32(priv->txq[2].txd_dma, priv->sram + off);
1858 1859
1859 off = le32_to_cpu(cmd->wcbbase3) & 0xffff; 1860 off = le32_to_cpu(cmd->wcbbase3) & 0xffff;
1860 iowrite32(cpu_to_le32(priv->txq[3].txd_dma), priv->sram + off); 1861 iowrite32(priv->txq[3].txd_dma, priv->sram + off);
1861 } 1862 }
1862 1863
1863 kfree(cmd); 1864 kfree(cmd);
@@ -1883,7 +1884,7 @@ struct mwl8k_cmd_set_hw_spec {
1883 __le32 flags; 1884 __le32 flags;
1884 __le32 num_tx_desc_per_queue; 1885 __le32 num_tx_desc_per_queue;
1885 __le32 total_rxd; 1886 __le32 total_rxd;
1886} __attribute__((packed)); 1887} __packed;
1887 1888
1888#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 1889#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
1889#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 1890#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
@@ -1985,7 +1986,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1985struct mwl8k_cmd_get_stat { 1986struct mwl8k_cmd_get_stat {
1986 struct mwl8k_cmd_pkt header; 1987 struct mwl8k_cmd_pkt header;
1987 __le32 stats[64]; 1988 __le32 stats[64];
1988} __attribute__((packed)); 1989} __packed;
1989 1990
1990#define MWL8K_STAT_ACK_FAILURE 9 1991#define MWL8K_STAT_ACK_FAILURE 9
1991#define MWL8K_STAT_RTS_FAILURE 12 1992#define MWL8K_STAT_RTS_FAILURE 12
@@ -2029,7 +2030,7 @@ struct mwl8k_cmd_radio_control {
2029 __le16 action; 2030 __le16 action;
2030 __le16 control; 2031 __le16 control;
2031 __le16 radio_on; 2032 __le16 radio_on;
2032} __attribute__((packed)); 2033} __packed;
2033 2034
2034static int 2035static int
2035mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force) 2036mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
@@ -2092,7 +2093,7 @@ struct mwl8k_cmd_rf_tx_power {
2092 __le16 current_level; 2093 __le16 current_level;
2093 __le16 reserved; 2094 __le16 reserved;
2094 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; 2095 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
2095} __attribute__((packed)); 2096} __packed;
2096 2097
2097static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) 2098static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
2098{ 2099{
@@ -2121,7 +2122,7 @@ struct mwl8k_cmd_rf_antenna {
2121 struct mwl8k_cmd_pkt header; 2122 struct mwl8k_cmd_pkt header;
2122 __le16 antenna; 2123 __le16 antenna;
2123 __le16 mode; 2124 __le16 mode;
2124} __attribute__((packed)); 2125} __packed;
2125 2126
2126#define MWL8K_RF_ANTENNA_RX 1 2127#define MWL8K_RF_ANTENNA_RX 1
2127#define MWL8K_RF_ANTENNA_TX 2 2128#define MWL8K_RF_ANTENNA_TX 2
@@ -2182,7 +2183,7 @@ static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
2182 */ 2183 */
2183struct mwl8k_cmd_set_pre_scan { 2184struct mwl8k_cmd_set_pre_scan {
2184 struct mwl8k_cmd_pkt header; 2185 struct mwl8k_cmd_pkt header;
2185} __attribute__((packed)); 2186} __packed;
2186 2187
2187static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) 2188static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
2188{ 2189{
@@ -2209,7 +2210,7 @@ struct mwl8k_cmd_set_post_scan {
2209 struct mwl8k_cmd_pkt header; 2210 struct mwl8k_cmd_pkt header;
2210 __le32 isibss; 2211 __le32 isibss;
2211 __u8 bssid[ETH_ALEN]; 2212 __u8 bssid[ETH_ALEN];
2212} __attribute__((packed)); 2213} __packed;
2213 2214
2214static int 2215static int
2215mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac) 2216mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
@@ -2240,7 +2241,7 @@ struct mwl8k_cmd_set_rf_channel {
2240 __le16 action; 2241 __le16 action;
2241 __u8 current_channel; 2242 __u8 current_channel;
2242 __le32 channel_flags; 2243 __le32 channel_flags;
2243} __attribute__((packed)); 2244} __packed;
2244 2245
2245static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, 2246static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2246 struct ieee80211_conf *conf) 2247 struct ieee80211_conf *conf)
@@ -2293,7 +2294,7 @@ struct mwl8k_cmd_update_set_aid {
2293 __u8 bssid[ETH_ALEN]; 2294 __u8 bssid[ETH_ALEN];
2294 __le16 protection_mode; 2295 __le16 protection_mode;
2295 __u8 supp_rates[14]; 2296 __u8 supp_rates[14];
2296} __attribute__((packed)); 2297} __packed;
2297 2298
2298static void legacy_rate_mask_to_array(u8 *rates, u32 mask) 2299static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
2299{ 2300{
@@ -2364,7 +2365,7 @@ struct mwl8k_cmd_set_rate {
2364 /* Bitmap for supported MCS codes. */ 2365 /* Bitmap for supported MCS codes. */
2365 __u8 mcs_set[16]; 2366 __u8 mcs_set[16];
2366 __u8 reserved[16]; 2367 __u8 reserved[16];
2367} __attribute__((packed)); 2368} __packed;
2368 2369
2369static int 2370static int
2370mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2371mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -2397,7 +2398,7 @@ struct mwl8k_cmd_finalize_join {
2397 struct mwl8k_cmd_pkt header; 2398 struct mwl8k_cmd_pkt header;
2398 __le32 sleep_interval; /* Number of beacon periods to sleep */ 2399 __le32 sleep_interval; /* Number of beacon periods to sleep */
2399 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN]; 2400 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
2400} __attribute__((packed)); 2401} __packed;
2401 2402
2402static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame, 2403static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
2403 int framelen, int dtim) 2404 int framelen, int dtim)
@@ -2436,7 +2437,7 @@ struct mwl8k_cmd_set_rts_threshold {
2436 struct mwl8k_cmd_pkt header; 2437 struct mwl8k_cmd_pkt header;
2437 __le16 action; 2438 __le16 action;
2438 __le16 threshold; 2439 __le16 threshold;
2439} __attribute__((packed)); 2440} __packed;
2440 2441
2441static int 2442static int
2442mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh) 2443mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
@@ -2466,7 +2467,7 @@ struct mwl8k_cmd_set_slot {
2466 struct mwl8k_cmd_pkt header; 2467 struct mwl8k_cmd_pkt header;
2467 __le16 action; 2468 __le16 action;
2468 __u8 short_slot; 2469 __u8 short_slot;
2469} __attribute__((packed)); 2470} __packed;
2470 2471
2471static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time) 2472static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
2472{ 2473{
@@ -2528,7 +2529,7 @@ struct mwl8k_cmd_set_edca_params {
2528 __u8 txq; 2529 __u8 txq;
2529 } sta; 2530 } sta;
2530 }; 2531 };
2531} __attribute__((packed)); 2532} __packed;
2532 2533
2533#define MWL8K_SET_EDCA_CW 0x01 2534#define MWL8K_SET_EDCA_CW 0x01
2534#define MWL8K_SET_EDCA_TXOP 0x02 2535#define MWL8K_SET_EDCA_TXOP 0x02
@@ -2579,7 +2580,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2579struct mwl8k_cmd_set_wmm_mode { 2580struct mwl8k_cmd_set_wmm_mode {
2580 struct mwl8k_cmd_pkt header; 2581 struct mwl8k_cmd_pkt header;
2581 __le16 action; 2582 __le16 action;
2582} __attribute__((packed)); 2583} __packed;
2583 2584
2584static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable) 2585static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
2585{ 2586{
@@ -2612,7 +2613,7 @@ struct mwl8k_cmd_mimo_config {
2612 __le32 action; 2613 __le32 action;
2613 __u8 rx_antenna_map; 2614 __u8 rx_antenna_map;
2614 __u8 tx_antenna_map; 2615 __u8 tx_antenna_map;
2615} __attribute__((packed)); 2616} __packed;
2616 2617
2617static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) 2618static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
2618{ 2619{
@@ -2652,7 +2653,7 @@ struct mwl8k_cmd_use_fixed_rate_sta {
2652 __le32 rate_type; 2653 __le32 rate_type;
2653 __le32 reserved1; 2654 __le32 reserved1;
2654 __le32 reserved2; 2655 __le32 reserved2;
2655} __attribute__((packed)); 2656} __packed;
2656 2657
2657#define MWL8K_USE_AUTO_RATE 0x0002 2658#define MWL8K_USE_AUTO_RATE 0x0002
2658#define MWL8K_UCAST_RATE 0 2659#define MWL8K_UCAST_RATE 0
@@ -2694,7 +2695,7 @@ struct mwl8k_cmd_use_fixed_rate_ap {
2694 u8 multicast_rate; 2695 u8 multicast_rate;
2695 u8 multicast_rate_type; 2696 u8 multicast_rate_type;
2696 u8 management_rate; 2697 u8 management_rate;
2697} __attribute__((packed)); 2698} __packed;
2698 2699
2699static int 2700static int
2700mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) 2701mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
@@ -2724,7 +2725,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
2724struct mwl8k_cmd_enable_sniffer { 2725struct mwl8k_cmd_enable_sniffer {
2725 struct mwl8k_cmd_pkt header; 2726 struct mwl8k_cmd_pkt header;
2726 __le32 action; 2727 __le32 action;
2727} __attribute__((packed)); 2728} __packed;
2728 2729
2729static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable) 2730static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2730{ 2731{
@@ -2757,7 +2758,7 @@ struct mwl8k_cmd_set_mac_addr {
2757 } mbss; 2758 } mbss;
2758 __u8 mac_addr[ETH_ALEN]; 2759 __u8 mac_addr[ETH_ALEN];
2759 }; 2760 };
2760} __attribute__((packed)); 2761} __packed;
2761 2762
2762#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0 2763#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
2763#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1 2764#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
@@ -2812,7 +2813,7 @@ struct mwl8k_cmd_set_rate_adapt_mode {
2812 struct mwl8k_cmd_pkt header; 2813 struct mwl8k_cmd_pkt header;
2813 __le16 action; 2814 __le16 action;
2814 __le16 mode; 2815 __le16 mode;
2815} __attribute__((packed)); 2816} __packed;
2816 2817
2817static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) 2818static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
2818{ 2819{
@@ -2840,7 +2841,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
2840struct mwl8k_cmd_bss_start { 2841struct mwl8k_cmd_bss_start {
2841 struct mwl8k_cmd_pkt header; 2842 struct mwl8k_cmd_pkt header;
2842 __le32 enable; 2843 __le32 enable;
2843} __attribute__((packed)); 2844} __packed;
2844 2845
2845static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw, 2846static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
2846 struct ieee80211_vif *vif, int enable) 2847 struct ieee80211_vif *vif, int enable)
@@ -2885,7 +2886,7 @@ struct mwl8k_cmd_set_new_stn {
2885 __u8 add_qos_info; 2886 __u8 add_qos_info;
2886 __u8 is_qos_sta; 2887 __u8 is_qos_sta;
2887 __le32 fw_sta_ptr; 2888 __le32 fw_sta_ptr;
2888} __attribute__((packed)); 2889} __packed;
2889 2890
2890#define MWL8K_STA_ACTION_ADD 0 2891#define MWL8K_STA_ACTION_ADD 0
2891#define MWL8K_STA_ACTION_REMOVE 2 2892#define MWL8K_STA_ACTION_REMOVE 2
@@ -2978,7 +2979,7 @@ struct ewc_ht_info {
2978 __le16 control1; 2979 __le16 control1;
2979 __le16 control2; 2980 __le16 control2;
2980 __le16 control3; 2981 __le16 control3;
2981} __attribute__((packed)); 2982} __packed;
2982 2983
2983struct peer_capability_info { 2984struct peer_capability_info {
2984 /* Peer type - AP vs. STA. */ 2985 /* Peer type - AP vs. STA. */
@@ -3007,7 +3008,7 @@ struct peer_capability_info {
3007 __u8 pad2; 3008 __u8 pad2;
3008 __u8 station_id; 3009 __u8 station_id;
3009 __le16 amsdu_enabled; 3010 __le16 amsdu_enabled;
3010} __attribute__((packed)); 3011} __packed;
3011 3012
3012struct mwl8k_cmd_update_stadb { 3013struct mwl8k_cmd_update_stadb {
3013 struct mwl8k_cmd_pkt header; 3014 struct mwl8k_cmd_pkt header;
@@ -3022,7 +3023,7 @@ struct mwl8k_cmd_update_stadb {
3022 3023
3023 /* Peer info - valid during add/update. */ 3024 /* Peer info - valid during add/update. */
3024 struct peer_capability_info peer_info; 3025 struct peer_capability_info peer_info;
3025} __attribute__((packed)); 3026} __packed;
3026 3027
3027#define MWL8K_STA_DB_MODIFY_ENTRY 1 3028#define MWL8K_STA_DB_MODIFY_ENTRY 1
3028#define MWL8K_STA_DB_DEL_ENTRY 2 3029#define MWL8K_STA_DB_DEL_ENTRY 2
@@ -3052,7 +3053,7 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
3052 p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT; 3053 p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
3053 p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability); 3054 p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
3054 p->ht_support = sta->ht_cap.ht_supported; 3055 p->ht_support = sta->ht_cap.ht_supported;
3055 p->ht_caps = sta->ht_cap.cap; 3056 p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
3056 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) | 3057 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
3057 ((sta->ht_cap.ampdu_density & 7) << 2); 3058 ((sta->ht_cap.ampdu_density & 7) << 2);
3058 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) 3059 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
@@ -3190,8 +3191,8 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3190 int rc; 3191 int rc;
3191 3192
3192 if (!priv->radio_on) { 3193 if (!priv->radio_on) {
3193 printk(KERN_DEBUG "%s: dropped TX frame since radio " 3194 wiphy_debug(hw->wiphy,
3194 "disabled\n", wiphy_name(hw->wiphy)); 3195 "dropped TX frame since radio disabled\n");
3195 dev_kfree_skb(skb); 3196 dev_kfree_skb(skb);
3196 return NETDEV_TX_OK; 3197 return NETDEV_TX_OK;
3197 } 3198 }
@@ -3209,8 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
3209 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 3210 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
3210 IRQF_SHARED, MWL8K_NAME, hw); 3211 IRQF_SHARED, MWL8K_NAME, hw);
3211 if (rc) { 3212 if (rc) {
3212 printk(KERN_ERR "%s: failed to register IRQ handler\n", 3213 wiphy_err(hw->wiphy, "failed to register irq handler\n");
3213 wiphy_name(hw->wiphy));
3214 return -EIO; 3214 return -EIO;
3215 } 3215 }
3216 3216
@@ -3297,9 +3297,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
3297 * mode. (Sniffer mode is only used on STA firmware.) 3297 * mode. (Sniffer mode is only used on STA firmware.)
3298 */ 3298 */
3299 if (priv->sniffer_enabled) { 3299 if (priv->sniffer_enabled) {
3300 printk(KERN_INFO "%s: unable to create STA " 3300 wiphy_info(hw->wiphy,
3301 "interface due to sniffer mode being enabled\n", 3301 "unable to create STA interface because sniffer mode is enabled\n");
3302 wiphy_name(hw->wiphy));
3303 return -EINVAL; 3302 return -EINVAL;
3304 } 3303 }
3305 3304
@@ -3581,9 +3580,8 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3581 */ 3580 */
3582 if (!list_empty(&priv->vif_list)) { 3581 if (!list_empty(&priv->vif_list)) {
3583 if (net_ratelimit()) 3582 if (net_ratelimit())
3584 printk(KERN_INFO "%s: not enabling sniffer " 3583 wiphy_info(hw->wiphy,
3585 "mode because STA interface is active\n", 3584 "not enabling sniffer mode because STA interface is active\n");
3586 wiphy_name(hw->wiphy));
3587 return 0; 3585 return 0;
3588 } 3586 }
3589 3587
@@ -3763,6 +3761,22 @@ static int mwl8k_get_stats(struct ieee80211_hw *hw,
3763 return mwl8k_cmd_get_stat(hw, stats); 3761 return mwl8k_cmd_get_stat(hw, stats);
3764} 3762}
3765 3763
3764static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
3765 struct survey_info *survey)
3766{
3767 struct mwl8k_priv *priv = hw->priv;
3768 struct ieee80211_conf *conf = &hw->conf;
3769
3770 if (idx != 0)
3771 return -ENOENT;
3772
3773 survey->channel = conf->channel;
3774 survey->filled = SURVEY_INFO_NOISE_DBM;
3775 survey->noise = priv->noise;
3776
3777 return 0;
3778}
3779
3766static int 3780static int
3767mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 3781mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3768 enum ieee80211_ampdu_mlme_action action, 3782 enum ieee80211_ampdu_mlme_action action,
@@ -3794,6 +3808,7 @@ static const struct ieee80211_ops mwl8k_ops = {
3794 .sta_remove = mwl8k_sta_remove, 3808 .sta_remove = mwl8k_sta_remove,
3795 .conf_tx = mwl8k_conf_tx, 3809 .conf_tx = mwl8k_conf_tx,
3796 .get_stats = mwl8k_get_stats, 3810 .get_stats = mwl8k_get_stats,
3811 .get_survey = mwl8k_get_survey,
3797 .ampdu_action = mwl8k_ampdu_action, 3812 .ampdu_action = mwl8k_ampdu_action,
3798}; 3813};
3799 3814
@@ -3911,8 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3911 3926
3912 priv->sram = pci_iomap(pdev, 0, 0x10000); 3927 priv->sram = pci_iomap(pdev, 0, 0x10000);
3913 if (priv->sram == NULL) { 3928 if (priv->sram == NULL) {
3914 printk(KERN_ERR "%s: Cannot map device SRAM\n", 3929 wiphy_err(hw->wiphy, "cannot map device sram\n");
3915 wiphy_name(hw->wiphy));
3916 goto err_iounmap; 3930 goto err_iounmap;
3917 } 3931 }
3918 3932
@@ -3924,8 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3924 if (priv->regs == NULL) { 3938 if (priv->regs == NULL) {
3925 priv->regs = pci_iomap(pdev, 2, 0x10000); 3939 priv->regs = pci_iomap(pdev, 2, 0x10000);
3926 if (priv->regs == NULL) { 3940 if (priv->regs == NULL) {
3927 printk(KERN_ERR "%s: Cannot map device registers\n", 3941 wiphy_err(hw->wiphy, "cannot map device registers\n");
3928 wiphy_name(hw->wiphy));
3929 goto err_iounmap; 3942 goto err_iounmap;
3930 } 3943 }
3931 } 3944 }
@@ -3937,16 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3937 /* Ask userland hotplug daemon for the device firmware */ 3950 /* Ask userland hotplug daemon for the device firmware */
3938 rc = mwl8k_request_firmware(priv); 3951 rc = mwl8k_request_firmware(priv);
3939 if (rc) { 3952 if (rc) {
3940 printk(KERN_ERR "%s: Firmware files not found\n", 3953 wiphy_err(hw->wiphy, "firmware files not found\n");
3941 wiphy_name(hw->wiphy));
3942 goto err_stop_firmware; 3954 goto err_stop_firmware;
3943 } 3955 }
3944 3956
3945 /* Load firmware into hardware */ 3957 /* Load firmware into hardware */
3946 rc = mwl8k_load_firmware(hw); 3958 rc = mwl8k_load_firmware(hw);
3947 if (rc) { 3959 if (rc) {
3948 printk(KERN_ERR "%s: Cannot start firmware\n", 3960 wiphy_err(hw->wiphy, "cannot start firmware\n");
3949 wiphy_name(hw->wiphy));
3950 goto err_stop_firmware; 3961 goto err_stop_firmware;
3951 } 3962 }
3952 3963
@@ -3957,9 +3968,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3957 if (priv->ap_fw) { 3968 if (priv->ap_fw) {
3958 priv->rxd_ops = priv->device_info->ap_rxd_ops; 3969 priv->rxd_ops = priv->device_info->ap_rxd_ops;
3959 if (priv->rxd_ops == NULL) { 3970 if (priv->rxd_ops == NULL) {
3960 printk(KERN_ERR "%s: Driver does not have AP " 3971 wiphy_err(hw->wiphy,
3961 "firmware image support for this hardware\n", 3972 "Driver does not have AP firmware image support for this hardware\n");
3962 wiphy_name(hw->wiphy));
3963 goto err_stop_firmware; 3973 goto err_stop_firmware;
3964 } 3974 }
3965 } else { 3975 } else {
@@ -4037,8 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4037 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 4047 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
4038 IRQF_SHARED, MWL8K_NAME, hw); 4048 IRQF_SHARED, MWL8K_NAME, hw);
4039 if (rc) { 4049 if (rc) {
4040 printk(KERN_ERR "%s: failed to register IRQ handler\n", 4050 wiphy_err(hw->wiphy, "failed to register irq handler\n");
4041 wiphy_name(hw->wiphy));
4042 goto err_free_queues; 4051 goto err_free_queues;
4043 } 4052 }
4044 4053
@@ -4058,8 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4058 rc = mwl8k_cmd_get_hw_spec_sta(hw); 4067 rc = mwl8k_cmd_get_hw_spec_sta(hw);
4059 } 4068 }
4060 if (rc) { 4069 if (rc) {
4061 printk(KERN_ERR "%s: Cannot initialise firmware\n", 4070 wiphy_err(hw->wiphy, "cannot initialise firmware\n");
4062 wiphy_name(hw->wiphy));
4063 goto err_free_irq; 4071 goto err_free_irq;
4064 } 4072 }
4065 4073
@@ -4073,15 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4073 /* Turn radio off */ 4081 /* Turn radio off */
4074 rc = mwl8k_cmd_radio_disable(hw); 4082 rc = mwl8k_cmd_radio_disable(hw);
4075 if (rc) { 4083 if (rc) {
4076 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy)); 4084 wiphy_err(hw->wiphy, "cannot disable\n");
4077 goto err_free_irq; 4085 goto err_free_irq;
4078 } 4086 }
4079 4087
4080 /* Clear MAC address */ 4088 /* Clear MAC address */
4081 rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00"); 4089 rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
4082 if (rc) { 4090 if (rc) {
4083 printk(KERN_ERR "%s: Cannot clear MAC address\n", 4091 wiphy_err(hw->wiphy, "cannot clear mac address\n");
4084 wiphy_name(hw->wiphy));
4085 goto err_free_irq; 4092 goto err_free_irq;
4086 } 4093 }
4087 4094
@@ -4091,17 +4098,16 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4091 4098
4092 rc = ieee80211_register_hw(hw); 4099 rc = ieee80211_register_hw(hw);
4093 if (rc) { 4100 if (rc) {
4094 printk(KERN_ERR "%s: Cannot register device\n", 4101 wiphy_err(hw->wiphy, "cannot register device\n");
4095 wiphy_name(hw->wiphy));
4096 goto err_free_queues; 4102 goto err_free_queues;
4097 } 4103 }
4098 4104
4099 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n", 4105 wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n",
4100 wiphy_name(hw->wiphy), priv->device_info->part_name, 4106 priv->device_info->part_name,
4101 priv->hw_rev, hw->wiphy->perm_addr, 4107 priv->hw_rev, hw->wiphy->perm_addr,
4102 priv->ap_fw ? "AP" : "STA", 4108 priv->ap_fw ? "AP" : "STA",
4103 (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff, 4109 (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff,
4104 (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff); 4110 (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff);
4105 4111
4106 return 0; 4112 return 0;
4107 4113
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 8c4169c227ae..09fae2f0ea08 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -117,9 +117,8 @@ static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev,
117 117
118 case NL80211_IFTYPE_MONITOR: 118 case NL80211_IFTYPE_MONITOR:
119 if (priv->broken_monitor && !force_monitor) { 119 if (priv->broken_monitor && !force_monitor) {
120 printk(KERN_WARNING "%s: Monitor mode support is " 120 wiphy_warn(wiphy,
121 "buggy in this firmware, not enabling\n", 121 "Monitor mode support is buggy in this firmware, not enabling\n");
122 wiphy_name(wiphy));
123 err = -EINVAL; 122 err = -EINVAL;
124 } 123 }
125 break; 124 break;
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 3e1947d097ca..259d75853984 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -49,7 +49,7 @@ struct orinoco_fw_header {
49 __le32 pri_offset; /* Offset to primary plug data */ 49 __le32 pri_offset; /* Offset to primary plug data */
50 __le32 compat_offset; /* Offset to compatibility data*/ 50 __le32 compat_offset; /* Offset to compatibility data*/
51 char signature[0]; /* FW signature length headersize-20 */ 51 char signature[0]; /* FW signature length headersize-20 */
52} __attribute__ ((packed)); 52} __packed;
53 53
54/* Check the range of various header entries. Return a pointer to a 54/* Check the range of various header entries. Return a pointer to a
55 * description of the problem, or NULL if everything checks out. */ 55 * description of the problem, or NULL if everything checks out. */
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 9ca34e722b45..d9f18c11682a 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -205,7 +205,7 @@ struct hermes_tx_descriptor {
205 u8 retry_count; 205 u8 retry_count;
206 u8 tx_rate; 206 u8 tx_rate;
207 __le16 tx_control; 207 __le16 tx_control;
208} __attribute__ ((packed)); 208} __packed;
209 209
210#define HERMES_TXSTAT_RETRYERR (0x0001) 210#define HERMES_TXSTAT_RETRYERR (0x0001)
211#define HERMES_TXSTAT_AGEDERR (0x0002) 211#define HERMES_TXSTAT_AGEDERR (0x0002)
@@ -254,7 +254,7 @@ struct hermes_tallies_frame {
254 /* Those last are probably not available in very old firmwares */ 254 /* Those last are probably not available in very old firmwares */
255 __le16 RxDiscards_WEPICVError; 255 __le16 RxDiscards_WEPICVError;
256 __le16 RxDiscards_WEPExcluded; 256 __le16 RxDiscards_WEPExcluded;
257} __attribute__ ((packed)); 257} __packed;
258 258
259/* Grabbed from wlan-ng - Thanks Mark... - Jean II 259/* Grabbed from wlan-ng - Thanks Mark... - Jean II
260 * This is the result of a scan inquiry command */ 260 * This is the result of a scan inquiry command */
@@ -271,7 +271,7 @@ struct prism2_scan_apinfo {
271 u8 rates[10]; /* Bit rate supported */ 271 u8 rates[10]; /* Bit rate supported */
272 __le16 proberesp_rate; /* Data rate of the response frame */ 272 __le16 proberesp_rate; /* Data rate of the response frame */
273 __le16 atim; /* ATIM window time, Kus (hostscan only) */ 273 __le16 atim; /* ATIM window time, Kus (hostscan only) */
274} __attribute__ ((packed)); 274} __packed;
275 275
276/* Same stuff for the Lucent/Agere card. 276/* Same stuff for the Lucent/Agere card.
277 * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */ 277 * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
@@ -285,7 +285,7 @@ struct agere_scan_apinfo {
285 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ 285 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
286 __le16 essid_len; /* ESSID length */ 286 __le16 essid_len; /* ESSID length */
287 u8 essid[32]; /* ESSID of the network */ 287 u8 essid[32]; /* ESSID of the network */
288} __attribute__ ((packed)); 288} __packed;
289 289
290/* Moustafa: Scan structure for Symbol cards */ 290/* Moustafa: Scan structure for Symbol cards */
291struct symbol_scan_apinfo { 291struct symbol_scan_apinfo {
@@ -303,7 +303,7 @@ struct symbol_scan_apinfo {
303 __le16 basic_rates; /* Basic rates bitmask */ 303 __le16 basic_rates; /* Basic rates bitmask */
304 u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */ 304 u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
305 u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */ 305 u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
306} __attribute__ ((packed)); 306} __packed;
307 307
308union hermes_scan_info { 308union hermes_scan_info {
309 struct agere_scan_apinfo a; 309 struct agere_scan_apinfo a;
@@ -343,7 +343,7 @@ struct agere_ext_scan_info {
343 __le16 beacon_interval; 343 __le16 beacon_interval;
344 __le16 capabilities; 344 __le16 capabilities;
345 u8 data[0]; 345 u8 data[0];
346} __attribute__ ((packed)); 346} __packed;
347 347
348#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000) 348#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
349#define HERMES_LINKSTATUS_CONNECTED (0x0001) 349#define HERMES_LINKSTATUS_CONNECTED (0x0001)
@@ -355,7 +355,7 @@ struct agere_ext_scan_info {
355 355
356struct hermes_linkstatus { 356struct hermes_linkstatus {
357 __le16 linkstatus; /* Link status */ 357 __le16 linkstatus; /* Link status */
358} __attribute__ ((packed)); 358} __packed;
359 359
360struct hermes_response { 360struct hermes_response {
361 u16 status, resp0, resp1, resp2; 361 u16 status, resp0, resp1, resp2;
@@ -365,11 +365,11 @@ struct hermes_response {
365struct hermes_idstring { 365struct hermes_idstring {
366 __le16 len; 366 __le16 len;
367 __le16 val[16]; 367 __le16 val[16];
368} __attribute__ ((packed)); 368} __packed;
369 369
370struct hermes_multicast { 370struct hermes_multicast {
371 u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN]; 371 u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
372} __attribute__ ((packed)); 372} __packed;
373 373
374/* Timeouts */ 374/* Timeouts */
375#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ 375#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index 6da85e75fce0..2b2b9a1a979c 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -65,10 +65,10 @@ struct dblock {
65 __le32 addr; /* adapter address where to write the block */ 65 __le32 addr; /* adapter address where to write the block */
66 __le16 len; /* length of the data only, in bytes */ 66 __le16 len; /* length of the data only, in bytes */
67 char data[0]; /* data to be written */ 67 char data[0]; /* data to be written */
68} __attribute__ ((packed)); 68} __packed;
69 69
70/* 70/*
71 * Plug Data References are located in in the image after the last data 71 * Plug Data References are located in the image after the last data
72 * block. They refer to areas in the adapter memory where the plug data 72 * block. They refer to areas in the adapter memory where the plug data
73 * items with matching ID should be written. 73 * items with matching ID should be written.
74 */ 74 */
@@ -77,7 +77,7 @@ struct pdr {
77 __le32 addr; /* adapter address where to write the data */ 77 __le32 addr; /* adapter address where to write the data */
78 __le32 len; /* expected length of the data, in bytes */ 78 __le32 len; /* expected length of the data, in bytes */
79 char next[0]; /* next PDR starts here */ 79 char next[0]; /* next PDR starts here */
80} __attribute__ ((packed)); 80} __packed;
81 81
82/* 82/*
83 * Plug Data Items are located in the EEPROM read from the adapter by 83 * Plug Data Items are located in the EEPROM read from the adapter by
@@ -88,7 +88,7 @@ struct pdi {
88 __le16 len; /* length of ID and data, in words */ 88 __le16 len; /* length of ID and data, in words */
89 __le16 id; /* record ID */ 89 __le16 id; /* record ID */
90 char data[0]; /* plug data */ 90 char data[0]; /* plug data */
91} __attribute__ ((packed)); 91} __packed;
92 92
93/*** FW data block access functions ***/ 93/*** FW data block access functions ***/
94 94
@@ -317,7 +317,7 @@ static const struct { \
317 __le16 len; \ 317 __le16 len; \
318 __le16 id; \ 318 __le16 id; \
319 u8 val[length]; \ 319 u8 val[length]; \
320} __attribute__ ((packed)) default_pdr_data_##pid = { \ 320} __packed default_pdr_data_##pid = { \
321 cpu_to_le16((sizeof(default_pdr_data_##pid)/ \ 321 cpu_to_le16((sizeof(default_pdr_data_##pid)/ \
322 sizeof(__le16)) - 1), \ 322 sizeof(__le16)) - 1), \
323 cpu_to_le16(pid), \ 323 cpu_to_le16(pid), \
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 6fbd78850123..077baa86756b 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -45,7 +45,7 @@ static const struct {
45/* Firmware version encoding */ 45/* Firmware version encoding */
46struct comp_id { 46struct comp_id {
47 u16 id, variant, major, minor; 47 u16 id, variant, major, minor;
48} __attribute__ ((packed)); 48} __packed;
49 49
50static inline fwtype_t determine_firmware_type(struct comp_id *nic_id) 50static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
51{ 51{
@@ -995,7 +995,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
995 u8 tx_mic[MIC_KEYLEN]; 995 u8 tx_mic[MIC_KEYLEN];
996 u8 rx_mic[MIC_KEYLEN]; 996 u8 rx_mic[MIC_KEYLEN];
997 u8 tsc[ORINOCO_SEQ_LEN]; 997 u8 tsc[ORINOCO_SEQ_LEN];
998 } __attribute__ ((packed)) buf; 998 } __packed buf;
999 hermes_t *hw = &priv->hw; 999 hermes_t *hw = &priv->hw;
1000 int ret; 1000 int ret;
1001 int err; 1001 int err;
@@ -1326,7 +1326,7 @@ int orinoco_hw_disassociate(struct orinoco_private *priv,
1326 struct { 1326 struct {
1327 u8 addr[ETH_ALEN]; 1327 u8 addr[ETH_ALEN];
1328 __le16 reason_code; 1328 __le16 reason_code;
1329 } __attribute__ ((packed)) buf; 1329 } __packed buf;
1330 1330
1331 /* Currently only supported by WPA enabled Agere fw */ 1331 /* Currently only supported by WPA enabled Agere fw */
1332 if (!priv->has_wpa) 1332 if (!priv->has_wpa)
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index ca71f08709bc..e8e2d0f4763d 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -172,7 +172,7 @@ struct hermes_txexc_data {
172 __le16 frame_ctl; 172 __le16 frame_ctl;
173 __le16 duration_id; 173 __le16 duration_id;
174 u8 addr1[ETH_ALEN]; 174 u8 addr1[ETH_ALEN];
175} __attribute__ ((packed)); 175} __packed;
176 176
177/* Rx frame header except compatibility 802.3 header */ 177/* Rx frame header except compatibility 802.3 header */
178struct hermes_rx_descriptor { 178struct hermes_rx_descriptor {
@@ -196,7 +196,7 @@ struct hermes_rx_descriptor {
196 196
197 /* Data length */ 197 /* Data length */
198 __le16 data_len; 198 __le16 data_len;
199} __attribute__ ((packed)); 199} __packed;
200 200
201struct orinoco_rx_data { 201struct orinoco_rx_data {
202 struct hermes_rx_descriptor *desc; 202 struct hermes_rx_descriptor *desc;
@@ -390,7 +390,7 @@ int orinoco_process_xmit_skb(struct sk_buff *skb,
390 struct header_struct { 390 struct header_struct {
391 struct ethhdr eth; /* 802.3 header */ 391 struct ethhdr eth; /* 802.3 header */
392 u8 encap[6]; /* 802.2 header */ 392 u8 encap[6]; /* 802.2 header */
393 } __attribute__ ((packed)) hdr; 393 } __packed hdr;
394 int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN); 394 int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
395 395
396 if (skb_headroom(skb) < ENCAPS_OVERHEAD) { 396 if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
@@ -1170,7 +1170,7 @@ static void orinoco_join_ap(struct work_struct *work)
1170 struct join_req { 1170 struct join_req {
1171 u8 bssid[ETH_ALEN]; 1171 u8 bssid[ETH_ALEN];
1172 __le16 channel; 1172 __le16 channel;
1173 } __attribute__ ((packed)) req; 1173 } __packed req;
1174 const int atom_len = offsetof(struct prism2_scan_apinfo, atim); 1174 const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
1175 struct prism2_scan_apinfo *atom = NULL; 1175 struct prism2_scan_apinfo *atom = NULL;
1176 int offset = 4; 1176 int offset = 4;
@@ -1410,7 +1410,7 @@ void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1410 struct { 1410 struct {
1411 __le16 len; 1411 __le16 len;
1412 __le16 type; 1412 __le16 type;
1413 } __attribute__ ((packed)) info; 1413 } __packed info;
1414 int len, type; 1414 int len, type;
1415 int err; 1415 int err;
1416 1416
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index a6da86e0a70f..255710ef082a 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -32,7 +32,7 @@
32struct orinoco_key { 32struct orinoco_key {
33 __le16 len; /* always stored as little-endian */ 33 __le16 len; /* always stored as little-endian */
34 char data[ORINOCO_MAX_KEY_SIZE]; 34 char data[ORINOCO_MAX_KEY_SIZE];
35} __attribute__ ((packed)); 35} __packed;
36 36
37#define TKIP_KEYLEN 16 37#define TKIP_KEYLEN 16
38#define MIC_KEYLEN 8 38#define MIC_KEYLEN 8
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 78f089baa8c9..a38a7bd25f19 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -90,7 +90,7 @@ struct header_struct {
90 /* SNAP */ 90 /* SNAP */
91 u8 oui[3]; 91 u8 oui[3];
92 __be16 ethertype; 92 __be16 ethertype;
93} __attribute__ ((packed)); 93} __packed;
94 94
95struct ez_usb_fw { 95struct ez_usb_fw {
96 u16 size; 96 u16 size;
@@ -222,7 +222,7 @@ struct ezusb_packet {
222 __le16 hermes_len; 222 __le16 hermes_len;
223 __le16 hermes_rid; 223 __le16 hermes_rid;
224 u8 data[0]; 224 u8 data[0];
225} __attribute__ ((packed)); 225} __packed;
226 226
227/* Table of devices that work or may work with this driver */ 227/* Table of devices that work or may work with this driver */
228static struct usb_device_id ezusb_table[] = { 228static struct usb_device_id ezusb_table[] = {
@@ -356,12 +356,10 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
356{ 356{
357 struct request_context *ctx; 357 struct request_context *ctx;
358 358
359 ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); 359 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
360 if (!ctx) 360 if (!ctx)
361 return NULL; 361 return NULL;
362 362
363 memset(ctx, 0, sizeof(*ctx));
364
365 ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC); 363 ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
366 if (!ctx->buf) { 364 if (!ctx->buf) {
367 kfree(ctx); 365 kfree(ctx);
@@ -1504,16 +1502,16 @@ static inline void ezusb_delete(struct ezusb_priv *upriv)
1504 ezusb_ctx_complete(list_entry(item, 1502 ezusb_ctx_complete(list_entry(item,
1505 struct request_context, list)); 1503 struct request_context, list));
1506 1504
1507 if (upriv->read_urb->status == -EINPROGRESS) 1505 if (upriv->read_urb && upriv->read_urb->status == -EINPROGRESS)
1508 printk(KERN_ERR PFX "Some URB in progress\n"); 1506 printk(KERN_ERR PFX "Some URB in progress\n");
1509 1507
1510 mutex_unlock(&upriv->mtx); 1508 mutex_unlock(&upriv->mtx);
1511 1509
1512 kfree(upriv->read_urb->transfer_buffer); 1510 if (upriv->read_urb) {
1513 if (upriv->bap_buf != NULL) 1511 kfree(upriv->read_urb->transfer_buffer);
1514 kfree(upriv->bap_buf);
1515 if (upriv->read_urb != NULL)
1516 usb_free_urb(upriv->read_urb); 1512 usb_free_urb(upriv->read_urb);
1513 }
1514 kfree(upriv->bap_buf);
1517 if (upriv->dev) { 1515 if (upriv->dev) {
1518 struct orinoco_private *priv = ndev_priv(upriv->dev); 1516 struct orinoco_private *priv = ndev_priv(upriv->dev);
1519 orinoco_if_del(priv); 1517 orinoco_if_del(priv);
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 5775124e2aee..cf7be1eb6124 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -128,7 +128,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
128 } else { 128 } else {
129 struct { 129 struct {
130 __le16 qual, signal, noise, unused; 130 __le16 qual, signal, noise, unused;
131 } __attribute__ ((packed)) cq; 131 } __packed cq;
132 132
133 err = HERMES_READ_RECORD(hw, USER_BAP, 133 err = HERMES_READ_RECORD(hw, USER_BAP,
134 HERMES_RID_COMMSQUALITY, &cq); 134 HERMES_RID_COMMSQUALITY, &cq);
@@ -993,11 +993,9 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
993 return -EINVAL; 993 return -EINVAL;
994 994
995 if (wrqu->data.length) { 995 if (wrqu->data.length) {
996 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 996 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
997 if (buf == NULL) 997 if (buf == NULL)
998 return -ENOMEM; 998 return -ENOMEM;
999
1000 memcpy(buf, extra, wrqu->data.length);
1001 } else 999 } else
1002 buf = NULL; 1000 buf = NULL;
1003 1001
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 187e263b045a..d687cb7f2a59 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -149,16 +149,15 @@ static int p54_generate_band(struct ieee80211_hw *dev,
149 continue; 149 continue;
150 150
151 if (list->channels[i].data != CHAN_HAS_ALL) { 151 if (list->channels[i].data != CHAN_HAS_ALL) {
152 printk(KERN_ERR "%s:%s%s%s is/are missing for " 152 wiphy_err(dev->wiphy,
153 "channel:%d [%d MHz].\n", 153 "%s%s%s is/are missing for channel:%d [%d MHz].\n",
154 wiphy_name(dev->wiphy), 154 (list->channels[i].data & CHAN_HAS_CAL ? "" :
155 (list->channels[i].data & CHAN_HAS_CAL ? "" : 155 " [iqauto calibration data]"),
156 " [iqauto calibration data]"), 156 (list->channels[i].data & CHAN_HAS_LIMIT ? "" :
157 (list->channels[i].data & CHAN_HAS_LIMIT ? "" : 157 " [output power limits]"),
158 " [output power limits]"), 158 (list->channels[i].data & CHAN_HAS_CURVE ? "" :
159 (list->channels[i].data & CHAN_HAS_CURVE ? "" : 159 " [curve data]"),
160 " [curve data]"), 160 list->channels[i].index, list->channels[i].freq);
161 list->channels[i].index, list->channels[i].freq);
162 continue; 161 continue;
163 } 162 }
164 163
@@ -168,9 +167,8 @@ static int p54_generate_band(struct ieee80211_hw *dev,
168 } 167 }
169 168
170 if (j == 0) { 169 if (j == 0) {
171 printk(KERN_ERR "%s: Disabling totally damaged %s band.\n", 170 wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n",
172 wiphy_name(dev->wiphy), (band == IEEE80211_BAND_2GHZ) ? 171 (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
173 "2 GHz" : "5 GHz");
174 172
175 ret = -ENODATA; 173 ret = -ENODATA;
176 goto err_out; 174 goto err_out;
@@ -244,9 +242,9 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
244 242
245 if ((priv->iq_autocal_len != priv->curve_data->entries) || 243 if ((priv->iq_autocal_len != priv->curve_data->entries) ||
246 (priv->iq_autocal_len != priv->output_limit->entries)) 244 (priv->iq_autocal_len != priv->output_limit->entries))
247 printk(KERN_ERR "%s: Unsupported or damaged EEPROM detected. " 245 wiphy_err(dev->wiphy,
248 "You may not be able to use all channels.\n", 246 "Unsupported or damaged EEPROM detected. "
249 wiphy_name(dev->wiphy)); 247 "You may not be able to use all channels.\n");
250 248
251 max_channel_num = max_t(unsigned int, priv->output_limit->entries, 249 max_channel_num = max_t(unsigned int, priv->output_limit->entries,
252 priv->iq_autocal_len); 250 priv->iq_autocal_len);
@@ -419,15 +417,14 @@ static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len,
419 int i; 417 int i;
420 418
421 if (len != (entry_size * num_entries)) { 419 if (len != (entry_size * num_entries)) {
422 printk(KERN_ERR "%s: unknown rssi calibration data packing " 420 wiphy_err(dev->wiphy,
423 " type:(%x) len:%d.\n", 421 "unknown rssi calibration data packing type:(%x) len:%d.\n",
424 wiphy_name(dev->wiphy), type, len); 422 type, len);
425 423
426 print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, 424 print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE,
427 data, len); 425 data, len);
428 426
429 printk(KERN_ERR "%s: please report this issue.\n", 427 wiphy_err(dev->wiphy, "please report this issue.\n");
430 wiphy_name(dev->wiphy));
431 return; 428 return;
432 } 429 }
433 430
@@ -445,15 +442,14 @@ static void p54_parse_default_country(struct ieee80211_hw *dev,
445 struct pda_country *country; 442 struct pda_country *country;
446 443
447 if (len != sizeof(*country)) { 444 if (len != sizeof(*country)) {
448 printk(KERN_ERR "%s: found possible invalid default country " 445 wiphy_err(dev->wiphy,
449 "eeprom entry. (entry size: %d)\n", 446 "found possible invalid default country eeprom entry. (entry size: %d)\n",
450 wiphy_name(dev->wiphy), len); 447 len);
451 448
452 print_hex_dump_bytes("country:", DUMP_PREFIX_NONE, 449 print_hex_dump_bytes("country:", DUMP_PREFIX_NONE,
453 data, len); 450 data, len);
454 451
455 printk(KERN_ERR "%s: please report this issue.\n", 452 wiphy_err(dev->wiphy, "please report this issue.\n");
456 wiphy_name(dev->wiphy));
457 return; 453 return;
458 } 454 }
459 455
@@ -478,8 +474,8 @@ static int p54_convert_output_limits(struct ieee80211_hw *dev,
478 return -EINVAL; 474 return -EINVAL;
479 475
480 if (data[0] != 0) { 476 if (data[0] != 0) {
481 printk(KERN_ERR "%s: unknown output power db revision:%x\n", 477 wiphy_err(dev->wiphy, "unknown output power db revision:%x\n",
482 wiphy_name(dev->wiphy), data[0]); 478 data[0]);
483 return -EINVAL; 479 return -EINVAL;
484 } 480 }
485 481
@@ -587,10 +583,9 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
587 err = p54_convert_rev1(dev, curve_data); 583 err = p54_convert_rev1(dev, curve_data);
588 break; 584 break;
589 default: 585 default:
590 printk(KERN_ERR "%s: unknown curve data " 586 wiphy_err(dev->wiphy,
591 "revision %d\n", 587 "unknown curve data revision %d\n",
592 wiphy_name(dev->wiphy), 588 curve_data->cal_method_rev);
593 curve_data->cal_method_rev);
594 err = -ENODEV; 589 err = -ENODEV;
595 break; 590 break;
596 } 591 }
@@ -599,13 +594,13 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
599 } 594 }
600 break; 595 break;
601 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 596 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
602 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 597 priv->iq_autocal = kmemdup(entry->data, data_len,
598 GFP_KERNEL);
603 if (!priv->iq_autocal) { 599 if (!priv->iq_autocal) {
604 err = -ENOMEM; 600 err = -ENOMEM;
605 goto err; 601 goto err;
606 } 602 }
607 603
608 memcpy(priv->iq_autocal, entry->data, data_len);
609 priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); 604 priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry);
610 break; 605 break;
611 case PDR_DEFAULT_COUNTRY: 606 case PDR_DEFAULT_COUNTRY:
@@ -672,8 +667,8 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
672 667
673 if (!synth || !priv->iq_autocal || !priv->output_limit || 668 if (!synth || !priv->iq_autocal || !priv->output_limit ||
674 !priv->curve_data) { 669 !priv->curve_data) {
675 printk(KERN_ERR "%s: not all required entries found in eeprom!\n", 670 wiphy_err(dev->wiphy,
676 wiphy_name(dev->wiphy)); 671 "not all required entries found in eeprom!\n");
677 err = -EINVAL; 672 err = -EINVAL;
678 goto err; 673 goto err;
679 } 674 }
@@ -699,15 +694,15 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
699 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 694 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
700 u8 perm_addr[ETH_ALEN]; 695 u8 perm_addr[ETH_ALEN];
701 696
702 printk(KERN_WARNING "%s: Invalid hwaddr! Using randomly generated MAC addr\n", 697 wiphy_warn(dev->wiphy,
703 wiphy_name(dev->wiphy)); 698 "invalid hwaddr! using randomly generated mac addr\n");
704 random_ether_addr(perm_addr); 699 random_ether_addr(perm_addr);
705 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 700 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
706 } 701 }
707 702
708 printk(KERN_INFO "%s: hwaddr %pM, MAC:isl38%02x RF:%s\n", 703 wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n",
709 wiphy_name(dev->wiphy), dev->wiphy->perm_addr, priv->version, 704 dev->wiphy->perm_addr, priv->version,
710 p54_rf_chips[priv->rxhw]); 705 p54_rf_chips[priv->rxhw]);
711 706
712 return 0; 707 return 0;
713 708
@@ -719,8 +714,7 @@ err:
719 priv->output_limit = NULL; 714 priv->output_limit = NULL;
720 priv->curve_data = NULL; 715 priv->curve_data = NULL;
721 716
722 printk(KERN_ERR "%s: eeprom parse failed!\n", 717 wiphy_err(dev->wiphy, "eeprom parse failed!\n");
723 wiphy_name(dev->wiphy));
724 return err; 718 return err;
725} 719}
726EXPORT_SYMBOL_GPL(p54_parse_eeprom); 720EXPORT_SYMBOL_GPL(p54_parse_eeprom);
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index c43a5d461ab2..47006bca4852 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -62,16 +62,15 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
62 case FW_LM20: 62 case FW_LM20:
63 case FW_LM87: { 63 case FW_LM87: {
64 char *iftype = (char *)bootrec->data; 64 char *iftype = (char *)bootrec->data;
65 printk(KERN_INFO "%s: p54 detected a LM%c%c " 65 wiphy_info(priv->hw->wiphy,
66 "firmware\n", 66 "p54 detected a LM%c%c firmware\n",
67 wiphy_name(priv->hw->wiphy), 67 iftype[2], iftype[3]);
68 iftype[2], iftype[3]);
69 break; 68 break;
70 } 69 }
71 case FW_FMAC: 70 case FW_FMAC:
72 default: 71 default:
73 printk(KERN_ERR "%s: unsupported firmware\n", 72 wiphy_err(priv->hw->wiphy,
74 wiphy_name(priv->hw->wiphy)); 73 "unsupported firmware\n");
75 return -ENODEV; 74 return -ENODEV;
76 } 75 }
77 break; 76 break;
@@ -125,15 +124,15 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
125 } 124 }
126 125
127 if (fw_version) 126 if (fw_version)
128 printk(KERN_INFO "%s: FW rev %s - Softmac protocol %x.%x\n", 127 wiphy_info(priv->hw->wiphy,
129 wiphy_name(priv->hw->wiphy), fw_version, 128 "fw rev %s - softmac protocol %x.%x\n",
130 priv->fw_var >> 8, priv->fw_var & 0xff); 129 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
131 130
132 if (priv->fw_var < 0x500) 131 if (priv->fw_var < 0x500)
133 printk(KERN_INFO "%s: you are using an obsolete firmware. " 132 wiphy_info(priv->hw->wiphy,
134 "visit http://wireless.kernel.org/en/users/Drivers/p54 " 133 "you are using an obsolete firmware. "
135 "and grab one for \"kernel >= 2.6.28\"!\n", 134 "visit http://wireless.kernel.org/en/users/Drivers/p54 "
136 wiphy_name(priv->hw->wiphy)); 135 "and grab one for \"kernel >= 2.6.28\"!\n");
137 136
138 if (priv->fw_var >= 0x300) { 137 if (priv->fw_var >= 0x300) {
139 /* Firmware supports QoS, use it! */ 138 /* Firmware supports QoS, use it! */
@@ -152,13 +151,14 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
152 priv->hw->queues = P54_QUEUE_AC_NUM; 151 priv->hw->queues = P54_QUEUE_AC_NUM;
153 } 152 }
154 153
155 printk(KERN_INFO "%s: cryptographic accelerator " 154 wiphy_info(priv->hw->wiphy,
156 "WEP:%s, TKIP:%s, CCMP:%s\n", wiphy_name(priv->hw->wiphy), 155 "cryptographic accelerator WEP:%s, TKIP:%s, CCMP:%s\n",
157 (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" : 156 (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" : "no",
158 "no", (priv->privacy_caps & (BR_DESC_PRIV_CAP_TKIP | 157 (priv->privacy_caps &
159 BR_DESC_PRIV_CAP_MICHAEL)) ? "YES" : "no", 158 (BR_DESC_PRIV_CAP_TKIP | BR_DESC_PRIV_CAP_MICHAEL))
160 (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP) ? 159 ? "YES" : "no",
161 "YES" : "no"); 160 (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)
161 ? "YES" : "no");
162 162
163 if (priv->rx_keycache_size) { 163 if (priv->rx_keycache_size) {
164 /* 164 /*
@@ -247,8 +247,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
247 247
248 if (!wait_for_completion_interruptible_timeout( 248 if (!wait_for_completion_interruptible_timeout(
249 &priv->eeprom_comp, HZ)) { 249 &priv->eeprom_comp, HZ)) {
250 printk(KERN_ERR "%s: device does not respond!\n", 250 wiphy_err(priv->hw->wiphy, "device does not respond!\n");
251 wiphy_name(priv->hw->wiphy));
252 ret = -EBUSY; 251 ret = -EBUSY;
253 } 252 }
254 priv->eeprom = NULL; 253 priv->eeprom = NULL;
@@ -523,9 +522,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
523 return 0; 522 return 0;
524 523
525err: 524err:
526 printk(KERN_ERR "%s: frequency change to channel %d failed.\n", 525 wiphy_err(priv->hw->wiphy, "frequency change to channel %d failed.\n",
527 wiphy_name(priv->hw->wiphy), ieee80211_frequency_to_channel( 526 ieee80211_frequency_to_channel(
528 priv->hw->conf.channel->center_freq)); 527 priv->hw->conf.channel->center_freq));
529 528
530 dev_kfree_skb_any(skb); 529 dev_kfree_skb_any(skb);
531 return -EINVAL; 530 return -EINVAL;
@@ -676,8 +675,8 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len,
676 break; 675 break;
677 676
678 default: 677 default:
679 printk(KERN_ERR "%s: invalid cryptographic algorithm: %d\n", 678 wiphy_err(priv->hw->wiphy,
680 wiphy_name(priv->hw->wiphy), algo); 679 "invalid cryptographic algorithm: %d\n", algo);
681 dev_kfree_skb(skb); 680 dev_kfree_skb(skb);
682 return -EINVAL; 681 return -EINVAL;
683 } 682 }
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index 9575ac033630..ea91f5cce6b3 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -57,8 +57,8 @@ static void p54_update_leds(struct work_struct *work)
57 57
58 err = p54_set_leds(priv); 58 err = p54_set_leds(priv);
59 if (err && net_ratelimit()) 59 if (err && net_ratelimit())
60 printk(KERN_ERR "%s: failed to update LEDs (%d).\n", 60 wiphy_err(priv->hw->wiphy,
61 wiphy_name(priv->hw->wiphy), err); 61 "failed to update leds (%d).\n", err);
62 62
63 if (rerun) 63 if (rerun)
64 ieee80211_queue_delayed_work(priv->hw, &priv->led_work, 64 ieee80211_queue_delayed_work(priv->hw, &priv->led_work,
@@ -102,8 +102,8 @@ static int p54_register_led(struct p54_common *priv,
102 102
103 err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); 103 err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev);
104 if (err) 104 if (err)
105 printk(KERN_ERR "%s: Failed to register %s LED.\n", 105 wiphy_err(priv->hw->wiphy,
106 wiphy_name(priv->hw->wiphy), name); 106 "failed to register %s led.\n", name);
107 else 107 else
108 led->registered = 1; 108 led->registered = 1;
109 109
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index c072f41747ca..47db439b63bf 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -507,6 +507,22 @@ out_unlock:
507 return ret; 507 return ret;
508} 508}
509 509
510static int p54_get_survey(struct ieee80211_hw *dev, int idx,
511 struct survey_info *survey)
512{
513 struct p54_common *priv = dev->priv;
514 struct ieee80211_conf *conf = &dev->conf;
515
516 if (idx != 0)
517 return -ENOENT;
518
519 survey->channel = conf->channel;
520 survey->filled = SURVEY_INFO_NOISE_DBM;
521 survey->noise = clamp_t(s8, priv->noise, -128, 127);
522
523 return 0;
524}
525
510static const struct ieee80211_ops p54_ops = { 526static const struct ieee80211_ops p54_ops = {
511 .tx = p54_tx_80211, 527 .tx = p54_tx_80211,
512 .start = p54_start, 528 .start = p54_start,
@@ -523,6 +539,7 @@ static const struct ieee80211_ops p54_ops = {
523 .configure_filter = p54_configure_filter, 539 .configure_filter = p54_configure_filter,
524 .conf_tx = p54_conf_tx, 540 .conf_tx = p54_conf_tx,
525 .get_stats = p54_get_stats, 541 .get_stats = p54_get_stats,
542 .get_survey = p54_get_survey,
526}; 543};
527 544
528struct ieee80211_hw *p54_init_common(size_t priv_data_len) 545struct ieee80211_hw *p54_init_common(size_t priv_data_len)
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
index 4915d9d54203..e3ed893b5aaf 100644
--- a/drivers/net/wireless/p54/net2280.h
+++ b/drivers/net/wireless/p54/net2280.h
@@ -232,7 +232,7 @@ struct net2280_regs {
232#define GPIO2_INTERRUPT 2 232#define GPIO2_INTERRUPT 2
233#define GPIO1_INTERRUPT 1 233#define GPIO1_INTERRUPT 1
234#define GPIO0_INTERRUPT 0 234#define GPIO0_INTERRUPT 0
235} __attribute__ ((packed)); 235} __packed;
236 236
237/* usb control, BAR0 + 0x0080 */ 237/* usb control, BAR0 + 0x0080 */
238struct net2280_usb_regs { 238struct net2280_usb_regs {
@@ -296,7 +296,7 @@ struct net2280_usb_regs {
296#define FORCE_IMMEDIATE 7 296#define FORCE_IMMEDIATE 7
297#define OUR_USB_ADDRESS 0 297#define OUR_USB_ADDRESS 0
298 __le32 ourconfig; 298 __le32 ourconfig;
299} __attribute__ ((packed)); 299} __packed;
300 300
301/* pci control, BAR0 + 0x0100 */ 301/* pci control, BAR0 + 0x0100 */
302struct net2280_pci_regs { 302struct net2280_pci_regs {
@@ -323,7 +323,7 @@ struct net2280_pci_regs {
323#define PCI_ARBITER_CLEAR 2 323#define PCI_ARBITER_CLEAR 2
324#define PCI_EXTERNAL_ARBITER 1 324#define PCI_EXTERNAL_ARBITER 1
325#define PCI_HOST_MODE 0 325#define PCI_HOST_MODE 0
326} __attribute__ ((packed)); 326} __packed;
327 327
328/* dma control, BAR0 + 0x0180 ... array of four structs like this, 328/* dma control, BAR0 + 0x0180 ... array of four structs like this,
329 * for channels 0..3. see also struct net2280_dma: descriptor 329 * for channels 0..3. see also struct net2280_dma: descriptor
@@ -364,7 +364,7 @@ struct net2280_dma_regs { /* [11.7] */
364 __le32 dmaaddr; 364 __le32 dmaaddr;
365 __le32 dmadesc; 365 __le32 dmadesc;
366 u32 _unused1; 366 u32 _unused1;
367} __attribute__ ((packed)); 367} __packed;
368 368
369/* dedicated endpoint registers, BAR0 + 0x0200 */ 369/* dedicated endpoint registers, BAR0 + 0x0200 */
370 370
@@ -374,7 +374,7 @@ struct net2280_dep_regs { /* [11.8] */
374 /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ 374 /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
375 __le32 dep_rsp; 375 __le32 dep_rsp;
376 u32 _unused[2]; 376 u32 _unused[2];
377} __attribute__ ((packed)); 377} __packed;
378 378
379/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs 379/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
380 * like this, for ep0 then the configurable endpoints A..F 380 * like this, for ep0 then the configurable endpoints A..F
@@ -437,16 +437,16 @@ struct net2280_ep_regs { /* [11.9] */
437 __le32 ep_avail; 437 __le32 ep_avail;
438 __le32 ep_data; 438 __le32 ep_data;
439 u32 _unused0[2]; 439 u32 _unused0[2];
440} __attribute__ ((packed)); 440} __packed;
441 441
442struct net2280_reg_write { 442struct net2280_reg_write {
443 __le16 port; 443 __le16 port;
444 __le32 addr; 444 __le32 addr;
445 __le32 val; 445 __le32 val;
446} __attribute__ ((packed)); 446} __packed;
447 447
448struct net2280_reg_read { 448struct net2280_reg_read {
449 __le16 port; 449 __le16 port;
450 __le32 addr; 450 __le32 addr;
451} __attribute__ ((packed)); 451} __packed;
452#endif /* NET2280_H */ 452#endif /* NET2280_H */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a5ea89cde8c4..822f8dc26e9c 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -466,8 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev)
466 P54P_READ(dev_int); 466 P54P_READ(dev_int);
467 467
468 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { 468 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
469 printk(KERN_ERR "%s: Cannot boot firmware!\n", 469 wiphy_err(dev->wiphy, "cannot boot firmware!\n");
470 wiphy_name(dev->wiphy));
471 p54p_stop(dev); 470 p54p_stop(dev);
472 return -ETIMEDOUT; 471 return -ETIMEDOUT;
473 } 472 }
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 2feead617a3b..ee9bc62a4fa2 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -65,7 +65,7 @@ struct p54p_csr {
65 u8 unused_6[1924]; 65 u8 unused_6[1924];
66 u8 cardbus_cis[0x800]; 66 u8 cardbus_cis[0x800];
67 u8 direct_mem_win[0x1000]; 67 u8 direct_mem_win[0x1000];
68} __attribute__ ((packed)); 68} __packed;
69 69
70/* usb backend only needs the register defines above */ 70/* usb backend only needs the register defines above */
71#ifndef P54USB_H 71#ifndef P54USB_H
@@ -74,7 +74,7 @@ struct p54p_desc {
74 __le32 device_addr; 74 __le32 device_addr;
75 __le16 len; 75 __le16 len;
76 __le16 flags; 76 __le16 flags;
77} __attribute__ ((packed)); 77} __packed;
78 78
79struct p54p_ring_control { 79struct p54p_ring_control {
80 __le32 host_idx[4]; 80 __le32 host_idx[4];
@@ -83,7 +83,7 @@ struct p54p_ring_control {
83 struct p54p_desc tx_data[32]; 83 struct p54p_desc tx_data[32];
84 struct p54p_desc rx_mgmt[4]; 84 struct p54p_desc rx_mgmt[4];
85 struct p54p_desc tx_mgmt[4]; 85 struct p54p_desc tx_mgmt[4];
86} __attribute__ ((packed)); 86} __packed;
87 87
88#define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r) 88#define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r)
89#define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r) 89#define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r)
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index c8f09da1f84d..087bf0698a5a 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -697,9 +697,7 @@ static int __devexit p54spi_remove(struct spi_device *spi)
697 697
698static struct spi_driver p54spi_driver = { 698static struct spi_driver p54spi_driver = {
699 .driver = { 699 .driver = {
700 /* use cx3110x name because board-n800.c uses that for the 700 .name = "p54spi",
701 * SPI port */
702 .name = "cx3110x",
703 .bus = &spi_bus_type, 701 .bus = &spi_bus_type,
704 .owner = THIS_MODULE, 702 .owner = THIS_MODULE,
705 }, 703 },
@@ -733,3 +731,4 @@ module_exit(p54spi_exit);
733MODULE_LICENSE("GPL"); 731MODULE_LICENSE("GPL");
734MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); 732MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
735MODULE_ALIAS("spi:cx3110x"); 733MODULE_ALIAS("spi:cx3110x");
734MODULE_ALIAS("spi:p54spi");
diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h
index 7fbe8d8fc67c..dfaa62aaeb07 100644
--- a/drivers/net/wireless/p54/p54spi.h
+++ b/drivers/net/wireless/p54/p54spi.h
@@ -96,7 +96,7 @@ struct p54s_dma_regs {
96 __le16 cmd; 96 __le16 cmd;
97 __le16 len; 97 __le16 len;
98 __le32 addr; 98 __le32 addr;
99} __attribute__ ((packed)); 99} __packed;
100 100
101struct p54s_tx_info { 101struct p54s_tx_info {
102 struct list_head tx_list; 102 struct list_head tx_list;
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 73073259f508..ad595958b7df 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -69,7 +69,8 @@ static struct usb_device_id p54u_table[] __devinitdata = {
69 {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */ 69 {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */
70 {USB_DEVICE(0x0baf, 0x0118)}, /* U.S. Robotics U5 802.11g Adapter*/ 70 {USB_DEVICE(0x0baf, 0x0118)}, /* U.S. Robotics U5 802.11g Adapter*/
71 {USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/ 71 {USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/
72 {USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */ 72 /* {USB_DEVICE(0x0cde, 0x0006)}, * Medion MD40900 already listed above,
73 * just noting it here for clarity */
73 {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */ 74 {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */
74 {USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */ 75 {USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */
75 {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */ 76 {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */
@@ -434,10 +435,9 @@ static int p54u_firmware_reset_3887(struct ieee80211_hw *dev)
434 u8 *buf; 435 u8 *buf;
435 int ret; 436 int ret;
436 437
437 buf = kmalloc(4, GFP_KERNEL); 438 buf = kmemdup(p54u_romboot_3887, 4, GFP_KERNEL);
438 if (!buf) 439 if (!buf)
439 return -ENOMEM; 440 return -ENOMEM;
440 memcpy(buf, p54u_romboot_3887, 4);
441 ret = p54u_bulk_msg(priv, P54U_PIPE_DATA, 441 ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
442 buf, 4); 442 buf, 4);
443 kfree(buf); 443 kfree(buf);
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index e935b79f7f75..ed4034ade59a 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -70,12 +70,12 @@ struct net2280_tx_hdr {
70 __le16 len; 70 __le16 len;
71 __le16 follower; /* ? */ 71 __le16 follower; /* ? */
72 u8 padding[8]; 72 u8 padding[8];
73} __attribute__((packed)); 73} __packed;
74 74
75struct lm87_tx_hdr { 75struct lm87_tx_hdr {
76 __le32 device_addr; 76 __le32 device_addr;
77 __le32 chksum; 77 __le32 chksum;
78} __attribute__((packed)); 78} __packed;
79 79
80/* Some flags for the isl hardware registers controlling DMA inside the 80/* Some flags for the isl hardware registers controlling DMA inside the
81 * chip */ 81 * chip */
@@ -103,7 +103,7 @@ struct x2_header {
103 __le32 fw_load_addr; 103 __le32 fw_load_addr;
104 __le32 fw_length; 104 __le32 fw_length;
105 __le32 crc; 105 __le32 crc;
106} __attribute__((packed)); 106} __packed;
107 107
108/* pipes 3 and 4 are not used by the driver */ 108/* pipes 3 and 4 are not used by the driver */
109#define P54U_PIPE_NUMBER 9 109#define P54U_PIPE_NUMBER 9
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 4e6891099d43..427b46f558ed 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -38,8 +38,8 @@ static void p54_dump_tx_queue(struct p54_common *priv)
38 u32 largest_hole = 0, free; 38 u32 largest_hole = 0, free;
39 39
40 spin_lock_irqsave(&priv->tx_queue.lock, flags); 40 spin_lock_irqsave(&priv->tx_queue.lock, flags);
41 printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) ---\n", 41 wiphy_debug(priv->hw->wiphy, "/ --- tx queue dump (%d entries) ---\n",
42 wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue)); 42 skb_queue_len(&priv->tx_queue));
43 43
44 prev_addr = priv->rx_start; 44 prev_addr = priv->rx_start;
45 skb_queue_walk(&priv->tx_queue, skb) { 45 skb_queue_walk(&priv->tx_queue, skb) {
@@ -48,21 +48,23 @@ static void p54_dump_tx_queue(struct p54_common *priv)
48 hdr = (void *) skb->data; 48 hdr = (void *) skb->data;
49 49
50 free = range->start_addr - prev_addr; 50 free = range->start_addr - prev_addr;
51 printk(KERN_DEBUG "%s: | [%02d] => [skb:%p skb_len:0x%04x " 51 wiphy_debug(priv->hw->wiphy,
52 "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} " 52 "| [%02d] => [skb:%p skb_len:0x%04x "
53 "mem:{start:%04x end:%04x, free:%d}]\n", 53 "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} "
54 wiphy_name(priv->hw->wiphy), i++, skb, skb->len, 54 "mem:{start:%04x end:%04x, free:%d}]\n",
55 le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len), 55 i++, skb, skb->len,
56 le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type), 56 le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len),
57 range->start_addr, range->end_addr, free); 57 le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type),
58 range->start_addr, range->end_addr, free);
58 59
59 prev_addr = range->end_addr; 60 prev_addr = range->end_addr;
60 largest_hole = max(largest_hole, free); 61 largest_hole = max(largest_hole, free);
61 } 62 }
62 free = priv->rx_end - prev_addr; 63 free = priv->rx_end - prev_addr;
63 largest_hole = max(largest_hole, free); 64 largest_hole = max(largest_hole, free);
64 printk(KERN_DEBUG "%s: \\ --- [free: %d], largest free block: %d ---\n", 65 wiphy_debug(priv->hw->wiphy,
65 wiphy_name(priv->hw->wiphy), free, largest_hole); 66 "\\ --- [free: %d], largest free block: %d ---\n",
67 free, largest_hole);
66 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 68 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
67} 69}
68#endif /* P54_MM_DEBUG */ 70#endif /* P54_MM_DEBUG */
@@ -538,8 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
538 case P54_TRAP_BEACON_TX: 540 case P54_TRAP_BEACON_TX:
539 break; 541 break;
540 case P54_TRAP_RADAR: 542 case P54_TRAP_RADAR:
541 printk(KERN_INFO "%s: radar (freq:%d MHz)\n", 543 wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq);
542 wiphy_name(priv->hw->wiphy), freq);
543 break; 544 break;
544 case P54_TRAP_NO_BEACON: 545 case P54_TRAP_NO_BEACON:
545 if (priv->vif) 546 if (priv->vif)
@@ -558,8 +559,8 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
558 wiphy_rfkill_set_hw_state(priv->hw->wiphy, false); 559 wiphy_rfkill_set_hw_state(priv->hw->wiphy, false);
559 break; 560 break;
560 default: 561 default:
561 printk(KERN_INFO "%s: received event:%x freq:%d\n", 562 wiphy_info(priv->hw->wiphy, "received event:%x freq:%d\n",
562 wiphy_name(priv->hw->wiphy), event, freq); 563 event, freq);
563 break; 564 break;
564 } 565 }
565} 566}
@@ -584,8 +585,9 @@ static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb)
584 p54_rx_eeprom_readback(priv, skb); 585 p54_rx_eeprom_readback(priv, skb);
585 break; 586 break;
586 default: 587 default:
587 printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n", 588 wiphy_debug(priv->hw->wiphy,
588 wiphy_name(priv->hw->wiphy), le16_to_cpu(hdr->type)); 589 "not handling 0x%02x type control frame\n",
590 le16_to_cpu(hdr->type));
589 break; 591 break;
590 } 592 }
591 return 0; 593 return 0;
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 8d1190c0f062..77cd65db8500 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2067,7 +2067,7 @@ send_simple_event(islpci_private *priv, const char *str)
2067 memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL); 2067 memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL);
2068 if (!memptr) 2068 if (!memptr)
2069 return; 2069 return;
2070 BUG_ON(n > IW_CUSTOM_MAX); 2070 BUG_ON(n >= IW_CUSTOM_MAX);
2071 wrqu.data.pointer = memptr; 2071 wrqu.data.pointer = memptr;
2072 wrqu.data.length = n; 2072 wrqu.data.length = n;
2073 strcpy(memptr, str); 2073 strcpy(memptr, str);
@@ -2101,7 +2101,7 @@ struct ieee80211_beacon_phdr {
2101 u8 timestamp[8]; 2101 u8 timestamp[8];
2102 u16 beacon_int; 2102 u16 beacon_int;
2103 u16 capab_info; 2103 u16 capab_info;
2104} __attribute__ ((packed)); 2104} __packed;
2105 2105
2106#define WLAN_EID_GENERIC 0xdd 2106#define WLAN_EID_GENERIC 0xdd
2107static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; 2107static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
@@ -2751,14 +2751,9 @@ prism54_hostapd(struct net_device *ndev, struct iw_point *p)
2751 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) 2751 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
2752 return -EINVAL; 2752 return -EINVAL;
2753 2753
2754 param = kmalloc(p->length, GFP_KERNEL); 2754 param = memdup_user(p->pointer, p->length);
2755 if (param == NULL) 2755 if (IS_ERR(param))
2756 return -ENOMEM; 2756 return PTR_ERR(param);
2757
2758 if (copy_from_user(param, p->pointer, p->length)) {
2759 kfree(param);
2760 return -EFAULT;
2761 }
2762 2757
2763 switch (param->cmd) { 2758 switch (param->cmd) {
2764 case PRISM2_SET_ENCRYPTION: 2759 case PRISM2_SET_ENCRYPTION:
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index b7534c2869c8..59e31258d450 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -29,20 +29,20 @@
29struct obj_ssid { 29struct obj_ssid {
30 u8 length; 30 u8 length;
31 char octets[33]; 31 char octets[33];
32} __attribute__ ((packed)); 32} __packed;
33 33
34struct obj_key { 34struct obj_key {
35 u8 type; /* dot11_priv_t */ 35 u8 type; /* dot11_priv_t */
36 u8 length; 36 u8 length;
37 char key[32]; 37 char key[32];
38} __attribute__ ((packed)); 38} __packed;
39 39
40struct obj_mlme { 40struct obj_mlme {
41 u8 address[6]; 41 u8 address[6];
42 u16 id; 42 u16 id;
43 u16 state; 43 u16 state;
44 u16 code; 44 u16 code;
45} __attribute__ ((packed)); 45} __packed;
46 46
47struct obj_mlmeex { 47struct obj_mlmeex {
48 u8 address[6]; 48 u8 address[6];
@@ -51,12 +51,12 @@ struct obj_mlmeex {
51 u16 code; 51 u16 code;
52 u16 size; 52 u16 size;
53 u8 data[0]; 53 u8 data[0];
54} __attribute__ ((packed)); 54} __packed;
55 55
56struct obj_buffer { 56struct obj_buffer {
57 u32 size; 57 u32 size;
58 u32 addr; /* 32bit bus address */ 58 u32 addr; /* 32bit bus address */
59} __attribute__ ((packed)); 59} __packed;
60 60
61struct obj_bss { 61struct obj_bss {
62 u8 address[6]; 62 u8 address[6];
@@ -77,17 +77,17 @@ struct obj_bss {
77 short rates; 77 short rates;
78 short basic_rates; 78 short basic_rates;
79 int:16; /* padding */ 79 int:16; /* padding */
80} __attribute__ ((packed)); 80} __packed;
81 81
82struct obj_bsslist { 82struct obj_bsslist {
83 u32 nr; 83 u32 nr;
84 struct obj_bss bsslist[0]; 84 struct obj_bss bsslist[0];
85} __attribute__ ((packed)); 85} __packed;
86 86
87struct obj_frequencies { 87struct obj_frequencies {
88 u16 nr; 88 u16 nr;
89 u16 mhz[0]; 89 u16 mhz[0];
90} __attribute__ ((packed)); 90} __packed;
91 91
92struct obj_attachment { 92struct obj_attachment {
93 char type; 93 char type;
@@ -95,7 +95,7 @@ struct obj_attachment {
95 short id; 95 short id;
96 short size; 96 short size;
97 char data[0]; 97 char data[0];
98} __attribute__((packed)); 98} __packed;
99 99
100/* 100/*
101 * in case everything's ok, the inlined function below will be 101 * in case everything's ok, the inlined function below will be
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 54f9a4b7bf9b..6ca30a5b7bfb 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -34,13 +34,13 @@ struct rfmon_header {
34 __le16 unk3; 34 __le16 unk3;
35 u8 rssi; 35 u8 rssi;
36 u8 padding[3]; 36 u8 padding[3];
37} __attribute__ ((packed)); 37} __packed;
38 38
39struct rx_annex_header { 39struct rx_annex_header {
40 u8 addr1[ETH_ALEN]; 40 u8 addr1[ETH_ALEN];
41 u8 addr2[ETH_ALEN]; 41 u8 addr2[ETH_ALEN];
42 struct rfmon_header rfmon; 42 struct rfmon_header rfmon;
43} __attribute__ ((packed)); 43} __packed;
44 44
45/* wlan-ng (and hopefully others) AVS header, version one. Fields in 45/* wlan-ng (and hopefully others) AVS header, version one. Fields in
46 * network byte order. */ 46 * network byte order. */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 0b27e50fe0d5..0db93db9b675 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -101,7 +101,7 @@ typedef struct {
101 u8 device_id; 101 u8 device_id;
102 u8 flags; 102 u8 flags;
103 u32 length; 103 u32 length;
104} __attribute__ ((packed)) 104} __packed
105pimfor_header_t; 105pimfor_header_t;
106 106
107/* A received and interrupt-processed management frame, either for 107/* A received and interrupt-processed management frame, either for
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index abff8934db13..9c38fc331dca 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -97,7 +97,6 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev);
97static const struct iw_handler_def ray_handler_def; 97static const struct iw_handler_def ray_handler_def;
98 98
99/***** Prototypes for raylink functions **************************************/ 99/***** Prototypes for raylink functions **************************************/
100static int asc_to_int(char a);
101static void authenticate(ray_dev_t *local); 100static void authenticate(ray_dev_t *local);
102static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type); 101static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type);
103static void authenticate_timeout(u_long); 102static void authenticate_timeout(u_long);
@@ -1717,24 +1716,6 @@ static void authenticate_timeout(u_long data)
1717} 1716}
1718 1717
1719/*===========================================================================*/ 1718/*===========================================================================*/
1720static int asc_to_int(char a)
1721{
1722 if (a < '0')
1723 return -1;
1724 if (a <= '9')
1725 return (a - '0');
1726 if (a < 'A')
1727 return -1;
1728 if (a <= 'F')
1729 return (10 + a - 'A');
1730 if (a < 'a')
1731 return -1;
1732 if (a <= 'f')
1733 return (10 + a - 'a');
1734 return -1;
1735}
1736
1737/*===========================================================================*/
1738static int parse_addr(char *in_str, UCHAR *out) 1719static int parse_addr(char *in_str, UCHAR *out)
1739{ 1720{
1740 int len; 1721 int len;
@@ -1754,14 +1735,14 @@ static int parse_addr(char *in_str, UCHAR *out)
1754 i = 5; 1735 i = 5;
1755 1736
1756 while (j > 0) { 1737 while (j > 0) {
1757 if ((k = asc_to_int(in_str[j--])) != -1) 1738 if ((k = hex_to_bin(in_str[j--])) != -1)
1758 out[i] = k; 1739 out[i] = k;
1759 else 1740 else
1760 return 0; 1741 return 0;
1761 1742
1762 if (j == 0) 1743 if (j == 0)
1763 break; 1744 break;
1764 if ((k = asc_to_int(in_str[j--])) != -1) 1745 if ((k = hex_to_bin(in_str[j--])) != -1)
1765 out[i] += k << 4; 1746 out[i] += k << 4;
1766 else 1747 else
1767 return 0; 1748 return 0;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 4bd61ee627c0..719573bbbf81 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -238,19 +238,19 @@ struct ndis_80211_auth_request {
238 u8 bssid[6]; 238 u8 bssid[6];
239 u8 padding[2]; 239 u8 padding[2];
240 __le32 flags; 240 __le32 flags;
241} __attribute__((packed)); 241} __packed;
242 242
243struct ndis_80211_pmkid_candidate { 243struct ndis_80211_pmkid_candidate {
244 u8 bssid[6]; 244 u8 bssid[6];
245 u8 padding[2]; 245 u8 padding[2];
246 __le32 flags; 246 __le32 flags;
247} __attribute__((packed)); 247} __packed;
248 248
249struct ndis_80211_pmkid_cand_list { 249struct ndis_80211_pmkid_cand_list {
250 __le32 version; 250 __le32 version;
251 __le32 num_candidates; 251 __le32 num_candidates;
252 struct ndis_80211_pmkid_candidate candidate_list[0]; 252 struct ndis_80211_pmkid_candidate candidate_list[0];
253} __attribute__((packed)); 253} __packed;
254 254
255struct ndis_80211_status_indication { 255struct ndis_80211_status_indication {
256 __le32 status_type; 256 __le32 status_type;
@@ -260,19 +260,19 @@ struct ndis_80211_status_indication {
260 struct ndis_80211_auth_request auth_request[0]; 260 struct ndis_80211_auth_request auth_request[0];
261 struct ndis_80211_pmkid_cand_list cand_list; 261 struct ndis_80211_pmkid_cand_list cand_list;
262 } u; 262 } u;
263} __attribute__((packed)); 263} __packed;
264 264
265struct ndis_80211_ssid { 265struct ndis_80211_ssid {
266 __le32 length; 266 __le32 length;
267 u8 essid[NDIS_802_11_LENGTH_SSID]; 267 u8 essid[NDIS_802_11_LENGTH_SSID];
268} __attribute__((packed)); 268} __packed;
269 269
270struct ndis_80211_conf_freq_hop { 270struct ndis_80211_conf_freq_hop {
271 __le32 length; 271 __le32 length;
272 __le32 hop_pattern; 272 __le32 hop_pattern;
273 __le32 hop_set; 273 __le32 hop_set;
274 __le32 dwell_time; 274 __le32 dwell_time;
275} __attribute__((packed)); 275} __packed;
276 276
277struct ndis_80211_conf { 277struct ndis_80211_conf {
278 __le32 length; 278 __le32 length;
@@ -280,7 +280,7 @@ struct ndis_80211_conf {
280 __le32 atim_window; 280 __le32 atim_window;
281 __le32 ds_config; 281 __le32 ds_config;
282 struct ndis_80211_conf_freq_hop fh_config; 282 struct ndis_80211_conf_freq_hop fh_config;
283} __attribute__((packed)); 283} __packed;
284 284
285struct ndis_80211_bssid_ex { 285struct ndis_80211_bssid_ex {
286 __le32 length; 286 __le32 length;
@@ -295,25 +295,25 @@ struct ndis_80211_bssid_ex {
295 u8 rates[NDIS_802_11_LENGTH_RATES_EX]; 295 u8 rates[NDIS_802_11_LENGTH_RATES_EX];
296 __le32 ie_length; 296 __le32 ie_length;
297 u8 ies[0]; 297 u8 ies[0];
298} __attribute__((packed)); 298} __packed;
299 299
300struct ndis_80211_bssid_list_ex { 300struct ndis_80211_bssid_list_ex {
301 __le32 num_items; 301 __le32 num_items;
302 struct ndis_80211_bssid_ex bssid[0]; 302 struct ndis_80211_bssid_ex bssid[0];
303} __attribute__((packed)); 303} __packed;
304 304
305struct ndis_80211_fixed_ies { 305struct ndis_80211_fixed_ies {
306 u8 timestamp[8]; 306 u8 timestamp[8];
307 __le16 beacon_interval; 307 __le16 beacon_interval;
308 __le16 capabilities; 308 __le16 capabilities;
309} __attribute__((packed)); 309} __packed;
310 310
311struct ndis_80211_wep_key { 311struct ndis_80211_wep_key {
312 __le32 size; 312 __le32 size;
313 __le32 index; 313 __le32 index;
314 __le32 length; 314 __le32 length;
315 u8 material[32]; 315 u8 material[32];
316} __attribute__((packed)); 316} __packed;
317 317
318struct ndis_80211_key { 318struct ndis_80211_key {
319 __le32 size; 319 __le32 size;
@@ -323,14 +323,14 @@ struct ndis_80211_key {
323 u8 padding[6]; 323 u8 padding[6];
324 u8 rsc[8]; 324 u8 rsc[8];
325 u8 material[32]; 325 u8 material[32];
326} __attribute__((packed)); 326} __packed;
327 327
328struct ndis_80211_remove_key { 328struct ndis_80211_remove_key {
329 __le32 size; 329 __le32 size;
330 __le32 index; 330 __le32 index;
331 u8 bssid[6]; 331 u8 bssid[6];
332 u8 padding[2]; 332 u8 padding[2];
333} __attribute__((packed)); 333} __packed;
334 334
335struct ndis_config_param { 335struct ndis_config_param {
336 __le32 name_offs; 336 __le32 name_offs;
@@ -338,7 +338,7 @@ struct ndis_config_param {
338 __le32 type; 338 __le32 type;
339 __le32 value_offs; 339 __le32 value_offs;
340 __le32 value_length; 340 __le32 value_length;
341} __attribute__((packed)); 341} __packed;
342 342
343struct ndis_80211_assoc_info { 343struct ndis_80211_assoc_info {
344 __le32 length; 344 __le32 length;
@@ -358,12 +358,12 @@ struct ndis_80211_assoc_info {
358 } resp_ie; 358 } resp_ie;
359 __le32 resp_ie_length; 359 __le32 resp_ie_length;
360 __le32 offset_resp_ies; 360 __le32 offset_resp_ies;
361} __attribute__((packed)); 361} __packed;
362 362
363struct ndis_80211_auth_encr_pair { 363struct ndis_80211_auth_encr_pair {
364 __le32 auth_mode; 364 __le32 auth_mode;
365 __le32 encr_mode; 365 __le32 encr_mode;
366} __attribute__((packed)); 366} __packed;
367 367
368struct ndis_80211_capability { 368struct ndis_80211_capability {
369 __le32 length; 369 __le32 length;
@@ -371,7 +371,7 @@ struct ndis_80211_capability {
371 __le32 num_pmkids; 371 __le32 num_pmkids;
372 __le32 num_auth_encr_pair; 372 __le32 num_auth_encr_pair;
373 struct ndis_80211_auth_encr_pair auth_encr_pair[0]; 373 struct ndis_80211_auth_encr_pair auth_encr_pair[0];
374} __attribute__((packed)); 374} __packed;
375 375
376struct ndis_80211_bssid_info { 376struct ndis_80211_bssid_info {
377 u8 bssid[6]; 377 u8 bssid[6];
@@ -520,8 +520,9 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
520 520
521static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed); 521static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
522 522
523static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, 523static int rndis_set_tx_power(struct wiphy *wiphy,
524 int dbm); 524 enum nl80211_tx_power_setting type,
525 int mbm);
525static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm); 526static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm);
526 527
527static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, 528static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
@@ -1856,20 +1857,25 @@ static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1856 return 0; 1857 return 0;
1857} 1858}
1858 1859
1859static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, 1860static int rndis_set_tx_power(struct wiphy *wiphy,
1860 int dbm) 1861 enum nl80211_tx_power_setting type,
1862 int mbm)
1861{ 1863{
1862 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 1864 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
1863 struct usbnet *usbdev = priv->usbdev; 1865 struct usbnet *usbdev = priv->usbdev;
1864 1866
1865 netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n", 1867 netdev_dbg(usbdev->net, "%s(): type:0x%x mbm:%i\n",
1866 __func__, type, dbm); 1868 __func__, type, mbm);
1869
1870 if (mbm < 0 || (mbm % 100))
1871 return -ENOTSUPP;
1867 1872
1868 /* Device doesn't support changing txpower after initialization, only 1873 /* Device doesn't support changing txpower after initialization, only
1869 * turn off/on radio. Support 'auto' mode and setting same dBm that is 1874 * turn off/on radio. Support 'auto' mode and setting same dBm that is
1870 * currently used. 1875 * currently used.
1871 */ 1876 */
1872 if (type == TX_POWER_AUTOMATIC || dbm == get_bcm4320_power_dbm(priv)) { 1877 if (type == NL80211_TX_POWER_AUTOMATIC ||
1878 MBM_TO_DBM(mbm) == get_bcm4320_power_dbm(priv)) {
1873 if (!priv->radio_on) 1879 if (!priv->radio_on)
1874 disassociate(usbdev, true); /* turn on radio */ 1880 disassociate(usbdev, true); /* turn on radio */
1875 1881
@@ -2495,8 +2501,7 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
2495static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) 2501static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2496{ 2502{
2497 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2503 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2498 struct ndis_80211_assoc_info *info; 2504 struct ndis_80211_assoc_info *info = NULL;
2499 u8 assoc_buf[sizeof(*info) + IW_CUSTOM_MAX + 32];
2500 u8 bssid[ETH_ALEN]; 2505 u8 bssid[ETH_ALEN];
2501 int resp_ie_len, req_ie_len; 2506 int resp_ie_len, req_ie_len;
2502 u8 *req_ie, *resp_ie; 2507 u8 *req_ie, *resp_ie;
@@ -2515,23 +2520,43 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2515 resp_ie = NULL; 2520 resp_ie = NULL;
2516 2521
2517 if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { 2522 if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
2518 memset(assoc_buf, 0, sizeof(assoc_buf)); 2523 info = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
2519 info = (void *)assoc_buf; 2524 if (!info) {
2525 /* No memory? Try resume work later */
2526 set_bit(WORK_LINK_UP, &priv->work_pending);
2527 queue_work(priv->workqueue, &priv->work);
2528 return;
2529 }
2520 2530
2521 /* Get association info IEs from device and send them back to 2531 /* Get association info IEs from device. */
2522 * userspace. */ 2532 ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE);
2523 ret = get_association_info(usbdev, info, sizeof(assoc_buf));
2524 if (!ret) { 2533 if (!ret) {
2525 req_ie_len = le32_to_cpu(info->req_ie_length); 2534 req_ie_len = le32_to_cpu(info->req_ie_length);
2526 if (req_ie_len > 0) { 2535 if (req_ie_len > 0) {
2527 offset = le32_to_cpu(info->offset_req_ies); 2536 offset = le32_to_cpu(info->offset_req_ies);
2537
2538 if (offset > CONTROL_BUFFER_SIZE)
2539 offset = CONTROL_BUFFER_SIZE;
2540
2528 req_ie = (u8 *)info + offset; 2541 req_ie = (u8 *)info + offset;
2542
2543 if (offset + req_ie_len > CONTROL_BUFFER_SIZE)
2544 req_ie_len =
2545 CONTROL_BUFFER_SIZE - offset;
2529 } 2546 }
2530 2547
2531 resp_ie_len = le32_to_cpu(info->resp_ie_length); 2548 resp_ie_len = le32_to_cpu(info->resp_ie_length);
2532 if (resp_ie_len > 0) { 2549 if (resp_ie_len > 0) {
2533 offset = le32_to_cpu(info->offset_resp_ies); 2550 offset = le32_to_cpu(info->offset_resp_ies);
2551
2552 if (offset > CONTROL_BUFFER_SIZE)
2553 offset = CONTROL_BUFFER_SIZE;
2554
2534 resp_ie = (u8 *)info + offset; 2555 resp_ie = (u8 *)info + offset;
2556
2557 if (offset + resp_ie_len > CONTROL_BUFFER_SIZE)
2558 resp_ie_len =
2559 CONTROL_BUFFER_SIZE - offset;
2535 } 2560 }
2536 } 2561 }
2537 } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC)) 2562 } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
@@ -2563,6 +2588,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2563 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) 2588 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
2564 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); 2589 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
2565 2590
2591 if (info != NULL)
2592 kfree(info);
2593
2566 priv->connected = true; 2594 priv->connected = true;
2567 memcpy(priv->bssid, bssid, ETH_ALEN); 2595 memcpy(priv->bssid, bssid, ETH_ALEN);
2568 2596
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index ad2c98af7e9d..5063e01410e5 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -586,9 +586,11 @@ static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev,
586static inline void rt2400pci_set_vgc(struct rt2x00_dev *rt2x00dev, 586static inline void rt2400pci_set_vgc(struct rt2x00_dev *rt2x00dev,
587 struct link_qual *qual, u8 vgc_level) 587 struct link_qual *qual, u8 vgc_level)
588{ 588{
589 rt2400pci_bbp_write(rt2x00dev, 13, vgc_level); 589 if (qual->vgc_level_reg != vgc_level) {
590 qual->vgc_level = vgc_level; 590 rt2400pci_bbp_write(rt2x00dev, 13, vgc_level);
591 qual->vgc_level_reg = vgc_level; 591 qual->vgc_level = vgc_level;
592 qual->vgc_level_reg = vgc_level;
593 }
592} 594}
593 595
594static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev, 596static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev,
@@ -877,7 +879,8 @@ static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
877static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 879static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
878 enum dev_state state) 880 enum dev_state state)
879{ 881{
880 int mask = (state == STATE_RADIO_IRQ_OFF); 882 int mask = (state == STATE_RADIO_IRQ_OFF) ||
883 (state == STATE_RADIO_IRQ_OFF_ISR);
881 u32 reg; 884 u32 reg;
882 885
883 /* 886 /*
@@ -978,7 +981,9 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
978 rt2400pci_toggle_rx(rt2x00dev, state); 981 rt2400pci_toggle_rx(rt2x00dev, state);
979 break; 982 break;
980 case STATE_RADIO_IRQ_ON: 983 case STATE_RADIO_IRQ_ON:
984 case STATE_RADIO_IRQ_ON_ISR:
981 case STATE_RADIO_IRQ_OFF: 985 case STATE_RADIO_IRQ_OFF:
986 case STATE_RADIO_IRQ_OFF_ISR:
982 rt2400pci_toggle_irq(rt2x00dev, state); 987 rt2400pci_toggle_irq(rt2x00dev, state);
983 break; 988 break;
984 case STATE_DEEP_SLEEP: 989 case STATE_DEEP_SLEEP:
@@ -1076,9 +1081,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1076 struct txentry_desc *txdesc) 1081 struct txentry_desc *txdesc)
1077{ 1082{
1078 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1083 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1079 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1080 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1081 u32 word;
1082 u32 reg; 1084 u32 reg;
1083 1085
1084 /* 1086 /*
@@ -1091,9 +1093,15 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1091 1093
1092 rt2x00queue_map_txskb(rt2x00dev, entry->skb); 1094 rt2x00queue_map_txskb(rt2x00dev, entry->skb);
1093 1095
1094 rt2x00_desc_read(entry_priv->desc, 1, &word); 1096 /*
1095 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 1097 * Write the TX descriptor for the beacon.
1096 rt2x00_desc_write(entry_priv->desc, 1, word); 1098 */
1099 rt2400pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
1100
1101 /*
1102 * Dump beacon to userspace through debugfs.
1103 */
1104 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
1097 1105
1098 /* 1106 /*
1099 * Enable beaconing again. 1107 * Enable beaconing again.
@@ -1230,23 +1238,10 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1230 } 1238 }
1231} 1239}
1232 1240
1233static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) 1241static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance)
1234{ 1242{
1235 struct rt2x00_dev *rt2x00dev = dev_instance; 1243 struct rt2x00_dev *rt2x00dev = dev_instance;
1236 u32 reg; 1244 u32 reg = rt2x00dev->irqvalue[0];
1237
1238 /*
1239 * Get the interrupt sources & saved to local variable.
1240 * Write register value back to clear pending interrupts.
1241 */
1242 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
1243 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
1244
1245 if (!reg)
1246 return IRQ_NONE;
1247
1248 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1249 return IRQ_HANDLED;
1250 1245
1251 /* 1246 /*
1252 * Handle interrupts, walk through all bits 1247 * Handle interrupts, walk through all bits
@@ -1284,9 +1279,40 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1284 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1279 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1285 rt2400pci_txdone(rt2x00dev, QID_AC_BK); 1280 rt2400pci_txdone(rt2x00dev, QID_AC_BK);
1286 1281
1282 /* Enable interrupts again. */
1283 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
1284 STATE_RADIO_IRQ_ON_ISR);
1287 return IRQ_HANDLED; 1285 return IRQ_HANDLED;
1288} 1286}
1289 1287
1288static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1289{
1290 struct rt2x00_dev *rt2x00dev = dev_instance;
1291 u32 reg;
1292
1293 /*
1294 * Get the interrupt sources & saved to local variable.
1295 * Write register value back to clear pending interrupts.
1296 */
1297 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
1298 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
1299
1300 if (!reg)
1301 return IRQ_NONE;
1302
1303 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1304 return IRQ_HANDLED;
1305
1306 /* Store irqvalues for use in the interrupt thread. */
1307 rt2x00dev->irqvalue[0] = reg;
1308
1309 /* Disable interrupts, will be enabled again in the interrupt thread. */
1310 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
1311 STATE_RADIO_IRQ_OFF_ISR);
1312
1313 return IRQ_WAKE_THREAD;
1314}
1315
1290/* 1316/*
1291 * Device probe functions. 1317 * Device probe functions.
1292 */ 1318 */
@@ -1396,8 +1422,8 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1396 /* 1422 /*
1397 * Check if the BBP tuning should be enabled. 1423 * Check if the BBP tuning should be enabled.
1398 */ 1424 */
1399 if (!rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) 1425 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING))
1400 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); 1426 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
1401 1427
1402 return 0; 1428 return 0;
1403} 1429}
@@ -1563,7 +1589,8 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1563 .remove_interface = rt2x00mac_remove_interface, 1589 .remove_interface = rt2x00mac_remove_interface,
1564 .config = rt2x00mac_config, 1590 .config = rt2x00mac_config,
1565 .configure_filter = rt2x00mac_configure_filter, 1591 .configure_filter = rt2x00mac_configure_filter,
1566 .set_tim = rt2x00mac_set_tim, 1592 .sw_scan_start = rt2x00mac_sw_scan_start,
1593 .sw_scan_complete = rt2x00mac_sw_scan_complete,
1567 .get_stats = rt2x00mac_get_stats, 1594 .get_stats = rt2x00mac_get_stats,
1568 .bss_info_changed = rt2x00mac_bss_info_changed, 1595 .bss_info_changed = rt2x00mac_bss_info_changed,
1569 .conf_tx = rt2400pci_conf_tx, 1596 .conf_tx = rt2400pci_conf_tx,
@@ -1574,6 +1601,7 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1574 1601
1575static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { 1602static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1576 .irq_handler = rt2400pci_interrupt, 1603 .irq_handler = rt2400pci_interrupt,
1604 .irq_handler_thread = rt2400pci_interrupt_thread,
1577 .probe_hw = rt2400pci_probe_hw, 1605 .probe_hw = rt2400pci_probe_hw,
1578 .initialize = rt2x00pci_initialize, 1606 .initialize = rt2x00pci_initialize,
1579 .uninitialize = rt2x00pci_uninitialize, 1607 .uninitialize = rt2x00pci_uninitialize,
@@ -1585,7 +1613,6 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1585 .reset_tuner = rt2400pci_reset_tuner, 1613 .reset_tuner = rt2400pci_reset_tuner,
1586 .link_tuner = rt2400pci_link_tuner, 1614 .link_tuner = rt2400pci_link_tuner,
1587 .write_tx_desc = rt2400pci_write_tx_desc, 1615 .write_tx_desc = rt2400pci_write_tx_desc,
1588 .write_tx_data = rt2x00pci_write_tx_data,
1589 .write_beacon = rt2400pci_write_beacon, 1616 .write_beacon = rt2400pci_write_beacon,
1590 .kick_tx_queue = rt2400pci_kick_tx_queue, 1617 .kick_tx_queue = rt2400pci_kick_tx_queue,
1591 .kill_tx_queue = rt2400pci_kill_tx_queue, 1618 .kill_tx_queue = rt2400pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 41da3d218c65..c2a555d5376b 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -626,6 +626,7 @@ static inline void rt2500pci_set_vgc(struct rt2x00_dev *rt2x00dev,
626{ 626{
627 if (qual->vgc_level_reg != vgc_level) { 627 if (qual->vgc_level_reg != vgc_level) {
628 rt2500pci_bbp_write(rt2x00dev, 17, vgc_level); 628 rt2500pci_bbp_write(rt2x00dev, 17, vgc_level);
629 qual->vgc_level = vgc_level;
629 qual->vgc_level_reg = vgc_level; 630 qual->vgc_level_reg = vgc_level;
630 } 631 }
631} 632}
@@ -700,13 +701,10 @@ dynamic_cca_tune:
700 * R17 is inside the dynamic tuning range, 701 * R17 is inside the dynamic tuning range,
701 * start tuning the link based on the false cca counter. 702 * start tuning the link based on the false cca counter.
702 */ 703 */
703 if (qual->false_cca > 512 && qual->vgc_level_reg < 0x40) { 704 if (qual->false_cca > 512 && qual->vgc_level_reg < 0x40)
704 rt2500pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level_reg); 705 rt2500pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level_reg);
705 qual->vgc_level = qual->vgc_level_reg; 706 else if (qual->false_cca < 100 && qual->vgc_level_reg > 0x32)
706 } else if (qual->false_cca < 100 && qual->vgc_level_reg > 0x32) {
707 rt2500pci_set_vgc(rt2x00dev, qual, --qual->vgc_level_reg); 707 rt2500pci_set_vgc(rt2x00dev, qual, --qual->vgc_level_reg);
708 qual->vgc_level = qual->vgc_level_reg;
709 }
710} 708}
711 709
712/* 710/*
@@ -1035,7 +1033,8 @@ static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
1035static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1033static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1036 enum dev_state state) 1034 enum dev_state state)
1037{ 1035{
1038 int mask = (state == STATE_RADIO_IRQ_OFF); 1036 int mask = (state == STATE_RADIO_IRQ_OFF) ||
1037 (state == STATE_RADIO_IRQ_OFF_ISR);
1039 u32 reg; 1038 u32 reg;
1040 1039
1041 /* 1040 /*
@@ -1136,7 +1135,9 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1136 rt2500pci_toggle_rx(rt2x00dev, state); 1135 rt2500pci_toggle_rx(rt2x00dev, state);
1137 break; 1136 break;
1138 case STATE_RADIO_IRQ_ON: 1137 case STATE_RADIO_IRQ_ON:
1138 case STATE_RADIO_IRQ_ON_ISR:
1139 case STATE_RADIO_IRQ_OFF: 1139 case STATE_RADIO_IRQ_OFF:
1140 case STATE_RADIO_IRQ_OFF_ISR:
1140 rt2500pci_toggle_irq(rt2x00dev, state); 1141 rt2500pci_toggle_irq(rt2x00dev, state);
1141 break; 1142 break;
1142 case STATE_DEEP_SLEEP: 1143 case STATE_DEEP_SLEEP:
@@ -1233,9 +1234,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1233 struct txentry_desc *txdesc) 1234 struct txentry_desc *txdesc)
1234{ 1235{
1235 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1236 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1236 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1237 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1238 u32 word;
1239 u32 reg; 1237 u32 reg;
1240 1238
1241 /* 1239 /*
@@ -1248,9 +1246,15 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1248 1246
1249 rt2x00queue_map_txskb(rt2x00dev, entry->skb); 1247 rt2x00queue_map_txskb(rt2x00dev, entry->skb);
1250 1248
1251 rt2x00_desc_read(entry_priv->desc, 1, &word); 1249 /*
1252 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 1250 * Write the TX descriptor for the beacon.
1253 rt2x00_desc_write(entry_priv->desc, 1, word); 1251 */
1252 rt2500pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
1253
1254 /*
1255 * Dump beacon to userspace through debugfs.
1256 */
1257 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
1254 1258
1255 /* 1259 /*
1256 * Enable beaconing again. 1260 * Enable beaconing again.
@@ -1366,23 +1370,10 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1366 } 1370 }
1367} 1371}
1368 1372
1369static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) 1373static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance)
1370{ 1374{
1371 struct rt2x00_dev *rt2x00dev = dev_instance; 1375 struct rt2x00_dev *rt2x00dev = dev_instance;
1372 u32 reg; 1376 u32 reg = rt2x00dev->irqvalue[0];
1373
1374 /*
1375 * Get the interrupt sources & saved to local variable.
1376 * Write register value back to clear pending interrupts.
1377 */
1378 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
1379 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
1380
1381 if (!reg)
1382 return IRQ_NONE;
1383
1384 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1385 return IRQ_HANDLED;
1386 1377
1387 /* 1378 /*
1388 * Handle interrupts, walk through all bits 1379 * Handle interrupts, walk through all bits
@@ -1420,9 +1411,41 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1420 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1411 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1421 rt2500pci_txdone(rt2x00dev, QID_AC_BK); 1412 rt2500pci_txdone(rt2x00dev, QID_AC_BK);
1422 1413
1414 /* Enable interrupts again. */
1415 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
1416 STATE_RADIO_IRQ_ON_ISR);
1417
1423 return IRQ_HANDLED; 1418 return IRQ_HANDLED;
1424} 1419}
1425 1420
1421static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1422{
1423 struct rt2x00_dev *rt2x00dev = dev_instance;
1424 u32 reg;
1425
1426 /*
1427 * Get the interrupt sources & saved to local variable.
1428 * Write register value back to clear pending interrupts.
1429 */
1430 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
1431 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
1432
1433 if (!reg)
1434 return IRQ_NONE;
1435
1436 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1437 return IRQ_HANDLED;
1438
1439 /* Store irqvalues for use in the interrupt thread. */
1440 rt2x00dev->irqvalue[0] = reg;
1441
1442 /* Disable interrupts, will be enabled again in the interrupt thread. */
1443 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
1444 STATE_RADIO_IRQ_OFF_ISR);
1445
1446 return IRQ_WAKE_THREAD;
1447}
1448
1426/* 1449/*
1427 * Device probe functions. 1450 * Device probe functions.
1428 */ 1451 */
@@ -1554,9 +1577,8 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1554 * Check if the BBP tuning should be enabled. 1577 * Check if the BBP tuning should be enabled.
1555 */ 1578 */
1556 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 1579 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1557 1580 if (!rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE))
1558 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) 1581 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
1559 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags);
1560 1582
1561 /* 1583 /*
1562 * Read the RSSI <-> dBm offset information. 1584 * Read the RSSI <-> dBm offset information.
@@ -1861,7 +1883,8 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1861 .remove_interface = rt2x00mac_remove_interface, 1883 .remove_interface = rt2x00mac_remove_interface,
1862 .config = rt2x00mac_config, 1884 .config = rt2x00mac_config,
1863 .configure_filter = rt2x00mac_configure_filter, 1885 .configure_filter = rt2x00mac_configure_filter,
1864 .set_tim = rt2x00mac_set_tim, 1886 .sw_scan_start = rt2x00mac_sw_scan_start,
1887 .sw_scan_complete = rt2x00mac_sw_scan_complete,
1865 .get_stats = rt2x00mac_get_stats, 1888 .get_stats = rt2x00mac_get_stats,
1866 .bss_info_changed = rt2x00mac_bss_info_changed, 1889 .bss_info_changed = rt2x00mac_bss_info_changed,
1867 .conf_tx = rt2x00mac_conf_tx, 1890 .conf_tx = rt2x00mac_conf_tx,
@@ -1872,6 +1895,7 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1872 1895
1873static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { 1896static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1874 .irq_handler = rt2500pci_interrupt, 1897 .irq_handler = rt2500pci_interrupt,
1898 .irq_handler_thread = rt2500pci_interrupt_thread,
1875 .probe_hw = rt2500pci_probe_hw, 1899 .probe_hw = rt2500pci_probe_hw,
1876 .initialize = rt2x00pci_initialize, 1900 .initialize = rt2x00pci_initialize,
1877 .uninitialize = rt2x00pci_uninitialize, 1901 .uninitialize = rt2x00pci_uninitialize,
@@ -1883,7 +1907,6 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1883 .reset_tuner = rt2500pci_reset_tuner, 1907 .reset_tuner = rt2500pci_reset_tuner,
1884 .link_tuner = rt2500pci_link_tuner, 1908 .link_tuner = rt2500pci_link_tuner,
1885 .write_tx_desc = rt2500pci_write_tx_desc, 1909 .write_tx_desc = rt2500pci_write_tx_desc,
1886 .write_tx_data = rt2x00pci_write_tx_data,
1887 .write_beacon = rt2500pci_write_beacon, 1910 .write_beacon = rt2500pci_write_beacon,
1888 .kick_tx_queue = rt2500pci_kick_tx_queue, 1911 .kick_tx_queue = rt2500pci_kick_tx_queue,
1889 .kill_tx_queue = rt2500pci_kill_tx_queue, 1912 .kill_tx_queue = rt2500pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9ae96a626e6d..cdaf93f48263 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -345,12 +345,20 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
345 struct rt2x00lib_crypto *crypto, 345 struct rt2x00lib_crypto *crypto,
346 struct ieee80211_key_conf *key) 346 struct ieee80211_key_conf *key)
347{ 347{
348 int timeout;
349 u32 mask; 348 u32 mask;
350 u16 reg; 349 u16 reg;
350 enum cipher curr_cipher;
351 351
352 if (crypto->cmd == SET_KEY) { 352 if (crypto->cmd == SET_KEY) {
353 /* 353 /*
354 * Disallow to set WEP key other than with index 0,
355 * it is known that not work at least on some hardware.
356 * SW crypto will be used in that case.
357 */
358 if (key->alg == ALG_WEP && key->keyidx != 0)
359 return -EOPNOTSUPP;
360
361 /*
354 * Pairwise key will always be entry 0, but this 362 * Pairwise key will always be entry 0, but this
355 * could collide with a shared key on the same 363 * could collide with a shared key on the same
356 * position... 364 * position...
@@ -358,6 +366,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
358 mask = TXRX_CSR0_KEY_ID.bit_mask; 366 mask = TXRX_CSR0_KEY_ID.bit_mask;
359 367
360 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 368 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
369 curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM);
361 reg &= mask; 370 reg &= mask;
362 371
363 if (reg && reg == mask) 372 if (reg && reg == mask)
@@ -366,19 +375,17 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
366 reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); 375 reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID);
367 376
368 key->hw_key_idx += reg ? ffz(reg) : 0; 377 key->hw_key_idx += reg ? ffz(reg) : 0;
369
370 /* 378 /*
371 * The encryption key doesn't fit within the CSR cache, 379 * Hardware requires that all keys use the same cipher
372 * this means we should allocate it separately and use 380 * (e.g. TKIP-only, AES-only, but not TKIP+AES).
373 * rt2x00usb_vendor_request() to send the key to the hardware. 381 * If this is not the first key, compare the cipher with the
382 * first one and fall back to SW crypto if not the same.
374 */ 383 */
375 reg = KEY_ENTRY(key->hw_key_idx); 384 if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher)
376 timeout = REGISTER_TIMEOUT32(sizeof(crypto->key)); 385 return -EOPNOTSUPP;
377 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, 386
378 USB_VENDOR_REQUEST_OUT, reg, 387 rt2500usb_register_multiwrite(rt2x00dev, KEY_ENTRY(key->hw_key_idx),
379 crypto->key, 388 crypto->key, sizeof(crypto->key));
380 sizeof(crypto->key),
381 timeout);
382 389
383 /* 390 /*
384 * The driver does not support the IV/EIV generation 391 * The driver does not support the IV/EIV generation
@@ -818,6 +825,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
818 rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); 825 rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg);
819 826
820 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 827 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
828 rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, CIPHER_NONE);
821 rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); 829 rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);
822 rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0); 830 rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0);
823 rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); 831 rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg);
@@ -1005,7 +1013,9 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1005 rt2500usb_toggle_rx(rt2x00dev, state); 1013 rt2500usb_toggle_rx(rt2x00dev, state);
1006 break; 1014 break;
1007 case STATE_RADIO_IRQ_ON: 1015 case STATE_RADIO_IRQ_ON:
1016 case STATE_RADIO_IRQ_ON_ISR:
1008 case STATE_RADIO_IRQ_OFF: 1017 case STATE_RADIO_IRQ_OFF:
1018 case STATE_RADIO_IRQ_OFF_ISR:
1009 /* No support, but no error either */ 1019 /* No support, but no error either */
1010 break; 1020 break;
1011 case STATE_DEEP_SLEEP: 1021 case STATE_DEEP_SLEEP:
@@ -1034,7 +1044,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1034 struct txentry_desc *txdesc) 1044 struct txentry_desc *txdesc)
1035{ 1045{
1036 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1046 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1037 __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE); 1047 __le32 *txd = (__le32 *) skb->data;
1038 u32 word; 1048 u32 word;
1039 1049
1040 /* 1050 /*
@@ -1080,6 +1090,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1080 /* 1090 /*
1081 * Register descriptor details in skb frame descriptor. 1091 * Register descriptor details in skb frame descriptor.
1082 */ 1092 */
1093 skbdesc->flags |= SKBDESC_DESC_IN_SKB;
1083 skbdesc->desc = txd; 1094 skbdesc->desc = txd;
1084 skbdesc->desc_len = TXD_DESC_SIZE; 1095 skbdesc->desc_len = TXD_DESC_SIZE;
1085} 1096}
@@ -1108,9 +1119,20 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
1108 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); 1119 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1109 1120
1110 /* 1121 /*
1111 * Take the descriptor in front of the skb into account. 1122 * Add space for the descriptor in front of the skb.
1112 */ 1123 */
1113 skb_push(entry->skb, TXD_DESC_SIZE); 1124 skb_push(entry->skb, TXD_DESC_SIZE);
1125 memset(entry->skb->data, 0, TXD_DESC_SIZE);
1126
1127 /*
1128 * Write the TX descriptor for the beacon.
1129 */
1130 rt2500usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
1131
1132 /*
1133 * Dump beacon to userspace through debugfs.
1134 */
1135 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
1114 1136
1115 /* 1137 /*
1116 * USB devices cannot blindly pass the skb->len as the 1138 * USB devices cannot blindly pass the skb->len as the
@@ -1460,13 +1482,6 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1460 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1482 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
1461 1483
1462 /* 1484 /*
1463 * Check if the BBP tuning should be disabled.
1464 */
1465 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1466 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE))
1467 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags);
1468
1469 /*
1470 * Read the RSSI <-> dBm offset information. 1485 * Read the RSSI <-> dBm offset information.
1471 */ 1486 */
1472 rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); 1487 rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom);
@@ -1732,7 +1747,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1732 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 1747 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
1733 __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags); 1748 __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags);
1734 } 1749 }
1735 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); 1750 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
1736 1751
1737 /* 1752 /*
1738 * Set the rssi offset. 1753 * Set the rssi offset.
@@ -1752,6 +1767,8 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
1752 .configure_filter = rt2x00mac_configure_filter, 1767 .configure_filter = rt2x00mac_configure_filter,
1753 .set_tim = rt2x00mac_set_tim, 1768 .set_tim = rt2x00mac_set_tim,
1754 .set_key = rt2x00mac_set_key, 1769 .set_key = rt2x00mac_set_key,
1770 .sw_scan_start = rt2x00mac_sw_scan_start,
1771 .sw_scan_complete = rt2x00mac_sw_scan_complete,
1755 .get_stats = rt2x00mac_get_stats, 1772 .get_stats = rt2x00mac_get_stats,
1756 .bss_info_changed = rt2x00mac_bss_info_changed, 1773 .bss_info_changed = rt2x00mac_bss_info_changed,
1757 .conf_tx = rt2x00mac_conf_tx, 1774 .conf_tx = rt2x00mac_conf_tx,
@@ -1767,8 +1784,8 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1767 .rfkill_poll = rt2500usb_rfkill_poll, 1784 .rfkill_poll = rt2500usb_rfkill_poll,
1768 .link_stats = rt2500usb_link_stats, 1785 .link_stats = rt2500usb_link_stats,
1769 .reset_tuner = rt2500usb_reset_tuner, 1786 .reset_tuner = rt2500usb_reset_tuner,
1787 .watchdog = rt2x00usb_watchdog,
1770 .write_tx_desc = rt2500usb_write_tx_desc, 1788 .write_tx_desc = rt2500usb_write_tx_desc,
1771 .write_tx_data = rt2x00usb_write_tx_data,
1772 .write_beacon = rt2500usb_write_beacon, 1789 .write_beacon = rt2500usb_write_beacon,
1773 .get_tx_data_len = rt2500usb_get_tx_data_len, 1790 .get_tx_data_len = rt2500usb_get_tx_data_len,
1774 .kick_tx_queue = rt2x00usb_kick_tx_queue, 1791 .kick_tx_queue = rt2x00usb_kick_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 2aa03751c341..ed4ebcdde7c9 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -63,7 +63,6 @@
63 */ 63 */
64#define REV_RT2860C 0x0100 64#define REV_RT2860C 0x0100
65#define REV_RT2860D 0x0101 65#define REV_RT2860D 0x0101
66#define REV_RT2870D 0x0101
67#define REV_RT2872E 0x0200 66#define REV_RT2872E 0x0200
68#define REV_RT3070E 0x0200 67#define REV_RT3070E 0x0200
69#define REV_RT3070F 0x0201 68#define REV_RT3070F 0x0201
@@ -75,7 +74,7 @@
75 * Signal information. 74 * Signal information.
76 * Default offset is required for RSSI <-> dBm conversion. 75 * Default offset is required for RSSI <-> dBm conversion.
77 */ 76 */
78#define DEFAULT_RSSI_OFFSET 120 /* FIXME */ 77#define DEFAULT_RSSI_OFFSET 120
79 78
80/* 79/*
81 * Register layout information. 80 * Register layout information.
@@ -99,6 +98,21 @@
99 */ 98 */
100 99
101/* 100/*
101 * E2PROM_CSR: PCI EEPROM control register.
102 * RELOAD: Write 1 to reload eeprom content.
103 * TYPE: 0: 93c46, 1:93c66.
104 * LOAD_STATUS: 1:loading, 0:done.
105 */
106#define E2PROM_CSR 0x0004
107#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001)
108#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002)
109#define E2PROM_CSR_DATA_IN FIELD32(0x00000004)
110#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008)
111#define E2PROM_CSR_TYPE FIELD32(0x00000030)
112#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
113#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
114
115/*
102 * OPT_14: Unknown register used by rt3xxx devices. 116 * OPT_14: Unknown register used by rt3xxx devices.
103 */ 117 */
104#define OPT_14_CSR 0x0114 118#define OPT_14_CSR 0x0114
@@ -322,6 +336,39 @@
322#define RX_DRX_IDX 0x029c 336#define RX_DRX_IDX 0x029c
323 337
324/* 338/*
339 * USB_DMA_CFG
340 * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
341 * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
342 * PHY_CLEAR: phy watch dog enable.
343 * TX_CLEAR: Clear USB DMA TX path.
344 * TXOP_HALT: Halt TXOP count down when TX buffer is full.
345 * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation.
346 * RX_BULK_EN: Enable USB DMA Rx.
347 * TX_BULK_EN: Enable USB DMA Tx.
348 * EP_OUT_VALID: OUT endpoint data valid.
349 * RX_BUSY: USB DMA RX FSM busy.
350 * TX_BUSY: USB DMA TX FSM busy.
351 */
352#define USB_DMA_CFG 0x02a0
353#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff)
354#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00)
355#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000)
356#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000)
357#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000)
358#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000)
359#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000)
360#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000)
361#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000)
362#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000)
363#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000)
364
365/*
366 * US_CYC_CNT
367 */
368#define US_CYC_CNT 0x02a4
369#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff)
370
371/*
325 * PBF_SYS_CTRL 372 * PBF_SYS_CTRL
326 * HOST_RAM_WRITE: enable Host program ram write selection 373 * HOST_RAM_WRITE: enable Host program ram write selection
327 */ 374 */
@@ -672,14 +719,20 @@
672#define TBTT_TIMER 0x1124 719#define TBTT_TIMER 0x1124
673 720
674/* 721/*
675 * INT_TIMER_CFG: 722 * INT_TIMER_CFG: timer configuration
723 * PRE_TBTT_TIMER: leadtime to tbtt for pretbtt interrupt in units of 1/16 TU
724 * GP_TIMER: period of general purpose timer in units of 1/16 TU
676 */ 725 */
677#define INT_TIMER_CFG 0x1128 726#define INT_TIMER_CFG 0x1128
727#define INT_TIMER_CFG_PRE_TBTT_TIMER FIELD32(0x0000ffff)
728#define INT_TIMER_CFG_GP_TIMER FIELD32(0xffff0000)
678 729
679/* 730/*
680 * INT_TIMER_EN: GP-timer and pre-tbtt Int enable 731 * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
681 */ 732 */
682#define INT_TIMER_EN 0x112c 733#define INT_TIMER_EN 0x112c
734#define INT_TIMER_EN_PRE_TBTT_TIMER FIELD32(0x00000001)
735#define INT_TIMER_EN_GP_TIMER FIELD32(0x00000002)
683 736
684/* 737/*
685 * CH_IDLE_STA: channel idle time 738 * CH_IDLE_STA: channel idle time
@@ -756,6 +809,18 @@
756#define EDCA_TID_AC_MAP 0x1310 809#define EDCA_TID_AC_MAP 0x1310
757 810
758/* 811/*
812 * TX_PWR_CFG:
813 */
814#define TX_PWR_CFG_RATE0 FIELD32(0x0000000f)
815#define TX_PWR_CFG_RATE1 FIELD32(0x000000f0)
816#define TX_PWR_CFG_RATE2 FIELD32(0x00000f00)
817#define TX_PWR_CFG_RATE3 FIELD32(0x0000f000)
818#define TX_PWR_CFG_RATE4 FIELD32(0x000f0000)
819#define TX_PWR_CFG_RATE5 FIELD32(0x00f00000)
820#define TX_PWR_CFG_RATE6 FIELD32(0x0f000000)
821#define TX_PWR_CFG_RATE7 FIELD32(0xf0000000)
822
823/*
759 * TX_PWR_CFG_0: 824 * TX_PWR_CFG_0:
760 */ 825 */
761#define TX_PWR_CFG_0 0x1314 826#define TX_PWR_CFG_0 0x1314
@@ -1370,17 +1435,17 @@
1370struct mac_wcid_entry { 1435struct mac_wcid_entry {
1371 u8 mac[6]; 1436 u8 mac[6];
1372 u8 reserved[2]; 1437 u8 reserved[2];
1373} __attribute__ ((packed)); 1438} __packed;
1374 1439
1375struct hw_key_entry { 1440struct hw_key_entry {
1376 u8 key[16]; 1441 u8 key[16];
1377 u8 tx_mic[8]; 1442 u8 tx_mic[8];
1378 u8 rx_mic[8]; 1443 u8 rx_mic[8];
1379} __attribute__ ((packed)); 1444} __packed;
1380 1445
1381struct mac_iveiv_entry { 1446struct mac_iveiv_entry {
1382 u8 iv[8]; 1447 u8 iv[8];
1383} __attribute__ ((packed)); 1448} __packed;
1384 1449
1385/* 1450/*
1386 * MAC_WCID_ATTRIBUTE: 1451 * MAC_WCID_ATTRIBUTE:
@@ -1389,6 +1454,10 @@ struct mac_iveiv_entry {
1389#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e) 1454#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
1390#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070) 1455#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
1391#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380) 1456#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)
1457#define MAC_WCID_ATTRIBUTE_CIPHER_EXT FIELD32(0x00000400)
1458#define MAC_WCID_ATTRIBUTE_BSS_IDX_EXT FIELD32(0x00000800)
1459#define MAC_WCID_ATTRIBUTE_WAPI_MCBC FIELD32(0x00008000)
1460#define MAC_WCID_ATTRIBUTE_WAPI_KEY_IDX FIELD32(0xff000000)
1392 1461
1393/* 1462/*
1394 * SHARED_KEY_MODE: 1463 * SHARED_KEY_MODE:
@@ -1510,7 +1579,9 @@ struct mac_iveiv_entry {
1510 */ 1579 */
1511 1580
1512/* 1581/*
1513 * BBP 1: TX Antenna 1582 * BBP 1: TX Antenna & Power
1583 * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm,
1584 * 3 - increase tx power by 6dBm
1514 */ 1585 */
1515#define BBP1_TX_POWER FIELD8(0x07) 1586#define BBP1_TX_POWER FIELD8(0x07)
1516#define BBP1_TX_ANTENNA FIELD8(0x18) 1587#define BBP1_TX_ANTENNA FIELD8(0x18)
@@ -1800,9 +1871,15 @@ struct mac_iveiv_entry {
1800#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) 1871#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
1801 1872
1802/* 1873/*
1803 * EEPROM TXpower byrate: 20MHZ power 1874 * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode
1804 */ 1875 */
1805#define EEPROM_TXPOWER_BYRATE 0x006f 1876#define EEPROM_TXPOWER_BYRATE 0x006f
1877#define EEPROM_TXPOWER_BYRATE_SIZE 9
1878
1879#define EEPROM_TXPOWER_BYRATE_RATE0 FIELD16(0x000f)
1880#define EEPROM_TXPOWER_BYRATE_RATE1 FIELD16(0x00f0)
1881#define EEPROM_TXPOWER_BYRATE_RATE2 FIELD16(0x0f00)
1882#define EEPROM_TXPOWER_BYRATE_RATE3 FIELD16(0xf000)
1806 1883
1807/* 1884/*
1808 * EEPROM BBP. 1885 * EEPROM BBP.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index db4250d1c8b3..b66e0fd8f0fa 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1,9 +1,9 @@
1/* 1/*
2 Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
2 Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> 3 Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
3 Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com> 4 Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
4 5
5 Based on the original rt2800pci.c and rt2800usb.c. 6 Based on the original rt2800pci.c and rt2800usb.c.
6 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
7 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com> 7 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
8 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 8 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
9 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com> 9 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
@@ -33,21 +33,14 @@
33 Abstract: rt2800 generic device routines. 33 Abstract: rt2800 generic device routines.
34 */ 34 */
35 35
36#include <linux/crc-ccitt.h>
36#include <linux/kernel.h> 37#include <linux/kernel.h>
37#include <linux/module.h> 38#include <linux/module.h>
38#include <linux/slab.h> 39#include <linux/slab.h>
39 40
40#include "rt2x00.h" 41#include "rt2x00.h"
41#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
42#include "rt2x00usb.h"
43#endif
44#include "rt2800lib.h" 42#include "rt2800lib.h"
45#include "rt2800.h" 43#include "rt2800.h"
46#include "rt2800usb.h"
47
48MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
49MODULE_DESCRIPTION("rt2800 library");
50MODULE_LICENSE("GPL");
51 44
52/* 45/*
53 * Register access. 46 * Register access.
@@ -107,8 +100,7 @@ static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
107 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word); 100 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
108 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1); 101 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
109 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0); 102 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
110 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) 103 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
111 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
112 104
113 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); 105 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
114 } 106 }
@@ -136,8 +128,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
136 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word); 128 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
137 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1); 129 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
138 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1); 130 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
139 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) 131 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
140 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
141 132
142 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); 133 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
143 134
@@ -282,9 +273,162 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
282} 273}
283EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); 274EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
284 275
285void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc) 276static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
277{
278 u16 fw_crc;
279 u16 crc;
280
281 /*
282 * The last 2 bytes in the firmware array are the crc checksum itself,
283 * this means that we should never pass those 2 bytes to the crc
284 * algorithm.
285 */
286 fw_crc = (data[len - 2] << 8 | data[len - 1]);
287
288 /*
289 * Use the crc ccitt algorithm.
290 * This will return the same value as the legacy driver which
291 * used bit ordering reversion on the both the firmware bytes
292 * before input input as well as on the final output.
293 * Obviously using crc ccitt directly is much more efficient.
294 */
295 crc = crc_ccitt(~0, data, len - 2);
296
297 /*
298 * There is a small difference between the crc-itu-t + bitrev and
299 * the crc-ccitt crc calculation. In the latter method the 2 bytes
300 * will be swapped, use swab16 to convert the crc to the correct
301 * value.
302 */
303 crc = swab16(crc);
304
305 return fw_crc == crc;
306}
307
308int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
309 const u8 *data, const size_t len)
310{
311 size_t offset = 0;
312 size_t fw_len;
313 bool multiple;
314
315 /*
316 * PCI(e) & SOC devices require firmware with a length
317 * of 8kb. USB devices require firmware files with a length
318 * of 4kb. Certain USB chipsets however require different firmware,
319 * which Ralink only provides attached to the original firmware
320 * file. Thus for USB devices, firmware files have a length
321 * which is a multiple of 4kb.
322 */
323 if (rt2x00_is_usb(rt2x00dev)) {
324 fw_len = 4096;
325 multiple = true;
326 } else {
327 fw_len = 8192;
328 multiple = true;
329 }
330
331 /*
332 * Validate the firmware length
333 */
334 if (len != fw_len && (!multiple || (len % fw_len) != 0))
335 return FW_BAD_LENGTH;
336
337 /*
338 * Check if the chipset requires one of the upper parts
339 * of the firmware.
340 */
341 if (rt2x00_is_usb(rt2x00dev) &&
342 !rt2x00_rt(rt2x00dev, RT2860) &&
343 !rt2x00_rt(rt2x00dev, RT2872) &&
344 !rt2x00_rt(rt2x00dev, RT3070) &&
345 ((len / fw_len) == 1))
346 return FW_BAD_VERSION;
347
348 /*
349 * 8kb firmware files must be checked as if it were
350 * 2 separate firmware files.
351 */
352 while (offset < len) {
353 if (!rt2800_check_firmware_crc(data + offset, fw_len))
354 return FW_BAD_CRC;
355
356 offset += fw_len;
357 }
358
359 return FW_OK;
360}
361EXPORT_SYMBOL_GPL(rt2800_check_firmware);
362
363int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
364 const u8 *data, const size_t len)
365{
366 unsigned int i;
367 u32 reg;
368
369 /*
370 * Wait for stable hardware.
371 */
372 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
373 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
374 if (reg && reg != ~0)
375 break;
376 msleep(1);
377 }
378
379 if (i == REGISTER_BUSY_COUNT) {
380 ERROR(rt2x00dev, "Unstable hardware.\n");
381 return -EBUSY;
382 }
383
384 if (rt2x00_is_pci(rt2x00dev))
385 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
386
387 /*
388 * Disable DMA, will be reenabled later when enabling
389 * the radio.
390 */
391 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
392 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
393 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
394 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
395 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
396 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
397 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
398
399 /*
400 * Write firmware to the device.
401 */
402 rt2800_drv_write_firmware(rt2x00dev, data, len);
403
404 /*
405 * Wait for device to stabilize.
406 */
407 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
408 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
409 if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
410 break;
411 msleep(1);
412 }
413
414 if (i == REGISTER_BUSY_COUNT) {
415 ERROR(rt2x00dev, "PBF system register not ready.\n");
416 return -EBUSY;
417 }
418
419 /*
420 * Initialize firmware.
421 */
422 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
423 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
424 msleep(1);
425
426 return 0;
427}
428EXPORT_SYMBOL_GPL(rt2800_load_firmware);
429
430void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
286{ 431{
287 __le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE);
288 u32 word; 432 u32 word;
289 433
290 /* 434 /*
@@ -336,9 +480,53 @@ void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc)
336} 480}
337EXPORT_SYMBOL_GPL(rt2800_write_txwi); 481EXPORT_SYMBOL_GPL(rt2800_write_txwi);
338 482
339void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) 483static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2)
340{ 484{
341 __le32 *rxwi = (__le32 *) skb->data; 485 int rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
486 int rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
487 int rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2);
488 u16 eeprom;
489 u8 offset0;
490 u8 offset1;
491 u8 offset2;
492
493 if (rt2x00dev->rx_status.band == IEEE80211_BAND_2GHZ) {
494 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
495 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
496 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
497 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
498 offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2);
499 } else {
500 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
501 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0);
502 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1);
503 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
504 offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2);
505 }
506
507 /*
508 * Convert the value from the descriptor into the RSSI value
509 * If the value in the descriptor is 0, it is considered invalid
510 * and the default (extremely low) rssi value is assumed
511 */
512 rssi0 = (rssi0) ? (-12 - offset0 - rt2x00dev->lna_gain - rssi0) : -128;
513 rssi1 = (rssi1) ? (-12 - offset1 - rt2x00dev->lna_gain - rssi1) : -128;
514 rssi2 = (rssi2) ? (-12 - offset2 - rt2x00dev->lna_gain - rssi2) : -128;
515
516 /*
517 * mac80211 only accepts a single RSSI value. Calculating the
518 * average doesn't deliver a fair answer either since -60:-60 would
519 * be considered equally good as -50:-70 while the second is the one
520 * which gives less energy...
521 */
522 rssi0 = max(rssi0, rssi1);
523 return max(rssi0, rssi2);
524}
525
526void rt2800_process_rxwi(struct queue_entry *entry,
527 struct rxdone_entry_desc *rxdesc)
528{
529 __le32 *rxwi = (__le32 *) entry->skb->data;
342 u32 word; 530 u32 word;
343 531
344 rt2x00_desc_read(rxwi, 0, &word); 532 rt2x00_desc_read(rxwi, 0, &word);
@@ -369,17 +557,93 @@ void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc)
369 557
370 rt2x00_desc_read(rxwi, 2, &word); 558 rt2x00_desc_read(rxwi, 2, &word);
371 559
372 rxdesc->rssi = 560 /*
373 (rt2x00_get_field32(word, RXWI_W2_RSSI0) + 561 * Convert descriptor AGC value to RSSI value.
374 rt2x00_get_field32(word, RXWI_W2_RSSI1)) / 2; 562 */
563 rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word);
375 564
376 /* 565 /*
377 * Remove RXWI descriptor from start of buffer. 566 * Remove RXWI descriptor from start of buffer.
378 */ 567 */
379 skb_pull(skb, RXWI_DESC_SIZE); 568 skb_pull(entry->skb, RXWI_DESC_SIZE);
380} 569}
381EXPORT_SYMBOL_GPL(rt2800_process_rxwi); 570EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
382 571
572void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
573{
574 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
575 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
576 unsigned int beacon_base;
577 u32 reg;
578
579 /*
580 * Disable beaconing while we are reloading the beacon data,
581 * otherwise we might be sending out invalid data.
582 */
583 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
584 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
585 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
586
587 /*
588 * Add space for the TXWI in front of the skb.
589 */
590 skb_push(entry->skb, TXWI_DESC_SIZE);
591 memset(entry->skb, 0, TXWI_DESC_SIZE);
592
593 /*
594 * Register descriptor details in skb frame descriptor.
595 */
596 skbdesc->flags |= SKBDESC_DESC_IN_SKB;
597 skbdesc->desc = entry->skb->data;
598 skbdesc->desc_len = TXWI_DESC_SIZE;
599
600 /*
601 * Add the TXWI for the beacon to the skb.
602 */
603 rt2800_write_txwi((__le32 *)entry->skb->data, txdesc);
604
605 /*
606 * Dump beacon to userspace through debugfs.
607 */
608 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
609
610 /*
611 * Write entire beacon with TXWI to register.
612 */
613 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
614 rt2800_register_multiwrite(rt2x00dev, beacon_base,
615 entry->skb->data, entry->skb->len);
616
617 /*
618 * Enable beaconing again.
619 */
620 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
621 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
622 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
623 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
624
625 /*
626 * Clean up beacon skb.
627 */
628 dev_kfree_skb_any(entry->skb);
629 entry->skb = NULL;
630}
631EXPORT_SYMBOL_GPL(rt2800_write_beacon);
632
633static void inline rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
634 unsigned int beacon_base)
635{
636 int i;
637
638 /*
639 * For the Beacon base registers we only need to clear
640 * the whole TXWI which (when set to 0) will invalidate
641 * the entire beacon.
642 */
643 for (i = 0; i < TXWI_DESC_SIZE; i += sizeof(__le32))
644 rt2800_register_write(rt2x00dev, beacon_base + i, 0);
645}
646
383#ifdef CONFIG_RT2X00_LIB_DEBUGFS 647#ifdef CONFIG_RT2X00_LIB_DEBUGFS
384const struct rt2x00debug rt2800_rt2x00debug = { 648const struct rt2x00debug rt2800_rt2x00debug = {
385 .owner = THIS_MODULE, 649 .owner = THIS_MODULE,
@@ -502,15 +766,28 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
502 766
503 offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx); 767 offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
504 768
505 rt2800_register_read(rt2x00dev, offset, &reg); 769 if (crypto->cmd == SET_KEY) {
506 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB, 770 rt2800_register_read(rt2x00dev, offset, &reg);
507 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); 771 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
508 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER, 772 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
509 (crypto->cmd == SET_KEY) * crypto->cipher); 773 /*
510 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX, 774 * Both the cipher as the BSS Idx numbers are split in a main
511 (crypto->cmd == SET_KEY) * crypto->bssidx); 775 * value of 3 bits, and a extended field for adding one additional
512 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher); 776 * bit to the value.
513 rt2800_register_write(rt2x00dev, offset, reg); 777 */
778 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
779 (crypto->cipher & 0x7));
780 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT,
781 (crypto->cipher & 0x8) >> 3);
782 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
783 (crypto->bssidx & 0x7));
784 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
785 (crypto->bssidx & 0x8) >> 3);
786 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
787 rt2800_register_write(rt2x00dev, offset, reg);
788 } else {
789 rt2800_register_write(rt2x00dev, offset, 0);
790 }
514 791
515 offset = MAC_IVEIV_ENTRY(key->hw_key_idx); 792 offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
516 793
@@ -668,19 +945,14 @@ EXPORT_SYMBOL_GPL(rt2800_config_filter);
668void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, 945void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
669 struct rt2x00intf_conf *conf, const unsigned int flags) 946 struct rt2x00intf_conf *conf, const unsigned int flags)
670{ 947{
671 unsigned int beacon_base;
672 u32 reg; 948 u32 reg;
673 949
674 if (flags & CONFIG_UPDATE_TYPE) { 950 if (flags & CONFIG_UPDATE_TYPE) {
675 /* 951 /*
676 * Clear current synchronisation setup. 952 * Clear current synchronisation setup.
677 * For the Beacon base registers we only need to clear
678 * the first byte since that byte contains the VALID and OWNER
679 * bits which (when set to 0) will invalidate the entire beacon.
680 */ 953 */
681 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 954 rt2800_clear_beacon(rt2x00dev,
682 rt2800_register_write(rt2x00dev, beacon_base, 0); 955 HW_BEACON_OFFSET(intf->beacon->entry_idx));
683
684 /* 956 /*
685 * Enable synchronisation. 957 * Enable synchronisation.
686 */ 958 */
@@ -688,8 +960,18 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
688 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 960 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
689 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync); 961 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
690 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 962 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
691 (conf->sync == TSF_SYNC_BEACON)); 963 (conf->sync == TSF_SYNC_ADHOC ||
964 conf->sync == TSF_SYNC_AP_NONE));
692 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 965 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
966
967 /*
968 * Enable pre tbtt interrupt for beaconing modes
969 */
970 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
971 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
972 (conf->sync == TSF_SYNC_AP_NONE));
973 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
974
693 } 975 }
694 976
695 if (flags & CONFIG_UPDATE_MAC) { 977 if (flags & CONFIG_UPDATE_MAC) {
@@ -703,8 +985,8 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
703 985
704 if (flags & CONFIG_UPDATE_BSSID) { 986 if (flags & CONFIG_UPDATE_BSSID) {
705 reg = le32_to_cpu(conf->bssid[1]); 987 reg = le32_to_cpu(conf->bssid[1]);
706 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0); 988 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
707 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0); 989 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
708 conf->bssid[1] = cpu_to_le32(reg); 990 conf->bssid[1] = cpu_to_le32(reg);
709 991
710 rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0, 992 rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
@@ -762,14 +1044,12 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
762 switch ((int)ant->tx) { 1044 switch ((int)ant->tx) {
763 case 1: 1045 case 1:
764 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); 1046 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
765 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
766 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
767 break; 1047 break;
768 case 2: 1048 case 2:
769 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); 1049 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
770 break; 1050 break;
771 case 3: 1051 case 3:
772 /* Do nothing */ 1052 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
773 break; 1053 break;
774 } 1054 }
775 1055
@@ -1016,66 +1296,115 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1016} 1296}
1017 1297
1018static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, 1298static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1019 const int txpower) 1299 const int max_txpower)
1020{ 1300{
1301 u8 txpower;
1302 u8 max_value = (u8)max_txpower;
1303 u16 eeprom;
1304 int i;
1021 u32 reg; 1305 u32 reg;
1022 u32 value = TXPOWER_G_TO_DEV(txpower);
1023 u8 r1; 1306 u8 r1;
1307 u32 offset;
1024 1308
1309 /*
1310 * set to normal tx power mode: +/- 0dBm
1311 */
1025 rt2800_bbp_read(rt2x00dev, 1, &r1); 1312 rt2800_bbp_read(rt2x00dev, 1, &r1);
1026 rt2x00_set_field8(&reg, BBP1_TX_POWER, 0); 1313 rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
1027 rt2800_bbp_write(rt2x00dev, 1, r1); 1314 rt2800_bbp_write(rt2x00dev, 1, r1);
1028 1315
1029 rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg); 1316 /*
1030 rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value); 1317 * The eeprom contains the tx power values for each rate. These
1031 rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value); 1318 * values map to 100% tx power. Each 16bit word contains four tx
1032 rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value); 1319 * power values and the order is the same as used in the TX_PWR_CFG
1033 rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value); 1320 * registers.
1034 rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value); 1321 */
1035 rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value); 1322 offset = TX_PWR_CFG_0;
1036 rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value); 1323
1037 rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value); 1324 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
1038 rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, reg); 1325 /* just to be safe */
1039 1326 if (offset > TX_PWR_CFG_4)
1040 rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, &reg); 1327 break;
1041 rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value); 1328
1042 rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value); 1329 rt2800_register_read(rt2x00dev, offset, &reg);
1043 rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value); 1330
1044 rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value); 1331 /* read the next four txpower values */
1045 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value); 1332 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
1046 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value); 1333 &eeprom);
1047 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value); 1334
1048 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value); 1335 /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
1049 rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, reg); 1336 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
1050 1337 * TX_PWR_CFG_4: unknown */
1051 rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, &reg); 1338 txpower = rt2x00_get_field16(eeprom,
1052 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value); 1339 EEPROM_TXPOWER_BYRATE_RATE0);
1053 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value); 1340 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0,
1054 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value); 1341 min(txpower, max_value));
1055 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value); 1342
1056 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value); 1343 /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
1057 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value); 1344 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
1058 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value); 1345 * TX_PWR_CFG_4: unknown */
1059 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value); 1346 txpower = rt2x00_get_field16(eeprom,
1060 rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, reg); 1347 EEPROM_TXPOWER_BYRATE_RATE1);
1061 1348 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1,
1062 rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, &reg); 1349 min(txpower, max_value));
1063 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value); 1350
1064 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value); 1351 /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS,
1065 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value); 1352 * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14,
1066 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value); 1353 * TX_PWR_CFG_4: unknown */
1067 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value); 1354 txpower = rt2x00_get_field16(eeprom,
1068 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value); 1355 EEPROM_TXPOWER_BYRATE_RATE2);
1069 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value); 1356 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2,
1070 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value); 1357 min(txpower, max_value));
1071 rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, reg); 1358
1072 1359 /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
1073 rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, &reg); 1360 * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15,
1074 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value); 1361 * TX_PWR_CFG_4: unknown */
1075 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value); 1362 txpower = rt2x00_get_field16(eeprom,
1076 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value); 1363 EEPROM_TXPOWER_BYRATE_RATE3);
1077 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value); 1364 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3,
1078 rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, reg); 1365 min(txpower, max_value));
1366
1367 /* read the next four txpower values */
1368 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
1369 &eeprom);
1370
1371 /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
1372 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
1373 * TX_PWR_CFG_4: unknown */
1374 txpower = rt2x00_get_field16(eeprom,
1375 EEPROM_TXPOWER_BYRATE_RATE0);
1376 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4,
1377 min(txpower, max_value));
1378
1379 /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
1380 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
1381 * TX_PWR_CFG_4: unknown */
1382 txpower = rt2x00_get_field16(eeprom,
1383 EEPROM_TXPOWER_BYRATE_RATE1);
1384 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5,
1385 min(txpower, max_value));
1386
1387 /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
1388 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
1389 * TX_PWR_CFG_4: unknown */
1390 txpower = rt2x00_get_field16(eeprom,
1391 EEPROM_TXPOWER_BYRATE_RATE2);
1392 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6,
1393 min(txpower, max_value));
1394
1395 /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
1396 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
1397 * TX_PWR_CFG_4: unknown */
1398 txpower = rt2x00_get_field16(eeprom,
1399 EEPROM_TXPOWER_BYRATE_RATE3);
1400 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7,
1401 min(txpower, max_value));
1402
1403 rt2800_register_write(rt2x00dev, offset, reg);
1404
1405 /* next TX_PWR_CFG register */
1406 offset += 4;
1407 }
1079} 1408}
1080 1409
1081static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev, 1410static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
@@ -1212,6 +1541,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1212 u32 reg; 1541 u32 reg;
1213 u16 eeprom; 1542 u16 eeprom;
1214 unsigned int i; 1543 unsigned int i;
1544 int ret;
1215 1545
1216 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 1546 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1217 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); 1547 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
@@ -1221,59 +1551,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1221 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); 1551 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
1222 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 1552 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1223 1553
1224 if (rt2x00_is_usb(rt2x00dev)) { 1554 ret = rt2800_drv_init_registers(rt2x00dev);
1225 /* 1555 if (ret)
1226 * Wait until BBP and RF are ready. 1556 return ret;
1227 */
1228 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1229 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
1230 if (reg && reg != ~0)
1231 break;
1232 msleep(1);
1233 }
1234
1235 if (i == REGISTER_BUSY_COUNT) {
1236 ERROR(rt2x00dev, "Unstable hardware.\n");
1237 return -EBUSY;
1238 }
1239
1240 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1241 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
1242 reg & ~0x00002000);
1243 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
1244 /*
1245 * Reset DMA indexes
1246 */
1247 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
1248 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
1249 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
1250 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
1251 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
1252 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
1253 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
1254 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
1255 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
1256
1257 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
1258 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
1259
1260 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1261 }
1262
1263 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1264 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
1265 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
1266 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1267
1268 if (rt2x00_is_usb(rt2x00dev)) {
1269 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1270#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
1271 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1272 USB_MODE_RESET, REGISTER_TIMEOUT);
1273#endif
1274 }
1275
1276 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
1277 1557
1278 rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg); 1558 rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
1279 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */ 1559 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
@@ -1295,7 +1575,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1295 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); 1575 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
1296 1576
1297 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 1577 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1298 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0); 1578 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 1600);
1299 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); 1579 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
1300 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0); 1580 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
1301 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); 1581 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
@@ -1328,7 +1608,6 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1328 } else { 1608 } else {
1329 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 1609 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1330 } 1610 }
1331 rt2800_register_write(rt2x00dev, TX_SW_CFG2, reg);
1332 } else if (rt2x00_rt(rt2x00dev, RT3070)) { 1611 } else if (rt2x00_rt(rt2x00dev, RT3070)) {
1333 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 1612 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1334 1613
@@ -1339,6 +1618,10 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1339 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 1618 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
1340 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 1619 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1341 } 1620 }
1621 } else if (rt2800_is_305x_soc(rt2x00dev)) {
1622 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1623 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1624 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
1342 } else { 1625 } else {
1343 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); 1626 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
1344 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 1627 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1546,23 +1829,20 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1546 1829
1547 /* 1830 /*
1548 * Clear all beacons 1831 * Clear all beacons
1549 * For the Beacon base registers we only need to clear 1832 */
1550 * the first byte since that byte contains the VALID and OWNER 1833 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
1551 * bits which (when set to 0) will invalidate the entire beacon. 1834 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
1552 */ 1835 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
1553 rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0); 1836 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
1554 rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0); 1837 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
1555 rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0); 1838 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
1556 rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0); 1839 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
1557 rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0); 1840 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
1558 rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
1559 rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
1560 rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
1561 1841
1562 if (rt2x00_is_usb(rt2x00dev)) { 1842 if (rt2x00_is_usb(rt2x00dev)) {
1563 rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg); 1843 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
1564 rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30); 1844 rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
1565 rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg); 1845 rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
1566 } 1846 }
1567 1847
1568 rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg); 1848 rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -1617,6 +1897,13 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1617 rt2800_register_read(rt2x00dev, TX_STA_CNT1, &reg); 1897 rt2800_register_read(rt2x00dev, TX_STA_CNT1, &reg);
1618 rt2800_register_read(rt2x00dev, TX_STA_CNT2, &reg); 1898 rt2800_register_read(rt2x00dev, TX_STA_CNT2, &reg);
1619 1899
1900 /*
1901 * Setup leadtime for pre tbtt interrupt to 6ms
1902 */
1903 rt2800_register_read(rt2x00dev, INT_TIMER_CFG, &reg);
1904 rt2x00_set_field32(&reg, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4);
1905 rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg);
1906
1620 return 0; 1907 return 0;
1621} 1908}
1622EXPORT_SYMBOL_GPL(rt2800_init_registers); 1909EXPORT_SYMBOL_GPL(rt2800_init_registers);
@@ -1706,8 +1993,7 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1706 rt2800_bbp_write(rt2x00dev, 82, 0x62); 1993 rt2800_bbp_write(rt2x00dev, 82, 0x62);
1707 rt2800_bbp_write(rt2x00dev, 83, 0x6a); 1994 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
1708 1995
1709 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) || 1996 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
1710 rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
1711 rt2800_bbp_write(rt2x00dev, 84, 0x19); 1997 rt2800_bbp_write(rt2x00dev, 84, 0x19);
1712 else 1998 else
1713 rt2800_bbp_write(rt2x00dev, 84, 0x99); 1999 rt2800_bbp_write(rt2x00dev, 84, 0x99);
@@ -2013,8 +2299,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2013 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 2299 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2014 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 2300 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
2015 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 2301 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
2016 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 2302 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
2017 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
2018 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); 2303 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
2019 } 2304 }
2020 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); 2305 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
@@ -2147,7 +2432,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2147 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 2432 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
2148 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 2433 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
2149 } else if (rt2x00_rt(rt2x00dev, RT2860) || 2434 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
2150 rt2x00_rt(rt2x00dev, RT2870) ||
2151 rt2x00_rt(rt2x00dev, RT2872)) { 2435 rt2x00_rt(rt2x00dev, RT2872)) {
2152 /* 2436 /*
2153 * There is a max of 2 RX streams for RT28x0 series 2437 * There is a max of 2 RX streams for RT28x0 series
@@ -2169,6 +2453,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2169 rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0); 2453 rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
2170 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0); 2454 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
2171 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0); 2455 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
2456 rt2x00_set_field16(&word, EEPROM_NIC_ANT_DIVERSITY, 0);
2457 rt2x00_set_field16(&word, EEPROM_NIC_DAC_TEST, 0);
2172 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); 2458 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
2173 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); 2459 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
2174 } 2460 }
@@ -2176,6 +2462,10 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2176 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); 2462 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
2177 if ((word & 0x00ff) == 0x00ff) { 2463 if ((word & 0x00ff) == 0x00ff) {
2178 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); 2464 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
2465 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
2466 EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
2467 }
2468 if ((word & 0xff00) == 0xff00) {
2179 rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE, 2469 rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
2180 LED_MODE_TXRX_ACTIVITY); 2470 LED_MODE_TXRX_ACTIVITY);
2181 rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0); 2471 rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
@@ -2183,7 +2473,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2183 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555); 2473 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
2184 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221); 2474 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
2185 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8); 2475 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
2186 EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); 2476 EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word);
2187 } 2477 }
2188 2478
2189 /* 2479 /*
@@ -2251,7 +2541,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
2251 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); 2541 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
2252 2542
2253 if (!rt2x00_rt(rt2x00dev, RT2860) && 2543 if (!rt2x00_rt(rt2x00dev, RT2860) &&
2254 !rt2x00_rt(rt2x00dev, RT2870) &&
2255 !rt2x00_rt(rt2x00dev, RT2872) && 2544 !rt2x00_rt(rt2x00dev, RT2872) &&
2256 !rt2x00_rt(rt2x00dev, RT2883) && 2545 !rt2x00_rt(rt2x00dev, RT2883) &&
2257 !rt2x00_rt(rt2x00dev, RT3070) && 2546 !rt2x00_rt(rt2x00dev, RT3070) &&
@@ -2484,13 +2773,26 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2484 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2773 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2485 IEEE80211_HW_SIGNAL_DBM | 2774 IEEE80211_HW_SIGNAL_DBM |
2486 IEEE80211_HW_SUPPORTS_PS | 2775 IEEE80211_HW_SUPPORTS_PS |
2487 IEEE80211_HW_PS_NULLFUNC_STACK; 2776 IEEE80211_HW_PS_NULLFUNC_STACK |
2777 IEEE80211_HW_AMPDU_AGGREGATION;
2488 2778
2489 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 2779 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
2490 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 2780 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
2491 rt2x00_eeprom_addr(rt2x00dev, 2781 rt2x00_eeprom_addr(rt2x00dev,
2492 EEPROM_MAC_ADDR_0)); 2782 EEPROM_MAC_ADDR_0));
2493 2783
2784 /*
2785 * As rt2800 has a global fallback table we cannot specify
2786 * more then one tx rate per frame but since the hw will
2787 * try several rates (based on the fallback table) we should
2788 * still initialize max_rates to the maximum number of rates
2789 * we are going to try. Otherwise mac80211 will truncate our
2790 * reported tx rates and the rc algortihm will end up with
2791 * incorrect data.
2792 */
2793 rt2x00dev->hw->max_rates = 7;
2794 rt2x00dev->hw->max_rate_tries = 1;
2795
2494 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); 2796 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
2495 2797
2496 /* 2798 /*
@@ -2528,16 +2830,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2528 else 2830 else
2529 spec->ht.ht_supported = false; 2831 spec->ht.ht_supported = false;
2530 2832
2531 /*
2532 * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
2533 * reception problems with HT40 capable 11n APs
2534 */
2535 spec->ht.cap = 2833 spec->ht.cap =
2834 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
2536 IEEE80211_HT_CAP_GRN_FLD | 2835 IEEE80211_HT_CAP_GRN_FLD |
2537 IEEE80211_HT_CAP_SGI_20 | 2836 IEEE80211_HT_CAP_SGI_20 |
2538 IEEE80211_HT_CAP_SGI_40 | 2837 IEEE80211_HT_CAP_SGI_40;
2539 IEEE80211_HT_CAP_TX_STBC | 2838
2540 IEEE80211_HT_CAP_RX_STBC; 2839 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) >= 2)
2840 spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
2841
2842 spec->ht.cap |=
2843 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) <<
2844 IEEE80211_HT_CAP_RX_STBC_SHIFT;
2845
2541 spec->ht.ampdu_factor = 3; 2846 spec->ht.ampdu_factor = 3;
2542 spec->ht.ampdu_density = 4; 2847 spec->ht.ampdu_density = 4;
2543 spec->ht.mcs.tx_params = 2848 spec->ht.mcs.tx_params =
@@ -2591,8 +2896,8 @@ EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode);
2591/* 2896/*
2592 * IEEE80211 stack callback functions. 2897 * IEEE80211 stack callback functions.
2593 */ 2898 */
2594static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, 2899void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
2595 u32 *iv32, u16 *iv16) 2900 u16 *iv16)
2596{ 2901{
2597 struct rt2x00_dev *rt2x00dev = hw->priv; 2902 struct rt2x00_dev *rt2x00dev = hw->priv;
2598 struct mac_iveiv_entry iveiv_entry; 2903 struct mac_iveiv_entry iveiv_entry;
@@ -2605,8 +2910,9 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
2605 memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16)); 2910 memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
2606 memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32)); 2911 memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
2607} 2912}
2913EXPORT_SYMBOL_GPL(rt2800_get_tkip_seq);
2608 2914
2609static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 2915int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2610{ 2916{
2611 struct rt2x00_dev *rt2x00dev = hw->priv; 2917 struct rt2x00_dev *rt2x00dev = hw->priv;
2612 u32 reg; 2918 u32 reg;
@@ -2642,9 +2948,10 @@ static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2642 2948
2643 return 0; 2949 return 0;
2644} 2950}
2951EXPORT_SYMBOL_GPL(rt2800_set_rts_threshold);
2645 2952
2646static int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, 2953int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2647 const struct ieee80211_tx_queue_params *params) 2954 const struct ieee80211_tx_queue_params *params)
2648{ 2955{
2649 struct rt2x00_dev *rt2x00dev = hw->priv; 2956 struct rt2x00_dev *rt2x00dev = hw->priv;
2650 struct data_queue *queue; 2957 struct data_queue *queue;
@@ -2709,8 +3016,9 @@ static int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2709 3016
2710 return 0; 3017 return 0;
2711} 3018}
3019EXPORT_SYMBOL_GPL(rt2800_conf_tx);
2712 3020
2713static u64 rt2800_get_tsf(struct ieee80211_hw *hw) 3021u64 rt2800_get_tsf(struct ieee80211_hw *hw)
2714{ 3022{
2715 struct rt2x00_dev *rt2x00dev = hw->priv; 3023 struct rt2x00_dev *rt2x00dev = hw->priv;
2716 u64 tsf; 3024 u64 tsf;
@@ -2723,23 +3031,37 @@ static u64 rt2800_get_tsf(struct ieee80211_hw *hw)
2723 3031
2724 return tsf; 3032 return tsf;
2725} 3033}
3034EXPORT_SYMBOL_GPL(rt2800_get_tsf);
2726 3035
2727const struct ieee80211_ops rt2800_mac80211_ops = { 3036int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2728 .tx = rt2x00mac_tx, 3037 enum ieee80211_ampdu_mlme_action action,
2729 .start = rt2x00mac_start, 3038 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2730 .stop = rt2x00mac_stop, 3039{
2731 .add_interface = rt2x00mac_add_interface, 3040 int ret = 0;
2732 .remove_interface = rt2x00mac_remove_interface, 3041
2733 .config = rt2x00mac_config, 3042 switch (action) {
2734 .configure_filter = rt2x00mac_configure_filter, 3043 case IEEE80211_AMPDU_RX_START:
2735 .set_tim = rt2x00mac_set_tim, 3044 case IEEE80211_AMPDU_RX_STOP:
2736 .set_key = rt2x00mac_set_key, 3045 /* we don't support RX aggregation yet */
2737 .get_stats = rt2x00mac_get_stats, 3046 ret = -ENOTSUPP;
2738 .get_tkip_seq = rt2800_get_tkip_seq, 3047 break;
2739 .set_rts_threshold = rt2800_set_rts_threshold, 3048 case IEEE80211_AMPDU_TX_START:
2740 .bss_info_changed = rt2x00mac_bss_info_changed, 3049 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2741 .conf_tx = rt2800_conf_tx, 3050 break;
2742 .get_tsf = rt2800_get_tsf, 3051 case IEEE80211_AMPDU_TX_STOP:
2743 .rfkill_poll = rt2x00mac_rfkill_poll, 3052 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2744}; 3053 break;
2745EXPORT_SYMBOL_GPL(rt2800_mac80211_ops); 3054 case IEEE80211_AMPDU_TX_OPERATIONAL:
3055 break;
3056 default:
3057 WARNING((struct rt2x00_dev *)hw->priv, "Unknown AMPDU action\n");
3058 }
3059
3060 return ret;
3061}
3062EXPORT_SYMBOL_GPL(rt2800_ampdu_action);
3063
3064MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
3065MODULE_VERSION(DRV_VERSION);
3066MODULE_DESCRIPTION("Ralink RT2800 library");
3067MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 94de999e2290..091641e3c5e2 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -40,13 +40,17 @@ struct rt2800_ops {
40 int (*regbusy_read)(struct rt2x00_dev *rt2x00dev, 40 int (*regbusy_read)(struct rt2x00_dev *rt2x00dev,
41 const unsigned int offset, 41 const unsigned int offset,
42 const struct rt2x00_field32 field, u32 *reg); 42 const struct rt2x00_field32 field, u32 *reg);
43
44 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
45 const u8 *data, const size_t len);
46 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
43}; 47};
44 48
45static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev, 49static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
46 const unsigned int offset, 50 const unsigned int offset,
47 u32 *value) 51 u32 *value)
48{ 52{
49 const struct rt2800_ops *rt2800ops = rt2x00dev->priv; 53 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
50 54
51 rt2800ops->register_read(rt2x00dev, offset, value); 55 rt2800ops->register_read(rt2x00dev, offset, value);
52} 56}
@@ -55,7 +59,7 @@ static inline void rt2800_register_read_lock(struct rt2x00_dev *rt2x00dev,
55 const unsigned int offset, 59 const unsigned int offset,
56 u32 *value) 60 u32 *value)
57{ 61{
58 const struct rt2800_ops *rt2800ops = rt2x00dev->priv; 62 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
59 63
60 rt2800ops->register_read_lock(rt2x00dev, offset, value); 64 rt2800ops->register_read_lock(rt2x00dev, offset, value);
61} 65}
@@ -64,7 +68,7 @@ static inline void rt2800_register_write(struct rt2x00_dev *rt2x00dev,
64 const unsigned int offset, 68 const unsigned int offset,
65 u32 value) 69 u32 value)
66{ 70{
67 const struct rt2800_ops *rt2800ops = rt2x00dev->priv; 71 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
68 72
69 rt2800ops->register_write(rt2x00dev, offset, value); 73 rt2800ops->register_write(rt2x00dev, offset, value);
70} 74}
@@ -73,7 +77,7 @@ static inline void rt2800_register_write_lock(struct rt2x00_dev *rt2x00dev,
73 const unsigned int offset, 77 const unsigned int offset,
74 u32 value) 78 u32 value)
75{ 79{
76 const struct rt2800_ops *rt2800ops = rt2x00dev->priv; 80 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
77 81
78 rt2800ops->register_write_lock(rt2x00dev, offset, value); 82 rt2800ops->register_write_lock(rt2x00dev, offset, value);
79} 83}
@@ -82,7 +86,7 @@ static inline void rt2800_register_multiread(struct rt2x00_dev *rt2x00dev,
82 const unsigned int offset, 86 const unsigned int offset,
83 void *value, const u32 length) 87 void *value, const u32 length)
84{ 88{
85 const struct rt2800_ops *rt2800ops = rt2x00dev->priv; 89 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
86 90
87 rt2800ops->register_multiread(rt2x00dev, offset, value, length); 91 rt2800ops->register_multiread(rt2x00dev, offset, value, length);
88} 92}
@@ -92,7 +96,7 @@ static inline void rt2800_register_multiwrite(struct rt2x00_dev *rt2x00dev,
92 const void *value, 96 const void *value,
93 const u32 length) 97 const u32 length)
94{ 98{
95 const struct rt2800_ops *rt2800ops = rt2x00dev->priv; 99 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
96 100
97 rt2800ops->register_multiwrite(rt2x00dev, offset, value, length); 101 rt2800ops->register_multiwrite(rt2x00dev, offset, value, length);
98} 102}
@@ -102,17 +106,39 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
102 const struct rt2x00_field32 field, 106 const struct rt2x00_field32 field,
103 u32 *reg) 107 u32 *reg)
104{ 108{
105 const struct rt2800_ops *rt2800ops = rt2x00dev->priv; 109 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
106 110
107 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg); 111 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
108} 112}
109 113
114static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev,
115 const u8 *data, const size_t len)
116{
117 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
118
119 return rt2800ops->drv_write_firmware(rt2x00dev, data, len);
120}
121
122static inline int rt2800_drv_init_registers(struct rt2x00_dev *rt2x00dev)
123{
124 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
125
126 return rt2800ops->drv_init_registers(rt2x00dev);
127}
128
110void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, 129void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
111 const u8 command, const u8 token, 130 const u8 command, const u8 token,
112 const u8 arg0, const u8 arg1); 131 const u8 arg0, const u8 arg1);
113 132
114void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc); 133int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
115void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *txdesc); 134 const u8 *data, const size_t len);
135int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
136 const u8 *data, const size_t len);
137
138void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc);
139void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
140
141void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
116 142
117extern const struct rt2x00debug rt2800_rt2x00debug; 143extern const struct rt2x00debug rt2800_rt2x00debug;
118 144
@@ -148,6 +174,14 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev);
148int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev); 174int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev);
149int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev); 175int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
150 176
151extern const struct ieee80211_ops rt2800_mac80211_ops; 177void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
178 u16 *iv16);
179int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
180int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
181 const struct ieee80211_tx_queue_params *params);
182u64 rt2800_get_tsf(struct ieee80211_hw *hw);
183int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
184 enum ieee80211_ampdu_mlme_action action,
185 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
152 186
153#endif /* RT2800LIB_H */ 187#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b2f23272c3aa..39b3846fa340 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -31,7 +31,6 @@
31 Supported chipsets: RT2800E & RT2800ED. 31 Supported chipsets: RT2800E & RT2800ED.
32 */ 32 */
33 33
34#include <linux/crc-ccitt.h>
35#include <linux/delay.h> 34#include <linux/delay.h>
36#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
37#include <linux/init.h> 36#include <linux/init.h>
@@ -51,7 +50,7 @@
51/* 50/*
52 * Allow hardware encryption to be disabled. 51 * Allow hardware encryption to be disabled.
53 */ 52 */
54static int modparam_nohwcrypt = 1; 53static int modparam_nohwcrypt = 0;
55module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 54module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
56MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 55MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
57 56
@@ -139,8 +138,18 @@ static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
139 eeprom.data = rt2x00dev; 138 eeprom.data = rt2x00dev;
140 eeprom.register_read = rt2800pci_eepromregister_read; 139 eeprom.register_read = rt2800pci_eepromregister_read;
141 eeprom.register_write = rt2800pci_eepromregister_write; 140 eeprom.register_write = rt2800pci_eepromregister_write;
142 eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ? 141 switch (rt2x00_get_field32(reg, E2PROM_CSR_TYPE))
143 PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; 142 {
143 case 0:
144 eeprom.width = PCI_EEPROM_WIDTH_93C46;
145 break;
146 case 1:
147 eeprom.width = PCI_EEPROM_WIDTH_93C66;
148 break;
149 default:
150 eeprom.width = PCI_EEPROM_WIDTH_93C86;
151 break;
152 }
144 eeprom.reg_data_in = 0; 153 eeprom.reg_data_in = 0;
145 eeprom.reg_data_out = 0; 154 eeprom.reg_data_out = 0;
146 eeprom.reg_data_clock = 0; 155 eeprom.reg_data_clock = 0;
@@ -182,82 +191,14 @@ static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
182 return FIRMWARE_RT2860; 191 return FIRMWARE_RT2860;
183} 192}
184 193
185static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev, 194static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
186 const u8 *data, const size_t len) 195 const u8 *data, const size_t len)
187{ 196{
188 u16 fw_crc;
189 u16 crc;
190
191 /*
192 * Only support 8kb firmware files.
193 */
194 if (len != 8192)
195 return FW_BAD_LENGTH;
196
197 /*
198 * The last 2 bytes in the firmware array are the crc checksum itself,
199 * this means that we should never pass those 2 bytes to the crc
200 * algorithm.
201 */
202 fw_crc = (data[len - 2] << 8 | data[len - 1]);
203
204 /*
205 * Use the crc ccitt algorithm.
206 * This will return the same value as the legacy driver which
207 * used bit ordering reversion on the both the firmware bytes
208 * before input input as well as on the final output.
209 * Obviously using crc ccitt directly is much more efficient.
210 */
211 crc = crc_ccitt(~0, data, len - 2);
212
213 /*
214 * There is a small difference between the crc-itu-t + bitrev and
215 * the crc-ccitt crc calculation. In the latter method the 2 bytes
216 * will be swapped, use swab16 to convert the crc to the correct
217 * value.
218 */
219 crc = swab16(crc);
220
221 return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
222}
223
224static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev,
225 const u8 *data, const size_t len)
226{
227 unsigned int i;
228 u32 reg; 197 u32 reg;
229 198
230 /*
231 * Wait for stable hardware.
232 */
233 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
234 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
235 if (reg && reg != ~0)
236 break;
237 msleep(1);
238 }
239
240 if (i == REGISTER_BUSY_COUNT) {
241 ERROR(rt2x00dev, "Unstable hardware.\n");
242 return -EBUSY;
243 }
244
245 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
246 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000); 199 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
247 200
248 /* 201 /*
249 * Disable DMA, will be reenabled later when enabling
250 * the radio.
251 */
252 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
253 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
254 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
255 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
256 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
257 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
258 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
259
260 /*
261 * enable Host program ram write selection 202 * enable Host program ram write selection
262 */ 203 */
263 reg = 0; 204 reg = 0;
@@ -268,34 +209,11 @@ static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev,
268 * Write firmware to device. 209 * Write firmware to device.
269 */ 210 */
270 rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 211 rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
271 data, len); 212 data, len);
272 213
273 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000); 214 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
274 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001); 215 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);
275 216
276 /*
277 * Wait for device to stabilize.
278 */
279 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
280 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
281 if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
282 break;
283 msleep(1);
284 }
285
286 if (i == REGISTER_BUSY_COUNT) {
287 ERROR(rt2x00dev, "PBF system register not ready.\n");
288 return -EBUSY;
289 }
290
291 /*
292 * Disable interrupts
293 */
294 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);
295
296 /*
297 * Initialize BBP R/W access agent
298 */
299 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 217 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
300 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 218 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
301 219
@@ -412,7 +330,8 @@ static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
412static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 330static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
413 enum dev_state state) 331 enum dev_state state)
414{ 332{
415 int mask = (state == STATE_RADIO_IRQ_ON); 333 int mask = (state == STATE_RADIO_IRQ_ON) ||
334 (state == STATE_RADIO_IRQ_ON_ISR);
416 u32 reg; 335 u32 reg;
417 336
418 /* 337 /*
@@ -446,6 +365,38 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
446 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 365 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
447} 366}
448 367
368static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
369{
370 u32 reg;
371
372 /*
373 * Reset DMA indexes
374 */
375 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
376 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
377 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
378 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
379 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
380 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
381 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
382 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
383 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
384
385 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
386 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
387
388 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
389
390 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
391 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
392 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
393 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
394
395 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
396
397 return 0;
398}
399
449static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) 400static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
450{ 401{
451 u32 reg; 402 u32 reg;
@@ -465,7 +416,7 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
465 /* 416 /*
466 * Send signal to firmware during boot time. 417 * Send signal to firmware during boot time.
467 */ 418 */
468 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); 419 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
469 420
470 /* 421 /*
471 * Enable RX. 422 * Enable RX.
@@ -589,7 +540,9 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
589 rt2800pci_toggle_rx(rt2x00dev, state); 540 rt2800pci_toggle_rx(rt2x00dev, state);
590 break; 541 break;
591 case STATE_RADIO_IRQ_ON: 542 case STATE_RADIO_IRQ_ON:
543 case STATE_RADIO_IRQ_ON_ISR:
592 case STATE_RADIO_IRQ_OFF: 544 case STATE_RADIO_IRQ_OFF:
545 case STATE_RADIO_IRQ_OFF_ISR:
593 rt2800pci_toggle_irq(rt2x00dev, state); 546 rt2800pci_toggle_irq(rt2x00dev, state);
594 break; 547 break;
595 case STATE_DEEP_SLEEP: 548 case STATE_DEEP_SLEEP:
@@ -613,18 +566,12 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
613/* 566/*
614 * TX descriptor initialization 567 * TX descriptor initialization
615 */ 568 */
616static int rt2800pci_write_tx_data(struct queue_entry* entry, 569static void rt2800pci_write_tx_data(struct queue_entry* entry,
617 struct txentry_desc *txdesc) 570 struct txentry_desc *txdesc)
618{ 571{
619 int ret; 572 __le32 *txwi = (__le32 *) entry->skb->data;
620
621 ret = rt2x00pci_write_tx_data(entry, txdesc);
622 if (ret)
623 return ret;
624 573
625 rt2800_write_txwi(entry->skb, txdesc); 574 rt2800_write_txwi(txwi, txdesc);
626
627 return 0;
628} 575}
629 576
630 577
@@ -684,49 +631,6 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
684/* 631/*
685 * TX data initialization 632 * TX data initialization
686 */ 633 */
687static void rt2800pci_write_beacon(struct queue_entry *entry,
688 struct txentry_desc *txdesc)
689{
690 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
691 unsigned int beacon_base;
692 u32 reg;
693
694 /*
695 * Disable beaconing while we are reloading the beacon data,
696 * otherwise we might be sending out invalid data.
697 */
698 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
699 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
700 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
701
702 /*
703 * Add the TXWI for the beacon to the skb.
704 */
705 rt2800_write_txwi(entry->skb, txdesc);
706 skb_push(entry->skb, TXWI_DESC_SIZE);
707
708 /*
709 * Write entire beacon with TXWI to register.
710 */
711 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
712 rt2800_register_multiwrite(rt2x00dev, beacon_base,
713 entry->skb->data, entry->skb->len);
714
715 /*
716 * Enable beaconing again.
717 */
718 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
719 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
720 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
721 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
722
723 /*
724 * Clean up beacon skb.
725 */
726 dev_kfree_skb_any(entry->skb);
727 entry->skb = NULL;
728}
729
730static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 634static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
731 const enum data_queue_qid queue_idx) 635 const enum data_queue_qid queue_idx)
732{ 636{
@@ -812,7 +716,7 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
812 /* 716 /*
813 * Process the RXWI structure that is at the start of the buffer. 717 * Process the RXWI structure that is at the start of the buffer.
814 */ 718 */
815 rt2800_process_rxwi(entry->skb, rxdesc); 719 rt2800_process_rxwi(entry, rxdesc);
816 720
817 /* 721 /*
818 * Set RX IDX in register to inform hardware that we have handled 722 * Set RX IDX in register to inform hardware that we have handled
@@ -832,29 +736,24 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
832 struct txdone_entry_desc txdesc; 736 struct txdone_entry_desc txdesc;
833 u32 word; 737 u32 word;
834 u32 reg; 738 u32 reg;
835 u32 old_reg;
836 int wcid, ack, pid, tx_wcid, tx_ack, tx_pid; 739 int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
837 u16 mcs, real_mcs; 740 u16 mcs, real_mcs;
741 int i;
838 742
839 /* 743 /*
840 * During each loop we will compare the freshly read 744 * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
841 * TX_STA_FIFO register value with the value read from 745 * at most X times and also stop processing once the TX_STA_FIFO_VALID
842 * the previous loop. If the 2 values are equal then 746 * flag is not set anymore.
843 * we should stop processing because the chance it 747 *
844 * quite big that the device has been unplugged and 748 * The legacy drivers use X=TX_RING_SIZE but state in a comment
845 * we risk going into an endless loop. 749 * that the TX_STA_FIFO stack has a size of 16. We stick to our
750 * tx ring size for now.
846 */ 751 */
847 old_reg = 0; 752 for (i = 0; i < TX_ENTRIES; i++) {
848
849 while (1) {
850 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg); 753 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
851 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID)) 754 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
852 break; 755 break;
853 756
854 if (old_reg == reg)
855 break;
856 old_reg = reg;
857
858 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); 757 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
859 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); 758 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
860 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); 759 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -880,8 +779,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
880 779
881 /* Check if we got a match by looking at WCID/ACK/PID 780 /* Check if we got a match by looking at WCID/ACK/PID
882 * fields */ 781 * fields */
883 txwi = (__le32 *)(entry->skb->data - 782 txwi = (__le32 *) entry->skb->data;
884 rt2x00dev->ops->extra_tx_headroom);
885 783
886 rt2x00_desc_read(txwi, 1, &word); 784 rt2x00_desc_read(txwi, 1, &word);
887 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID); 785 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
@@ -923,8 +821,12 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
923 txdesc.retry = 7; 821 txdesc.retry = 7;
924 } 822 }
925 823
926 __set_bit(TXDONE_FALLBACK, &txdesc.flags); 824 /*
927 825 * the frame was retried at least once
826 * -> hw used fallback rates
827 */
828 if (txdesc.retry)
829 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
928 830
929 rt2x00lib_txdone(entry, &txdesc); 831 rt2x00lib_txdone(entry, &txdesc);
930 } 832 }
@@ -938,6 +840,48 @@ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
938 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 840 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
939} 841}
940 842
843static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
844{
845 struct rt2x00_dev *rt2x00dev = dev_instance;
846 u32 reg = rt2x00dev->irqvalue[0];
847
848 /*
849 * 1 - Pre TBTT interrupt.
850 */
851 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
852 rt2x00lib_pretbtt(rt2x00dev);
853
854 /*
855 * 2 - Beacondone interrupt.
856 */
857 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
858 rt2x00lib_beacondone(rt2x00dev);
859
860 /*
861 * 3 - Rx ring done interrupt.
862 */
863 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
864 rt2x00pci_rxdone(rt2x00dev);
865
866 /*
867 * 4 - Tx done interrupt.
868 */
869 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
870 rt2800pci_txdone(rt2x00dev);
871
872 /*
873 * 5 - Auto wakeup interrupt.
874 */
875 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
876 rt2800pci_wakeup(rt2x00dev);
877
878 /* Enable interrupts again. */
879 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
880 STATE_RADIO_IRQ_ON_ISR);
881
882 return IRQ_HANDLED;
883}
884
941static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) 885static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
942{ 886{
943 struct rt2x00_dev *rt2x00dev = dev_instance; 887 struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -953,19 +897,15 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
953 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 897 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
954 return IRQ_HANDLED; 898 return IRQ_HANDLED;
955 899
956 /* 900 /* Store irqvalue for use in the interrupt thread. */
957 * 1 - Rx ring done interrupt. 901 rt2x00dev->irqvalue[0] = reg;
958 */
959 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
960 rt2x00pci_rxdone(rt2x00dev);
961 902
962 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) 903 /* Disable interrupts, will be enabled again in the interrupt thread. */
963 rt2800pci_txdone(rt2x00dev); 904 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
905 STATE_RADIO_IRQ_OFF_ISR);
964 906
965 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
966 rt2800pci_wakeup(rt2x00dev);
967 907
968 return IRQ_HANDLED; 908 return IRQ_WAKE_THREAD;
969} 909}
970 910
971/* 911/*
@@ -986,24 +926,10 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
986 return rt2800_validate_eeprom(rt2x00dev); 926 return rt2800_validate_eeprom(rt2x00dev);
987} 927}
988 928
989static const struct rt2800_ops rt2800pci_rt2800_ops = {
990 .register_read = rt2x00pci_register_read,
991 .register_read_lock = rt2x00pci_register_read, /* same for PCI */
992 .register_write = rt2x00pci_register_write,
993 .register_write_lock = rt2x00pci_register_write, /* same for PCI */
994
995 .register_multiread = rt2x00pci_register_multiread,
996 .register_multiwrite = rt2x00pci_register_multiwrite,
997
998 .regbusy_read = rt2x00pci_regbusy_read,
999};
1000
1001static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) 929static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1002{ 930{
1003 int retval; 931 int retval;
1004 932
1005 rt2x00dev->priv = (void *)&rt2800pci_rt2800_ops;
1006
1007 /* 933 /*
1008 * Allocate eeprom data. 934 * Allocate eeprom data.
1009 */ 935 */
@@ -1030,6 +956,12 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1030 __set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags); 956 __set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags);
1031 957
1032 /* 958 /*
959 * This device has a pre tbtt interrupt and thus fetches
960 * a new beacon directly prior to transmission.
961 */
962 __set_bit(DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, &rt2x00dev->flags);
963
964 /*
1033 * This device requires firmware. 965 * This device requires firmware.
1034 */ 966 */
1035 if (!rt2x00_is_soc(rt2x00dev)) 967 if (!rt2x00_is_soc(rt2x00dev))
@@ -1038,6 +970,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1038 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 970 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
1039 if (!modparam_nohwcrypt) 971 if (!modparam_nohwcrypt)
1040 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 972 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
973 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
1041 974
1042 /* 975 /*
1043 * Set the rssi offset. 976 * Set the rssi offset.
@@ -1047,12 +980,46 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1047 return 0; 980 return 0;
1048} 981}
1049 982
983static const struct ieee80211_ops rt2800pci_mac80211_ops = {
984 .tx = rt2x00mac_tx,
985 .start = rt2x00mac_start,
986 .stop = rt2x00mac_stop,
987 .add_interface = rt2x00mac_add_interface,
988 .remove_interface = rt2x00mac_remove_interface,
989 .config = rt2x00mac_config,
990 .configure_filter = rt2x00mac_configure_filter,
991 .set_key = rt2x00mac_set_key,
992 .sw_scan_start = rt2x00mac_sw_scan_start,
993 .sw_scan_complete = rt2x00mac_sw_scan_complete,
994 .get_stats = rt2x00mac_get_stats,
995 .get_tkip_seq = rt2800_get_tkip_seq,
996 .set_rts_threshold = rt2800_set_rts_threshold,
997 .bss_info_changed = rt2x00mac_bss_info_changed,
998 .conf_tx = rt2800_conf_tx,
999 .get_tsf = rt2800_get_tsf,
1000 .rfkill_poll = rt2x00mac_rfkill_poll,
1001 .ampdu_action = rt2800_ampdu_action,
1002};
1003
1004static const struct rt2800_ops rt2800pci_rt2800_ops = {
1005 .register_read = rt2x00pci_register_read,
1006 .register_read_lock = rt2x00pci_register_read, /* same for PCI */
1007 .register_write = rt2x00pci_register_write,
1008 .register_write_lock = rt2x00pci_register_write, /* same for PCI */
1009 .register_multiread = rt2x00pci_register_multiread,
1010 .register_multiwrite = rt2x00pci_register_multiwrite,
1011 .regbusy_read = rt2x00pci_regbusy_read,
1012 .drv_write_firmware = rt2800pci_write_firmware,
1013 .drv_init_registers = rt2800pci_init_registers,
1014};
1015
1050static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { 1016static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1051 .irq_handler = rt2800pci_interrupt, 1017 .irq_handler = rt2800pci_interrupt,
1018 .irq_handler_thread = rt2800pci_interrupt_thread,
1052 .probe_hw = rt2800pci_probe_hw, 1019 .probe_hw = rt2800pci_probe_hw,
1053 .get_firmware_name = rt2800pci_get_firmware_name, 1020 .get_firmware_name = rt2800pci_get_firmware_name,
1054 .check_firmware = rt2800pci_check_firmware, 1021 .check_firmware = rt2800_check_firmware,
1055 .load_firmware = rt2800pci_load_firmware, 1022 .load_firmware = rt2800_load_firmware,
1056 .initialize = rt2x00pci_initialize, 1023 .initialize = rt2x00pci_initialize,
1057 .uninitialize = rt2x00pci_uninitialize, 1024 .uninitialize = rt2x00pci_uninitialize,
1058 .get_entry_state = rt2800pci_get_entry_state, 1025 .get_entry_state = rt2800pci_get_entry_state,
@@ -1064,7 +1031,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1064 .link_tuner = rt2800_link_tuner, 1031 .link_tuner = rt2800_link_tuner,
1065 .write_tx_desc = rt2800pci_write_tx_desc, 1032 .write_tx_desc = rt2800pci_write_tx_desc,
1066 .write_tx_data = rt2800pci_write_tx_data, 1033 .write_tx_data = rt2800pci_write_tx_data,
1067 .write_beacon = rt2800pci_write_beacon, 1034 .write_beacon = rt2800_write_beacon,
1068 .kick_tx_queue = rt2800pci_kick_tx_queue, 1035 .kick_tx_queue = rt2800pci_kick_tx_queue,
1069 .kill_tx_queue = rt2800pci_kill_tx_queue, 1036 .kill_tx_queue = rt2800pci_kill_tx_queue,
1070 .fill_rxdone = rt2800pci_fill_rxdone, 1037 .fill_rxdone = rt2800pci_fill_rxdone,
@@ -1110,7 +1077,8 @@ static const struct rt2x00_ops rt2800pci_ops = {
1110 .tx = &rt2800pci_queue_tx, 1077 .tx = &rt2800pci_queue_tx,
1111 .bcn = &rt2800pci_queue_bcn, 1078 .bcn = &rt2800pci_queue_bcn,
1112 .lib = &rt2800pci_rt2x00_ops, 1079 .lib = &rt2800pci_rt2x00_ops,
1113 .hw = &rt2800_mac80211_ops, 1080 .drv = &rt2800pci_rt2800_ops,
1081 .hw = &rt2800pci_mac80211_ops,
1114#ifdef CONFIG_RT2X00_LIB_DEBUGFS 1082#ifdef CONFIG_RT2X00_LIB_DEBUGFS
1115 .debugfs = &rt2800_rt2x00debug, 1083 .debugfs = &rt2800_rt2x00debug,
1116#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 1084#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index afc8e7da27cb..5a8dda9b5b5a 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,25 +35,6 @@
35#define RT2800PCI_H 35#define RT2800PCI_H
36 36
37/* 37/*
38 * PCI registers.
39 */
40
41/*
42 * E2PROM_CSR: EEPROM control register.
43 * RELOAD: Write 1 to reload eeprom content.
44 * TYPE: 0: 93c46, 1:93c66.
45 * LOAD_STATUS: 1:loading, 0:done.
46 */
47#define E2PROM_CSR 0x0004
48#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001)
49#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002)
50#define E2PROM_CSR_DATA_IN FIELD32(0x00000004)
51#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008)
52#define E2PROM_CSR_TYPE FIELD32(0x00000030)
53#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
54#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
55
56/*
57 * Queue register offset macros 38 * Queue register offset macros
58 */ 39 */
59#define TX_QUEUE_REG_OFFSET 0x10 40#define TX_QUEUE_REG_OFFSET 0x10
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 0f8b84b7224c..5a2dfe87c6b6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -28,7 +28,6 @@
28 Supported chipsets: RT2800U. 28 Supported chipsets: RT2800U.
29 */ 29 */
30 30
31#include <linux/crc-ccitt.h>
32#include <linux/delay.h> 31#include <linux/delay.h>
33#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
34#include <linux/init.h> 33#include <linux/init.h>
@@ -45,7 +44,7 @@
45/* 44/*
46 * Allow hardware encryption to be disabled. 45 * Allow hardware encryption to be disabled.
47 */ 46 */
48static int modparam_nohwcrypt = 1; 47static int modparam_nohwcrypt = 0;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 48module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 49MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51 50
@@ -57,84 +56,10 @@ static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
57 return FIRMWARE_RT2870; 56 return FIRMWARE_RT2870;
58} 57}
59 58
60static bool rt2800usb_check_crc(const u8 *data, const size_t len) 59static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
61{
62 u16 fw_crc;
63 u16 crc;
64
65 /*
66 * The last 2 bytes in the firmware array are the crc checksum itself,
67 * this means that we should never pass those 2 bytes to the crc
68 * algorithm.
69 */
70 fw_crc = (data[len - 2] << 8 | data[len - 1]);
71
72 /*
73 * Use the crc ccitt algorithm.
74 * This will return the same value as the legacy driver which
75 * used bit ordering reversion on the both the firmware bytes
76 * before input input as well as on the final output.
77 * Obviously using crc ccitt directly is much more efficient.
78 */
79 crc = crc_ccitt(~0, data, len - 2);
80
81 /*
82 * There is a small difference between the crc-itu-t + bitrev and
83 * the crc-ccitt crc calculation. In the latter method the 2 bytes
84 * will be swapped, use swab16 to convert the crc to the correct
85 * value.
86 */
87 crc = swab16(crc);
88
89 return fw_crc == crc;
90}
91
92static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
93 const u8 *data, const size_t len) 60 const u8 *data, const size_t len)
94{ 61{
95 size_t offset = 0;
96
97 /*
98 * Firmware files:
99 * There are 2 variations of the rt2870 firmware.
100 * a) size: 4kb
101 * b) size: 8kb
102 * Note that (b) contains 2 separate firmware blobs of 4k
103 * within the file. The first blob is the same firmware as (a),
104 * but the second blob is for the additional chipsets.
105 */
106 if (len != 4096 && len != 8192)
107 return FW_BAD_LENGTH;
108
109 /*
110 * Check if we need the upper 4kb firmware data or not.
111 */
112 if ((len == 4096) &&
113 !rt2x00_rt(rt2x00dev, RT2860) &&
114 !rt2x00_rt(rt2x00dev, RT2872) &&
115 !rt2x00_rt(rt2x00dev, RT3070))
116 return FW_BAD_VERSION;
117
118 /*
119 * 8kb firmware files must be checked as if it were
120 * 2 separate firmware files.
121 */
122 while (offset < len) {
123 if (!rt2800usb_check_crc(data + offset, 4096))
124 return FW_BAD_CRC;
125
126 offset += 4096;
127 }
128
129 return FW_OK;
130}
131
132static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
133 const u8 *data, const size_t len)
134{
135 unsigned int i;
136 int status; 62 int status;
137 u32 reg;
138 u32 offset; 63 u32 offset;
139 u32 length; 64 u32 length;
140 65
@@ -152,28 +77,10 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
152 } 77 }
153 78
154 /* 79 /*
155 * Wait for stable hardware.
156 */
157 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
158 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
159 if (reg && reg != ~0)
160 break;
161 msleep(1);
162 }
163
164 if (i == REGISTER_BUSY_COUNT) {
165 ERROR(rt2x00dev, "Unstable hardware.\n");
166 return -EBUSY;
167 }
168
169 /*
170 * Write firmware to device. 80 * Write firmware to device.
171 */ 81 */
172 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, 82 rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
173 USB_VENDOR_REQUEST_OUT, 83 data + offset, length);
174 FIRMWARE_IMAGE_BASE,
175 data + offset, length,
176 REGISTER_TIMEOUT32(length));
177 84
178 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 85 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
179 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 86 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -196,7 +103,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
196 /* 103 /*
197 * Send signal to firmware during boot time. 104 * Send signal to firmware during boot time.
198 */ 105 */
199 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); 106 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
200 107
201 if (rt2x00_rt(rt2x00dev, RT3070) || 108 if (rt2x00_rt(rt2x00dev, RT3070) ||
202 rt2x00_rt(rt2x00dev, RT3071) || 109 rt2x00_rt(rt2x00dev, RT3071) ||
@@ -206,28 +113,6 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
206 udelay(10); 113 udelay(10);
207 } 114 }
208 115
209 /*
210 * Wait for device to stabilize.
211 */
212 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
213 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
214 if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
215 break;
216 msleep(1);
217 }
218
219 if (i == REGISTER_BUSY_COUNT) {
220 ERROR(rt2x00dev, "PBF system register not ready.\n");
221 return -EBUSY;
222 }
223
224 /*
225 * Initialize firmware.
226 */
227 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
228 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
229 msleep(1);
230
231 return 0; 116 return 0;
232} 117}
233 118
@@ -246,6 +131,44 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
246 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 131 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
247} 132}
248 133
134static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
135{
136 u32 reg;
137 int i;
138
139 /*
140 * Wait until BBP and RF are ready.
141 */
142 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
143 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
144 if (reg && reg != ~0)
145 break;
146 msleep(1);
147 }
148
149 if (i == REGISTER_BUSY_COUNT) {
150 ERROR(rt2x00dev, "Unstable hardware.\n");
151 return -EBUSY;
152 }
153
154 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
155 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
156
157 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
158 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
159 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
160 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
161
162 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
163
164 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
165 USB_MODE_RESET, REGISTER_TIMEOUT);
166
167 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
168
169 return 0;
170}
171
249static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) 172static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
250{ 173{
251 u32 reg; 174 u32 reg;
@@ -371,7 +294,9 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
371 rt2800usb_toggle_rx(rt2x00dev, state); 294 rt2800usb_toggle_rx(rt2x00dev, state);
372 break; 295 break;
373 case STATE_RADIO_IRQ_ON: 296 case STATE_RADIO_IRQ_ON:
297 case STATE_RADIO_IRQ_ON_ISR:
374 case STATE_RADIO_IRQ_OFF: 298 case STATE_RADIO_IRQ_OFF:
299 case STATE_RADIO_IRQ_OFF_ISR:
375 /* No support, but no error either */ 300 /* No support, but no error either */
376 break; 301 break;
377 case STATE_DEEP_SLEEP: 302 case STATE_DEEP_SLEEP:
@@ -395,25 +320,29 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
395/* 320/*
396 * TX descriptor initialization 321 * TX descriptor initialization
397 */ 322 */
323static void rt2800usb_write_tx_data(struct queue_entry* entry,
324 struct txentry_desc *txdesc)
325{
326 __le32 *txwi = (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
327
328 rt2800_write_txwi(txwi, txdesc);
329}
330
331
398static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 332static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
399 struct sk_buff *skb, 333 struct sk_buff *skb,
400 struct txentry_desc *txdesc) 334 struct txentry_desc *txdesc)
401{ 335{
402 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 336 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
403 __le32 *txi = (__le32 *)(skb->data - TXWI_DESC_SIZE - TXINFO_DESC_SIZE); 337 __le32 *txi = (__le32 *) skb->data;
404 u32 word; 338 u32 word;
405 339
406 /* 340 /*
407 * Initialize TXWI descriptor
408 */
409 rt2800_write_txwi(skb, txdesc);
410
411 /*
412 * Initialize TXINFO descriptor 341 * Initialize TXINFO descriptor
413 */ 342 */
414 rt2x00_desc_read(txi, 0, &word); 343 rt2x00_desc_read(txi, 0, &word);
415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 344 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
416 skb->len + TXWI_DESC_SIZE); 345 skb->len - TXINFO_DESC_SIZE);
417 rt2x00_set_field32(&word, TXINFO_W0_WIV, 346 rt2x00_set_field32(&word, TXINFO_W0_WIV,
418 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 347 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
419 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); 348 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -426,6 +355,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
426 /* 355 /*
427 * Register descriptor details in skb frame descriptor. 356 * Register descriptor details in skb frame descriptor.
428 */ 357 */
358 skbdesc->flags |= SKBDESC_DESC_IN_SKB;
429 skbdesc->desc = txi; 359 skbdesc->desc = txi;
430 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE; 360 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
431} 361}
@@ -433,51 +363,6 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
433/* 363/*
434 * TX data initialization 364 * TX data initialization
435 */ 365 */
436static void rt2800usb_write_beacon(struct queue_entry *entry,
437 struct txentry_desc *txdesc)
438{
439 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
440 unsigned int beacon_base;
441 u32 reg;
442
443 /*
444 * Disable beaconing while we are reloading the beacon data,
445 * otherwise we might be sending out invalid data.
446 */
447 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
448 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
449 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
450
451 /*
452 * Add the TXWI for the beacon to the skb.
453 */
454 rt2800_write_txwi(entry->skb, txdesc);
455 skb_push(entry->skb, TXWI_DESC_SIZE);
456
457 /*
458 * Write entire beacon with descriptor to register.
459 */
460 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
461 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
462 USB_VENDOR_REQUEST_OUT, beacon_base,
463 entry->skb->data, entry->skb->len,
464 REGISTER_TIMEOUT32(entry->skb->len));
465
466 /*
467 * Enable beaconing again.
468 */
469 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
470 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
471 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
472 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
473
474 /*
475 * Clean up the beacon skb.
476 */
477 dev_kfree_skb(entry->skb);
478 entry->skb = NULL;
479}
480
481static int rt2800usb_get_tx_data_len(struct queue_entry *entry) 366static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
482{ 367{
483 int length; 368 int length;
@@ -568,7 +453,7 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
568 /* 453 /*
569 * Process the RXWI structure. 454 * Process the RXWI structure.
570 */ 455 */
571 rt2800_process_rxwi(entry->skb, rxdesc); 456 rt2800_process_rxwi(entry, rxdesc);
572} 457}
573 458
574/* 459/*
@@ -585,24 +470,10 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
585 return rt2800_validate_eeprom(rt2x00dev); 470 return rt2800_validate_eeprom(rt2x00dev);
586} 471}
587 472
588static const struct rt2800_ops rt2800usb_rt2800_ops = {
589 .register_read = rt2x00usb_register_read,
590 .register_read_lock = rt2x00usb_register_read_lock,
591 .register_write = rt2x00usb_register_write,
592 .register_write_lock = rt2x00usb_register_write_lock,
593
594 .register_multiread = rt2x00usb_register_multiread,
595 .register_multiwrite = rt2x00usb_register_multiwrite,
596
597 .regbusy_read = rt2x00usb_regbusy_read,
598};
599
600static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) 473static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
601{ 474{
602 int retval; 475 int retval;
603 476
604 rt2x00dev->priv = (void *)&rt2800usb_rt2800_ops;
605
606 /* 477 /*
607 * Allocate eeprom data. 478 * Allocate eeprom data.
608 */ 479 */
@@ -635,6 +506,8 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
635 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 506 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
636 if (!modparam_nohwcrypt) 507 if (!modparam_nohwcrypt)
637 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 508 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
509 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
510 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
638 511
639 /* 512 /*
640 * Set the rssi offset. 513 * Set the rssi offset.
@@ -644,11 +517,45 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
644 return 0; 517 return 0;
645} 518}
646 519
520static const struct ieee80211_ops rt2800usb_mac80211_ops = {
521 .tx = rt2x00mac_tx,
522 .start = rt2x00mac_start,
523 .stop = rt2x00mac_stop,
524 .add_interface = rt2x00mac_add_interface,
525 .remove_interface = rt2x00mac_remove_interface,
526 .config = rt2x00mac_config,
527 .configure_filter = rt2x00mac_configure_filter,
528 .set_tim = rt2x00mac_set_tim,
529 .set_key = rt2x00mac_set_key,
530 .sw_scan_start = rt2x00mac_sw_scan_start,
531 .sw_scan_complete = rt2x00mac_sw_scan_complete,
532 .get_stats = rt2x00mac_get_stats,
533 .get_tkip_seq = rt2800_get_tkip_seq,
534 .set_rts_threshold = rt2800_set_rts_threshold,
535 .bss_info_changed = rt2x00mac_bss_info_changed,
536 .conf_tx = rt2800_conf_tx,
537 .get_tsf = rt2800_get_tsf,
538 .rfkill_poll = rt2x00mac_rfkill_poll,
539 .ampdu_action = rt2800_ampdu_action,
540};
541
542static const struct rt2800_ops rt2800usb_rt2800_ops = {
543 .register_read = rt2x00usb_register_read,
544 .register_read_lock = rt2x00usb_register_read_lock,
545 .register_write = rt2x00usb_register_write,
546 .register_write_lock = rt2x00usb_register_write_lock,
547 .register_multiread = rt2x00usb_register_multiread,
548 .register_multiwrite = rt2x00usb_register_multiwrite,
549 .regbusy_read = rt2x00usb_regbusy_read,
550 .drv_write_firmware = rt2800usb_write_firmware,
551 .drv_init_registers = rt2800usb_init_registers,
552};
553
647static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { 554static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
648 .probe_hw = rt2800usb_probe_hw, 555 .probe_hw = rt2800usb_probe_hw,
649 .get_firmware_name = rt2800usb_get_firmware_name, 556 .get_firmware_name = rt2800usb_get_firmware_name,
650 .check_firmware = rt2800usb_check_firmware, 557 .check_firmware = rt2800_check_firmware,
651 .load_firmware = rt2800usb_load_firmware, 558 .load_firmware = rt2800_load_firmware,
652 .initialize = rt2x00usb_initialize, 559 .initialize = rt2x00usb_initialize,
653 .uninitialize = rt2x00usb_uninitialize, 560 .uninitialize = rt2x00usb_uninitialize,
654 .clear_entry = rt2x00usb_clear_entry, 561 .clear_entry = rt2x00usb_clear_entry,
@@ -657,9 +564,10 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
657 .link_stats = rt2800_link_stats, 564 .link_stats = rt2800_link_stats,
658 .reset_tuner = rt2800_reset_tuner, 565 .reset_tuner = rt2800_reset_tuner,
659 .link_tuner = rt2800_link_tuner, 566 .link_tuner = rt2800_link_tuner,
567 .watchdog = rt2x00usb_watchdog,
660 .write_tx_desc = rt2800usb_write_tx_desc, 568 .write_tx_desc = rt2800usb_write_tx_desc,
661 .write_tx_data = rt2x00usb_write_tx_data, 569 .write_tx_data = rt2800usb_write_tx_data,
662 .write_beacon = rt2800usb_write_beacon, 570 .write_beacon = rt2800_write_beacon,
663 .get_tx_data_len = rt2800usb_get_tx_data_len, 571 .get_tx_data_len = rt2800usb_get_tx_data_len,
664 .kick_tx_queue = rt2x00usb_kick_tx_queue, 572 .kick_tx_queue = rt2x00usb_kick_tx_queue,
665 .kill_tx_queue = rt2x00usb_kill_tx_queue, 573 .kill_tx_queue = rt2x00usb_kill_tx_queue,
@@ -706,7 +614,8 @@ static const struct rt2x00_ops rt2800usb_ops = {
706 .tx = &rt2800usb_queue_tx, 614 .tx = &rt2800usb_queue_tx,
707 .bcn = &rt2800usb_queue_bcn, 615 .bcn = &rt2800usb_queue_bcn,
708 .lib = &rt2800usb_rt2x00_ops, 616 .lib = &rt2800usb_rt2x00_ops,
709 .hw = &rt2800_mac80211_ops, 617 .drv = &rt2800usb_rt2800_ops,
618 .hw = &rt2800usb_mac80211_ops,
710#ifdef CONFIG_RT2X00_LIB_DEBUGFS 619#ifdef CONFIG_RT2X00_LIB_DEBUGFS
711 .debugfs = &rt2800_rt2x00debug, 620 .debugfs = &rt2800_rt2x00debug,
712#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 621#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 2bca6a71a7f5..0722badccf86 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -32,43 +32,6 @@
32#define RT2800USB_H 32#define RT2800USB_H
33 33
34/* 34/*
35 * USB registers.
36 */
37
38/*
39 * USB_DMA_CFG
40 * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
41 * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
42 * PHY_CLEAR: phy watch dog enable.
43 * TX_CLEAR: Clear USB DMA TX path.
44 * TXOP_HALT: Halt TXOP count down when TX buffer is full.
45 * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation.
46 * RX_BULK_EN: Enable USB DMA Rx.
47 * TX_BULK_EN: Enable USB DMA Tx.
48 * EP_OUT_VALID: OUT endpoint data valid.
49 * RX_BUSY: USB DMA RX FSM busy.
50 * TX_BUSY: USB DMA TX FSM busy.
51 */
52#define USB_DMA_CFG 0x02a0
53#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff)
54#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00)
55#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000)
56#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000)
57#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000)
58#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000)
59#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000)
60#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000)
61#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000)
62#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000)
63#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000)
64
65/*
66 * USB_CYC_CFG
67 */
68#define USB_CYC_CFG 0x02a4
69#define USB_CYC_CFG_CLOCK_CYCLE FIELD32(0x000000ff)
70
71/*
72 * 8051 firmware image. 35 * 8051 firmware image.
73 */ 36 */
74#define FIRMWARE_RT2870 "rt2870.bin" 37#define FIRMWARE_RT2870 "rt2870.bin"
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 6c1ff4c15c84..c21af38cc5af 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
39#include <net/mac80211.h> 39#include <net/mac80211.h>
40 40
41#include "rt2x00debug.h" 41#include "rt2x00debug.h"
42#include "rt2x00dump.h"
42#include "rt2x00leds.h" 43#include "rt2x00leds.h"
43#include "rt2x00reg.h" 44#include "rt2x00reg.h"
44#include "rt2x00queue.h" 45#include "rt2x00queue.h"
@@ -159,6 +160,7 @@ struct avg_val {
159 160
160enum rt2x00_chip_intf { 161enum rt2x00_chip_intf {
161 RT2X00_CHIP_INTF_PCI, 162 RT2X00_CHIP_INTF_PCI,
163 RT2X00_CHIP_INTF_PCIE,
162 RT2X00_CHIP_INTF_USB, 164 RT2X00_CHIP_INTF_USB,
163 RT2X00_CHIP_INTF_SOC, 165 RT2X00_CHIP_INTF_SOC,
164}; 166};
@@ -175,8 +177,7 @@ struct rt2x00_chip {
175#define RT2570 0x2570 177#define RT2570 0x2570
176#define RT2661 0x2661 178#define RT2661 0x2661
177#define RT2573 0x2573 179#define RT2573 0x2573
178#define RT2860 0x2860 /* 2.4GHz PCI/CB */ 180#define RT2860 0x2860 /* 2.4GHz */
179#define RT2870 0x2870
180#define RT2872 0x2872 /* WSOC */ 181#define RT2872 0x2872 /* WSOC */
181#define RT2883 0x2883 /* WSOC */ 182#define RT2883 0x2883 /* WSOC */
182#define RT3070 0x3070 183#define RT3070 0x3070
@@ -331,6 +332,11 @@ struct link {
331 * Work structure for scheduling periodic link tuning. 332 * Work structure for scheduling periodic link tuning.
332 */ 333 */
333 struct delayed_work work; 334 struct delayed_work work;
335
336 /*
337 * Work structure for scheduling periodic watchdog monitoring.
338 */
339 struct delayed_work watchdog_work;
334}; 340};
335 341
336/* 342/*
@@ -509,6 +515,11 @@ struct rt2x00lib_ops {
509 irq_handler_t irq_handler; 515 irq_handler_t irq_handler;
510 516
511 /* 517 /*
518 * Threaded Interrupt handlers.
519 */
520 irq_handler_t irq_handler_thread;
521
522 /*
512 * Device init handlers. 523 * Device init handlers.
513 */ 524 */
514 int (*probe_hw) (struct rt2x00_dev *rt2x00dev); 525 int (*probe_hw) (struct rt2x00_dev *rt2x00dev);
@@ -542,6 +553,7 @@ struct rt2x00lib_ops {
542 struct link_qual *qual); 553 struct link_qual *qual);
543 void (*link_tuner) (struct rt2x00_dev *rt2x00dev, 554 void (*link_tuner) (struct rt2x00_dev *rt2x00dev,
544 struct link_qual *qual, const u32 count); 555 struct link_qual *qual, const u32 count);
556 void (*watchdog) (struct rt2x00_dev *rt2x00dev);
545 557
546 /* 558 /*
547 * TX control handlers 559 * TX control handlers
@@ -549,8 +561,8 @@ struct rt2x00lib_ops {
549 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 561 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
550 struct sk_buff *skb, 562 struct sk_buff *skb,
551 struct txentry_desc *txdesc); 563 struct txentry_desc *txdesc);
552 int (*write_tx_data) (struct queue_entry *entry, 564 void (*write_tx_data) (struct queue_entry *entry,
553 struct txentry_desc *txdesc); 565 struct txentry_desc *txdesc);
554 void (*write_beacon) (struct queue_entry *entry, 566 void (*write_beacon) (struct queue_entry *entry,
555 struct txentry_desc *txdesc); 567 struct txentry_desc *txdesc);
556 int (*get_tx_data_len) (struct queue_entry *entry); 568 int (*get_tx_data_len) (struct queue_entry *entry);
@@ -609,6 +621,7 @@ struct rt2x00_ops {
609 const struct data_queue_desc *bcn; 621 const struct data_queue_desc *bcn;
610 const struct data_queue_desc *atim; 622 const struct data_queue_desc *atim;
611 const struct rt2x00lib_ops *lib; 623 const struct rt2x00lib_ops *lib;
624 const void *drv;
612 const struct ieee80211_ops *hw; 625 const struct ieee80211_ops *hw;
613#ifdef CONFIG_RT2X00_LIB_DEBUGFS 626#ifdef CONFIG_RT2X00_LIB_DEBUGFS
614 const struct rt2x00debug *debugfs; 627 const struct rt2x00debug *debugfs;
@@ -627,6 +640,7 @@ enum rt2x00_flags {
627 DEVICE_STATE_INITIALIZED, 640 DEVICE_STATE_INITIALIZED,
628 DEVICE_STATE_STARTED, 641 DEVICE_STATE_STARTED,
629 DEVICE_STATE_ENABLED_RADIO, 642 DEVICE_STATE_ENABLED_RADIO,
643 DEVICE_STATE_SCANNING,
630 644
631 /* 645 /*
632 * Driver requirements 646 * Driver requirements
@@ -645,6 +659,9 @@ enum rt2x00_flags {
645 CONFIG_SUPPORT_HW_CRYPTO, 659 CONFIG_SUPPORT_HW_CRYPTO,
646 DRIVER_SUPPORT_CONTROL_FILTERS, 660 DRIVER_SUPPORT_CONTROL_FILTERS,
647 DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, 661 DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
662 DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
663 DRIVER_SUPPORT_LINK_TUNING,
664 DRIVER_SUPPORT_WATCHDOG,
648 665
649 /* 666 /*
650 * Driver configuration 667 * Driver configuration
@@ -654,7 +671,6 @@ enum rt2x00_flags {
654 CONFIG_EXTERNAL_LNA_A, 671 CONFIG_EXTERNAL_LNA_A,
655 CONFIG_EXTERNAL_LNA_BG, 672 CONFIG_EXTERNAL_LNA_BG,
656 CONFIG_DOUBLE_ANTENNA, 673 CONFIG_DOUBLE_ANTENNA,
657 CONFIG_DISABLE_LINK_TUNING,
658 CONFIG_CHANNEL_HT40, 674 CONFIG_CHANNEL_HT40,
659}; 675};
660 676
@@ -862,9 +878,10 @@ struct rt2x00_dev {
862 const struct firmware *fw; 878 const struct firmware *fw;
863 879
864 /* 880 /*
865 * Driver specific data. 881 * Interrupt values, stored between interrupt service routine
882 * and interrupt thread routine.
866 */ 883 */
867 void *priv; 884 u32 irqvalue[2];
868}; 885};
869 886
870/* 887/*
@@ -978,7 +995,13 @@ static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
978 995
979static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev) 996static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev)
980{ 997{
981 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); 998 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI) ||
999 rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
1000}
1001
1002static inline bool rt2x00_is_pcie(struct rt2x00_dev *rt2x00dev)
1003{
1004 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
982} 1005}
983 1006
984static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev) 1007static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev)
@@ -999,6 +1022,13 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
999void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 1022void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
1000 1023
1001/** 1024/**
1025 * rt2x00queue_unmap_skb - Unmap a skb from DMA.
1026 * @rt2x00dev: Pointer to &struct rt2x00_dev.
1027 * @skb: The skb to unmap.
1028 */
1029void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
1030
1031/**
1002 * rt2x00queue_get_queue - Convert queue index to queue pointer 1032 * rt2x00queue_get_queue - Convert queue index to queue pointer
1003 * @rt2x00dev: Pointer to &struct rt2x00_dev. 1033 * @rt2x00dev: Pointer to &struct rt2x00_dev.
1004 * @queue: rt2x00 queue index (see &enum data_queue_qid). 1034 * @queue: rt2x00 queue index (see &enum data_queue_qid).
@@ -1015,9 +1045,30 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
1015 enum queue_index index); 1045 enum queue_index index);
1016 1046
1017/* 1047/*
1048 * Debugfs handlers.
1049 */
1050/**
1051 * rt2x00debug_dump_frame - Dump a frame to userspace through debugfs.
1052 * @rt2x00dev: Pointer to &struct rt2x00_dev.
1053 * @type: The type of frame that is being dumped.
1054 * @skb: The skb containing the frame to be dumped.
1055 */
1056#ifdef CONFIG_RT2X00_LIB_DEBUGFS
1057void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
1058 enum rt2x00_dump_type type, struct sk_buff *skb);
1059#else
1060static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
1061 enum rt2x00_dump_type type,
1062 struct sk_buff *skb)
1063{
1064}
1065#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
1066
1067/*
1018 * Interrupt context handlers. 1068 * Interrupt context handlers.
1019 */ 1069 */
1020void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); 1070void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
1071void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev);
1021void rt2x00lib_txdone(struct queue_entry *entry, 1072void rt2x00lib_txdone(struct queue_entry *entry,
1022 struct txdone_entry_desc *txdesc); 1073 struct txdone_entry_desc *txdesc);
1023void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, 1074void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
@@ -1047,6 +1098,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1047#else 1098#else
1048#define rt2x00mac_set_key NULL 1099#define rt2x00mac_set_key NULL
1049#endif /* CONFIG_RT2X00_LIB_CRYPTO */ 1100#endif /* CONFIG_RT2X00_LIB_CRYPTO */
1101void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw);
1102void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw);
1050int rt2x00mac_get_stats(struct ieee80211_hw *hw, 1103int rt2x00mac_get_stats(struct ieee80211_hw *hw,
1051 struct ieee80211_low_level_stats *stats); 1104 struct ieee80211_low_level_stats *stats);
1052void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, 1105void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 098315a271ca..953dc4f2c6af 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -41,10 +41,12 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
41 41
42 switch (type) { 42 switch (type) {
43 case NL80211_IFTYPE_ADHOC: 43 case NL80211_IFTYPE_ADHOC:
44 conf.sync = TSF_SYNC_ADHOC;
45 break;
44 case NL80211_IFTYPE_AP: 46 case NL80211_IFTYPE_AP:
45 case NL80211_IFTYPE_MESH_POINT: 47 case NL80211_IFTYPE_MESH_POINT:
46 case NL80211_IFTYPE_WDS: 48 case NL80211_IFTYPE_WDS:
47 conf.sync = TSF_SYNC_BEACON; 49 conf.sync = TSF_SYNC_AP_NONE;
48 break; 50 break;
49 case NL80211_IFTYPE_STATION: 51 case NL80211_IFTYPE_STATION:
50 conf.sync = TSF_SYNC_INFRA; 52 conf.sync = TSF_SYNC_INFRA;
@@ -170,23 +172,27 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
170 unsigned int ieee80211_flags) 172 unsigned int ieee80211_flags)
171{ 173{
172 struct rt2x00lib_conf libconf; 174 struct rt2x00lib_conf libconf;
175 u16 hw_value;
173 176
174 memset(&libconf, 0, sizeof(libconf)); 177 memset(&libconf, 0, sizeof(libconf));
175 178
176 libconf.conf = conf; 179 libconf.conf = conf;
177 180
178 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { 181 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
179 if (conf_is_ht40(conf)) 182 if (conf_is_ht40(conf)) {
180 __set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); 183 __set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
181 else 184 hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
185 } else {
182 __clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); 186 __clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
187 hw_value = conf->channel->hw_value;
188 }
183 189
184 memcpy(&libconf.rf, 190 memcpy(&libconf.rf,
185 &rt2x00dev->spec.channels[conf->channel->hw_value], 191 &rt2x00dev->spec.channels[hw_value],
186 sizeof(libconf.rf)); 192 sizeof(libconf.rf));
187 193
188 memcpy(&libconf.channel, 194 memcpy(&libconf.channel,
189 &rt2x00dev->spec.channels_info[conf->channel->hw_value], 195 &rt2x00dev->spec.channels_info[hw_value],
190 sizeof(libconf.channel)); 196 sizeof(libconf.channel));
191 } 197 }
192 198
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index e9fe93fd8042..b0498e7e7aae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -211,6 +211,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
211 if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)) 211 if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))
212 skb_queue_purge(&intf->frame_dump_skbqueue); 212 skb_queue_purge(&intf->frame_dump_skbqueue);
213} 213}
214EXPORT_SYMBOL_GPL(rt2x00debug_dump_frame);
214 215
215static int rt2x00debug_file_open(struct inode *inode, struct file *file) 216static int rt2x00debug_file_open(struct inode *inode, struct file *file)
216{ 217{
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index f20d3eeeea7f..585e8166f22a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -70,6 +70,11 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
70 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); 70 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
71 71
72 /* 72 /*
73 * Start watchdog monitoring.
74 */
75 rt2x00link_start_watchdog(rt2x00dev);
76
77 /*
73 * Start the TX queues. 78 * Start the TX queues.
74 */ 79 */
75 ieee80211_wake_queues(rt2x00dev->hw); 80 ieee80211_wake_queues(rt2x00dev->hw);
@@ -89,6 +94,11 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
89 rt2x00queue_stop_queues(rt2x00dev); 94 rt2x00queue_stop_queues(rt2x00dev);
90 95
91 /* 96 /*
97 * Stop watchdog monitoring.
98 */
99 rt2x00link_stop_watchdog(rt2x00dev);
100
101 /*
92 * Disable RX. 102 * Disable RX.
93 */ 103 */
94 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); 104 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
@@ -168,10 +178,32 @@ static void rt2x00lib_intf_scheduled(struct work_struct *work)
168/* 178/*
169 * Interrupt context handlers. 179 * Interrupt context handlers.
170 */ 180 */
171static void rt2x00lib_beacondone_iter(void *data, u8 *mac, 181static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
172 struct ieee80211_vif *vif) 182 struct ieee80211_vif *vif)
173{ 183{
174 struct rt2x00_intf *intf = vif_to_intf(vif); 184 struct rt2x00_dev *rt2x00dev = data;
185 struct sk_buff *skb;
186
187 /*
188 * Only AP mode interfaces do broad- and multicast buffering
189 */
190 if (vif->type != NL80211_IFTYPE_AP)
191 return;
192
193 /*
194 * Send out buffered broad- and multicast frames
195 */
196 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
197 while (skb) {
198 rt2x00mac_tx(rt2x00dev->hw, skb);
199 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
200 }
201}
202
203static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
204 struct ieee80211_vif *vif)
205{
206 struct rt2x00_dev *rt2x00dev = data;
175 207
176 if (vif->type != NL80211_IFTYPE_AP && 208 if (vif->type != NL80211_IFTYPE_AP &&
177 vif->type != NL80211_IFTYPE_ADHOC && 209 vif->type != NL80211_IFTYPE_ADHOC &&
@@ -179,9 +211,7 @@ static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
179 vif->type != NL80211_IFTYPE_WDS) 211 vif->type != NL80211_IFTYPE_WDS)
180 return; 212 return;
181 213
182 spin_lock(&intf->lock); 214 rt2x00queue_update_beacon(rt2x00dev, vif, true);
183 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
184 spin_unlock(&intf->lock);
185} 215}
186 216
187void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) 217void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -189,14 +219,37 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
189 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 219 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
190 return; 220 return;
191 221
192 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, 222 /* send buffered bc/mc frames out for every bssid */
193 rt2x00lib_beacondone_iter, 223 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
194 rt2x00dev); 224 rt2x00lib_bc_buffer_iter,
225 rt2x00dev);
226 /*
227 * Devices with pre tbtt interrupt don't need to update the beacon
228 * here as they will fetch the next beacon directly prior to
229 * transmission.
230 */
231 if (test_bit(DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, &rt2x00dev->flags))
232 return;
195 233
196 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work); 234 /* fetch next beacon */
235 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
236 rt2x00lib_beaconupdate_iter,
237 rt2x00dev);
197} 238}
198EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); 239EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
199 240
241void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
242{
243 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
244 return;
245
246 /* fetch next beacon */
247 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
248 rt2x00lib_beaconupdate_iter,
249 rt2x00dev);
250}
251EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
252
200void rt2x00lib_txdone(struct queue_entry *entry, 253void rt2x00lib_txdone(struct queue_entry *entry,
201 struct txdone_entry_desc *txdesc) 254 struct txdone_entry_desc *txdesc)
202{ 255{
@@ -216,6 +269,16 @@ void rt2x00lib_txdone(struct queue_entry *entry,
216 rt2x00queue_unmap_skb(rt2x00dev, entry->skb); 269 rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
217 270
218 /* 271 /*
272 * Remove the extra tx headroom from the skb.
273 */
274 skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
275
276 /*
277 * Signal that the TX descriptor is no longer in the skb.
278 */
279 skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
280
281 /*
219 * Remove L2 padding which was added during 282 * Remove L2 padding which was added during
220 */ 283 */
221 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) 284 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
@@ -224,7 +287,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
224 /* 287 /*
225 * If the IV/EIV data was stripped from the frame before it was 288 * If the IV/EIV data was stripped from the frame before it was
226 * passed to the hardware, we should now reinsert it again because 289 * passed to the hardware, we should now reinsert it again because
227 * mac80211 will expect the the same data to be present it the 290 * mac80211 will expect the same data to be present it the
228 * frame as it was passed to us. 291 * frame as it was passed to us.
229 */ 292 */
230 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) 293 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
@@ -241,8 +304,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
241 */ 304 */
242 success = 305 success =
243 test_bit(TXDONE_SUCCESS, &txdesc->flags) || 306 test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
244 test_bit(TXDONE_UNKNOWN, &txdesc->flags) || 307 test_bit(TXDONE_UNKNOWN, &txdesc->flags);
245 test_bit(TXDONE_FALLBACK, &txdesc->flags);
246 308
247 /* 309 /*
248 * Update TX statistics. 310 * Update TX statistics.
@@ -264,11 +326,22 @@ void rt2x00lib_txdone(struct queue_entry *entry,
264 /* 326 /*
265 * Frame was send with retries, hardware tried 327 * Frame was send with retries, hardware tried
266 * different rates to send out the frame, at each 328 * different rates to send out the frame, at each
267 * retry it lowered the rate 1 step. 329 * retry it lowered the rate 1 step except when the
330 * lowest rate was used.
268 */ 331 */
269 for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) { 332 for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
270 tx_info->status.rates[i].idx = rate_idx - i; 333 tx_info->status.rates[i].idx = rate_idx - i;
271 tx_info->status.rates[i].flags = rate_flags; 334 tx_info->status.rates[i].flags = rate_flags;
335
336 if (rate_idx - i == 0) {
337 /*
338 * The lowest rate (index 0) was used until the
339 * number of max retries was reached.
340 */
341 tx_info->status.rates[i].count = retry_rates - i;
342 i++;
343 break;
344 }
272 tx_info->status.rates[i].count = 1; 345 tx_info->status.rates[i].count = 1;
273 } 346 }
274 if (i < (IEEE80211_TX_MAX_RATES - 1)) 347 if (i < (IEEE80211_TX_MAX_RATES - 1))
@@ -281,6 +354,21 @@ void rt2x00lib_txdone(struct queue_entry *entry,
281 rt2x00dev->low_level_stats.dot11ACKFailureCount++; 354 rt2x00dev->low_level_stats.dot11ACKFailureCount++;
282 } 355 }
283 356
357 /*
358 * Every single frame has it's own tx status, hence report
359 * every frame as ampdu of size 1.
360 *
361 * TODO: if we can find out how many frames were aggregated
362 * by the hw we could provide the real ampdu_len to mac80211
363 * which would allow the rc algorithm to better decide on
364 * which rates are suitable.
365 */
366 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
367 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
368 tx_info->status.ampdu_len = 1;
369 tx_info->status.ampdu_ack_len = success ? 1 : 0;
370 }
371
284 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 372 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
285 if (success) 373 if (success)
286 rt2x00dev->low_level_stats.dot11RTSSuccessCount++; 374 rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
@@ -295,9 +383,17 @@ void rt2x00lib_txdone(struct queue_entry *entry,
295 * send the status report back. 383 * send the status report back.
296 */ 384 */
297 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) 385 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
298 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb); 386 /*
387 * Only PCI and SOC devices process the tx status in process
388 * context. Hence use ieee80211_tx_status for PCI and SOC
389 * devices and stick to ieee80211_tx_status_irqsafe for USB.
390 */
391 if (rt2x00_is_usb(rt2x00dev))
392 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
393 else
394 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
299 else 395 else
300 dev_kfree_skb_irq(entry->skb); 396 dev_kfree_skb_any(entry->skb);
301 397
302 /* 398 /*
303 * Make this entry available for reuse. 399 * Make this entry available for reuse.
@@ -444,7 +540,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
444 */ 540 */
445 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb); 541 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
446 memcpy(IEEE80211_SKB_RXCB(entry->skb), rx_status, sizeof(*rx_status)); 542 memcpy(IEEE80211_SKB_RXCB(entry->skb), rx_status, sizeof(*rx_status));
447 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb); 543
544 /*
545 * Currently only PCI and SOC devices handle rx interrupts in process
546 * context. Hence, use ieee80211_rx_irqsafe for USB and ieee80211_rx_ni
547 * for PCI and SOC devices.
548 */
549 if (rt2x00_is_usb(rt2x00dev))
550 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb);
551 else
552 ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
448 553
449 /* 554 /*
450 * Replace the skb with the freshly allocated one. 555 * Replace the skb with the freshly allocated one.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index ed303b423e41..5d6e0b83151f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -20,7 +20,12 @@
20 20
21/* 21/*
22 Module: rt2x00dump 22 Module: rt2x00dump
23 Abstract: Data structures for the rt2x00debug & userspace. 23 Abstract:
24 Data structures for the rt2x00debug & userspace.
25
26 The declarations in this file can be used by both rt2x00
27 and userspace and therefore should be kept together in
28 this file.
24 */ 29 */
25 30
26#ifndef RT2X00DUMP_H 31#ifndef RT2X00DUMP_H
@@ -111,7 +116,7 @@ struct rt2x00dump_hdr {
111 116
112 __le16 chip_rt; 117 __le16 chip_rt;
113 __le16 chip_rf; 118 __le16 chip_rf;
114 __le32 chip_rev; 119 __le16 chip_rev;
115 120
116 __le16 type; 121 __le16 type;
117 __u8 queue_index; 122 __u8 queue_index;
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index 5a407602ce3e..c004cd3a8847 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -44,11 +44,22 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
44 txdesc->mpdu_density = 0; 44 txdesc->mpdu_density = 0;
45 45
46 txdesc->ba_size = 7; /* FIXME: What value is needed? */ 46 txdesc->ba_size = 7; /* FIXME: What value is needed? */
47 txdesc->stbc = 0; /* FIXME: What value is needed? */
48 47
49 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); 48 txdesc->stbc =
50 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 49 (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
51 txdesc->mcs |= 0x08; 50
51 /*
52 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
53 * mcs rate to be used
54 */
55 if (txrate->flags & IEEE80211_TX_RC_MCS) {
56 txdesc->mcs = txrate->idx;
57 } else {
58 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
59 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
60 txdesc->mcs |= 0x08;
61 }
62
52 63
53 /* 64 /*
54 * Convert flags 65 * Convert flags
@@ -84,3 +95,31 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
84 else 95 else
85 txdesc->txop = TXOP_HTTXOP; 96 txdesc->txop = TXOP_HTTXOP;
86} 97}
98
99u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
100 struct ieee80211_conf *conf)
101{
102 struct hw_mode_spec *spec = &rt2x00dev->spec;
103 int center_channel;
104 u16 i;
105
106 /*
107 * Initialize center channel to current channel.
108 */
109 center_channel = spec->channels[conf->channel->hw_value].channel;
110
111 /*
112 * Adjust center channel to HT40+ and HT40- operation.
113 */
114 if (conf_is_ht40_plus(conf))
115 center_channel += 2;
116 else if (conf_is_ht40_minus(conf))
117 center_channel -= (center_channel == 14) ? 1 : 2;
118
119 for (i = 0; i < spec->num_channels; i++)
120 if (spec->channels[i].channel == center_channel)
121 return i;
122
123 WARN_ON(1);
124 return conf->channel->hw_value;
125}
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index be2e37fb4071..dc5c6574aaf4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -27,11 +27,10 @@
27#ifndef RT2X00LIB_H 27#ifndef RT2X00LIB_H
28#define RT2X00LIB_H 28#define RT2X00LIB_H
29 29
30#include "rt2x00dump.h"
31
32/* 30/*
33 * Interval defines 31 * Interval defines
34 */ 32 */
33#define WATCHDOG_INTERVAL round_jiffies_relative(HZ)
35#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ) 34#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ)
36 35
37/* 36/*
@@ -107,13 +106,6 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
107 struct queue_entry *entry); 106 struct queue_entry *entry);
108 107
109/** 108/**
110 * rt2x00queue_unmap_skb - Unmap a skb from DMA.
111 * @rt2x00dev: Pointer to &struct rt2x00_dev.
112 * @skb: The skb to unmap.
113 */
114void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
115
116/**
117 * rt2x00queue_free_skb - free a skb 109 * rt2x00queue_free_skb - free a skb
118 * @rt2x00dev: Pointer to &struct rt2x00_dev. 110 * @rt2x00dev: Pointer to &struct rt2x00_dev.
119 * @skb: The skb to free. 111 * @skb: The skb to free.
@@ -266,11 +258,30 @@ void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev);
266void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna); 258void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna);
267 259
268/** 260/**
269 * rt2x00link_register - Initialize link tuning functionality 261 * rt2x00link_start_watchdog - Start periodic watchdog monitoring
262 * @rt2x00dev: Pointer to &struct rt2x00_dev.
263 *
264 * This start the watchdog periodic work, this work will
265 *be executed periodically until &rt2x00link_stop_watchdog has
266 * been called.
267 */
268void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev);
269
270/**
271 * rt2x00link_stop_watchdog - Stop periodic watchdog monitoring
270 * @rt2x00dev: Pointer to &struct rt2x00_dev. 272 * @rt2x00dev: Pointer to &struct rt2x00_dev.
271 * 273 *
272 * Initialize work structure and all link tuning related 274 * After this function completed the watchdog monitoring will not
273 * parameters. This will not start the link tuning process itself. 275 * be running until &rt2x00link_start_watchdog is called.
276 */
277void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev);
278
279/**
280 * rt2x00link_register - Initialize link tuning & watchdog functionality
281 * @rt2x00dev: Pointer to &struct rt2x00_dev.
282 *
283 * Initialize work structure and all link tuning and watchdog related
284 * parameters. This will not start the periodic work itself.
274 */ 285 */
275void rt2x00link_register(struct rt2x00_dev *rt2x00dev); 286void rt2x00link_register(struct rt2x00_dev *rt2x00dev);
276 287
@@ -296,8 +307,6 @@ static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev)
296#ifdef CONFIG_RT2X00_LIB_DEBUGFS 307#ifdef CONFIG_RT2X00_LIB_DEBUGFS
297void rt2x00debug_register(struct rt2x00_dev *rt2x00dev); 308void rt2x00debug_register(struct rt2x00_dev *rt2x00dev);
298void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); 309void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
299void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
300 enum rt2x00_dump_type type, struct sk_buff *skb);
301void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, 310void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
302 struct rxdone_entry_desc *rxdesc); 311 struct rxdone_entry_desc *rxdesc);
303#else 312#else
@@ -309,12 +318,6 @@ static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
309{ 318{
310} 319}
311 320
312static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
313 enum rt2x00_dump_type type,
314 struct sk_buff *skb)
315{
316}
317
318static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, 321static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
319 struct rxdone_entry_desc *rxdesc) 322 struct rxdone_entry_desc *rxdesc)
320{ 323{
@@ -384,12 +387,21 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
384void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, 387void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
385 struct txentry_desc *txdesc, 388 struct txentry_desc *txdesc,
386 const struct rt2x00_rate *hwrate); 389 const struct rt2x00_rate *hwrate);
390
391u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
392 struct ieee80211_conf *conf);
387#else 393#else
388static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, 394static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
389 struct txentry_desc *txdesc, 395 struct txentry_desc *txdesc,
390 const struct rt2x00_rate *hwrate) 396 const struct rt2x00_rate *hwrate)
391{ 397{
392} 398}
399
400static inline u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
401 struct ieee80211_conf *conf)
402{
403 return conf->channel->hw_value;
404}
393#endif /* CONFIG_RT2X00_LIB_HT */ 405#endif /* CONFIG_RT2X00_LIB_HT */
394 406
395/* 407/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 0efbf5a6c254..666cef3f8472 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -271,11 +271,20 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
271 271
272 /* 272 /*
273 * Link tuning should only be performed when 273 * Link tuning should only be performed when
274 * an active sta or master interface exists. 274 * an active sta interface exists. AP interfaces
275 * Single monitor mode interfaces should never have 275 * don't need link tuning and monitor mode interfaces
276 * work with link tuners. 276 * should never have to work with link tuners.
277 */ 277 */
278 if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count) 278 if (!rt2x00dev->intf_sta_count)
279 return;
280
281 /**
282 * While scanning, link tuning is disabled. By default
283 * the most sensitive settings will be used to make sure
284 * that all beacons and probe responses will be recieved
285 * during the scan.
286 */
287 if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
279 return; 288 return;
280 289
281 rt2x00link_reset_tuner(rt2x00dev, false); 290 rt2x00link_reset_tuner(rt2x00dev, false);
@@ -293,6 +302,7 @@ void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev)
293void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna) 302void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
294{ 303{
295 struct link_qual *qual = &rt2x00dev->link.qual; 304 struct link_qual *qual = &rt2x00dev->link.qual;
305 u8 vgc_level = qual->vgc_level_reg;
296 306
297 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 307 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
298 return; 308 return;
@@ -309,6 +319,13 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
309 memset(qual, 0, sizeof(*qual)); 319 memset(qual, 0, sizeof(*qual));
310 320
311 /* 321 /*
322 * Restore the VGC level as stored in the registers,
323 * the driver can use this to determine if the register
324 * must be updated during reset or not.
325 */
326 qual->vgc_level_reg = vgc_level;
327
328 /*
312 * Reset the link tuner. 329 * Reset the link tuner.
313 */ 330 */
314 rt2x00dev->ops->lib->reset_tuner(rt2x00dev, qual); 331 rt2x00dev->ops->lib->reset_tuner(rt2x00dev, qual);
@@ -338,7 +355,8 @@ static void rt2x00link_tuner(struct work_struct *work)
338 * When the radio is shutting down we should 355 * When the radio is shutting down we should
339 * immediately cease all link tuning. 356 * immediately cease all link tuning.
340 */ 357 */
341 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 358 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
359 test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
342 return; 360 return;
343 361
344 /* 362 /*
@@ -359,10 +377,11 @@ static void rt2x00link_tuner(struct work_struct *work)
359 qual->rssi = link->avg_rssi.avg; 377 qual->rssi = link->avg_rssi.avg;
360 378
361 /* 379 /*
362 * Only perform the link tuning when Link tuning 380 * Check if link tuning is supported by the hardware, some hardware
363 * has been enabled (This could have been disabled from the EEPROM). 381 * do not support link tuning at all, while other devices can disable
382 * the feature from the EEPROM.
364 */ 383 */
365 if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags)) 384 if (test_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags))
366 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count); 385 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
367 386
368 /* 387 /*
@@ -388,7 +407,45 @@ static void rt2x00link_tuner(struct work_struct *work)
388 &link->work, LINK_TUNE_INTERVAL); 407 &link->work, LINK_TUNE_INTERVAL);
389} 408}
390 409
410void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
411{
412 struct link *link = &rt2x00dev->link;
413
414 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
415 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
416 return;
417
418 ieee80211_queue_delayed_work(rt2x00dev->hw,
419 &link->watchdog_work, WATCHDOG_INTERVAL);
420}
421
422void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
423{
424 cancel_delayed_work_sync(&rt2x00dev->link.watchdog_work);
425}
426
427static void rt2x00link_watchdog(struct work_struct *work)
428{
429 struct rt2x00_dev *rt2x00dev =
430 container_of(work, struct rt2x00_dev, link.watchdog_work.work);
431 struct link *link = &rt2x00dev->link;
432
433 /*
434 * When the radio is shutting down we should
435 * immediately cease the watchdog monitoring.
436 */
437 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
438 return;
439
440 rt2x00dev->ops->lib->watchdog(rt2x00dev);
441
442 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
443 ieee80211_queue_delayed_work(rt2x00dev->hw,
444 &link->watchdog_work, WATCHDOG_INTERVAL);
445}
446
391void rt2x00link_register(struct rt2x00_dev *rt2x00dev) 447void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
392{ 448{
449 INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
393 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); 450 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
394} 451}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index abbd857ec759..235e037e6509 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -273,16 +273,24 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
273 mutex_init(&intf->beacon_skb_mutex); 273 mutex_init(&intf->beacon_skb_mutex);
274 intf->beacon = entry; 274 intf->beacon = entry;
275 275
276 if (vif->type == NL80211_IFTYPE_AP)
277 memcpy(&intf->bssid, vif->addr, ETH_ALEN);
278 memcpy(&intf->mac, vif->addr, ETH_ALEN);
279
280 /* 276 /*
281 * The MAC adddress must be configured after the device 277 * The MAC adddress must be configured after the device
282 * has been initialized. Otherwise the device can reset 278 * has been initialized. Otherwise the device can reset
283 * the MAC registers. 279 * the MAC registers.
280 * The BSSID address must only be configured in AP mode,
281 * however we should not send an empty BSSID address for
282 * STA interfaces at this time, since this can cause
283 * invalid behavior in the device.
284 */ 284 */
285 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL); 285 memcpy(&intf->mac, vif->addr, ETH_ALEN);
286 if (vif->type == NL80211_IFTYPE_AP) {
287 memcpy(&intf->bssid, vif->addr, ETH_ALEN);
288 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
289 intf->mac, intf->bssid);
290 } else {
291 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
292 intf->mac, NULL);
293 }
286 294
287 /* 295 /*
288 * Some filters depend on the current working mode. We can force 296 * Some filters depend on the current working mode. We can force
@@ -346,9 +354,11 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
346 /* 354 /*
347 * Some configuration parameters (e.g. channel and antenna values) can 355 * Some configuration parameters (e.g. channel and antenna values) can
348 * only be set when the radio is enabled, but do require the RX to 356 * only be set when the radio is enabled, but do require the RX to
349 * be off. 357 * be off. During this period we should keep link tuning enabled,
358 * if for any reason the link tuner must be reset, this will be
359 * handled by rt2x00lib_config().
350 */ 360 */
351 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); 361 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
352 362
353 /* 363 /*
354 * When we've just turned on the radio, we want to reprogram 364 * When we've just turned on the radio, we want to reprogram
@@ -366,7 +376,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
366 rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); 376 rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
367 377
368 /* Turn RX back on */ 378 /* Turn RX back on */
369 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); 379 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
370 380
371 return 0; 381 return 0;
372} 382}
@@ -430,12 +440,36 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
430} 440}
431EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); 441EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
432 442
443static void rt2x00mac_set_tim_iter(void *data, u8 *mac,
444 struct ieee80211_vif *vif)
445{
446 struct rt2x00_intf *intf = vif_to_intf(vif);
447
448 if (vif->type != NL80211_IFTYPE_AP &&
449 vif->type != NL80211_IFTYPE_ADHOC &&
450 vif->type != NL80211_IFTYPE_MESH_POINT &&
451 vif->type != NL80211_IFTYPE_WDS)
452 return;
453
454 spin_lock(&intf->lock);
455 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
456 spin_unlock(&intf->lock);
457}
458
433int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, 459int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
434 bool set) 460 bool set)
435{ 461{
436 struct rt2x00_dev *rt2x00dev = hw->priv; 462 struct rt2x00_dev *rt2x00dev = hw->priv;
437 463
438 rt2x00lib_beacondone(rt2x00dev); 464 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
465 return 0;
466
467 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
468 rt2x00mac_set_tim_iter,
469 rt2x00dev);
470
471 /* queue work to upodate the beacon template */
472 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work);
439 return 0; 473 return 0;
440} 474}
441EXPORT_SYMBOL_GPL(rt2x00mac_set_tim); 475EXPORT_SYMBOL_GPL(rt2x00mac_set_tim);
@@ -539,6 +573,22 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
539EXPORT_SYMBOL_GPL(rt2x00mac_set_key); 573EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
540#endif /* CONFIG_RT2X00_LIB_CRYPTO */ 574#endif /* CONFIG_RT2X00_LIB_CRYPTO */
541 575
576void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw)
577{
578 struct rt2x00_dev *rt2x00dev = hw->priv;
579 __set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
580 rt2x00link_stop_tuner(rt2x00dev);
581}
582EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start);
583
584void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw)
585{
586 struct rt2x00_dev *rt2x00dev = hw->priv;
587 __clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
588 rt2x00link_start_tuner(rt2x00dev);
589}
590EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_complete);
591
542int rt2x00mac_get_stats(struct ieee80211_hw *hw, 592int rt2x00mac_get_stats(struct ieee80211_hw *hw,
543 struct ieee80211_low_level_stats *stats) 593 struct ieee80211_low_level_stats *stats)
544{ 594{
@@ -562,7 +612,6 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
562{ 612{
563 struct rt2x00_dev *rt2x00dev = hw->priv; 613 struct rt2x00_dev *rt2x00dev = hw->priv;
564 struct rt2x00_intf *intf = vif_to_intf(vif); 614 struct rt2x00_intf *intf = vif_to_intf(vif);
565 int update_bssid = 0;
566 615
567 /* 616 /*
568 * mac80211 might be calling this function while we are trying 617 * mac80211 might be calling this function while we are trying
@@ -577,10 +626,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
577 * conf->bssid can be NULL if coming from the internal 626 * conf->bssid can be NULL if coming from the internal
578 * beacon update routine. 627 * beacon update routine.
579 */ 628 */
580 if (changes & BSS_CHANGED_BSSID) { 629 if (changes & BSS_CHANGED_BSSID)
581 update_bssid = 1;
582 memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN); 630 memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN);
583 }
584 631
585 spin_unlock(&intf->lock); 632 spin_unlock(&intf->lock);
586 633
@@ -592,7 +639,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
592 */ 639 */
593 if (changes & BSS_CHANGED_BSSID) 640 if (changes & BSS_CHANGED_BSSID)
594 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL, 641 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
595 update_bssid ? bss_conf->bssid : NULL); 642 bss_conf->bssid);
596 643
597 /* 644 /*
598 * Update the beacon. 645 * Update the beacon.
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index f71eee67f977..19b262e1ddbe 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -60,34 +60,6 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
60} 60}
61EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); 61EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
62 62
63/*
64 * TX data handlers.
65 */
66int rt2x00pci_write_tx_data(struct queue_entry *entry,
67 struct txentry_desc *txdesc)
68{
69 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
70
71 /*
72 * This should not happen, we already checked the entry
73 * was ours. When the hardware disagrees there has been
74 * a queue corruption!
75 */
76 if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
77 ERROR(rt2x00dev,
78 "Corrupt queue %d, accessing entry which is not ours.\n"
79 "Please file bug report to %s.\n",
80 entry->queue->qid, DRV_PROJECT);
81 return -EINVAL;
82 }
83
84 return 0;
85}
86EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
87
88/*
89 * TX/RX data handlers.
90 */
91void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) 63void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
92{ 64{
93 struct data_queue *queue = rt2x00dev->rx; 65 struct data_queue *queue = rt2x00dev->rx;
@@ -181,8 +153,10 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
181 /* 153 /*
182 * Register interrupt handler. 154 * Register interrupt handler.
183 */ 155 */
184 status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler, 156 status = request_threaded_irq(rt2x00dev->irq,
185 IRQF_SHARED, rt2x00dev->name, rt2x00dev); 157 rt2x00dev->ops->lib->irq_handler,
158 rt2x00dev->ops->lib->irq_handler_thread,
159 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
186 if (status) { 160 if (status) {
187 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", 161 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
188 rt2x00dev->irq, status); 162 rt2x00dev->irq, status);
@@ -305,7 +279,10 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
305 rt2x00dev->irq = pci_dev->irq; 279 rt2x00dev->irq = pci_dev->irq;
306 rt2x00dev->name = pci_name(pci_dev); 280 rt2x00dev->name = pci_name(pci_dev);
307 281
308 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); 282 if (pci_dev->is_pcie)
283 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
284 else
285 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
309 286
310 retval = rt2x00pci_alloc_reg(rt2x00dev); 287 retval = rt2x00pci_alloc_reg(rt2x00dev);
311 if (retval) 288 if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 51bcef3839ce..b854d62ff99b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -86,16 +86,6 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
86 u32 *reg); 86 u32 *reg);
87 87
88/** 88/**
89 * rt2x00pci_write_tx_data - Initialize data for TX operation
90 * @entry: The entry where the frame is located
91 *
92 * This function will initialize the DMA and skb descriptor
93 * to prepare the entry for the actual TX operation.
94 */
95int rt2x00pci_write_tx_data(struct queue_entry *entry,
96 struct txentry_desc *txdesc);
97
98/**
99 * struct queue_entry_priv_pci: Per entry PCI specific information 89 * struct queue_entry_priv_pci: Per entry PCI specific information
100 * 90 *
101 * @desc: Pointer to device descriptor 91 * @desc: Pointer to device descriptor
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 20dbdd6fb904..a3401d301058 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -100,21 +100,8 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
100{ 100{
101 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 101 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
102 102
103 /*
104 * If device has requested headroom, we should make sure that
105 * is also mapped to the DMA so it can be used for transfering
106 * additional descriptor information to the hardware.
107 */
108 skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
109
110 skbdesc->skb_dma = 103 skbdesc->skb_dma =
111 dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE); 104 dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
112
113 /*
114 * Restore data pointer to original location again.
115 */
116 skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
117
118 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; 105 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
119} 106}
120EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); 107EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -130,16 +117,12 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
130 } 117 }
131 118
132 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { 119 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
133 /* 120 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
134 * Add headroom to the skb length, it has been removed
135 * by the driver, but it was actually mapped to DMA.
136 */
137 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
138 skb->len + rt2x00dev->ops->extra_tx_headroom,
139 DMA_TO_DEVICE); 121 DMA_TO_DEVICE);
140 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; 122 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
141 } 123 }
142} 124}
125EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
143 126
144void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) 127void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
145{ 128{
@@ -370,13 +353,18 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
370 /* 353 /*
371 * Check if more fragments are pending 354 * Check if more fragments are pending
372 */ 355 */
373 if (ieee80211_has_morefrags(hdr->frame_control) || 356 if (ieee80211_has_morefrags(hdr->frame_control)) {
374 (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
375 __set_bit(ENTRY_TXD_BURST, &txdesc->flags); 357 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
376 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags); 358 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
377 } 359 }
378 360
379 /* 361 /*
362 * Check if more frames (!= fragments) are pending
363 */
364 if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
365 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
366
367 /*
380 * Beacons and probe responses require the tsf timestamp 368 * Beacons and probe responses require the tsf timestamp
381 * to be inserted into the frame, except for a frame that has been injected 369 * to be inserted into the frame, except for a frame that has been injected
382 * through a monitor interface. This latter is needed for testing a 370 * through a monitor interface. This latter is needed for testing a
@@ -416,12 +404,51 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
416 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); 404 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
417} 405}
418 406
407static int rt2x00queue_write_tx_data(struct queue_entry *entry,
408 struct txentry_desc *txdesc)
409{
410 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
411
412 /*
413 * This should not happen, we already checked the entry
414 * was ours. When the hardware disagrees there has been
415 * a queue corruption!
416 */
417 if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
418 rt2x00dev->ops->lib->get_entry_state(entry))) {
419 ERROR(rt2x00dev,
420 "Corrupt queue %d, accessing entry which is not ours.\n"
421 "Please file bug report to %s.\n",
422 entry->queue->qid, DRV_PROJECT);
423 return -EINVAL;
424 }
425
426 /*
427 * Add the requested extra tx headroom in front of the skb.
428 */
429 skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
430 memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
431
432 /*
433 * Call the driver's write_tx_data function, if it exists.
434 */
435 if (rt2x00dev->ops->lib->write_tx_data)
436 rt2x00dev->ops->lib->write_tx_data(entry, txdesc);
437
438 /*
439 * Map the skb to DMA.
440 */
441 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
442 rt2x00queue_map_txskb(rt2x00dev, entry->skb);
443
444 return 0;
445}
446
419static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, 447static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
420 struct txentry_desc *txdesc) 448 struct txentry_desc *txdesc)
421{ 449{
422 struct data_queue *queue = entry->queue; 450 struct data_queue *queue = entry->queue;
423 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; 451 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
424 enum rt2x00_dump_type dump_type;
425 452
426 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); 453 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
427 454
@@ -429,9 +456,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
429 * All processing on the frame has been completed, this means 456 * All processing on the frame has been completed, this means
430 * it is now ready to be dumped to userspace through debugfs. 457 * it is now ready to be dumped to userspace through debugfs.
431 */ 458 */
432 dump_type = (txdesc->queue == QID_BEACON) ? 459 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
433 DUMP_FRAME_BEACON : DUMP_FRAME_TX;
434 rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb);
435} 460}
436 461
437static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, 462static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
@@ -530,16 +555,12 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
530 * call failed. Since we always return NETDEV_TX_OK to mac80211, 555 * call failed. Since we always return NETDEV_TX_OK to mac80211,
531 * this frame will simply be dropped. 556 * this frame will simply be dropped.
532 */ 557 */
533 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry, 558 if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
534 &txdesc))) {
535 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 559 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
536 entry->skb = NULL; 560 entry->skb = NULL;
537 return -EIO; 561 return -EIO;
538 } 562 }
539 563
540 if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
541 rt2x00queue_map_txskb(queue->rt2x00dev, skb);
542
543 set_bit(ENTRY_DATA_PENDING, &entry->flags); 564 set_bit(ENTRY_DATA_PENDING, &entry->flags);
544 565
545 rt2x00queue_index_inc(queue, Q_INDEX); 566 rt2x00queue_index_inc(queue, Q_INDEX);
@@ -595,11 +616,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
595 skbdesc->entry = intf->beacon; 616 skbdesc->entry = intf->beacon;
596 617
597 /* 618 /*
598 * Write TX descriptor into reserved room in front of the beacon.
599 */
600 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
601
602 /*
603 * Send beacon to hardware and enable beacon genaration.. 619 * Send beacon to hardware and enable beacon genaration..
604 */ 620 */
605 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); 621 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
@@ -672,9 +688,11 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
672 688
673 if (index == Q_INDEX) { 689 if (index == Q_INDEX) {
674 queue->length++; 690 queue->length++;
691 queue->last_index = jiffies;
675 } else if (index == Q_INDEX_DONE) { 692 } else if (index == Q_INDEX_DONE) {
676 queue->length--; 693 queue->length--;
677 queue->count++; 694 queue->count++;
695 queue->last_index_done = jiffies;
678 } 696 }
679 697
680 spin_unlock_irqrestore(&queue->lock, irqflags); 698 spin_unlock_irqrestore(&queue->lock, irqflags);
@@ -688,6 +706,8 @@ static void rt2x00queue_reset(struct data_queue *queue)
688 706
689 queue->count = 0; 707 queue->count = 0;
690 queue->length = 0; 708 queue->length = 0;
709 queue->last_index = jiffies;
710 queue->last_index_done = jiffies;
691 memset(queue->index, 0, sizeof(queue->index)); 711 memset(queue->index, 0, sizeof(queue->index));
692 712
693 spin_unlock_irqrestore(&queue->lock, irqflags); 713 spin_unlock_irqrestore(&queue->lock, irqflags);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index f79170849add..191e7775a9c0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -213,9 +213,16 @@ struct rxdone_entry_desc {
213/** 213/**
214 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc 214 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
215 * 215 *
216 * Every txdone report has to contain the basic result of the
217 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
218 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
219 * conjunction with all of these flags but should only be set
220 * if retires > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
221 * in conjunction with &TXDONE_FAILURE.
222 *
216 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission. 223 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
217 * @TXDONE_SUCCESS: Frame was successfully send 224 * @TXDONE_SUCCESS: Frame was successfully send
218 * @TXDONE_FALLBACK: Frame was successfully send using a fallback rate. 225 * @TXDONE_FALLBACK: Hardware used fallback rates for retries
219 * @TXDONE_FAILURE: Frame was not successfully send 226 * @TXDONE_FAILURE: Frame was not successfully send
220 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the 227 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
221 * frame transmission failed due to excessive retries. 228 * frame transmission failed due to excessive retries.
@@ -439,6 +446,8 @@ struct data_queue {
439 enum data_queue_qid qid; 446 enum data_queue_qid qid;
440 447
441 spinlock_t lock; 448 spinlock_t lock;
449 unsigned long last_index;
450 unsigned long last_index_done;
442 unsigned int count; 451 unsigned int count;
443 unsigned short limit; 452 unsigned short limit;
444 unsigned short threshold; 453 unsigned short threshold;
@@ -592,6 +601,15 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
592} 601}
593 602
594/** 603/**
604 * rt2x00queue_timeout - Check if a timeout occured for this queue
605 * @queue: Queue to check.
606 */
607static inline int rt2x00queue_timeout(struct data_queue *queue)
608{
609 return time_after(queue->last_index, queue->last_index_done + (HZ / 10));
610}
611
612/**
595 * _rt2x00_desc_read - Read a word from the hardware descriptor. 613 * _rt2x00_desc_read - Read a word from the hardware descriptor.
596 * @desc: Base descriptor address 614 * @desc: Base descriptor address
597 * @word: Word index from where the descriptor should be read. 615 * @word: Word index from where the descriptor should be read.
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index b9fe94873ee0..cef94621cef7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -63,7 +63,8 @@ enum led_mode {
63enum tsf_sync { 63enum tsf_sync {
64 TSF_SYNC_NONE = 0, 64 TSF_SYNC_NONE = 0,
65 TSF_SYNC_INFRA = 1, 65 TSF_SYNC_INFRA = 1,
66 TSF_SYNC_BEACON = 2, 66 TSF_SYNC_ADHOC = 2,
67 TSF_SYNC_AP_NONE = 3,
67}; 68};
68 69
69/* 70/*
@@ -88,6 +89,8 @@ enum dev_state {
88 STATE_RADIO_RX_OFF_LINK, 89 STATE_RADIO_RX_OFF_LINK,
89 STATE_RADIO_IRQ_ON, 90 STATE_RADIO_IRQ_ON,
90 STATE_RADIO_IRQ_OFF, 91 STATE_RADIO_IRQ_OFF,
92 STATE_RADIO_IRQ_ON_ISR,
93 STATE_RADIO_IRQ_OFF_ISR,
91}; 94};
92 95
93/* 96/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index bd1546ba7ad2..ff3a36622d1b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -113,26 +113,6 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
113 const u16 offset, void *buffer, 113 const u16 offset, void *buffer,
114 const u16 buffer_length, const int timeout) 114 const u16 buffer_length, const int timeout)
115{ 115{
116 int status;
117
118 mutex_lock(&rt2x00dev->csr_mutex);
119
120 status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
121 requesttype, offset, buffer,
122 buffer_length, timeout);
123
124 mutex_unlock(&rt2x00dev->csr_mutex);
125
126 return status;
127}
128EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
129
130int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
131 const u8 request, const u8 requesttype,
132 const u16 offset, const void *buffer,
133 const u16 buffer_length,
134 const int timeout)
135{
136 int status = 0; 116 int status = 0;
137 unsigned char *tb; 117 unsigned char *tb;
138 u16 off, len, bsize; 118 u16 off, len, bsize;
@@ -157,7 +137,7 @@ int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
157 137
158 return status; 138 return status;
159} 139}
160EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff); 140EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
161 141
162int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, 142int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
163 const unsigned int offset, 143 const unsigned int offset,
@@ -216,48 +196,28 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
216 rt2x00lib_txdone(entry, &txdesc); 196 rt2x00lib_txdone(entry, &txdesc);
217} 197}
218 198
219int rt2x00usb_write_tx_data(struct queue_entry *entry, 199static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
220 struct txentry_desc *txdesc)
221{ 200{
222 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 201 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
223 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 202 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
224 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 203 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
225 u32 length; 204 u32 length;
226 205
227 /* 206 if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) {
228 * Add the descriptor in front of the skb. 207 /*
229 */ 208 * USB devices cannot blindly pass the skb->len as the
230 skb_push(entry->skb, entry->queue->desc_size); 209 * length of the data to usb_fill_bulk_urb. Pass the skb
231 memset(entry->skb->data, 0, entry->queue->desc_size); 210 * to the driver to determine what the length should be.
232 211 */
233 /* 212 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
234 * USB devices cannot blindly pass the skb->len as the
235 * length of the data to usb_fill_bulk_urb. Pass the skb
236 * to the driver to determine what the length should be.
237 */
238 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
239
240 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
241 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
242 entry->skb->data, length,
243 rt2x00usb_interrupt_txdone, entry);
244
245 /*
246 * Make sure the skb->data pointer points to the frame, not the
247 * descriptor.
248 */
249 skb_pull(entry->skb, entry->queue->desc_size);
250 213
251 return 0; 214 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
252} 215 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
253EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data); 216 entry->skb->data, length,
217 rt2x00usb_interrupt_txdone, entry);
254 218
255static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
256{
257 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
258
259 if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
260 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 219 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
220 }
261} 221}
262 222
263void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 223void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -332,6 +292,56 @@ void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
332} 292}
333EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue); 293EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
334 294
295static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
296{
297 struct queue_entry_priv_usb *entry_priv;
298 unsigned short threshold = queue->threshold;
299
300 WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid);
301
302 /*
303 * Temporarily disable the TX queue, this will force mac80211
304 * to use the other queues until this queue has been restored.
305 *
306 * Set the queue threshold to the queue limit. This prevents the
307 * queue from being enabled during the txdone handler.
308 */
309 queue->threshold = queue->limit;
310 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
311
312 /*
313 * Reset all currently uploaded TX frames.
314 */
315 while (!rt2x00queue_empty(queue)) {
316 entry_priv = rt2x00queue_get_entry(queue, Q_INDEX_DONE)->priv_data;
317 usb_kill_urb(entry_priv->urb);
318
319 /*
320 * We need a short delay here to wait for
321 * the URB to be canceled and invoked the tx_done handler.
322 */
323 udelay(200);
324 }
325
326 /*
327 * The queue has been reset, and mac80211 is allowed to use the
328 * queue again.
329 */
330 queue->threshold = threshold;
331 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
332}
333
334void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
335{
336 struct data_queue *queue;
337
338 tx_queue_for_each(rt2x00dev, queue) {
339 if (rt2x00queue_timeout(queue))
340 rt2x00usb_watchdog_reset_tx(queue);
341 }
342}
343EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
344
335/* 345/*
336 * RX data handlers. 346 * RX data handlers.
337 */ 347 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 621d0f829251..d3d3ddc40875 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -167,25 +167,6 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
167 const u16 buffer_length, const int timeout); 167 const u16 buffer_length, const int timeout);
168 168
169/** 169/**
170 * rt2x00usb_vendor_request_large_buff - Send register command to device (buffered)
171 * @rt2x00dev: Pointer to &struct rt2x00_dev
172 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
173 * @requesttype: Request type &USB_VENDOR_REQUEST_*
174 * @offset: Register start offset to perform action on
175 * @buffer: Buffer where information will be read/written to by device
176 * @buffer_length: Size of &buffer
177 * @timeout: Operation timeout
178 *
179 * This function is used to transfer register data in blocks larger
180 * then CSR_CACHE_SIZE. Use for firmware upload, keys and beacons.
181 */
182int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
183 const u8 request, const u8 requesttype,
184 const u16 offset, const void *buffer,
185 const u16 buffer_length,
186 const int timeout);
187
188/**
189 * rt2x00usb_vendor_request_sw - Send single register command to device 170 * rt2x00usb_vendor_request_sw - Send single register command to device
190 * @rt2x00dev: Pointer to &struct rt2x00_dev 171 * @rt2x00dev: Pointer to &struct rt2x00_dev
191 * @request: USB vendor command (See &enum rt2x00usb_vendor_request) 172 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
@@ -370,16 +351,6 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
370void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev); 351void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
371 352
372/** 353/**
373 * rt2x00usb_write_tx_data - Initialize URB for TX operation
374 * @entry: The entry where the frame is located
375 *
376 * This function will initialize the URB and skb descriptor
377 * to prepare the entry for the actual TX operation.
378 */
379int rt2x00usb_write_tx_data(struct queue_entry *entry,
380 struct txentry_desc *txdesc);
381
382/**
383 * struct queue_entry_priv_usb: Per entry USB specific information 354 * struct queue_entry_priv_usb: Per entry USB specific information
384 * 355 *
385 * @urb: Urb structure used for device communication. 356 * @urb: Urb structure used for device communication.
@@ -428,6 +399,16 @@ void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
428void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 399void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
429 const enum data_queue_qid qid); 400 const enum data_queue_qid qid);
430 401
402/**
403 * rt2x00usb_watchdog - Watchdog for USB communication
404 * @rt2x00dev: Pointer to &struct rt2x00_dev
405 *
406 * Check the health of the USB communication and determine
407 * if timeouts have occured. If this is the case, this function
408 * will reset all communication to restore functionality again.
409 */
410void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev);
411
431/* 412/*
432 * Device initialization handlers. 413 * Device initialization handlers.
433 */ 414 */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 6a74baf4e934..e539c6cb636f 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -931,6 +931,9 @@ static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
931 u32 reg; 931 u32 reg;
932 932
933 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg); 933 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
934 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
935 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
936 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
934 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, 937 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
935 libconf->conf->long_frame_max_tx_count); 938 libconf->conf->long_frame_max_tx_count);
936 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, 939 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -1619,7 +1622,8 @@ static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
1619static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1622static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1620 enum dev_state state) 1623 enum dev_state state)
1621{ 1624{
1622 int mask = (state == STATE_RADIO_IRQ_OFF); 1625 int mask = (state == STATE_RADIO_IRQ_OFF) ||
1626 (state == STATE_RADIO_IRQ_OFF_ISR);
1623 u32 reg; 1627 u32 reg;
1624 1628
1625 /* 1629 /*
@@ -1736,7 +1740,9 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1736 rt61pci_toggle_rx(rt2x00dev, state); 1740 rt61pci_toggle_rx(rt2x00dev, state);
1737 break; 1741 break;
1738 case STATE_RADIO_IRQ_ON: 1742 case STATE_RADIO_IRQ_ON:
1743 case STATE_RADIO_IRQ_ON_ISR:
1739 case STATE_RADIO_IRQ_OFF: 1744 case STATE_RADIO_IRQ_OFF:
1745 case STATE_RADIO_IRQ_OFF_ISR:
1740 rt61pci_toggle_irq(rt2x00dev, state); 1746 rt61pci_toggle_irq(rt2x00dev, state);
1741 break; 1747 break;
1742 case STATE_DEEP_SLEEP: 1748 case STATE_DEEP_SLEEP:
@@ -1874,6 +1880,16 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1874 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1880 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1875 1881
1876 /* 1882 /*
1883 * Write the TX descriptor for the beacon.
1884 */
1885 rt61pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
1886
1887 /*
1888 * Dump beacon to userspace through debugfs.
1889 */
1890 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
1891
1892 /*
1877 * Write entire beacon with descriptor to register. 1893 * Write entire beacon with descriptor to register.
1878 */ 1894 */
1879 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1895 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -2039,29 +2055,24 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2039 struct txdone_entry_desc txdesc; 2055 struct txdone_entry_desc txdesc;
2040 u32 word; 2056 u32 word;
2041 u32 reg; 2057 u32 reg;
2042 u32 old_reg;
2043 int type; 2058 int type;
2044 int index; 2059 int index;
2060 int i;
2045 2061
2046 /* 2062 /*
2047 * During each loop we will compare the freshly read 2063 * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
2048 * STA_CSR4 register value with the value read from 2064 * at most X times and also stop processing once the TX_STA_FIFO_VALID
2049 * the previous loop. If the 2 values are equal then 2065 * flag is not set anymore.
2050 * we should stop processing because the chance is 2066 *
2051 * quite big that the device has been unplugged and 2067 * The legacy drivers use X=TX_RING_SIZE but state in a comment
2052 * we risk going into an endless loop. 2068 * that the TX_STA_FIFO stack has a size of 16. We stick to our
2069 * tx ring size for now.
2053 */ 2070 */
2054 old_reg = 0; 2071 for (i = 0; i < TX_ENTRIES; i++) {
2055
2056 while (1) {
2057 rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg); 2072 rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
2058 if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) 2073 if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
2059 break; 2074 break;
2060 2075
2061 if (old_reg == reg)
2062 break;
2063 old_reg = reg;
2064
2065 /* 2076 /*
2066 * Skip this entry when it contains an invalid 2077 * Skip this entry when it contains an invalid
2067 * queue identication number. 2078 * queue identication number.
@@ -2120,6 +2131,13 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2120 } 2131 }
2121 txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); 2132 txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
2122 2133
2134 /*
2135 * the frame was retried at least once
2136 * -> hw used fallback rates
2137 */
2138 if (txdesc.retry)
2139 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
2140
2123 rt2x00lib_txdone(entry, &txdesc); 2141 rt2x00lib_txdone(entry, &txdesc);
2124 } 2142 }
2125} 2143}
@@ -2132,27 +2150,11 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2132 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 2150 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2133} 2151}
2134 2152
2135static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) 2153static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance)
2136{ 2154{
2137 struct rt2x00_dev *rt2x00dev = dev_instance; 2155 struct rt2x00_dev *rt2x00dev = dev_instance;
2138 u32 reg_mcu; 2156 u32 reg = rt2x00dev->irqvalue[0];
2139 u32 reg; 2157 u32 reg_mcu = rt2x00dev->irqvalue[1];
2140
2141 /*
2142 * Get the interrupt sources & saved to local variable.
2143 * Write register value back to clear pending interrupts.
2144 */
2145 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg_mcu);
2146 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu);
2147
2148 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
2149 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
2150
2151 if (!reg && !reg_mcu)
2152 return IRQ_NONE;
2153
2154 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
2155 return IRQ_HANDLED;
2156 2158
2157 /* 2159 /*
2158 * Handle interrupts, walk through all bits 2160 * Handle interrupts, walk through all bits
@@ -2185,9 +2187,51 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2185 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP)) 2187 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
2186 rt61pci_wakeup(rt2x00dev); 2188 rt61pci_wakeup(rt2x00dev);
2187 2189
2190 /*
2191 * 5 - Beacon done interrupt.
2192 */
2193 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
2194 rt2x00lib_beacondone(rt2x00dev);
2195
2196 /* Enable interrupts again. */
2197 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
2198 STATE_RADIO_IRQ_ON_ISR);
2188 return IRQ_HANDLED; 2199 return IRQ_HANDLED;
2189} 2200}
2190 2201
2202
2203static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2204{
2205 struct rt2x00_dev *rt2x00dev = dev_instance;
2206 u32 reg_mcu;
2207 u32 reg;
2208
2209 /*
2210 * Get the interrupt sources & saved to local variable.
2211 * Write register value back to clear pending interrupts.
2212 */
2213 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg_mcu);
2214 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu);
2215
2216 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
2217 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
2218
2219 if (!reg && !reg_mcu)
2220 return IRQ_NONE;
2221
2222 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
2223 return IRQ_HANDLED;
2224
2225 /* Store irqvalues for use in the interrupt thread. */
2226 rt2x00dev->irqvalue[0] = reg;
2227 rt2x00dev->irqvalue[1] = reg_mcu;
2228
2229 /* Disable interrupts, will be enabled again in the interrupt thread. */
2230 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
2231 STATE_RADIO_IRQ_OFF_ISR);
2232 return IRQ_WAKE_THREAD;
2233}
2234
2191/* 2235/*
2192 * Device probe functions. 2236 * Device probe functions.
2193 */ 2237 */
@@ -2577,6 +2621,18 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2577 EEPROM_MAC_ADDR_0)); 2621 EEPROM_MAC_ADDR_0));
2578 2622
2579 /* 2623 /*
2624 * As rt61 has a global fallback table we cannot specify
2625 * more then one tx rate per frame but since the hw will
2626 * try several rates (based on the fallback table) we should
2627 * still initialize max_rates to the maximum number of rates
2628 * we are going to try. Otherwise mac80211 will truncate our
2629 * reported tx rates and the rc algortihm will end up with
2630 * incorrect data.
2631 */
2632 rt2x00dev->hw->max_rates = 7;
2633 rt2x00dev->hw->max_rate_tries = 1;
2634
2635 /*
2580 * Initialize hw_mode information. 2636 * Initialize hw_mode information.
2581 */ 2637 */
2582 spec->supported_bands = SUPPORT_BAND_2GHZ; 2638 spec->supported_bands = SUPPORT_BAND_2GHZ;
@@ -2657,6 +2713,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2657 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 2713 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
2658 if (!modparam_nohwcrypt) 2714 if (!modparam_nohwcrypt)
2659 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 2715 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
2716 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
2660 2717
2661 /* 2718 /*
2662 * Set the rssi offset. 2719 * Set the rssi offset.
@@ -2748,8 +2805,9 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2748 .remove_interface = rt2x00mac_remove_interface, 2805 .remove_interface = rt2x00mac_remove_interface,
2749 .config = rt2x00mac_config, 2806 .config = rt2x00mac_config,
2750 .configure_filter = rt2x00mac_configure_filter, 2807 .configure_filter = rt2x00mac_configure_filter,
2751 .set_tim = rt2x00mac_set_tim,
2752 .set_key = rt2x00mac_set_key, 2808 .set_key = rt2x00mac_set_key,
2809 .sw_scan_start = rt2x00mac_sw_scan_start,
2810 .sw_scan_complete = rt2x00mac_sw_scan_complete,
2753 .get_stats = rt2x00mac_get_stats, 2811 .get_stats = rt2x00mac_get_stats,
2754 .bss_info_changed = rt2x00mac_bss_info_changed, 2812 .bss_info_changed = rt2x00mac_bss_info_changed,
2755 .conf_tx = rt61pci_conf_tx, 2813 .conf_tx = rt61pci_conf_tx,
@@ -2759,6 +2817,7 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2759 2817
2760static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { 2818static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2761 .irq_handler = rt61pci_interrupt, 2819 .irq_handler = rt61pci_interrupt,
2820 .irq_handler_thread = rt61pci_interrupt_thread,
2762 .probe_hw = rt61pci_probe_hw, 2821 .probe_hw = rt61pci_probe_hw,
2763 .get_firmware_name = rt61pci_get_firmware_name, 2822 .get_firmware_name = rt61pci_get_firmware_name,
2764 .check_firmware = rt61pci_check_firmware, 2823 .check_firmware = rt61pci_check_firmware,
@@ -2773,7 +2832,6 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2773 .reset_tuner = rt61pci_reset_tuner, 2832 .reset_tuner = rt61pci_reset_tuner,
2774 .link_tuner = rt61pci_link_tuner, 2833 .link_tuner = rt61pci_link_tuner,
2775 .write_tx_desc = rt61pci_write_tx_desc, 2834 .write_tx_desc = rt61pci_write_tx_desc,
2776 .write_tx_data = rt2x00pci_write_tx_data,
2777 .write_beacon = rt61pci_write_beacon, 2835 .write_beacon = rt61pci_write_beacon,
2778 .kick_tx_queue = rt61pci_kick_tx_queue, 2836 .kick_tx_queue = rt61pci_kick_tx_queue,
2779 .kill_tx_queue = rt61pci_kill_tx_queue, 2837 .kill_tx_queue = rt61pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index df80f1af22a4..e2e728ab0b2e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -153,13 +153,13 @@ struct hw_key_entry {
153 u8 key[16]; 153 u8 key[16];
154 u8 tx_mic[8]; 154 u8 tx_mic[8];
155 u8 rx_mic[8]; 155 u8 rx_mic[8];
156} __attribute__ ((packed)); 156} __packed;
157 157
158struct hw_pairwise_ta_entry { 158struct hw_pairwise_ta_entry {
159 u8 address[6]; 159 u8 address[6];
160 u8 cipher; 160 u8 cipher;
161 u8 reserved; 161 u8 reserved;
162} __attribute__ ((packed)); 162} __packed;
163 163
164/* 164/*
165 * Other on-chip shared memory space. 165 * Other on-chip shared memory space.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 6e0d82efe924..aa9de18fd410 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -270,7 +270,6 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
270{ 270{
271 struct hw_key_entry key_entry; 271 struct hw_key_entry key_entry;
272 struct rt2x00_field32 field; 272 struct rt2x00_field32 field;
273 int timeout;
274 u32 mask; 273 u32 mask;
275 u32 reg; 274 u32 reg;
276 275
@@ -306,12 +305,8 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
306 sizeof(key_entry.rx_mic)); 305 sizeof(key_entry.rx_mic));
307 306
308 reg = SHARED_KEY_ENTRY(key->hw_key_idx); 307 reg = SHARED_KEY_ENTRY(key->hw_key_idx);
309 timeout = REGISTER_TIMEOUT32(sizeof(key_entry)); 308 rt2x00usb_register_multiwrite(rt2x00dev, reg,
310 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, 309 &key_entry, sizeof(key_entry));
311 USB_VENDOR_REQUEST_OUT, reg,
312 &key_entry,
313 sizeof(key_entry),
314 timeout);
315 310
316 /* 311 /*
317 * The cipher types are stored over 2 registers. 312 * The cipher types are stored over 2 registers.
@@ -372,7 +367,6 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
372{ 367{
373 struct hw_pairwise_ta_entry addr_entry; 368 struct hw_pairwise_ta_entry addr_entry;
374 struct hw_key_entry key_entry; 369 struct hw_key_entry key_entry;
375 int timeout;
376 u32 mask; 370 u32 mask;
377 u32 reg; 371 u32 reg;
378 372
@@ -407,17 +401,11 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
407 sizeof(key_entry.rx_mic)); 401 sizeof(key_entry.rx_mic));
408 402
409 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); 403 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
410 timeout = REGISTER_TIMEOUT32(sizeof(key_entry)); 404 rt2x00usb_register_multiwrite(rt2x00dev, reg,
411 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, 405 &key_entry, sizeof(key_entry));
412 USB_VENDOR_REQUEST_OUT, reg,
413 &key_entry,
414 sizeof(key_entry),
415 timeout);
416 406
417 /* 407 /*
418 * Send the address and cipher type to the hardware register. 408 * Send the address and cipher type to the hardware register.
419 * This data fits within the CSR cache size, so we can use
420 * rt2x00usb_register_multiwrite() directly.
421 */ 409 */
422 memset(&addr_entry, 0, sizeof(addr_entry)); 410 memset(&addr_entry, 0, sizeof(addr_entry));
423 memcpy(&addr_entry, crypto->address, ETH_ALEN); 411 memcpy(&addr_entry, crypto->address, ETH_ALEN);
@@ -828,6 +816,9 @@ static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
828 u32 reg; 816 u32 reg;
829 817
830 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg); 818 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
819 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
820 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
821 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
831 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, 822 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
832 libconf->conf->long_frame_max_tx_count); 823 libconf->conf->long_frame_max_tx_count);
833 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, 824 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -1092,11 +1083,7 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1092 /* 1083 /*
1093 * Write firmware to device. 1084 * Write firmware to device.
1094 */ 1085 */
1095 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, 1086 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len);
1096 USB_VENDOR_REQUEST_OUT,
1097 FIRMWARE_IMAGE_BASE,
1098 data, len,
1099 REGISTER_TIMEOUT32(len));
1100 1087
1101 /* 1088 /*
1102 * Send firmware request to device to load firmware, 1089 * Send firmware request to device to load firmware,
@@ -1413,7 +1400,9 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1413 rt73usb_toggle_rx(rt2x00dev, state); 1400 rt73usb_toggle_rx(rt2x00dev, state);
1414 break; 1401 break;
1415 case STATE_RADIO_IRQ_ON: 1402 case STATE_RADIO_IRQ_ON:
1403 case STATE_RADIO_IRQ_ON_ISR:
1416 case STATE_RADIO_IRQ_OFF: 1404 case STATE_RADIO_IRQ_OFF:
1405 case STATE_RADIO_IRQ_OFF_ISR:
1417 /* No support, but no error either */ 1406 /* No support, but no error either */
1418 break; 1407 break;
1419 case STATE_DEEP_SLEEP: 1408 case STATE_DEEP_SLEEP:
@@ -1442,7 +1431,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1442 struct txentry_desc *txdesc) 1431 struct txentry_desc *txdesc)
1443{ 1432{
1444 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1433 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1445 __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE); 1434 __le32 *txd = (__le32 *) skb->data;
1446 u32 word; 1435 u32 word;
1447 1436
1448 /* 1437 /*
@@ -1505,6 +1494,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1505 /* 1494 /*
1506 * Register descriptor details in skb frame descriptor. 1495 * Register descriptor details in skb frame descriptor.
1507 */ 1496 */
1497 skbdesc->flags |= SKBDESC_DESC_IN_SKB;
1508 skbdesc->desc = txd; 1498 skbdesc->desc = txd;
1509 skbdesc->desc_len = TXD_DESC_SIZE; 1499 skbdesc->desc_len = TXD_DESC_SIZE;
1510} 1500}
@@ -1528,18 +1518,27 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1528 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1518 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1529 1519
1530 /* 1520 /*
1531 * Take the descriptor in front of the skb into account. 1521 * Add space for the descriptor in front of the skb.
1532 */ 1522 */
1533 skb_push(entry->skb, TXD_DESC_SIZE); 1523 skb_push(entry->skb, TXD_DESC_SIZE);
1524 memset(entry->skb->data, 0, TXD_DESC_SIZE);
1525
1526 /*
1527 * Write the TX descriptor for the beacon.
1528 */
1529 rt73usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
1530
1531 /*
1532 * Dump beacon to userspace through debugfs.
1533 */
1534 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
1534 1535
1535 /* 1536 /*
1536 * Write entire beacon with descriptor to register. 1537 * Write entire beacon with descriptor to register.
1537 */ 1538 */
1538 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1539 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1539 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, 1540 rt2x00usb_register_multiwrite(rt2x00dev, beacon_base,
1540 USB_VENDOR_REQUEST_OUT, beacon_base, 1541 entry->skb->data, entry->skb->len);
1541 entry->skb->data, entry->skb->len,
1542 REGISTER_TIMEOUT32(entry->skb->len));
1543 1542
1544 /* 1543 /*
1545 * Enable beaconing again. 1544 * Enable beaconing again.
@@ -2138,6 +2137,8 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2138 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2137 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
2139 if (!modparam_nohwcrypt) 2138 if (!modparam_nohwcrypt)
2140 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 2139 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
2140 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
2141 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
2141 2142
2142 /* 2143 /*
2143 * Set the rssi offset. 2144 * Set the rssi offset.
@@ -2231,6 +2232,8 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2231 .configure_filter = rt2x00mac_configure_filter, 2232 .configure_filter = rt2x00mac_configure_filter,
2232 .set_tim = rt2x00mac_set_tim, 2233 .set_tim = rt2x00mac_set_tim,
2233 .set_key = rt2x00mac_set_key, 2234 .set_key = rt2x00mac_set_key,
2235 .sw_scan_start = rt2x00mac_sw_scan_start,
2236 .sw_scan_complete = rt2x00mac_sw_scan_complete,
2234 .get_stats = rt2x00mac_get_stats, 2237 .get_stats = rt2x00mac_get_stats,
2235 .bss_info_changed = rt2x00mac_bss_info_changed, 2238 .bss_info_changed = rt2x00mac_bss_info_changed,
2236 .conf_tx = rt73usb_conf_tx, 2239 .conf_tx = rt73usb_conf_tx,
@@ -2251,8 +2254,8 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2251 .link_stats = rt73usb_link_stats, 2254 .link_stats = rt73usb_link_stats,
2252 .reset_tuner = rt73usb_reset_tuner, 2255 .reset_tuner = rt73usb_reset_tuner,
2253 .link_tuner = rt73usb_link_tuner, 2256 .link_tuner = rt73usb_link_tuner,
2257 .watchdog = rt2x00usb_watchdog,
2254 .write_tx_desc = rt73usb_write_tx_desc, 2258 .write_tx_desc = rt73usb_write_tx_desc,
2255 .write_tx_data = rt2x00usb_write_tx_data,
2256 .write_beacon = rt73usb_write_beacon, 2259 .write_beacon = rt73usb_write_beacon,
2257 .get_tx_data_len = rt73usb_get_tx_data_len, 2260 .get_tx_data_len = rt73usb_get_tx_data_len,
2258 .kick_tx_queue = rt2x00usb_kick_tx_queue, 2261 .kick_tx_queue = rt2x00usb_kick_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7abe7eb14555..44d5b2bebd39 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -108,13 +108,13 @@ struct hw_key_entry {
108 u8 key[16]; 108 u8 key[16];
109 u8 tx_mic[8]; 109 u8 tx_mic[8];
110 u8 rx_mic[8]; 110 u8 rx_mic[8];
111} __attribute__ ((packed)); 111} __packed;
112 112
113struct hw_pairwise_ta_entry { 113struct hw_pairwise_ta_entry {
114 u8 address[6]; 114 u8 address[6];
115 u8 cipher; 115 u8 cipher;
116 u8 reserved; 116 u8 reserved;
117} __attribute__ ((packed)); 117} __packed;
118 118
119/* 119/*
120 * Since NULL frame won't be that long (256 byte), 120 * Since NULL frame won't be that long (256 byte),
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 4baf0cf0826f..30523314da43 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -36,7 +36,7 @@ struct rtl8180_tx_desc {
36 u8 agc; 36 u8 agc;
37 u8 flags2; 37 u8 flags2;
38 u32 reserved[2]; 38 u32 reserved[2];
39} __attribute__ ((packed)); 39} __packed;
40 40
41struct rtl8180_rx_desc { 41struct rtl8180_rx_desc {
42 __le32 flags; 42 __le32 flags;
@@ -45,7 +45,7 @@ struct rtl8180_rx_desc {
45 __le32 rx_buf; 45 __le32 rx_buf;
46 __le64 tsft; 46 __le64 tsft;
47 }; 47 };
48} __attribute__ ((packed)); 48} __packed;
49 49
50struct rtl8180_tx_ring { 50struct rtl8180_tx_ring {
51 struct rtl8180_tx_desc *desc; 51 struct rtl8180_tx_desc *desc;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 515817de2905..1d8178563d76 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -103,6 +103,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
103{ 103{
104 struct rtl8180_priv *priv = dev->priv; 104 struct rtl8180_priv *priv = dev->priv;
105 unsigned int count = 32; 105 unsigned int count = 32;
106 u8 signal, agc, sq;
106 107
107 while (count--) { 108 while (count--) {
108 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; 109 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -130,10 +131,18 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
130 skb_put(skb, flags & 0xFFF); 131 skb_put(skb, flags & 0xFFF);
131 132
132 rx_status.antenna = (flags2 >> 15) & 1; 133 rx_status.antenna = (flags2 >> 15) & 1;
133 /* TODO: improve signal/rssi reporting */
134 rx_status.signal = (flags2 >> 8) & 0x7F;
135 /* XXX: is this correct? */
136 rx_status.rate_idx = (flags >> 20) & 0xF; 134 rx_status.rate_idx = (flags >> 20) & 0xF;
135 agc = (flags2 >> 17) & 0x7F;
136 if (priv->r8185) {
137 if (rx_status.rate_idx > 3)
138 signal = 90 - clamp_t(u8, agc, 25, 90);
139 else
140 signal = 95 - clamp_t(u8, agc, 30, 95);
141 } else {
142 sq = flags2 & 0xff;
143 signal = priv->rf->calc_rssi(agc, sq);
144 }
145 rx_status.signal = signal;
137 rx_status.freq = dev->conf.channel->center_freq; 146 rx_status.freq = dev->conf.channel->center_freq;
138 rx_status.band = dev->conf.channel->band; 147 rx_status.band = dev->conf.channel->band;
139 rx_status.mactime = le64_to_cpu(entry->tsft); 148 rx_status.mactime = le64_to_cpu(entry->tsft);
@@ -352,7 +361,7 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
352 361
353 /* check success of reset */ 362 /* check success of reset */
354 if (rtl818x_ioread8(priv, &priv->map->CMD) & RTL818X_CMD_RESET) { 363 if (rtl818x_ioread8(priv, &priv->map->CMD) & RTL818X_CMD_RESET) {
355 printk(KERN_ERR "%s: reset timeout!\n", wiphy_name(dev->wiphy)); 364 wiphy_err(dev->wiphy, "reset timeout!\n");
356 return -ETIMEDOUT; 365 return -ETIMEDOUT;
357 } 366 }
358 367
@@ -436,8 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
436 &priv->rx_ring_dma); 445 &priv->rx_ring_dma);
437 446
438 if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { 447 if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
439 printk(KERN_ERR "%s: Cannot allocate RX ring\n", 448 wiphy_err(dev->wiphy, "cannot allocate rx ring\n");
440 wiphy_name(dev->wiphy));
441 return -ENOMEM; 449 return -ENOMEM;
442 } 450 }
443 451
@@ -494,8 +502,8 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
494 502
495 ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); 503 ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
496 if (!ring || (unsigned long)ring & 0xFF) { 504 if (!ring || (unsigned long)ring & 0xFF) {
497 printk(KERN_ERR "%s: Cannot allocate TX ring (prio = %d)\n", 505 wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n",
498 wiphy_name(dev->wiphy), prio); 506 prio);
499 return -ENOMEM; 507 return -ENOMEM;
500 } 508 }
501 509
@@ -560,8 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
560 ret = request_irq(priv->pdev->irq, rtl8180_interrupt, 568 ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
561 IRQF_SHARED, KBUILD_MODNAME, dev); 569 IRQF_SHARED, KBUILD_MODNAME, dev);
562 if (ret) { 570 if (ret) {
563 printk(KERN_ERR "%s: failed to register IRQ handler\n", 571 wiphy_err(dev->wiphy, "failed to register irq handler\n");
564 wiphy_name(dev->wiphy));
565 goto err_free_rings; 572 goto err_free_rings;
566 } 573 }
567 574
@@ -671,7 +678,7 @@ static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
671 (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32; 678 (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
672} 679}
673 680
674void rtl8180_beacon_work(struct work_struct *work) 681static void rtl8180_beacon_work(struct work_struct *work)
675{ 682{
676 struct rtl8180_vif *vif_priv = 683 struct rtl8180_vif *vif_priv =
677 container_of(work, struct rtl8180_vif, beacon_work.work); 684 container_of(work, struct rtl8180_vif, beacon_work.work);
@@ -1098,9 +1105,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
1098 goto err_iounmap; 1105 goto err_iounmap;
1099 } 1106 }
1100 1107
1101 printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n", 1108 wiphy_info(dev->wiphy, "hwaddr %pm, %s + %s\n",
1102 wiphy_name(dev->wiphy), mac_addr, 1109 mac_addr, chip_name, priv->rf->name);
1103 chip_name, priv->rf->name);
1104 1110
1105 return 0; 1111 return 0;
1106 1112
diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c
index 947ee55f18b2..5cab9dfa8c07 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c
@@ -69,6 +69,15 @@ static void grf5101_write_phy_antenna(struct ieee80211_hw *dev, short chan)
69 rtl8180_write_phy(dev, 0x10, ant); 69 rtl8180_write_phy(dev, 0x10, ant);
70} 70}
71 71
72static u8 grf5101_rf_calc_rssi(u8 agc, u8 sq)
73{
74 if (agc > 60)
75 return 65;
76
77 /* TODO(?): just return agc (or agc + 5) to avoid mult / div */
78 return 65 * agc / 60;
79}
80
72static void grf5101_rf_set_channel(struct ieee80211_hw *dev, 81static void grf5101_rf_set_channel(struct ieee80211_hw *dev,
73 struct ieee80211_conf *conf) 82 struct ieee80211_conf *conf)
74{ 83{
@@ -176,5 +185,6 @@ const struct rtl818x_rf_ops grf5101_rf_ops = {
176 .name = "GCT", 185 .name = "GCT",
177 .init = grf5101_rf_init, 186 .init = grf5101_rf_init,
178 .stop = grf5101_rf_stop, 187 .stop = grf5101_rf_stop,
179 .set_chan = grf5101_rf_set_channel 188 .set_chan = grf5101_rf_set_channel,
189 .calc_rssi = grf5101_rf_calc_rssi,
180}; 190};
diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.c b/drivers/net/wireless/rtl818x/rtl8180_max2820.c
index 6c825fd7f3b6..16c4655181c0 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_max2820.c
@@ -74,6 +74,22 @@ static void max2820_write_phy_antenna(struct ieee80211_hw *dev, short chan)
74 rtl8180_write_phy(dev, 0x10, ant); 74 rtl8180_write_phy(dev, 0x10, ant);
75} 75}
76 76
77static u8 max2820_rf_calc_rssi(u8 agc, u8 sq)
78{
79 bool odd;
80
81 odd = !!(agc & 1);
82
83 agc >>= 1;
84 if (odd)
85 agc += 76;
86 else
87 agc += 66;
88
89 /* TODO: change addends above to avoid mult / div below */
90 return 65 * agc / 100;
91}
92
77static void max2820_rf_set_channel(struct ieee80211_hw *dev, 93static void max2820_rf_set_channel(struct ieee80211_hw *dev,
78 struct ieee80211_conf *conf) 94 struct ieee80211_conf *conf)
79{ 95{
@@ -148,5 +164,6 @@ const struct rtl818x_rf_ops max2820_rf_ops = {
148 .name = "Maxim", 164 .name = "Maxim",
149 .init = max2820_rf_init, 165 .init = max2820_rf_init,
150 .stop = max2820_rf_stop, 166 .stop = max2820_rf_stop,
151 .set_chan = max2820_rf_set_channel 167 .set_chan = max2820_rf_set_channel,
168 .calc_rssi = max2820_rf_calc_rssi,
152}; 169};
diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c
index 4d2be0d9672b..69e4d4745dae 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c
@@ -50,7 +50,10 @@ static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
50 udelay(10); 50 udelay(10);
51 51
52 for (i = 15; i >= 0; i--) { 52 for (i = 15; i >= 0; i--) {
53 u16 reg = reg80 | !!(bangdata & (1 << i)); 53 u16 reg = reg80;
54
55 if (bangdata & (1 << i))
56 reg |= 1;
54 57
55 if (i & 1) 58 if (i & 1)
56 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); 59 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c
index cea4e0ccb92d..d064fcc5ec08 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c
@@ -76,6 +76,31 @@ static void sa2400_write_phy_antenna(struct ieee80211_hw *dev, short chan)
76 76
77} 77}
78 78
79static u8 sa2400_rf_rssi_map[] = {
80 0x64, 0x64, 0x63, 0x62, 0x61, 0x60, 0x5f, 0x5e,
81 0x5d, 0x5c, 0x5b, 0x5a, 0x57, 0x54, 0x52, 0x50,
82 0x4e, 0x4c, 0x4a, 0x48, 0x46, 0x44, 0x41, 0x3f,
83 0x3c, 0x3a, 0x37, 0x36, 0x36, 0x1c, 0x1c, 0x1b,
84 0x1b, 0x1a, 0x1a, 0x19, 0x19, 0x18, 0x18, 0x17,
85 0x17, 0x16, 0x16, 0x15, 0x15, 0x14, 0x14, 0x13,
86 0x13, 0x12, 0x12, 0x11, 0x11, 0x10, 0x10, 0x0f,
87 0x0f, 0x0e, 0x0e, 0x0d, 0x0d, 0x0c, 0x0c, 0x0b,
88 0x0b, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x07,
89 0x07, 0x06, 0x06, 0x05, 0x04, 0x03, 0x02,
90};
91
92static u8 sa2400_rf_calc_rssi(u8 agc, u8 sq)
93{
94 if (sq == 0x80)
95 return 1;
96
97 if (sq > 78)
98 return 32;
99
100 /* TODO: recalc sa2400_rf_rssi_map to avoid mult / div */
101 return 65 * sa2400_rf_rssi_map[sq] / 100;
102}
103
79static void sa2400_rf_set_channel(struct ieee80211_hw *dev, 104static void sa2400_rf_set_channel(struct ieee80211_hw *dev,
80 struct ieee80211_conf *conf) 105 struct ieee80211_conf *conf)
81{ 106{
@@ -198,5 +223,6 @@ const struct rtl818x_rf_ops sa2400_rf_ops = {
198 .name = "Philips", 223 .name = "Philips",
199 .init = sa2400_rf_init, 224 .init = sa2400_rf_init,
200 .stop = sa2400_rf_stop, 225 .stop = sa2400_rf_stop,
201 .set_chan = sa2400_rf_set_channel 226 .set_chan = sa2400_rf_set_channel,
227 .calc_rssi = sa2400_rf_calc_rssi,
202}; 228};
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6bb32112e65c..98878160a65a 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -47,7 +47,7 @@ struct rtl8187_rx_hdr {
47 u8 agc; 47 u8 agc;
48 u8 reserved; 48 u8 reserved;
49 __le64 mac_time; 49 __le64 mac_time;
50} __attribute__((packed)); 50} __packed;
51 51
52struct rtl8187b_rx_hdr { 52struct rtl8187b_rx_hdr {
53 __le32 flags; 53 __le32 flags;
@@ -59,7 +59,7 @@ struct rtl8187b_rx_hdr {
59 __le16 snr_long2end; 59 __le16 snr_long2end;
60 s8 pwdb_g12; 60 s8 pwdb_g12;
61 u8 fot; 61 u8 fot;
62} __attribute__((packed)); 62} __packed;
63 63
64/* {rtl8187,rtl8187b}_tx_info is in skb */ 64/* {rtl8187,rtl8187b}_tx_info is in skb */
65 65
@@ -68,7 +68,7 @@ struct rtl8187_tx_hdr {
68 __le16 rts_duration; 68 __le16 rts_duration;
69 __le16 len; 69 __le16 len;
70 __le32 retry; 70 __le32 retry;
71} __attribute__((packed)); 71} __packed;
72 72
73struct rtl8187b_tx_hdr { 73struct rtl8187b_tx_hdr {
74 __le32 flags; 74 __le32 flags;
@@ -80,7 +80,7 @@ struct rtl8187b_tx_hdr {
80 __le32 unused_3; 80 __le32 unused_3;
81 __le32 retry; 81 __le32 retry;
82 __le32 unused_4[2]; 82 __le32 unused_4[2];
83} __attribute__((packed)); 83} __packed;
84 84
85enum { 85enum {
86 DEVICE_RTL8187, 86 DEVICE_RTL8187,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 891b8490e349..5738a55c1b06 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
573 } while (--i); 573 } while (--i);
574 574
575 if (!i) { 575 if (!i) {
576 printk(KERN_ERR "%s: Reset timeout!\n", wiphy_name(dev->wiphy)); 576 wiphy_err(dev->wiphy, "reset timeout!\n");
577 return -ETIMEDOUT; 577 return -ETIMEDOUT;
578 } 578 }
579 579
@@ -589,8 +589,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
589 } while (--i); 589 } while (--i);
590 590
591 if (!i) { 591 if (!i) {
592 printk(KERN_ERR "%s: eeprom reset timeout!\n", 592 wiphy_err(dev->wiphy, "eeprom reset timeout!\n");
593 wiphy_name(dev->wiphy));
594 return -ETIMEDOUT; 593 return -ETIMEDOUT;
595 } 594 }
596 595
@@ -1527,9 +1526,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1527 mutex_init(&priv->conf_mutex); 1526 mutex_init(&priv->conf_mutex);
1528 skb_queue_head_init(&priv->b_tx_status.queue); 1527 skb_queue_head_init(&priv->b_tx_status.queue);
1529 1528
1530 printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n", 1529 wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n",
1531 wiphy_name(dev->wiphy), mac_addr, 1530 mac_addr, chip_name, priv->asic_rev, priv->rf->name,
1532 chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask); 1531 priv->rfkill_mask);
1533 1532
1534#ifdef CONFIG_RTL8187_LEDS 1533#ifdef CONFIG_RTL8187_LEDS
1535 eeprom_93cx6_read(&eeprom, 0x3F, &reg); 1534 eeprom_93cx6_read(&eeprom, 0x3F, &reg);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
index a09819386a1e..fd96f9112322 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
@@ -366,8 +366,8 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev)
366 rtl8225_write(dev, 0x02, 0x044d); 366 rtl8225_write(dev, 0x02, 0x044d);
367 msleep(100); 367 msleep(100);
368 if (!(rtl8225_read(dev, 6) & (1 << 7))) 368 if (!(rtl8225_read(dev, 6) & (1 << 7)))
369 printk(KERN_WARNING "%s: RF Calibration Failed! %x\n", 369 wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
370 wiphy_name(dev->wiphy), rtl8225_read(dev, 6)); 370 rtl8225_read(dev, 6));
371 } 371 }
372 372
373 rtl8225_write(dev, 0x0, 0x127); 373 rtl8225_write(dev, 0x0, 0x127);
@@ -735,8 +735,8 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
735 rtl8225_write(dev, 0x02, 0x044D); 735 rtl8225_write(dev, 0x02, 0x044D);
736 msleep(100); 736 msleep(100);
737 if (!(rtl8225_read(dev, 6) & (1 << 7))) 737 if (!(rtl8225_read(dev, 6) & (1 << 7)))
738 printk(KERN_WARNING "%s: RF Calibration Failed! %x\n", 738 wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
739 wiphy_name(dev->wiphy), rtl8225_read(dev, 6)); 739 rtl8225_read(dev, 6));
740 } 740 }
741 741
742 msleep(200); 742 msleep(200);
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 8522490d2e29..1615f63b02f6 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -185,7 +185,7 @@ struct rtl818x_csr {
185 u8 reserved_22[4]; 185 u8 reserved_22[4];
186 __le16 TALLY_CNT; 186 __le16 TALLY_CNT;
187 u8 TALLY_SEL; 187 u8 TALLY_SEL;
188} __attribute__((packed)); 188} __packed;
189 189
190struct rtl818x_rf_ops { 190struct rtl818x_rf_ops {
191 char *name; 191 char *name;
@@ -193,6 +193,7 @@ struct rtl818x_rf_ops {
193 void (*stop)(struct ieee80211_hw *); 193 void (*stop)(struct ieee80211_hw *);
194 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); 194 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
195 void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *); 195 void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *);
196 u8 (*calc_rssi)(u8 agc, u8 sq);
196}; 197};
197 198
198/** 199/**
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 337fc7bec5a5..2f98058be451 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -41,7 +41,7 @@ config WL1251_SDIO
41 41
42config WL1271 42config WL1271
43 tristate "TI wl1271 support" 43 tristate "TI wl1271 support"
44 depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS 44 depends on WL12XX && GENERIC_HARDIRQS
45 depends on INET 45 depends on INET
46 select FW_LOADER 46 select FW_LOADER
47 select CRC7 47 select CRC7
@@ -65,7 +65,7 @@ config WL1271_SPI
65 65
66config WL1271_SDIO 66config WL1271_SDIO
67 tristate "TI wl1271 SDIO support" 67 tristate "TI wl1271 SDIO support"
68 depends on WL1271 && MMC && ARM 68 depends on WL1271 && MMC
69 ---help--- 69 ---help---
70 This module adds support for the SDIO interface of adapters using 70 This module adds support for the SDIO interface of adapters using
71 TI wl1271 chipset. Select this if your platform is using 71 TI wl1271 chipset. Select this if your platform is using
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 27ddd2be0a91..078b4398ac1f 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
10wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \ 10wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
11 wl1271_event.o wl1271_tx.o wl1271_rx.o \ 11 wl1271_event.o wl1271_tx.o wl1271_rx.o \
12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \ 12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \
13 wl1271_init.o wl1271_debugfs.o 13 wl1271_init.o wl1271_debugfs.o wl1271_scan.o
14 14
15wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o 15wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
16obj-$(CONFIG_WL1271) += wl1271.o 16obj-$(CONFIG_WL1271) += wl1271.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 4f5f02a26e62..6b942a28e6a5 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -381,6 +381,9 @@ struct wl1251 {
381 381
382 u32 chip_id; 382 u32 chip_id;
383 char fw_ver[21]; 383 char fw_ver[21];
384
385 /* Most recently reported noise in dBm */
386 s8 noise;
384}; 387};
385 388
386int wl1251_plt_start(struct wl1251 *wl); 389int wl1251_plt_start(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 26160c45784c..842df310d92a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -60,7 +60,7 @@ struct acx_error_counter {
60 /* the number of missed sequence numbers in the squentially */ 60 /* the number of missed sequence numbers in the squentially */
61 /* values of frames seq numbers */ 61 /* values of frames seq numbers */
62 u32 seq_num_miss; 62 u32 seq_num_miss;
63} __attribute__ ((packed)); 63} __packed;
64 64
65struct acx_revision { 65struct acx_revision {
66 struct acx_header header; 66 struct acx_header header;
@@ -89,7 +89,7 @@ struct acx_revision {
89 * bits 24 - 31: Chip ID - The WiLink chip ID. 89 * bits 24 - 31: Chip ID - The WiLink chip ID.
90 */ 90 */
91 u32 hw_version; 91 u32 hw_version;
92} __attribute__ ((packed)); 92} __packed;
93 93
94enum wl1251_psm_mode { 94enum wl1251_psm_mode {
95 /* Active mode */ 95 /* Active mode */
@@ -111,7 +111,7 @@ struct acx_sleep_auth {
111 /* 2 - ELP mode: Deep / Max sleep*/ 111 /* 2 - ELP mode: Deep / Max sleep*/
112 u8 sleep_auth; 112 u8 sleep_auth;
113 u8 padding[3]; 113 u8 padding[3];
114} __attribute__ ((packed)); 114} __packed;
115 115
116enum { 116enum {
117 HOSTIF_PCI_MASTER_HOST_INDIRECT, 117 HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -159,7 +159,7 @@ struct acx_data_path_params {
159 * complete ring until an interrupt is generated. 159 * complete ring until an interrupt is generated.
160 */ 160 */
161 u32 tx_complete_timeout; 161 u32 tx_complete_timeout;
162} __attribute__ ((packed)); 162} __packed;
163 163
164 164
165struct acx_data_path_params_resp { 165struct acx_data_path_params_resp {
@@ -180,7 +180,7 @@ struct acx_data_path_params_resp {
180 u32 tx_control_addr; 180 u32 tx_control_addr;
181 181
182 u32 tx_complete_addr; 182 u32 tx_complete_addr;
183} __attribute__ ((packed)); 183} __packed;
184 184
185#define TX_MSDU_LIFETIME_MIN 0 185#define TX_MSDU_LIFETIME_MIN 0
186#define TX_MSDU_LIFETIME_MAX 3000 186#define TX_MSDU_LIFETIME_MAX 3000
@@ -197,7 +197,7 @@ struct acx_rx_msdu_lifetime {
197 * firmware discards the MSDU. 197 * firmware discards the MSDU.
198 */ 198 */
199 u32 lifetime; 199 u32 lifetime;
200} __attribute__ ((packed)); 200} __packed;
201 201
202/* 202/*
203 * RX Config Options Table 203 * RX Config Options Table
@@ -285,7 +285,7 @@ struct acx_rx_config {
285 285
286 u32 config_options; 286 u32 config_options;
287 u32 filter_options; 287 u32 filter_options;
288} __attribute__ ((packed)); 288} __packed;
289 289
290enum { 290enum {
291 QOS_AC_BE = 0, 291 QOS_AC_BE = 0,
@@ -325,13 +325,13 @@ struct acx_tx_queue_qos_config {
325 325
326 /* Lowest memory blocks guaranteed for this queue */ 326 /* Lowest memory blocks guaranteed for this queue */
327 u16 low_threshold; 327 u16 low_threshold;
328} __attribute__ ((packed)); 328} __packed;
329 329
330struct acx_packet_detection { 330struct acx_packet_detection {
331 struct acx_header header; 331 struct acx_header header;
332 332
333 u32 threshold; 333 u32 threshold;
334} __attribute__ ((packed)); 334} __packed;
335 335
336 336
337enum acx_slot_type { 337enum acx_slot_type {
@@ -349,7 +349,7 @@ struct acx_slot {
349 u8 wone_index; /* Reserved */ 349 u8 wone_index; /* Reserved */
350 u8 slot_time; 350 u8 slot_time;
351 u8 reserved[6]; 351 u8 reserved[6];
352} __attribute__ ((packed)); 352} __packed;
353 353
354 354
355#define ADDRESS_GROUP_MAX (8) 355#define ADDRESS_GROUP_MAX (8)
@@ -362,7 +362,7 @@ struct acx_dot11_grp_addr_tbl {
362 u8 num_groups; 362 u8 num_groups;
363 u8 pad[2]; 363 u8 pad[2];
364 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 364 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
365} __attribute__ ((packed)); 365} __packed;
366 366
367 367
368#define RX_TIMEOUT_PS_POLL_MIN 0 368#define RX_TIMEOUT_PS_POLL_MIN 0
@@ -388,7 +388,7 @@ struct acx_rx_timeout {
388 * from an UPSD enabled queue. 388 * from an UPSD enabled queue.
389 */ 389 */
390 u16 upsd_timeout; 390 u16 upsd_timeout;
391} __attribute__ ((packed)); 391} __packed;
392 392
393#define RTS_THRESHOLD_MIN 0 393#define RTS_THRESHOLD_MIN 0
394#define RTS_THRESHOLD_MAX 4096 394#define RTS_THRESHOLD_MAX 4096
@@ -399,7 +399,7 @@ struct acx_rts_threshold {
399 399
400 u16 threshold; 400 u16 threshold;
401 u8 pad[2]; 401 u8 pad[2];
402} __attribute__ ((packed)); 402} __packed;
403 403
404struct acx_beacon_filter_option { 404struct acx_beacon_filter_option {
405 struct acx_header header; 405 struct acx_header header;
@@ -415,7 +415,7 @@ struct acx_beacon_filter_option {
415 */ 415 */
416 u8 max_num_beacons; 416 u8 max_num_beacons;
417 u8 pad[2]; 417 u8 pad[2];
418} __attribute__ ((packed)); 418} __packed;
419 419
420/* 420/*
421 * ACXBeaconFilterEntry (not 221) 421 * ACXBeaconFilterEntry (not 221)
@@ -461,7 +461,7 @@ struct acx_beacon_filter_ie_table {
461 u8 num_ie; 461 u8 num_ie;
462 u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; 462 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
463 u8 pad[3]; 463 u8 pad[3];
464} __attribute__ ((packed)); 464} __packed;
465 465
466#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ 466#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */
467#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */ 467#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */
@@ -494,7 +494,7 @@ struct acx_bt_wlan_coex {
494 */ 494 */
495 u8 enable; 495 u8 enable;
496 u8 pad[3]; 496 u8 pad[3];
497} __attribute__ ((packed)); 497} __packed;
498 498
499#define PTA_ANTENNA_TYPE_DEF (0) 499#define PTA_ANTENNA_TYPE_DEF (0)
500#define PTA_BT_HP_MAXTIME_DEF (2000) 500#define PTA_BT_HP_MAXTIME_DEF (2000)
@@ -648,7 +648,7 @@ struct acx_bt_wlan_coex_param {
648 648
649 /* range: 0 - 20 default: 1 */ 649 /* range: 0 - 20 default: 1 */
650 u8 bt_hp_respected_num; 650 u8 bt_hp_respected_num;
651} __attribute__ ((packed)); 651} __packed;
652 652
653#define CCA_THRSH_ENABLE_ENERGY_D 0x140A 653#define CCA_THRSH_ENABLE_ENERGY_D 0x140A
654#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF 654#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF
@@ -660,7 +660,7 @@ struct acx_energy_detection {
660 u16 rx_cca_threshold; 660 u16 rx_cca_threshold;
661 u8 tx_energy_detection; 661 u8 tx_energy_detection;
662 u8 pad; 662 u8 pad;
663} __attribute__ ((packed)); 663} __packed;
664 664
665#define BCN_RX_TIMEOUT_DEF_VALUE 10000 665#define BCN_RX_TIMEOUT_DEF_VALUE 10000
666#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 666#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000
@@ -679,14 +679,14 @@ struct acx_beacon_broadcast {
679 /* Consecutive PS Poll failures before updating the host */ 679 /* Consecutive PS Poll failures before updating the host */
680 u8 ps_poll_threshold; 680 u8 ps_poll_threshold;
681 u8 pad[2]; 681 u8 pad[2];
682} __attribute__ ((packed)); 682} __packed;
683 683
684struct acx_event_mask { 684struct acx_event_mask {
685 struct acx_header header; 685 struct acx_header header;
686 686
687 u32 event_mask; 687 u32 event_mask;
688 u32 high_event_mask; /* Unused */ 688 u32 high_event_mask; /* Unused */
689} __attribute__ ((packed)); 689} __packed;
690 690
691#define CFG_RX_FCS BIT(2) 691#define CFG_RX_FCS BIT(2)
692#define CFG_RX_ALL_GOOD BIT(3) 692#define CFG_RX_ALL_GOOD BIT(3)
@@ -729,7 +729,7 @@ struct acx_fw_gen_frame_rates {
729 u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */ 729 u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */
730 u8 tx_mgt_frame_rate; 730 u8 tx_mgt_frame_rate;
731 u8 tx_mgt_frame_mod; 731 u8 tx_mgt_frame_mod;
732} __attribute__ ((packed)); 732} __packed;
733 733
734/* STA MAC */ 734/* STA MAC */
735struct acx_dot11_station_id { 735struct acx_dot11_station_id {
@@ -737,28 +737,28 @@ struct acx_dot11_station_id {
737 737
738 u8 mac[ETH_ALEN]; 738 u8 mac[ETH_ALEN];
739 u8 pad[2]; 739 u8 pad[2];
740} __attribute__ ((packed)); 740} __packed;
741 741
742struct acx_feature_config { 742struct acx_feature_config {
743 struct acx_header header; 743 struct acx_header header;
744 744
745 u32 options; 745 u32 options;
746 u32 data_flow_options; 746 u32 data_flow_options;
747} __attribute__ ((packed)); 747} __packed;
748 748
749struct acx_current_tx_power { 749struct acx_current_tx_power {
750 struct acx_header header; 750 struct acx_header header;
751 751
752 u8 current_tx_power; 752 u8 current_tx_power;
753 u8 padding[3]; 753 u8 padding[3];
754} __attribute__ ((packed)); 754} __packed;
755 755
756struct acx_dot11_default_key { 756struct acx_dot11_default_key {
757 struct acx_header header; 757 struct acx_header header;
758 758
759 u8 id; 759 u8 id;
760 u8 pad[3]; 760 u8 pad[3];
761} __attribute__ ((packed)); 761} __packed;
762 762
763struct acx_tsf_info { 763struct acx_tsf_info {
764 struct acx_header header; 764 struct acx_header header;
@@ -769,7 +769,7 @@ struct acx_tsf_info {
769 u32 last_TBTT_lsb; 769 u32 last_TBTT_lsb;
770 u8 last_dtim_count; 770 u8 last_dtim_count;
771 u8 pad[3]; 771 u8 pad[3];
772} __attribute__ ((packed)); 772} __packed;
773 773
774enum acx_wake_up_event { 774enum acx_wake_up_event {
775 WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ 775 WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/
@@ -785,7 +785,7 @@ struct acx_wake_up_condition {
785 u8 wake_up_event; /* Only one bit can be set */ 785 u8 wake_up_event; /* Only one bit can be set */
786 u8 listen_interval; 786 u8 listen_interval;
787 u8 pad[2]; 787 u8 pad[2];
788} __attribute__ ((packed)); 788} __packed;
789 789
790struct acx_aid { 790struct acx_aid {
791 struct acx_header header; 791 struct acx_header header;
@@ -795,7 +795,7 @@ struct acx_aid {
795 */ 795 */
796 u16 aid; 796 u16 aid;
797 u8 pad[2]; 797 u8 pad[2];
798} __attribute__ ((packed)); 798} __packed;
799 799
800enum acx_preamble_type { 800enum acx_preamble_type {
801 ACX_PREAMBLE_LONG = 0, 801 ACX_PREAMBLE_LONG = 0,
@@ -811,7 +811,7 @@ struct acx_preamble {
811 */ 811 */
812 u8 preamble; 812 u8 preamble;
813 u8 padding[3]; 813 u8 padding[3];
814} __attribute__ ((packed)); 814} __packed;
815 815
816enum acx_ctsprotect_type { 816enum acx_ctsprotect_type {
817 CTSPROTECT_DISABLE = 0, 817 CTSPROTECT_DISABLE = 0,
@@ -822,11 +822,11 @@ struct acx_ctsprotect {
822 struct acx_header header; 822 struct acx_header header;
823 u8 ctsprotect; 823 u8 ctsprotect;
824 u8 padding[3]; 824 u8 padding[3];
825} __attribute__ ((packed)); 825} __packed;
826 826
827struct acx_tx_statistics { 827struct acx_tx_statistics {
828 u32 internal_desc_overflow; 828 u32 internal_desc_overflow;
829} __attribute__ ((packed)); 829} __packed;
830 830
831struct acx_rx_statistics { 831struct acx_rx_statistics {
832 u32 out_of_mem; 832 u32 out_of_mem;
@@ -837,14 +837,14 @@ struct acx_rx_statistics {
837 u32 xfr_hint_trig; 837 u32 xfr_hint_trig;
838 u32 path_reset; 838 u32 path_reset;
839 u32 reset_counter; 839 u32 reset_counter;
840} __attribute__ ((packed)); 840} __packed;
841 841
842struct acx_dma_statistics { 842struct acx_dma_statistics {
843 u32 rx_requested; 843 u32 rx_requested;
844 u32 rx_errors; 844 u32 rx_errors;
845 u32 tx_requested; 845 u32 tx_requested;
846 u32 tx_errors; 846 u32 tx_errors;
847} __attribute__ ((packed)); 847} __packed;
848 848
849struct acx_isr_statistics { 849struct acx_isr_statistics {
850 /* host command complete */ 850 /* host command complete */
@@ -903,7 +903,7 @@ struct acx_isr_statistics {
903 903
904 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ 904 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
905 u32 low_rssi; 905 u32 low_rssi;
906} __attribute__ ((packed)); 906} __packed;
907 907
908struct acx_wep_statistics { 908struct acx_wep_statistics {
909 /* WEP address keys configured */ 909 /* WEP address keys configured */
@@ -925,7 +925,7 @@ struct acx_wep_statistics {
925 925
926 /* WEP decrypt interrupts */ 926 /* WEP decrypt interrupts */
927 u32 interrupt; 927 u32 interrupt;
928} __attribute__ ((packed)); 928} __packed;
929 929
930#define ACX_MISSED_BEACONS_SPREAD 10 930#define ACX_MISSED_BEACONS_SPREAD 10
931 931
@@ -985,12 +985,12 @@ struct acx_pwr_statistics {
985 985
986 /* the number of beacons in awake mode */ 986 /* the number of beacons in awake mode */
987 u32 rcvd_awake_beacons; 987 u32 rcvd_awake_beacons;
988} __attribute__ ((packed)); 988} __packed;
989 989
990struct acx_mic_statistics { 990struct acx_mic_statistics {
991 u32 rx_pkts; 991 u32 rx_pkts;
992 u32 calc_failure; 992 u32 calc_failure;
993} __attribute__ ((packed)); 993} __packed;
994 994
995struct acx_aes_statistics { 995struct acx_aes_statistics {
996 u32 encrypt_fail; 996 u32 encrypt_fail;
@@ -999,7 +999,7 @@ struct acx_aes_statistics {
999 u32 decrypt_packets; 999 u32 decrypt_packets;
1000 u32 encrypt_interrupt; 1000 u32 encrypt_interrupt;
1001 u32 decrypt_interrupt; 1001 u32 decrypt_interrupt;
1002} __attribute__ ((packed)); 1002} __packed;
1003 1003
1004struct acx_event_statistics { 1004struct acx_event_statistics {
1005 u32 heart_beat; 1005 u32 heart_beat;
@@ -1010,7 +1010,7 @@ struct acx_event_statistics {
1010 u32 oom_late; 1010 u32 oom_late;
1011 u32 phy_transmit_error; 1011 u32 phy_transmit_error;
1012 u32 tx_stuck; 1012 u32 tx_stuck;
1013} __attribute__ ((packed)); 1013} __packed;
1014 1014
1015struct acx_ps_statistics { 1015struct acx_ps_statistics {
1016 u32 pspoll_timeouts; 1016 u32 pspoll_timeouts;
@@ -1020,7 +1020,7 @@ struct acx_ps_statistics {
1020 u32 pspoll_max_apturn; 1020 u32 pspoll_max_apturn;
1021 u32 pspoll_utilization; 1021 u32 pspoll_utilization;
1022 u32 upsd_utilization; 1022 u32 upsd_utilization;
1023} __attribute__ ((packed)); 1023} __packed;
1024 1024
1025struct acx_rxpipe_statistics { 1025struct acx_rxpipe_statistics {
1026 u32 rx_prep_beacon_drop; 1026 u32 rx_prep_beacon_drop;
@@ -1028,7 +1028,7 @@ struct acx_rxpipe_statistics {
1028 u32 beacon_buffer_thres_host_int_trig_rx_data; 1028 u32 beacon_buffer_thres_host_int_trig_rx_data;
1029 u32 missed_beacon_host_int_trig_rx_data; 1029 u32 missed_beacon_host_int_trig_rx_data;
1030 u32 tx_xfr_host_int_trig_rx_data; 1030 u32 tx_xfr_host_int_trig_rx_data;
1031} __attribute__ ((packed)); 1031} __packed;
1032 1032
1033struct acx_statistics { 1033struct acx_statistics {
1034 struct acx_header header; 1034 struct acx_header header;
@@ -1044,7 +1044,7 @@ struct acx_statistics {
1044 struct acx_event_statistics event; 1044 struct acx_event_statistics event;
1045 struct acx_ps_statistics ps; 1045 struct acx_ps_statistics ps;
1046 struct acx_rxpipe_statistics rxpipe; 1046 struct acx_rxpipe_statistics rxpipe;
1047} __attribute__ ((packed)); 1047} __packed;
1048 1048
1049#define ACX_MAX_RATE_CLASSES 8 1049#define ACX_MAX_RATE_CLASSES 8
1050#define ACX_RATE_MASK_UNSPECIFIED 0 1050#define ACX_RATE_MASK_UNSPECIFIED 0
@@ -1063,7 +1063,7 @@ struct acx_rate_policy {
1063 1063
1064 u32 rate_class_cnt; 1064 u32 rate_class_cnt;
1065 struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; 1065 struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES];
1066} __attribute__ ((packed)); 1066} __packed;
1067 1067
1068struct wl1251_acx_memory { 1068struct wl1251_acx_memory {
1069 __le16 num_stations; /* number of STAs to be supported. */ 1069 __le16 num_stations; /* number of STAs to be supported. */
@@ -1082,7 +1082,7 @@ struct wl1251_acx_memory {
1082 u8 tx_min_mem_block_num; 1082 u8 tx_min_mem_block_num;
1083 u8 num_ssid_profiles; 1083 u8 num_ssid_profiles;
1084 __le16 debug_buffer_size; 1084 __le16 debug_buffer_size;
1085} __attribute__ ((packed)); 1085} __packed;
1086 1086
1087 1087
1088#define ACX_RX_DESC_MIN 1 1088#define ACX_RX_DESC_MIN 1
@@ -1094,7 +1094,7 @@ struct wl1251_acx_rx_queue_config {
1094 u8 type; 1094 u8 type;
1095 u8 priority; 1095 u8 priority;
1096 __le32 dma_address; 1096 __le32 dma_address;
1097} __attribute__ ((packed)); 1097} __packed;
1098 1098
1099#define ACX_TX_DESC_MIN 1 1099#define ACX_TX_DESC_MIN 1
1100#define ACX_TX_DESC_MAX 127 1100#define ACX_TX_DESC_MAX 127
@@ -1103,7 +1103,7 @@ struct wl1251_acx_tx_queue_config {
1103 u8 num_descs; 1103 u8 num_descs;
1104 u8 pad[2]; 1104 u8 pad[2];
1105 u8 attributes; 1105 u8 attributes;
1106} __attribute__ ((packed)); 1106} __packed;
1107 1107
1108#define MAX_TX_QUEUE_CONFIGS 5 1108#define MAX_TX_QUEUE_CONFIGS 5
1109#define MAX_TX_QUEUES 4 1109#define MAX_TX_QUEUES 4
@@ -1113,7 +1113,7 @@ struct wl1251_acx_config_memory {
1113 struct wl1251_acx_memory mem_config; 1113 struct wl1251_acx_memory mem_config;
1114 struct wl1251_acx_rx_queue_config rx_queue_config; 1114 struct wl1251_acx_rx_queue_config rx_queue_config;
1115 struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS]; 1115 struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS];
1116} __attribute__ ((packed)); 1116} __packed;
1117 1117
1118struct wl1251_acx_mem_map { 1118struct wl1251_acx_mem_map {
1119 struct acx_header header; 1119 struct acx_header header;
@@ -1147,7 +1147,7 @@ struct wl1251_acx_mem_map {
1147 1147
1148 /* Number of blocks FW allocated for RX packets */ 1148 /* Number of blocks FW allocated for RX packets */
1149 u32 num_rx_mem_blocks; 1149 u32 num_rx_mem_blocks;
1150} __attribute__ ((packed)); 1150} __packed;
1151 1151
1152 1152
1153struct wl1251_acx_wr_tbtt_and_dtim { 1153struct wl1251_acx_wr_tbtt_and_dtim {
@@ -1164,7 +1164,7 @@ struct wl1251_acx_wr_tbtt_and_dtim {
1164 */ 1164 */
1165 u8 dtim; 1165 u8 dtim;
1166 u8 padding; 1166 u8 padding;
1167} __attribute__ ((packed)); 1167} __packed;
1168 1168
1169struct wl1251_acx_ac_cfg { 1169struct wl1251_acx_ac_cfg {
1170 struct acx_header header; 1170 struct acx_header header;
@@ -1194,7 +1194,7 @@ struct wl1251_acx_ac_cfg {
1194 1194
1195 /* The TX Op Limit (in microseconds) for the access class. */ 1195 /* The TX Op Limit (in microseconds) for the access class. */
1196 u16 txop_limit; 1196 u16 txop_limit;
1197} __attribute__ ((packed)); 1197} __packed;
1198 1198
1199 1199
1200enum wl1251_acx_channel_type { 1200enum wl1251_acx_channel_type {
@@ -1245,7 +1245,7 @@ struct wl1251_acx_tid_cfg {
1245 1245
1246 /* not supported */ 1246 /* not supported */
1247 u32 apsdconf[2]; 1247 u32 apsdconf[2];
1248} __attribute__ ((packed)); 1248} __packed;
1249 1249
1250/************************************************************************* 1250/*************************************************************************
1251 1251
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 2545123931e8..65e0416be5b6 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -225,7 +225,7 @@ static void wl1251_boot_set_ecpu_ctrl(struct wl1251 *wl, u32 flag)
225int wl1251_boot_run_firmware(struct wl1251 *wl) 225int wl1251_boot_run_firmware(struct wl1251 *wl)
226{ 226{
227 int loop, ret; 227 int loop, ret;
228 u32 chip_id, interrupt; 228 u32 chip_id, acx_intr;
229 229
230 wl1251_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 230 wl1251_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
231 231
@@ -242,15 +242,15 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
242 loop = 0; 242 loop = 0;
243 while (loop++ < INIT_LOOP) { 243 while (loop++ < INIT_LOOP) {
244 udelay(INIT_LOOP_DELAY); 244 udelay(INIT_LOOP_DELAY);
245 interrupt = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 245 acx_intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
246 246
247 if (interrupt == 0xffffffff) { 247 if (acx_intr == 0xffffffff) {
248 wl1251_error("error reading hardware complete " 248 wl1251_error("error reading hardware complete "
249 "init indication"); 249 "init indication");
250 return -EIO; 250 return -EIO;
251 } 251 }
252 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 252 /* check that ACX_INTR_INIT_COMPLETE is enabled */
253 else if (interrupt & WL1251_ACX_INTR_INIT_COMPLETE) { 253 else if (acx_intr & WL1251_ACX_INTR_INIT_COMPLETE) {
254 wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK, 254 wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
255 WL1251_ACX_INTR_INIT_COMPLETE); 255 WL1251_ACX_INTR_INIT_COMPLETE);
256 break; 256 break;
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index 4ad67cae94d2..a9e4991369be 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -106,7 +106,7 @@ struct wl1251_cmd_header {
106 u16 status; 106 u16 status;
107 /* payload */ 107 /* payload */
108 u8 data[0]; 108 u8 data[0];
109} __attribute__ ((packed)); 109} __packed;
110 110
111struct wl1251_command { 111struct wl1251_command {
112 struct wl1251_cmd_header header; 112 struct wl1251_cmd_header header;
@@ -175,8 +175,8 @@ struct cmd_read_write_memory {
175#define WL1251_SCAN_NUM_PROBES 3 175#define WL1251_SCAN_NUM_PROBES 3
176 176
177struct wl1251_scan_parameters { 177struct wl1251_scan_parameters {
178 u32 rx_config_options; 178 __le32 rx_config_options;
179 u32 rx_filter_options; 179 __le32 rx_filter_options;
180 180
181 /* 181 /*
182 * Scan options: 182 * Scan options:
@@ -186,7 +186,7 @@ struct wl1251_scan_parameters {
186 * bit 2: voice mode, 0 for normal scan. 186 * bit 2: voice mode, 0 for normal scan.
187 * bit 3: scan priority, 1 for high priority. 187 * bit 3: scan priority, 1 for high priority.
188 */ 188 */
189 u16 scan_options; 189 __le16 scan_options;
190 190
191 /* Number of channels to scan */ 191 /* Number of channels to scan */
192 u8 num_channels; 192 u8 num_channels;
@@ -195,17 +195,17 @@ struct wl1251_scan_parameters {
195 u8 num_probe_requests; 195 u8 num_probe_requests;
196 196
197 /* Rate and modulation for probe requests */ 197 /* Rate and modulation for probe requests */
198 u16 tx_rate; 198 __le16 tx_rate;
199 199
200 u8 tid_trigger; 200 u8 tid_trigger;
201 u8 ssid_len; 201 u8 ssid_len;
202 u8 ssid[32]; 202 u8 ssid[32];
203 203
204} __attribute__ ((packed)); 204} __packed;
205 205
206struct wl1251_scan_ch_parameters { 206struct wl1251_scan_ch_parameters {
207 u32 min_duration; /* in TU */ 207 __le32 min_duration; /* in TU */
208 u32 max_duration; /* in TU */ 208 __le32 max_duration; /* in TU */
209 u32 bssid_lsb; 209 u32 bssid_lsb;
210 u16 bssid_msb; 210 u16 bssid_msb;
211 211
@@ -218,7 +218,7 @@ struct wl1251_scan_ch_parameters {
218 u8 tx_power_att; 218 u8 tx_power_att;
219 u8 channel; 219 u8 channel;
220 u8 pad[3]; 220 u8 pad[3];
221} __attribute__ ((packed)); 221} __packed;
222 222
223/* SCAN parameters */ 223/* SCAN parameters */
224#define SCAN_MAX_NUM_OF_CHANNELS 16 224#define SCAN_MAX_NUM_OF_CHANNELS 16
@@ -228,7 +228,7 @@ struct wl1251_cmd_scan {
228 228
229 struct wl1251_scan_parameters params; 229 struct wl1251_scan_parameters params;
230 struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; 230 struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
231} __attribute__ ((packed)); 231} __packed;
232 232
233enum { 233enum {
234 BSS_TYPE_IBSS = 0, 234 BSS_TYPE_IBSS = 0,
@@ -276,14 +276,14 @@ struct cmd_join {
276 u8 tx_mgt_frame_rate; /* OBSOLETE */ 276 u8 tx_mgt_frame_rate; /* OBSOLETE */
277 u8 tx_mgt_frame_mod; /* OBSOLETE */ 277 u8 tx_mgt_frame_mod; /* OBSOLETE */
278 u8 reserved; 278 u8 reserved;
279} __attribute__ ((packed)); 279} __packed;
280 280
281struct cmd_enabledisable_path { 281struct cmd_enabledisable_path {
282 struct wl1251_cmd_header header; 282 struct wl1251_cmd_header header;
283 283
284 u8 channel; 284 u8 channel;
285 u8 padding[3]; 285 u8 padding[3];
286} __attribute__ ((packed)); 286} __packed;
287 287
288#define WL1251_MAX_TEMPLATE_SIZE 300 288#define WL1251_MAX_TEMPLATE_SIZE 300
289 289
@@ -292,7 +292,7 @@ struct wl1251_cmd_packet_template {
292 292
293 __le16 size; 293 __le16 size;
294 u8 data[0]; 294 u8 data[0];
295} __attribute__ ((packed)); 295} __packed;
296 296
297#define TIM_ELE_ID 5 297#define TIM_ELE_ID 5
298#define PARTIAL_VBM_MAX 251 298#define PARTIAL_VBM_MAX 251
@@ -304,7 +304,7 @@ struct wl1251_tim {
304 u8 dtim_period; 304 u8 dtim_period;
305 u8 bitmap_ctrl; 305 u8 bitmap_ctrl;
306 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ 306 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
307} __attribute__ ((packed)); 307} __packed;
308 308
309/* Virtual Bit Map update */ 309/* Virtual Bit Map update */
310struct wl1251_cmd_vbm_update { 310struct wl1251_cmd_vbm_update {
@@ -312,7 +312,7 @@ struct wl1251_cmd_vbm_update {
312 __le16 len; 312 __le16 len;
313 u8 padding[2]; 313 u8 padding[2];
314 struct wl1251_tim tim; 314 struct wl1251_tim tim;
315} __attribute__ ((packed)); 315} __packed;
316 316
317enum wl1251_cmd_ps_mode { 317enum wl1251_cmd_ps_mode {
318 STATION_ACTIVE_MODE, 318 STATION_ACTIVE_MODE,
@@ -333,7 +333,7 @@ struct wl1251_cmd_ps_params {
333 u8 hang_over_period; 333 u8 hang_over_period;
334 u16 null_data_rate; 334 u16 null_data_rate;
335 u8 pad[2]; 335 u8 pad[2];
336} __attribute__ ((packed)); 336} __packed;
337 337
338struct wl1251_cmd_trigger_scan_to { 338struct wl1251_cmd_trigger_scan_to {
339 struct wl1251_cmd_header header; 339 struct wl1251_cmd_header header;
@@ -411,7 +411,7 @@ struct wl1251_cmd_set_keys {
411 u8 key[MAX_KEY_SIZE]; 411 u8 key[MAX_KEY_SIZE];
412 u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; 412 u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
413 u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 413 u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
414} __attribute__ ((packed)); 414} __packed;
415 415
416 416
417#endif /* __WL1251_CMD_H__ */ 417#endif /* __WL1251_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h
index be0ac54d6246..f48a2b66bc5a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.h
+++ b/drivers/net/wireless/wl12xx/wl1251_event.h
@@ -82,7 +82,7 @@ struct event_debug_report {
82 u32 report_1; 82 u32 report_1;
83 u32 report_2; 83 u32 report_2;
84 u32 report_3; 84 u32 report_3;
85} __attribute__ ((packed)); 85} __packed;
86 86
87struct event_mailbox { 87struct event_mailbox {
88 u32 events_vector; 88 u32 events_vector;
@@ -112,7 +112,7 @@ struct event_mailbox {
112 struct event_debug_report report; 112 struct event_debug_report report;
113 u8 average_snr_level; 113 u8 average_snr_level;
114 u8 padding[19]; 114 u8 padding[19];
115} __attribute__ ((packed)); 115} __packed;
116 116
117int wl1251_event_unmask(struct wl1251 *wl); 117int wl1251_event_unmask(struct wl1251 *wl);
118void wl1251_event_mbox_config(struct wl1251 *wl); 118void wl1251_event_mbox_config(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 00b24282fc73..861a5f33761e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -124,7 +124,7 @@ static int wl1251_fetch_nvs(struct wl1251 *wl)
124 } 124 }
125 125
126 wl->nvs_len = fw->size; 126 wl->nvs_len = fw->size;
127 wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL); 127 wl->nvs = kmemdup(fw->data, wl->nvs_len, GFP_KERNEL);
128 128
129 if (!wl->nvs) { 129 if (!wl->nvs) {
130 wl1251_error("could not allocate memory for the nvs file"); 130 wl1251_error("could not allocate memory for the nvs file");
@@ -132,8 +132,6 @@ static int wl1251_fetch_nvs(struct wl1251 *wl)
132 goto out; 132 goto out;
133 } 133 }
134 134
135 memcpy(wl->nvs, fw->data, wl->nvs_len);
136
137 ret = 0; 135 ret = 0;
138 136
139out: 137out:
@@ -413,6 +411,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
413static int wl1251_op_start(struct ieee80211_hw *hw) 411static int wl1251_op_start(struct ieee80211_hw *hw)
414{ 412{
415 struct wl1251 *wl = hw->priv; 413 struct wl1251 *wl = hw->priv;
414 struct wiphy *wiphy = hw->wiphy;
416 int ret = 0; 415 int ret = 0;
417 416
418 wl1251_debug(DEBUG_MAC80211, "mac80211 start"); 417 wl1251_debug(DEBUG_MAC80211, "mac80211 start");
@@ -446,6 +445,10 @@ static int wl1251_op_start(struct ieee80211_hw *hw)
446 445
447 wl1251_info("firmware booted (%s)", wl->fw_ver); 446 wl1251_info("firmware booted (%s)", wl->fw_ver);
448 447
448 /* update hw/fw version info in wiphy struct */
449 wiphy->hw_version = wl->chip_id;
450 strncpy(wiphy->fw_version, wl->fw_ver, sizeof(wiphy->fw_version));
451
449out: 452out:
450 if (ret < 0) 453 if (ret < 0)
451 wl1251_power_off(wl); 454 wl1251_power_off(wl);
@@ -1174,6 +1177,22 @@ out:
1174 return ret; 1177 return ret;
1175} 1178}
1176 1179
1180static int wl1251_op_get_survey(struct ieee80211_hw *hw, int idx,
1181 struct survey_info *survey)
1182{
1183 struct wl1251 *wl = hw->priv;
1184 struct ieee80211_conf *conf = &hw->conf;
1185
1186 if (idx != 0)
1187 return -ENOENT;
1188
1189 survey->channel = conf->channel;
1190 survey->filled = SURVEY_INFO_NOISE_DBM;
1191 survey->noise = wl->noise;
1192
1193 return 0;
1194}
1195
1177/* can't be const, mac80211 writes to this */ 1196/* can't be const, mac80211 writes to this */
1178static struct ieee80211_supported_band wl1251_band_2ghz = { 1197static struct ieee80211_supported_band wl1251_band_2ghz = {
1179 .channels = wl1251_channels, 1198 .channels = wl1251_channels,
@@ -1195,6 +1214,7 @@ static const struct ieee80211_ops wl1251_ops = {
1195 .bss_info_changed = wl1251_op_bss_info_changed, 1214 .bss_info_changed = wl1251_op_bss_info_changed,
1196 .set_rts_threshold = wl1251_op_set_rts_threshold, 1215 .set_rts_threshold = wl1251_op_set_rts_threshold,
1197 .conf_tx = wl1251_op_conf_tx, 1216 .conf_tx = wl1251_op_conf_tx,
1217 .get_survey = wl1251_op_get_survey,
1198}; 1218};
1199 1219
1200static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data) 1220static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
@@ -1419,5 +1439,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
1419MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); 1439MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
1420MODULE_LICENSE("GPL"); 1440MODULE_LICENSE("GPL");
1421MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 1441MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
1422MODULE_ALIAS("spi:wl1251");
1423MODULE_FIRMWARE(WL1251_FW_NAME); 1442MODULE_FIRMWARE(WL1251_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 851515836a7f..1b6294b3b996 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -74,6 +74,12 @@ static void wl1251_rx_status(struct wl1251 *wl,
74 74
75 status->signal = desc->rssi; 75 status->signal = desc->rssi;
76 76
77 /*
78 * FIXME: guessing that snr needs to be divided by two, otherwise
79 * the values don't make any sense
80 */
81 wl->noise = desc->rssi - desc->snr / 2;
82
77 status->freq = ieee80211_channel_to_frequency(desc->channel); 83 status->freq = ieee80211_channel_to_frequency(desc->channel);
78 84
79 status->flag |= RX_FLAG_TSFT; 85 status->flag |= RX_FLAG_TSFT;
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h
index 563a3fde40fb..da4e53406a0e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.h
@@ -117,7 +117,7 @@ struct wl1251_rx_descriptor {
117 s8 rssi; /* in dB */ 117 s8 rssi; /* in dB */
118 u8 rcpi; /* in dB */ 118 u8 rcpi; /* in dB */
119 u8 snr; /* in dB */ 119 u8 snr; /* in dB */
120} __attribute__ ((packed)); 120} __packed;
121 121
122void wl1251_rx(struct wl1251 *wl); 122void wl1251_rx(struct wl1251 *wl);
123 123
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index c561332e7009..b901b6135654 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -37,11 +37,17 @@
37#define SDIO_DEVICE_ID_TI_WL1251 0x9066 37#define SDIO_DEVICE_ID_TI_WL1251 0x9066
38#endif 38#endif
39 39
40struct wl1251_sdio {
41 struct sdio_func *func;
42 u32 elp_val;
43};
44
40static struct wl12xx_platform_data *wl12xx_board_data; 45static struct wl12xx_platform_data *wl12xx_board_data;
41 46
42static struct sdio_func *wl_to_func(struct wl1251 *wl) 47static struct sdio_func *wl_to_func(struct wl1251 *wl)
43{ 48{
44 return wl->if_priv; 49 struct wl1251_sdio *wl_sdio = wl->if_priv;
50 return wl_sdio->func;
45} 51}
46 52
47static void wl1251_sdio_interrupt(struct sdio_func *func) 53static void wl1251_sdio_interrupt(struct sdio_func *func)
@@ -90,10 +96,17 @@ static void wl1251_sdio_write(struct wl1251 *wl, int addr,
90static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val) 96static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
91{ 97{
92 int ret = 0; 98 int ret = 0;
93 struct sdio_func *func = wl_to_func(wl); 99 struct wl1251_sdio *wl_sdio = wl->if_priv;
94 100 struct sdio_func *func = wl_sdio->func;
101
102 /*
103 * The hardware only supports RAW (read after write) access for
104 * reading, regular sdio_readb won't work here (it interprets
105 * the unused bits of CMD52 as write data even if we send read
106 * request).
107 */
95 sdio_claim_host(func); 108 sdio_claim_host(func);
96 *val = sdio_readb(func, addr, &ret); 109 *val = sdio_writeb_readb(func, wl_sdio->elp_val, addr, &ret);
97 sdio_release_host(func); 110 sdio_release_host(func);
98 111
99 if (ret) 112 if (ret)
@@ -103,7 +116,8 @@ static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
103static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val) 116static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
104{ 117{
105 int ret = 0; 118 int ret = 0;
106 struct sdio_func *func = wl_to_func(wl); 119 struct wl1251_sdio *wl_sdio = wl->if_priv;
120 struct sdio_func *func = wl_sdio->func;
107 121
108 sdio_claim_host(func); 122 sdio_claim_host(func);
109 sdio_writeb(func, val, addr, &ret); 123 sdio_writeb(func, val, addr, &ret);
@@ -111,6 +125,8 @@ static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
111 125
112 if (ret) 126 if (ret)
113 wl1251_error("sdio_writeb failed (%d)", ret); 127 wl1251_error("sdio_writeb failed (%d)", ret);
128 else
129 wl_sdio->elp_val = val;
114} 130}
115 131
116static void wl1251_sdio_reset(struct wl1251 *wl) 132static void wl1251_sdio_reset(struct wl1251 *wl)
@@ -197,6 +213,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
197 int ret; 213 int ret;
198 struct wl1251 *wl; 214 struct wl1251 *wl;
199 struct ieee80211_hw *hw; 215 struct ieee80211_hw *hw;
216 struct wl1251_sdio *wl_sdio;
200 217
201 hw = wl1251_alloc_hw(); 218 hw = wl1251_alloc_hw();
202 if (IS_ERR(hw)) 219 if (IS_ERR(hw))
@@ -204,6 +221,12 @@ static int wl1251_sdio_probe(struct sdio_func *func,
204 221
205 wl = hw->priv; 222 wl = hw->priv;
206 223
224 wl_sdio = kzalloc(sizeof(*wl_sdio), GFP_KERNEL);
225 if (wl_sdio == NULL) {
226 ret = -ENOMEM;
227 goto out_free_hw;
228 }
229
207 sdio_claim_host(func); 230 sdio_claim_host(func);
208 ret = sdio_enable_func(func); 231 ret = sdio_enable_func(func);
209 if (ret) 232 if (ret)
@@ -213,7 +236,8 @@ static int wl1251_sdio_probe(struct sdio_func *func,
213 sdio_release_host(func); 236 sdio_release_host(func);
214 237
215 SET_IEEE80211_DEV(hw, &func->dev); 238 SET_IEEE80211_DEV(hw, &func->dev);
216 wl->if_priv = func; 239 wl_sdio->func = func;
240 wl->if_priv = wl_sdio;
217 wl->if_ops = &wl1251_sdio_ops; 241 wl->if_ops = &wl1251_sdio_ops;
218 wl->set_power = wl1251_sdio_set_power; 242 wl->set_power = wl1251_sdio_set_power;
219 243
@@ -259,6 +283,8 @@ disable:
259 sdio_disable_func(func); 283 sdio_disable_func(func);
260release: 284release:
261 sdio_release_host(func); 285 sdio_release_host(func);
286 kfree(wl_sdio);
287out_free_hw:
262 wl1251_free_hw(wl); 288 wl1251_free_hw(wl);
263 return ret; 289 return ret;
264} 290}
@@ -266,9 +292,11 @@ release:
266static void __devexit wl1251_sdio_remove(struct sdio_func *func) 292static void __devexit wl1251_sdio_remove(struct sdio_func *func)
267{ 293{
268 struct wl1251 *wl = sdio_get_drvdata(func); 294 struct wl1251 *wl = sdio_get_drvdata(func);
295 struct wl1251_sdio *wl_sdio = wl->if_priv;
269 296
270 if (wl->irq) 297 if (wl->irq)
271 free_irq(wl->irq, wl); 298 free_irq(wl->irq, wl);
299 kfree(wl_sdio);
272 wl1251_free_hw(wl); 300 wl1251_free_hw(wl);
273 301
274 sdio_claim_host(func); 302 sdio_claim_host(func);
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index e81474203a23..27fdfaaeb074 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -345,3 +345,4 @@ module_exit(wl1251_spi_exit);
345 345
346MODULE_LICENSE("GPL"); 346MODULE_LICENSE("GPL");
347MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 347MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
348MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index c8223185efd2..a38ec199187a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -117,7 +117,7 @@ static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
117 frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 117 frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
118 tx_hdr->frag_threshold = cpu_to_le16(frag_threshold); 118 tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);
119 119
120 payload_len = tx_hdr->length + MAX_MSDU_SECURITY_LENGTH; 120 payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH;
121 121
122 if (payload_len > frag_threshold) { 122 if (payload_len > frag_threshold) {
123 mem_blocks_per_frag = 123 mem_blocks_per_frag =
@@ -191,11 +191,13 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
191 if (control->control.hw_key && 191 if (control->control.hw_key &&
192 control->control.hw_key->alg == ALG_TKIP) { 192 control->control.hw_key->alg == ALG_TKIP) {
193 int hdrlen; 193 int hdrlen;
194 u16 fc; 194 __le16 fc;
195 u16 length;
195 u8 *pos; 196 u8 *pos;
196 197
197 fc = *(u16 *)(skb->data + sizeof(*tx_hdr)); 198 fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
198 tx_hdr->length += WL1251_TKIP_IV_SPACE; 199 length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE;
200 tx_hdr->length = cpu_to_le16(length);
199 201
200 hdrlen = ieee80211_hdrlen(fc); 202 hdrlen = ieee80211_hdrlen(fc);
201 203
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 55856c6bb97a..f40eeb37f5aa 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -109,12 +109,12 @@ struct tx_control {
109 unsigned xfer_pad:1; 109 unsigned xfer_pad:1;
110 110
111 unsigned reserved:7; 111 unsigned reserved:7;
112} __attribute__ ((packed)); 112} __packed;
113 113
114 114
115struct tx_double_buffer_desc { 115struct tx_double_buffer_desc {
116 /* Length of payload, including headers. */ 116 /* Length of payload, including headers. */
117 u16 length; 117 __le16 length;
118 118
119 /* 119 /*
120 * A bit mask that specifies the initial rate to be used 120 * A bit mask that specifies the initial rate to be used
@@ -133,10 +133,10 @@ struct tx_double_buffer_desc {
133 * 0x0800 - 48Mbits 133 * 0x0800 - 48Mbits
134 * 0x1000 - 54Mbits 134 * 0x1000 - 54Mbits
135 */ 135 */
136 u16 rate; 136 __le16 rate;
137 137
138 /* Time in us that a packet can spend in the target */ 138 /* Time in us that a packet can spend in the target */
139 u32 expiry_time; 139 __le32 expiry_time;
140 140
141 /* index of the TX queue used for this packet */ 141 /* index of the TX queue used for this packet */
142 u8 xmit_queue; 142 u8 xmit_queue;
@@ -150,13 +150,13 @@ struct tx_double_buffer_desc {
150 * The FW should cut the packet into fragments 150 * The FW should cut the packet into fragments
151 * of this size. 151 * of this size.
152 */ 152 */
153 u16 frag_threshold; 153 __le16 frag_threshold;
154 154
155 /* Numbers of HW queue blocks to be allocated */ 155 /* Numbers of HW queue blocks to be allocated */
156 u8 num_mem_blocks; 156 u8 num_mem_blocks;
157 157
158 u8 reserved; 158 u8 reserved;
159} __attribute__ ((packed)); 159} __packed;
160 160
161enum { 161enum {
162 TX_SUCCESS = 0, 162 TX_SUCCESS = 0,
@@ -208,7 +208,7 @@ struct tx_result {
208 208
209 /* See done_1 */ 209 /* See done_1 */
210 u8 done_2; 210 u8 done_2;
211} __attribute__ ((packed)); 211} __packed;
212 212
213static inline int wl1251_tx_get_queue(int queue) 213static inline int wl1251_tx_get_queue(int queue)
214{ 214{
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 6f1b6b5640c0..dd3cee6ea5bb 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -33,6 +33,7 @@
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "wl1271_conf.h" 35#include "wl1271_conf.h"
36#include "wl1271_ini.h"
36 37
37#define DRIVER_NAME "wl1271" 38#define DRIVER_NAME "wl1271"
38#define DRIVER_PREFIX DRIVER_NAME ": " 39#define DRIVER_PREFIX DRIVER_NAME ": "
@@ -116,33 +117,6 @@ enum {
116#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) 117#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
117#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) 118#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
118 119
119/* NVS data structure */
120#define WL1271_NVS_SECTION_SIZE 468
121
122#define WL1271_NVS_GENERAL_PARAMS_SIZE 57
123#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \
124 (WL1271_NVS_GENERAL_PARAMS_SIZE + 1)
125#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE 17
126#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \
127 (WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1)
128#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE 65
129#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \
130 (WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1)
131#define WL1271_NVS_FEM_COUNT 2
132#define WL1271_NVS_INI_SPARE_SIZE 124
133
134struct wl1271_nvs_file {
135 /* NVS section */
136 u8 nvs[WL1271_NVS_SECTION_SIZE];
137
138 /* INI section */
139 u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED];
140 u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED];
141 u8 dyn_radio_params[WL1271_NVS_FEM_COUNT]
142 [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED];
143 u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE];
144} __attribute__ ((packed));
145
146/* 120/*
147 * Enable/disable 802.11a support for WL1273 121 * Enable/disable 802.11a support for WL1273
148 */ 122 */
@@ -317,7 +291,7 @@ struct wl1271_fw_status {
317 __le32 tx_released_blks[NUM_TX_QUEUES]; 291 __le32 tx_released_blks[NUM_TX_QUEUES];
318 __le32 fw_localtime; 292 __le32 fw_localtime;
319 __le32 padding[2]; 293 __le32 padding[2];
320} __attribute__ ((packed)); 294} __packed;
321 295
322struct wl1271_rx_mem_pool_addr { 296struct wl1271_rx_mem_pool_addr {
323 u32 addr; 297 u32 addr;
@@ -325,12 +299,11 @@ struct wl1271_rx_mem_pool_addr {
325}; 299};
326 300
327struct wl1271_scan { 301struct wl1271_scan {
302 struct cfg80211_scan_request *req;
303 bool *scanned_ch;
328 u8 state; 304 u8 state;
329 u8 ssid[IW_ESSID_MAX_SIZE+1]; 305 u8 ssid[IW_ESSID_MAX_SIZE+1];
330 size_t ssid_len; 306 size_t ssid_len;
331 u8 active;
332 u8 high_prio;
333 u8 probe_requests;
334}; 307};
335 308
336struct wl1271_if_operations { 309struct wl1271_if_operations {
@@ -368,13 +341,14 @@ struct wl1271 {
368#define WL1271_FLAG_JOINED (2) 341#define WL1271_FLAG_JOINED (2)
369#define WL1271_FLAG_GPIO_POWER (3) 342#define WL1271_FLAG_GPIO_POWER (3)
370#define WL1271_FLAG_TX_QUEUE_STOPPED (4) 343#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
371#define WL1271_FLAG_SCANNING (5) 344#define WL1271_FLAG_IN_ELP (5)
372#define WL1271_FLAG_IN_ELP (6) 345#define WL1271_FLAG_PSM (6)
373#define WL1271_FLAG_PSM (7) 346#define WL1271_FLAG_PSM_REQUESTED (7)
374#define WL1271_FLAG_PSM_REQUESTED (8) 347#define WL1271_FLAG_IRQ_PENDING (8)
375#define WL1271_FLAG_IRQ_PENDING (9) 348#define WL1271_FLAG_IRQ_RUNNING (9)
376#define WL1271_FLAG_IRQ_RUNNING (10) 349#define WL1271_FLAG_IDLE (10)
377#define WL1271_FLAG_IDLE (11) 350#define WL1271_FLAG_IDLE_REQUESTED (11)
351#define WL1271_FLAG_PSPOLL_FAILURE (12)
378 unsigned long flags; 352 unsigned long flags;
379 353
380 struct wl1271_partition_set part; 354 struct wl1271_partition_set part;
@@ -421,6 +395,7 @@ struct wl1271 {
421 395
422 /* Pending TX frames */ 396 /* Pending TX frames */
423 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; 397 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
398 int tx_frames_cnt;
424 399
425 /* Security sequence number counters */ 400 /* Security sequence number counters */
426 u8 tx_security_last_seq; 401 u8 tx_security_last_seq;
@@ -468,6 +443,10 @@ struct wl1271 {
468 443
469 struct completion *elp_compl; 444 struct completion *elp_compl;
470 struct delayed_work elp_work; 445 struct delayed_work elp_work;
446 struct delayed_work pspoll_work;
447
448 /* counter for ps-poll delivery failures */
449 int ps_poll_failures;
471 450
472 /* retry counter for PSM entries */ 451 /* retry counter for PSM entries */
473 u8 psm_entry_retry; 452 u8 psm_entry_retry;
@@ -496,6 +475,9 @@ struct wl1271 {
496 bool sg_enabled; 475 bool sg_enabled;
497 476
498 struct list_head list; 477 struct list_head list;
478
479 /* Most recently reported noise in dBm */
480 s8 noise;
499}; 481};
500 482
501int wl1271_plt_start(struct wl1271 *wl); 483int wl1271_plt_start(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index e19e2f8f1e52..bb245f05af49 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -1075,8 +1075,7 @@ out:
1075 return ret; 1075 return ret;
1076} 1076}
1077 1077
1078int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, 1078int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address)
1079 u8 version)
1080{ 1079{
1081 struct wl1271_acx_arp_filter *acx; 1080 struct wl1271_acx_arp_filter *acx;
1082 int ret; 1081 int ret;
@@ -1089,17 +1088,11 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1089 goto out; 1088 goto out;
1090 } 1089 }
1091 1090
1092 acx->version = version; 1091 acx->version = ACX_IPV4_VERSION;
1093 acx->enable = enable; 1092 acx->enable = enable;
1094 1093
1095 if (enable == true) { 1094 if (enable == true)
1096 if (version == ACX_IPV4_VERSION) 1095 memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
1097 memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE);
1098 else if (version == ACX_IPV6_VERSION)
1099 memcpy(acx->address, address, sizeof(acx->address));
1100 else
1101 wl1271_error("Invalid IP version");
1102 }
1103 1096
1104 ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER, 1097 ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
1105 acx, sizeof(*acx)); 1098 acx, sizeof(*acx));
@@ -1266,3 +1259,29 @@ out:
1266 kfree(acx); 1259 kfree(acx);
1267 return ret; 1260 return ret;
1268} 1261}
1262
1263int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
1264{
1265 struct wl1271_acx_fw_tsf_information *tsf_info;
1266 int ret;
1267
1268 tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
1269 if (!tsf_info) {
1270 ret = -ENOMEM;
1271 goto out;
1272 }
1273
1274 ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO,
1275 tsf_info, sizeof(*tsf_info));
1276 if (ret < 0) {
1277 wl1271_warning("acx tsf info interrogate failed");
1278 goto out;
1279 }
1280
1281 *mactime = le32_to_cpu(tsf_info->current_tsf_low) |
1282 ((u64) le32_to_cpu(tsf_info->current_tsf_high) << 32);
1283
1284out:
1285 kfree(tsf_info);
1286 return ret;
1287}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 420e7e2fc021..4235bc56f750 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -75,7 +75,7 @@ struct acx_header {
75 75
76 /* payload length (not including headers */ 76 /* payload length (not including headers */
77 __le16 len; 77 __le16 len;
78} __attribute__ ((packed)); 78} __packed;
79 79
80struct acx_error_counter { 80struct acx_error_counter {
81 struct acx_header header; 81 struct acx_header header;
@@ -98,7 +98,7 @@ struct acx_error_counter {
98 /* the number of missed sequence numbers in the squentially */ 98 /* the number of missed sequence numbers in the squentially */
99 /* values of frames seq numbers */ 99 /* values of frames seq numbers */
100 __le32 seq_num_miss; 100 __le32 seq_num_miss;
101} __attribute__ ((packed)); 101} __packed;
102 102
103struct acx_revision { 103struct acx_revision {
104 struct acx_header header; 104 struct acx_header header;
@@ -127,7 +127,7 @@ struct acx_revision {
127 * bits 24 - 31: Chip ID - The WiLink chip ID. 127 * bits 24 - 31: Chip ID - The WiLink chip ID.
128 */ 128 */
129 __le32 hw_version; 129 __le32 hw_version;
130} __attribute__ ((packed)); 130} __packed;
131 131
132enum wl1271_psm_mode { 132enum wl1271_psm_mode {
133 /* Active mode */ 133 /* Active mode */
@@ -149,7 +149,7 @@ struct acx_sleep_auth {
149 /* 2 - ELP mode: Deep / Max sleep*/ 149 /* 2 - ELP mode: Deep / Max sleep*/
150 u8 sleep_auth; 150 u8 sleep_auth;
151 u8 padding[3]; 151 u8 padding[3];
152} __attribute__ ((packed)); 152} __packed;
153 153
154enum { 154enum {
155 HOSTIF_PCI_MASTER_HOST_INDIRECT, 155 HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -187,7 +187,7 @@ struct acx_rx_msdu_lifetime {
187 * firmware discards the MSDU. 187 * firmware discards the MSDU.
188 */ 188 */
189 __le32 lifetime; 189 __le32 lifetime;
190} __attribute__ ((packed)); 190} __packed;
191 191
192/* 192/*
193 * RX Config Options Table 193 * RX Config Options Table
@@ -275,13 +275,13 @@ struct acx_rx_config {
275 275
276 __le32 config_options; 276 __le32 config_options;
277 __le32 filter_options; 277 __le32 filter_options;
278} __attribute__ ((packed)); 278} __packed;
279 279
280struct acx_packet_detection { 280struct acx_packet_detection {
281 struct acx_header header; 281 struct acx_header header;
282 282
283 __le32 threshold; 283 __le32 threshold;
284} __attribute__ ((packed)); 284} __packed;
285 285
286 286
287enum acx_slot_type { 287enum acx_slot_type {
@@ -299,7 +299,7 @@ struct acx_slot {
299 u8 wone_index; /* Reserved */ 299 u8 wone_index; /* Reserved */
300 u8 slot_time; 300 u8 slot_time;
301 u8 reserved[6]; 301 u8 reserved[6];
302} __attribute__ ((packed)); 302} __packed;
303 303
304 304
305#define ACX_MC_ADDRESS_GROUP_MAX (8) 305#define ACX_MC_ADDRESS_GROUP_MAX (8)
@@ -312,21 +312,21 @@ struct acx_dot11_grp_addr_tbl {
312 u8 num_groups; 312 u8 num_groups;
313 u8 pad[2]; 313 u8 pad[2];
314 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 314 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
315} __attribute__ ((packed)); 315} __packed;
316 316
317struct acx_rx_timeout { 317struct acx_rx_timeout {
318 struct acx_header header; 318 struct acx_header header;
319 319
320 __le16 ps_poll_timeout; 320 __le16 ps_poll_timeout;
321 __le16 upsd_timeout; 321 __le16 upsd_timeout;
322} __attribute__ ((packed)); 322} __packed;
323 323
324struct acx_rts_threshold { 324struct acx_rts_threshold {
325 struct acx_header header; 325 struct acx_header header;
326 326
327 __le16 threshold; 327 __le16 threshold;
328 u8 pad[2]; 328 u8 pad[2];
329} __attribute__ ((packed)); 329} __packed;
330 330
331struct acx_beacon_filter_option { 331struct acx_beacon_filter_option {
332 struct acx_header header; 332 struct acx_header header;
@@ -342,7 +342,7 @@ struct acx_beacon_filter_option {
342 */ 342 */
343 u8 max_num_beacons; 343 u8 max_num_beacons;
344 u8 pad[2]; 344 u8 pad[2];
345} __attribute__ ((packed)); 345} __packed;
346 346
347/* 347/*
348 * ACXBeaconFilterEntry (not 221) 348 * ACXBeaconFilterEntry (not 221)
@@ -383,21 +383,21 @@ struct acx_beacon_filter_ie_table {
383 u8 num_ie; 383 u8 num_ie;
384 u8 pad[3]; 384 u8 pad[3];
385 u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; 385 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
386} __attribute__ ((packed)); 386} __packed;
387 387
388struct acx_conn_monit_params { 388struct acx_conn_monit_params {
389 struct acx_header header; 389 struct acx_header header;
390 390
391 __le32 synch_fail_thold; /* number of beacons missed */ 391 __le32 synch_fail_thold; /* number of beacons missed */
392 __le32 bss_lose_timeout; /* number of TU's from synch fail */ 392 __le32 bss_lose_timeout; /* number of TU's from synch fail */
393} __attribute__ ((packed)); 393} __packed;
394 394
395struct acx_bt_wlan_coex { 395struct acx_bt_wlan_coex {
396 struct acx_header header; 396 struct acx_header header;
397 397
398 u8 enable; 398 u8 enable;
399 u8 pad[3]; 399 u8 pad[3];
400} __attribute__ ((packed)); 400} __packed;
401 401
402struct acx_bt_wlan_coex_param { 402struct acx_bt_wlan_coex_param {
403 struct acx_header header; 403 struct acx_header header;
@@ -405,7 +405,7 @@ struct acx_bt_wlan_coex_param {
405 __le32 params[CONF_SG_PARAMS_MAX]; 405 __le32 params[CONF_SG_PARAMS_MAX];
406 u8 param_idx; 406 u8 param_idx;
407 u8 padding[3]; 407 u8 padding[3];
408} __attribute__ ((packed)); 408} __packed;
409 409
410struct acx_dco_itrim_params { 410struct acx_dco_itrim_params {
411 struct acx_header header; 411 struct acx_header header;
@@ -413,7 +413,7 @@ struct acx_dco_itrim_params {
413 u8 enable; 413 u8 enable;
414 u8 padding[3]; 414 u8 padding[3];
415 __le32 timeout; 415 __le32 timeout;
416} __attribute__ ((packed)); 416} __packed;
417 417
418struct acx_energy_detection { 418struct acx_energy_detection {
419 struct acx_header header; 419 struct acx_header header;
@@ -422,7 +422,7 @@ struct acx_energy_detection {
422 __le16 rx_cca_threshold; 422 __le16 rx_cca_threshold;
423 u8 tx_energy_detection; 423 u8 tx_energy_detection;
424 u8 pad; 424 u8 pad;
425} __attribute__ ((packed)); 425} __packed;
426 426
427struct acx_beacon_broadcast { 427struct acx_beacon_broadcast {
428 struct acx_header header; 428 struct acx_header header;
@@ -436,14 +436,14 @@ struct acx_beacon_broadcast {
436 /* Consecutive PS Poll failures before updating the host */ 436 /* Consecutive PS Poll failures before updating the host */
437 u8 ps_poll_threshold; 437 u8 ps_poll_threshold;
438 u8 pad[2]; 438 u8 pad[2];
439} __attribute__ ((packed)); 439} __packed;
440 440
441struct acx_event_mask { 441struct acx_event_mask {
442 struct acx_header header; 442 struct acx_header header;
443 443
444 __le32 event_mask; 444 __le32 event_mask;
445 __le32 high_event_mask; /* Unused */ 445 __le32 high_event_mask; /* Unused */
446} __attribute__ ((packed)); 446} __packed;
447 447
448#define CFG_RX_FCS BIT(2) 448#define CFG_RX_FCS BIT(2)
449#define CFG_RX_ALL_GOOD BIT(3) 449#define CFG_RX_ALL_GOOD BIT(3)
@@ -488,14 +488,14 @@ struct acx_feature_config {
488 488
489 __le32 options; 489 __le32 options;
490 __le32 data_flow_options; 490 __le32 data_flow_options;
491} __attribute__ ((packed)); 491} __packed;
492 492
493struct acx_current_tx_power { 493struct acx_current_tx_power {
494 struct acx_header header; 494 struct acx_header header;
495 495
496 u8 current_tx_power; 496 u8 current_tx_power;
497 u8 padding[3]; 497 u8 padding[3];
498} __attribute__ ((packed)); 498} __packed;
499 499
500struct acx_wake_up_condition { 500struct acx_wake_up_condition {
501 struct acx_header header; 501 struct acx_header header;
@@ -503,7 +503,7 @@ struct acx_wake_up_condition {
503 u8 wake_up_event; /* Only one bit can be set */ 503 u8 wake_up_event; /* Only one bit can be set */
504 u8 listen_interval; 504 u8 listen_interval;
505 u8 pad[2]; 505 u8 pad[2];
506} __attribute__ ((packed)); 506} __packed;
507 507
508struct acx_aid { 508struct acx_aid {
509 struct acx_header header; 509 struct acx_header header;
@@ -513,7 +513,7 @@ struct acx_aid {
513 */ 513 */
514 __le16 aid; 514 __le16 aid;
515 u8 pad[2]; 515 u8 pad[2];
516} __attribute__ ((packed)); 516} __packed;
517 517
518enum acx_preamble_type { 518enum acx_preamble_type {
519 ACX_PREAMBLE_LONG = 0, 519 ACX_PREAMBLE_LONG = 0,
@@ -529,7 +529,7 @@ struct acx_preamble {
529 */ 529 */
530 u8 preamble; 530 u8 preamble;
531 u8 padding[3]; 531 u8 padding[3];
532} __attribute__ ((packed)); 532} __packed;
533 533
534enum acx_ctsprotect_type { 534enum acx_ctsprotect_type {
535 CTSPROTECT_DISABLE = 0, 535 CTSPROTECT_DISABLE = 0,
@@ -540,11 +540,11 @@ struct acx_ctsprotect {
540 struct acx_header header; 540 struct acx_header header;
541 u8 ctsprotect; 541 u8 ctsprotect;
542 u8 padding[3]; 542 u8 padding[3];
543} __attribute__ ((packed)); 543} __packed;
544 544
545struct acx_tx_statistics { 545struct acx_tx_statistics {
546 __le32 internal_desc_overflow; 546 __le32 internal_desc_overflow;
547} __attribute__ ((packed)); 547} __packed;
548 548
549struct acx_rx_statistics { 549struct acx_rx_statistics {
550 __le32 out_of_mem; 550 __le32 out_of_mem;
@@ -555,14 +555,14 @@ struct acx_rx_statistics {
555 __le32 xfr_hint_trig; 555 __le32 xfr_hint_trig;
556 __le32 path_reset; 556 __le32 path_reset;
557 __le32 reset_counter; 557 __le32 reset_counter;
558} __attribute__ ((packed)); 558} __packed;
559 559
560struct acx_dma_statistics { 560struct acx_dma_statistics {
561 __le32 rx_requested; 561 __le32 rx_requested;
562 __le32 rx_errors; 562 __le32 rx_errors;
563 __le32 tx_requested; 563 __le32 tx_requested;
564 __le32 tx_errors; 564 __le32 tx_errors;
565} __attribute__ ((packed)); 565} __packed;
566 566
567struct acx_isr_statistics { 567struct acx_isr_statistics {
568 /* host command complete */ 568 /* host command complete */
@@ -621,7 +621,7 @@ struct acx_isr_statistics {
621 621
622 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ 622 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
623 __le32 low_rssi; 623 __le32 low_rssi;
624} __attribute__ ((packed)); 624} __packed;
625 625
626struct acx_wep_statistics { 626struct acx_wep_statistics {
627 /* WEP address keys configured */ 627 /* WEP address keys configured */
@@ -643,7 +643,7 @@ struct acx_wep_statistics {
643 643
644 /* WEP decrypt interrupts */ 644 /* WEP decrypt interrupts */
645 __le32 interrupt; 645 __le32 interrupt;
646} __attribute__ ((packed)); 646} __packed;
647 647
648#define ACX_MISSED_BEACONS_SPREAD 10 648#define ACX_MISSED_BEACONS_SPREAD 10
649 649
@@ -703,12 +703,12 @@ struct acx_pwr_statistics {
703 703
704 /* the number of beacons in awake mode */ 704 /* the number of beacons in awake mode */
705 __le32 rcvd_awake_beacons; 705 __le32 rcvd_awake_beacons;
706} __attribute__ ((packed)); 706} __packed;
707 707
708struct acx_mic_statistics { 708struct acx_mic_statistics {
709 __le32 rx_pkts; 709 __le32 rx_pkts;
710 __le32 calc_failure; 710 __le32 calc_failure;
711} __attribute__ ((packed)); 711} __packed;
712 712
713struct acx_aes_statistics { 713struct acx_aes_statistics {
714 __le32 encrypt_fail; 714 __le32 encrypt_fail;
@@ -717,7 +717,7 @@ struct acx_aes_statistics {
717 __le32 decrypt_packets; 717 __le32 decrypt_packets;
718 __le32 encrypt_interrupt; 718 __le32 encrypt_interrupt;
719 __le32 decrypt_interrupt; 719 __le32 decrypt_interrupt;
720} __attribute__ ((packed)); 720} __packed;
721 721
722struct acx_event_statistics { 722struct acx_event_statistics {
723 __le32 heart_beat; 723 __le32 heart_beat;
@@ -728,7 +728,7 @@ struct acx_event_statistics {
728 __le32 oom_late; 728 __le32 oom_late;
729 __le32 phy_transmit_error; 729 __le32 phy_transmit_error;
730 __le32 tx_stuck; 730 __le32 tx_stuck;
731} __attribute__ ((packed)); 731} __packed;
732 732
733struct acx_ps_statistics { 733struct acx_ps_statistics {
734 __le32 pspoll_timeouts; 734 __le32 pspoll_timeouts;
@@ -738,7 +738,7 @@ struct acx_ps_statistics {
738 __le32 pspoll_max_apturn; 738 __le32 pspoll_max_apturn;
739 __le32 pspoll_utilization; 739 __le32 pspoll_utilization;
740 __le32 upsd_utilization; 740 __le32 upsd_utilization;
741} __attribute__ ((packed)); 741} __packed;
742 742
743struct acx_rxpipe_statistics { 743struct acx_rxpipe_statistics {
744 __le32 rx_prep_beacon_drop; 744 __le32 rx_prep_beacon_drop;
@@ -746,7 +746,7 @@ struct acx_rxpipe_statistics {
746 __le32 beacon_buffer_thres_host_int_trig_rx_data; 746 __le32 beacon_buffer_thres_host_int_trig_rx_data;
747 __le32 missed_beacon_host_int_trig_rx_data; 747 __le32 missed_beacon_host_int_trig_rx_data;
748 __le32 tx_xfr_host_int_trig_rx_data; 748 __le32 tx_xfr_host_int_trig_rx_data;
749} __attribute__ ((packed)); 749} __packed;
750 750
751struct acx_statistics { 751struct acx_statistics {
752 struct acx_header header; 752 struct acx_header header;
@@ -762,7 +762,7 @@ struct acx_statistics {
762 struct acx_event_statistics event; 762 struct acx_event_statistics event;
763 struct acx_ps_statistics ps; 763 struct acx_ps_statistics ps;
764 struct acx_rxpipe_statistics rxpipe; 764 struct acx_rxpipe_statistics rxpipe;
765} __attribute__ ((packed)); 765} __packed;
766 766
767struct acx_rate_class { 767struct acx_rate_class {
768 __le32 enabled_rates; 768 __le32 enabled_rates;
@@ -780,7 +780,7 @@ struct acx_rate_policy {
780 780
781 __le32 rate_class_cnt; 781 __le32 rate_class_cnt;
782 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; 782 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
783} __attribute__ ((packed)); 783} __packed;
784 784
785struct acx_ac_cfg { 785struct acx_ac_cfg {
786 struct acx_header header; 786 struct acx_header header;
@@ -790,7 +790,7 @@ struct acx_ac_cfg {
790 u8 aifsn; 790 u8 aifsn;
791 u8 reserved; 791 u8 reserved;
792 __le16 tx_op_limit; 792 __le16 tx_op_limit;
793} __attribute__ ((packed)); 793} __packed;
794 794
795struct acx_tid_config { 795struct acx_tid_config {
796 struct acx_header header; 796 struct acx_header header;
@@ -801,19 +801,19 @@ struct acx_tid_config {
801 u8 ack_policy; 801 u8 ack_policy;
802 u8 padding[3]; 802 u8 padding[3];
803 __le32 apsd_conf[2]; 803 __le32 apsd_conf[2];
804} __attribute__ ((packed)); 804} __packed;
805 805
806struct acx_frag_threshold { 806struct acx_frag_threshold {
807 struct acx_header header; 807 struct acx_header header;
808 __le16 frag_threshold; 808 __le16 frag_threshold;
809 u8 padding[2]; 809 u8 padding[2];
810} __attribute__ ((packed)); 810} __packed;
811 811
812struct acx_tx_config_options { 812struct acx_tx_config_options {
813 struct acx_header header; 813 struct acx_header header;
814 __le16 tx_compl_timeout; /* msec */ 814 __le16 tx_compl_timeout; /* msec */
815 __le16 tx_compl_threshold; /* number of packets */ 815 __le16 tx_compl_threshold; /* number of packets */
816} __attribute__ ((packed)); 816} __packed;
817 817
818#define ACX_RX_MEM_BLOCKS 70 818#define ACX_RX_MEM_BLOCKS 70
819#define ACX_TX_MIN_MEM_BLOCKS 40 819#define ACX_TX_MIN_MEM_BLOCKS 40
@@ -828,7 +828,7 @@ struct wl1271_acx_config_memory {
828 u8 num_stations; 828 u8 num_stations;
829 u8 num_ssid_profiles; 829 u8 num_ssid_profiles;
830 __le32 total_tx_descriptors; 830 __le32 total_tx_descriptors;
831} __attribute__ ((packed)); 831} __packed;
832 832
833struct wl1271_acx_mem_map { 833struct wl1271_acx_mem_map {
834 struct acx_header header; 834 struct acx_header header;
@@ -872,7 +872,7 @@ struct wl1271_acx_mem_map {
872 u8 *rx_cbuf; 872 u8 *rx_cbuf;
873 __le32 rx_ctrl; 873 __le32 rx_ctrl;
874 __le32 tx_ctrl; 874 __le32 tx_ctrl;
875} __attribute__ ((packed)); 875} __packed;
876 876
877struct wl1271_acx_rx_config_opt { 877struct wl1271_acx_rx_config_opt {
878 struct acx_header header; 878 struct acx_header header;
@@ -882,7 +882,7 @@ struct wl1271_acx_rx_config_opt {
882 __le16 timeout; 882 __le16 timeout;
883 u8 queue_type; 883 u8 queue_type;
884 u8 reserved; 884 u8 reserved;
885} __attribute__ ((packed)); 885} __packed;
886 886
887 887
888struct wl1271_acx_bet_enable { 888struct wl1271_acx_bet_enable {
@@ -891,7 +891,7 @@ struct wl1271_acx_bet_enable {
891 u8 enable; 891 u8 enable;
892 u8 max_consecutive; 892 u8 max_consecutive;
893 u8 padding[2]; 893 u8 padding[2];
894} __attribute__ ((packed)); 894} __packed;
895 895
896#define ACX_IPV4_VERSION 4 896#define ACX_IPV4_VERSION 4
897#define ACX_IPV6_VERSION 6 897#define ACX_IPV6_VERSION 6
@@ -905,7 +905,7 @@ struct wl1271_acx_arp_filter {
905 requests directed to this IP address will pass 905 requests directed to this IP address will pass
906 through. For IPv4, the first four bytes are 906 through. For IPv4, the first four bytes are
907 used. */ 907 used. */
908} __attribute__((packed)); 908} __packed;
909 909
910struct wl1271_acx_pm_config { 910struct wl1271_acx_pm_config {
911 struct acx_header header; 911 struct acx_header header;
@@ -913,14 +913,14 @@ struct wl1271_acx_pm_config {
913 __le32 host_clk_settling_time; 913 __le32 host_clk_settling_time;
914 u8 host_fast_wakeup_support; 914 u8 host_fast_wakeup_support;
915 u8 padding[3]; 915 u8 padding[3];
916} __attribute__ ((packed)); 916} __packed;
917 917
918struct wl1271_acx_keep_alive_mode { 918struct wl1271_acx_keep_alive_mode {
919 struct acx_header header; 919 struct acx_header header;
920 920
921 u8 enabled; 921 u8 enabled;
922 u8 padding[3]; 922 u8 padding[3];
923} __attribute__ ((packed)); 923} __packed;
924 924
925enum { 925enum {
926 ACX_KEEP_ALIVE_NO_TX = 0, 926 ACX_KEEP_ALIVE_NO_TX = 0,
@@ -940,7 +940,7 @@ struct wl1271_acx_keep_alive_config {
940 u8 tpl_validation; 940 u8 tpl_validation;
941 u8 trigger; 941 u8 trigger;
942 u8 padding; 942 u8 padding;
943} __attribute__ ((packed)); 943} __packed;
944 944
945enum { 945enum {
946 WL1271_ACX_TRIG_TYPE_LEVEL = 0, 946 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
@@ -993,6 +993,17 @@ struct wl1271_acx_rssi_snr_avg_weights {
993 u8 snr_data; 993 u8 snr_data;
994}; 994};
995 995
996struct wl1271_acx_fw_tsf_information {
997 struct acx_header header;
998
999 __le32 current_tsf_high;
1000 __le32 current_tsf_low;
1001 __le32 last_bttt_high;
1002 __le32 last_tbtt_low;
1003 u8 last_dtim_count;
1004 u8 padding[3];
1005} __packed;
1006
996enum { 1007enum {
997 ACX_WAKE_UP_CONDITIONS = 0x0002, 1008 ACX_WAKE_UP_CONDITIONS = 0x0002,
998 ACX_MEM_CFG = 0x0003, 1009 ACX_MEM_CFG = 0x0003,
@@ -1106,13 +1117,13 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
1106int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1117int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1107int wl1271_acx_smart_reflex(struct wl1271 *wl); 1118int wl1271_acx_smart_reflex(struct wl1271 *wl);
1108int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); 1119int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1109int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, 1120int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address);
1110 u8 version);
1111int wl1271_acx_pm_config(struct wl1271 *wl); 1121int wl1271_acx_pm_config(struct wl1271 *wl);
1112int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable); 1122int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
1113int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid); 1123int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
1114int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, 1124int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1115 s16 thold, u8 hyst); 1125 s16 thold, u8 hyst);
1116int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); 1126int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
1127int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1117 1128
1118#endif /* __WL1271_ACX_H__ */ 1129#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 1a36d8a2196e..f36430b0336d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -414,7 +414,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
414 PS_REPORT_EVENT_ID | 414 PS_REPORT_EVENT_ID |
415 JOIN_EVENT_COMPLETE_ID | 415 JOIN_EVENT_COMPLETE_ID |
416 DISCONNECT_EVENT_COMPLETE_ID | 416 DISCONNECT_EVENT_COMPLETE_ID |
417 RSSI_SNR_TRIGGER_0_EVENT_ID; 417 RSSI_SNR_TRIGGER_0_EVENT_ID |
418 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
419 SOFT_GEMINI_SENSE_EVENT_ID;
418 420
419 ret = wl1271_event_unmask(wl); 421 ret = wl1271_event_unmask(wl);
420 if (ret < 0) { 422 if (ret < 0) {
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 19393e236e2c..ce503ddd5a41 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -104,100 +104,6 @@ out:
104 return ret; 104 return ret;
105} 105}
106 106
107static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
108{
109 struct wl1271_cmd_cal_channel_tune *cmd;
110 int ret = 0;
111
112 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
113 if (!cmd)
114 return -ENOMEM;
115
116 cmd->test.id = TEST_CMD_CHANNEL_TUNE;
117
118 cmd->band = WL1271_CHANNEL_TUNE_BAND_2_4;
119 /* set up any channel, 7 is in the middle of the range */
120 cmd->channel = 7;
121
122 ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0);
123 if (ret < 0)
124 wl1271_warning("TEST_CMD_CHANNEL_TUNE failed");
125
126 kfree(cmd);
127 return ret;
128}
129
130static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
131{
132 struct wl1271_cmd_cal_update_ref_point *cmd;
133 int ret = 0;
134
135 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
136 if (!cmd)
137 return -ENOMEM;
138
139 cmd->test.id = TEST_CMD_UPDATE_PD_REFERENCE_POINT;
140
141 /* FIXME: still waiting for the correct values */
142 cmd->ref_power = 0;
143 cmd->ref_detector = 0;
144
145 cmd->sub_band = WL1271_PD_REFERENCE_POINT_BAND_B_G;
146
147 ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0);
148 if (ret < 0)
149 wl1271_warning("TEST_CMD_UPDATE_PD_REFERENCE_POINT failed");
150
151 kfree(cmd);
152 return ret;
153}
154
155static int wl1271_cmd_cal_p2g(struct wl1271 *wl)
156{
157 struct wl1271_cmd_cal_p2g *cmd;
158 int ret = 0;
159
160 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
161 if (!cmd)
162 return -ENOMEM;
163
164 cmd->test.id = TEST_CMD_P2G_CAL;
165
166 cmd->sub_band_mask = WL1271_CAL_P2G_BAND_B_G;
167
168 ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0);
169 if (ret < 0)
170 wl1271_warning("TEST_CMD_P2G_CAL failed");
171
172 kfree(cmd);
173 return ret;
174}
175
176static int wl1271_cmd_cal(struct wl1271 *wl)
177{
178 /*
179 * FIXME: we must make sure that we're not sleeping when calibration
180 * is done
181 */
182 int ret;
183
184 wl1271_notice("performing tx calibration");
185
186 ret = wl1271_cmd_cal_channel_tune(wl);
187 if (ret < 0)
188 return ret;
189
190 ret = wl1271_cmd_cal_update_ref_point(wl);
191 if (ret < 0)
192 return ret;
193
194 ret = wl1271_cmd_cal_p2g(wl);
195 if (ret < 0)
196 return ret;
197
198 return ret;
199}
200
201int wl1271_cmd_general_parms(struct wl1271 *wl) 107int wl1271_cmd_general_parms(struct wl1271 *wl)
202{ 108{
203 struct wl1271_general_parms_cmd *gen_parms; 109 struct wl1271_general_parms_cmd *gen_parms;
@@ -212,8 +118,8 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
212 118
213 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; 119 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
214 120
215 memcpy(gen_parms->params, wl->nvs->general_params, 121 memcpy(&gen_parms->general_params, &wl->nvs->general_params,
216 WL1271_NVS_GENERAL_PARAMS_SIZE); 122 sizeof(struct wl1271_ini_general_params));
217 123
218 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 124 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
219 if (ret < 0) 125 if (ret < 0)
@@ -226,7 +132,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
226int wl1271_cmd_radio_parms(struct wl1271 *wl) 132int wl1271_cmd_radio_parms(struct wl1271 *wl)
227{ 133{
228 struct wl1271_radio_parms_cmd *radio_parms; 134 struct wl1271_radio_parms_cmd *radio_parms;
229 struct conf_radio_parms *rparam = &wl->conf.init.radioparam; 135 struct wl1271_ini_general_params *gp = &wl->nvs->general_params;
230 int ret; 136 int ret;
231 137
232 if (!wl->nvs) 138 if (!wl->nvs)
@@ -238,13 +144,20 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
238 144
239 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; 145 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
240 146
241 memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params, 147 /* 2.4GHz parameters */
242 WL1271_NVS_STAT_RADIO_PARAMS_SIZE); 148 memcpy(&radio_parms->static_params_2, &wl->nvs->stat_radio_params_2,
243 memcpy(radio_parms->dyn_radio_params, 149 sizeof(struct wl1271_ini_band_params_2));
244 wl->nvs->dyn_radio_params[rparam->fem], 150 memcpy(&radio_parms->dyn_params_2,
245 WL1271_NVS_DYN_RADIO_PARAMS_SIZE); 151 &wl->nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
246 152 sizeof(struct wl1271_ini_fem_params_2));
247 /* FIXME: current NVS is missing 5GHz parameters */ 153
154 /* 5GHz parameters */
155 memcpy(&radio_parms->static_params_5,
156 &wl->nvs->stat_radio_params_5,
157 sizeof(struct wl1271_ini_band_params_5));
158 memcpy(&radio_parms->dyn_params_5,
159 &wl->nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
160 sizeof(struct wl1271_ini_fem_params_5));
248 161
249 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", 162 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
250 radio_parms, sizeof(*radio_parms)); 163 radio_parms, sizeof(*radio_parms));
@@ -288,20 +201,10 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
288 201
289int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) 202int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
290{ 203{
291 static bool do_cal = true;
292 struct wl1271_cmd_join *join; 204 struct wl1271_cmd_join *join;
293 int ret, i; 205 int ret, i;
294 u8 *bssid; 206 u8 *bssid;
295 207
296 /* FIXME: remove when we get calibration from the factory */
297 if (do_cal) {
298 ret = wl1271_cmd_cal(wl);
299 if (ret < 0)
300 wl1271_warning("couldn't calibrate");
301 else
302 do_cal = false;
303 }
304
305 join = kzalloc(sizeof(*join), GFP_KERNEL); 208 join = kzalloc(sizeof(*join), GFP_KERNEL);
306 if (!join) { 209 if (!join) {
307 ret = -ENOMEM; 210 ret = -ENOMEM;
@@ -329,12 +232,6 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
329 join->channel = wl->channel; 232 join->channel = wl->channel;
330 join->ssid_len = wl->ssid_len; 233 join->ssid_len = wl->ssid_len;
331 memcpy(join->ssid, wl->ssid, wl->ssid_len); 234 memcpy(join->ssid, wl->ssid, wl->ssid_len);
332 join->ctrl = WL1271_JOIN_CMD_CTRL_TX_FLUSH;
333
334 /* increment the session counter */
335 wl->session_counter++;
336 if (wl->session_counter >= SESSION_COUNTER_MAX)
337 wl->session_counter = 0;
338 235
339 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; 236 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
340 237
@@ -517,7 +414,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
517 ps_params->send_null_data = send; 414 ps_params->send_null_data = send;
518 ps_params->retries = 5; 415 ps_params->retries = 5;
519 ps_params->hang_over_period = 1; 416 ps_params->hang_over_period = 1;
520 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */ 417 ps_params->null_data_rate = cpu_to_le32(wl->basic_rate_set);
521 418
522 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 419 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
523 sizeof(*ps_params), 0); 420 sizeof(*ps_params), 0);
@@ -566,140 +463,6 @@ out:
566 return ret; 463 return ret;
567} 464}
568 465
569int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
570 const u8 *ie, size_t ie_len, u8 active_scan,
571 u8 high_prio, u8 band, u8 probe_requests)
572{
573
574 struct wl1271_cmd_trigger_scan_to *trigger = NULL;
575 struct wl1271_cmd_scan *params = NULL;
576 struct ieee80211_channel *channels;
577 u32 rate;
578 int i, j, n_ch, ret;
579 u16 scan_options = 0;
580 u8 ieee_band;
581
582 if (band == WL1271_SCAN_BAND_2_4_GHZ) {
583 ieee_band = IEEE80211_BAND_2GHZ;
584 rate = wl->conf.tx.basic_rate;
585 } else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) {
586 ieee_band = IEEE80211_BAND_2GHZ;
587 rate = wl->conf.tx.basic_rate;
588 } else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) {
589 ieee_band = IEEE80211_BAND_5GHZ;
590 rate = wl->conf.tx.basic_rate_5;
591 } else
592 return -EINVAL;
593
594 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
595 return -EINVAL;
596
597 channels = wl->hw->wiphy->bands[ieee_band]->channels;
598 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
599
600 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
601 return -EINVAL;
602
603 params = kzalloc(sizeof(*params), GFP_KERNEL);
604 if (!params)
605 return -ENOMEM;
606
607 params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
608 params->params.rx_filter_options =
609 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
610
611 if (!active_scan)
612 scan_options |= WL1271_SCAN_OPT_PASSIVE;
613 if (high_prio)
614 scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH;
615 params->params.scan_options = cpu_to_le16(scan_options);
616
617 params->params.num_probe_requests = probe_requests;
618 params->params.tx_rate = cpu_to_le32(rate);
619 params->params.tid_trigger = 0;
620 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
621
622 if (band == WL1271_SCAN_BAND_DUAL)
623 params->params.band = WL1271_SCAN_BAND_2_4_GHZ;
624 else
625 params->params.band = band;
626
627 for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) {
628 if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) {
629 params->channels[j].min_duration =
630 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
631 params->channels[j].max_duration =
632 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
633 memset(&params->channels[j].bssid_lsb, 0xff, 4);
634 memset(&params->channels[j].bssid_msb, 0xff, 2);
635 params->channels[j].early_termination = 0;
636 params->channels[j].tx_power_att =
637 WL1271_SCAN_CURRENT_TX_PWR;
638 params->channels[j].channel = channels[i].hw_value;
639 j++;
640 }
641 }
642
643 params->params.num_channels = j;
644
645 if (ssid_len && ssid) {
646 params->params.ssid_len = ssid_len;
647 memcpy(params->params.ssid, ssid, ssid_len);
648 }
649
650 ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
651 ie, ie_len, ieee_band);
652 if (ret < 0) {
653 wl1271_error("PROBE request template failed");
654 goto out;
655 }
656
657 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
658 if (!trigger) {
659 ret = -ENOMEM;
660 goto out;
661 }
662
663 /* disable the timeout */
664 trigger->timeout = 0;
665
666 ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
667 sizeof(*trigger), 0);
668 if (ret < 0) {
669 wl1271_error("trigger scan to failed for hw scan");
670 goto out;
671 }
672
673 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
674
675 set_bit(WL1271_FLAG_SCANNING, &wl->flags);
676 if (wl1271_11a_enabled()) {
677 wl->scan.state = band;
678 if (band == WL1271_SCAN_BAND_DUAL) {
679 wl->scan.active = active_scan;
680 wl->scan.high_prio = high_prio;
681 wl->scan.probe_requests = probe_requests;
682 if (ssid_len && ssid) {
683 wl->scan.ssid_len = ssid_len;
684 memcpy(wl->scan.ssid, ssid, ssid_len);
685 } else
686 wl->scan.ssid_len = 0;
687 }
688 }
689
690 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
691 if (ret < 0) {
692 wl1271_error("SCAN failed");
693 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
694 goto out;
695 }
696
697out:
698 kfree(params);
699 kfree(trigger);
700 return ret;
701}
702
703int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 466int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
704 void *buf, size_t buf_len, int index, u32 rates) 467 void *buf, size_t buf_len, int index, u32 rates)
705{ 468{
@@ -804,7 +567,7 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
804 goto out; 567 goto out;
805 568
806 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data, 569 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
807 skb->len, 0, wl->basic_rate); 570 skb->len, 0, wl->basic_rate_set);
808 571
809out: 572out:
810 dev_kfree_skb(skb); 573 dev_kfree_skb(skb);
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index f2820b42a943..af577ee8eb02 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -41,9 +41,6 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send); 41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
43 size_t len); 43 size_t len);
44int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
45 const u8 *ie, size_t ie_len, u8 active_scan,
46 u8 high_prio, u8 band, u8 probe_requests);
47int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 44int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
48 void *buf, size_t buf_len, int index, u32 rates); 45 void *buf, size_t buf_len, int index, u32 rates);
49int wl1271_cmd_build_null_data(struct wl1271 *wl); 46int wl1271_cmd_build_null_data(struct wl1271 *wl);
@@ -136,14 +133,14 @@ struct wl1271_cmd_header {
136 __le16 status; 133 __le16 status;
137 /* payload */ 134 /* payload */
138 u8 data[0]; 135 u8 data[0];
139} __attribute__ ((packed)); 136} __packed;
140 137
141#define WL1271_CMD_MAX_PARAMS 572 138#define WL1271_CMD_MAX_PARAMS 572
142 139
143struct wl1271_command { 140struct wl1271_command {
144 struct wl1271_cmd_header header; 141 struct wl1271_cmd_header header;
145 u8 parameters[WL1271_CMD_MAX_PARAMS]; 142 u8 parameters[WL1271_CMD_MAX_PARAMS];
146} __attribute__ ((packed)); 143} __packed;
147 144
148enum { 145enum {
149 CMD_MAILBOX_IDLE = 0, 146 CMD_MAILBOX_IDLE = 0,
@@ -196,7 +193,7 @@ struct cmd_read_write_memory {
196 of this field is the Host in WRITE command or the Wilink in READ 193 of this field is the Host in WRITE command or the Wilink in READ
197 command. */ 194 command. */
198 u8 value[MAX_READ_SIZE]; 195 u8 value[MAX_READ_SIZE];
199} __attribute__ ((packed)); 196} __packed;
200 197
201#define CMDMBOX_HEADER_LEN 4 198#define CMDMBOX_HEADER_LEN 4
202#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 199#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -243,14 +240,14 @@ struct wl1271_cmd_join {
243 u8 ssid[IW_ESSID_MAX_SIZE]; 240 u8 ssid[IW_ESSID_MAX_SIZE];
244 u8 ctrl; /* JOIN_CMD_CTRL_* */ 241 u8 ctrl; /* JOIN_CMD_CTRL_* */
245 u8 reserved[3]; 242 u8 reserved[3];
246} __attribute__ ((packed)); 243} __packed;
247 244
248struct cmd_enabledisable_path { 245struct cmd_enabledisable_path {
249 struct wl1271_cmd_header header; 246 struct wl1271_cmd_header header;
250 247
251 u8 channel; 248 u8 channel;
252 u8 padding[3]; 249 u8 padding[3];
253} __attribute__ ((packed)); 250} __packed;
254 251
255#define WL1271_RATE_AUTOMATIC 0 252#define WL1271_RATE_AUTOMATIC 0
256 253
@@ -266,7 +263,7 @@ struct wl1271_cmd_template_set {
266 u8 aflags; 263 u8 aflags;
267 u8 reserved; 264 u8 reserved;
268 u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; 265 u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE];
269} __attribute__ ((packed)); 266} __packed;
270 267
271#define TIM_ELE_ID 5 268#define TIM_ELE_ID 5
272#define PARTIAL_VBM_MAX 251 269#define PARTIAL_VBM_MAX 251
@@ -278,7 +275,7 @@ struct wl1271_tim {
278 u8 dtim_period; 275 u8 dtim_period;
279 u8 bitmap_ctrl; 276 u8 bitmap_ctrl;
280 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ 277 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
281} __attribute__ ((packed)); 278} __packed;
282 279
283enum wl1271_cmd_ps_mode { 280enum wl1271_cmd_ps_mode {
284 STATION_ACTIVE_MODE, 281 STATION_ACTIVE_MODE,
@@ -298,7 +295,7 @@ struct wl1271_cmd_ps_params {
298 */ 295 */
299 u8 hang_over_period; 296 u8 hang_over_period;
300 __le32 null_data_rate; 297 __le32 null_data_rate;
301} __attribute__ ((packed)); 298} __packed;
302 299
303/* HW encryption keys */ 300/* HW encryption keys */
304#define NUM_ACCESS_CATEGORIES_COPY 4 301#define NUM_ACCESS_CATEGORIES_COPY 4
@@ -348,77 +345,12 @@ struct wl1271_cmd_set_keys {
348 u8 key[MAX_KEY_SIZE]; 345 u8 key[MAX_KEY_SIZE];
349 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; 346 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
350 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 347 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
351} __attribute__ ((packed)); 348} __packed;
352
353
354#define WL1271_SCAN_MAX_CHANNELS 24
355#define WL1271_SCAN_DEFAULT_TAG 1
356#define WL1271_SCAN_CURRENT_TX_PWR 0
357#define WL1271_SCAN_OPT_ACTIVE 0
358#define WL1271_SCAN_OPT_PASSIVE 1
359#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
360#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */
361#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */
362#define WL1271_SCAN_BAND_2_4_GHZ 0
363#define WL1271_SCAN_BAND_5_GHZ 1
364#define WL1271_SCAN_BAND_DUAL 2
365
366struct basic_scan_params {
367 __le32 rx_config_options;
368 __le32 rx_filter_options;
369 /* Scan option flags (WL1271_SCAN_OPT_*) */
370 __le16 scan_options;
371 /* Number of scan channels in the list (maximum 30) */
372 u8 num_channels;
373 /* This field indicates the number of probe requests to send
374 per channel for an active scan */
375 u8 num_probe_requests;
376 /* Rate bit field for sending the probes */
377 __le32 tx_rate;
378 u8 tid_trigger;
379 u8 ssid_len;
380 /* in order to align */
381 u8 padding1[2];
382 u8 ssid[IW_ESSID_MAX_SIZE];
383 /* Band to scan */
384 u8 band;
385 u8 use_ssid_list;
386 u8 scan_tag;
387 u8 padding2;
388} __attribute__ ((packed));
389
390struct basic_scan_channel_params {
391 /* Duration in TU to wait for frames on a channel for active scan */
392 __le32 min_duration;
393 __le32 max_duration;
394 __le32 bssid_lsb;
395 __le16 bssid_msb;
396 u8 early_termination;
397 u8 tx_power_att;
398 u8 channel;
399 /* FW internal use only! */
400 u8 dfs_candidate;
401 u8 activity_detected;
402 u8 pad;
403} __attribute__ ((packed));
404
405struct wl1271_cmd_scan {
406 struct wl1271_cmd_header header;
407
408 struct basic_scan_params params;
409 struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
410} __attribute__ ((packed));
411
412struct wl1271_cmd_trigger_scan_to {
413 struct wl1271_cmd_header header;
414
415 __le32 timeout;
416} __attribute__ ((packed));
417 349
418struct wl1271_cmd_test_header { 350struct wl1271_cmd_test_header {
419 u8 id; 351 u8 id;
420 u8 padding[3]; 352 u8 padding[3];
421} __attribute__ ((packed)); 353} __packed;
422 354
423enum wl1271_channel_tune_bands { 355enum wl1271_channel_tune_bands {
424 WL1271_CHANNEL_TUNE_BAND_2_4, 356 WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -439,25 +371,31 @@ struct wl1271_general_parms_cmd {
439 371
440 struct wl1271_cmd_test_header test; 372 struct wl1271_cmd_test_header test;
441 373
442 u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE]; 374 struct wl1271_ini_general_params general_params;
443 s8 reserved[23];
444} __attribute__ ((packed));
445 375
446#define WL1271_STAT_RADIO_PARAMS_5_SIZE 29 376 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
447#define WL1271_DYN_RADIO_PARAMS_5_SIZE 104 377 u8 sr_sen_n_p;
378 u8 sr_sen_n_p_gain;
379 u8 sr_sen_nrn;
380 u8 sr_sen_prn;
381 u8 padding[3];
382} __packed;
448 383
449struct wl1271_radio_parms_cmd { 384struct wl1271_radio_parms_cmd {
450 struct wl1271_cmd_header header; 385 struct wl1271_cmd_header header;
451 386
452 struct wl1271_cmd_test_header test; 387 struct wl1271_cmd_test_header test;
453 388
454 u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE]; 389 /* Static radio parameters */
455 u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE]; 390 struct wl1271_ini_band_params_2 static_params_2;
391 struct wl1271_ini_band_params_5 static_params_5;
456 392
457 u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE]; 393 /* Dynamic radio parameters */
458 u8 reserved; 394 struct wl1271_ini_fem_params_2 dyn_params_2;
459 u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE]; 395 u8 padding2;
460} __attribute__ ((packed)); 396 struct wl1271_ini_fem_params_5 dyn_params_5;
397 u8 padding3[2];
398} __packed;
461 399
462struct wl1271_cmd_cal_channel_tune { 400struct wl1271_cmd_cal_channel_tune {
463 struct wl1271_cmd_header header; 401 struct wl1271_cmd_header header;
@@ -468,7 +406,7 @@ struct wl1271_cmd_cal_channel_tune {
468 u8 channel; 406 u8 channel;
469 407
470 __le16 radio_status; 408 __le16 radio_status;
471} __attribute__ ((packed)); 409} __packed;
472 410
473struct wl1271_cmd_cal_update_ref_point { 411struct wl1271_cmd_cal_update_ref_point {
474 struct wl1271_cmd_header header; 412 struct wl1271_cmd_header header;
@@ -479,7 +417,7 @@ struct wl1271_cmd_cal_update_ref_point {
479 __le32 ref_detector; 417 __le32 ref_detector;
480 u8 sub_band; 418 u8 sub_band;
481 u8 padding[3]; 419 u8 padding[3];
482} __attribute__ ((packed)); 420} __packed;
483 421
484#define MAX_TLV_LENGTH 400 422#define MAX_TLV_LENGTH 400
485#define MAX_NVS_VERSION_LENGTH 12 423#define MAX_NVS_VERSION_LENGTH 12
@@ -501,7 +439,7 @@ struct wl1271_cmd_cal_p2g {
501 439
502 u8 sub_band_mask; 440 u8 sub_band_mask;
503 u8 padding2; 441 u8 padding2;
504} __attribute__ ((packed)); 442} __packed;
505 443
506 444
507/* 445/*
@@ -529,6 +467,6 @@ struct wl1271_cmd_disconnect {
529 u8 type; 467 u8 type;
530 468
531 u8 padding; 469 u8 padding;
532} __attribute__ ((packed)); 470} __packed;
533 471
534#endif /* __WL1271_CMD_H__ */ 472#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index d046d044b5bd..0435ffda8f73 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -874,6 +874,13 @@ struct conf_conn_settings {
874 u8 ps_poll_threshold; 874 u8 ps_poll_threshold;
875 875
876 /* 876 /*
877 * PS Poll failure recovery ACTIVE period length
878 *
879 * Range: u32 (ms)
880 */
881 u32 ps_poll_recovery_period;
882
883 /*
877 * Configuration of signal average weights. 884 * Configuration of signal average weights.
878 */ 885 */
879 struct conf_sig_weights sig_weights; 886 struct conf_sig_weights sig_weights;
@@ -948,14 +955,6 @@ struct conf_radio_parms {
948 u8 fem; 955 u8 fem;
949}; 956};
950 957
951struct conf_init_settings {
952 /*
953 * Configure radio parameters.
954 */
955 struct conf_radio_parms radioparam;
956
957};
958
959struct conf_itrim_settings { 958struct conf_itrim_settings {
960 /* enable dco itrim */ 959 /* enable dco itrim */
961 u8 enable; 960 u8 enable;
@@ -1022,7 +1021,6 @@ struct conf_drv_settings {
1022 struct conf_rx_settings rx; 1021 struct conf_rx_settings rx;
1023 struct conf_tx_settings tx; 1022 struct conf_tx_settings tx;
1024 struct conf_conn_settings conn; 1023 struct conf_conn_settings conn;
1025 struct conf_init_settings init;
1026 struct conf_itrim_settings itrim; 1024 struct conf_itrim_settings itrim;
1027 struct conf_pm_config_settings pm_config; 1025 struct conf_pm_config_settings pm_config;
1028 struct conf_roam_trigger_settings roam_trigger; 1026 struct conf_roam_trigger_settings roam_trigger;
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index cf37aa6eb137..25ce2cd5e3f3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -26,36 +26,64 @@
26#include "wl1271_io.h" 26#include "wl1271_io.h"
27#include "wl1271_event.h" 27#include "wl1271_event.h"
28#include "wl1271_ps.h" 28#include "wl1271_ps.h"
29#include "wl1271_scan.h"
29#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
30 31
31static int wl1271_event_scan_complete(struct wl1271 *wl, 32void wl1271_pspoll_work(struct work_struct *work)
32 struct event_mailbox *mbox)
33{ 33{
34 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 34 struct delayed_work *dwork;
35 mbox->scheduled_scan_status); 35 struct wl1271 *wl;
36 36
37 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) { 37 dwork = container_of(work, struct delayed_work, work);
38 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { 38 wl = container_of(dwork, struct wl1271, pspoll_work);
39 /* 2.4 GHz band scanned, scan 5 GHz band, pretend 39
40 * to the wl1271_cmd_scan function that we are not 40 wl1271_debug(DEBUG_EVENT, "pspoll work");
41 * scanning as it checks that. 41
42 */ 42 mutex_lock(&wl->mutex);
43 clear_bit(WL1271_FLAG_SCANNING, &wl->flags); 43
44 /* FIXME: ie missing! */ 44 if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
45 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, 45 goto out;
46 NULL, 0, 46
47 wl->scan.active, 47 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
48 wl->scan.high_prio, 48 goto out;
49 WL1271_SCAN_BAND_5_GHZ, 49
50 wl->scan.probe_requests); 50 /*
51 } else { 51 * if we end up here, then we were in powersave when the pspoll
52 mutex_unlock(&wl->mutex); 52 * delivery failure occurred, and no-one changed state since, so
53 ieee80211_scan_completed(wl->hw, false); 53 * we should go back to powersave.
54 mutex_lock(&wl->mutex); 54 */
55 clear_bit(WL1271_FLAG_SCANNING, &wl->flags); 55 wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, true);
56 } 56
57out:
58 mutex_unlock(&wl->mutex);
59};
60
61static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
62{
63 int delay = wl->conf.conn.ps_poll_recovery_period;
64 int ret;
65
66 wl->ps_poll_failures++;
67 if (wl->ps_poll_failures == 1)
68 wl1271_info("AP with dysfunctional ps-poll, "
69 "trying to work around it.");
70
71 /* force active mode receive data from the AP */
72 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
73 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, true);
74 if (ret < 0)
75 return;
76 set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
77 ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
78 msecs_to_jiffies(delay));
57 } 79 }
58 return 0; 80
81 /*
82 * If already in active mode, lets we should be getting data from
83 * the AP right away. If we enter PSM too fast after this, and data
84 * remains on the AP, we will get another event like this, and we'll
85 * go into active once more.
86 */
59} 87}
60 88
61static int wl1271_event_ps_report(struct wl1271 *wl, 89static int wl1271_event_ps_report(struct wl1271 *wl,
@@ -163,9 +191,19 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
163 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); 191 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
164 192
165 if (vector & SCAN_COMPLETE_EVENT_ID) { 193 if (vector & SCAN_COMPLETE_EVENT_ID) {
166 ret = wl1271_event_scan_complete(wl, mbox); 194 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
167 if (ret < 0) 195 mbox->scheduled_scan_status);
168 return ret; 196
197 wl1271_scan_stm(wl);
198 }
199
200 /* disable dynamic PS when requested by the firmware */
201 if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
202 wl->bss_type == BSS_TYPE_STA_BSS) {
203 if (mbox->soft_gemini_sense_info)
204 ieee80211_disable_dyn_ps(wl->vif);
205 else
206 ieee80211_enable_dyn_ps(wl->vif);
169 } 207 }
170 208
171 /* 209 /*
@@ -191,6 +229,9 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
191 return ret; 229 return ret;
192 } 230 }
193 231
232 if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
233 wl1271_event_pspoll_delivery_fail(wl);
234
194 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { 235 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
195 wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT"); 236 wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
196 if (wl->vif) 237 if (wl->vif)
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 58371008f270..e4751667cf5e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -85,7 +85,7 @@ struct event_debug_report {
85 __le32 report_1; 85 __le32 report_1;
86 __le32 report_2; 86 __le32 report_2;
87 __le32 report_3; 87 __le32 report_3;
88} __attribute__ ((packed)); 88} __packed;
89 89
90#define NUM_OF_RSSI_SNR_TRIGGERS 8 90#define NUM_OF_RSSI_SNR_TRIGGERS 8
91 91
@@ -116,10 +116,11 @@ struct event_mailbox {
116 u8 ps_status; 116 u8 ps_status;
117 117
118 u8 reserved_5[29]; 118 u8 reserved_5[29];
119} __attribute__ ((packed)); 119} __packed;
120 120
121int wl1271_event_unmask(struct wl1271 *wl); 121int wl1271_event_unmask(struct wl1271 *wl);
122void wl1271_event_mbox_config(struct wl1271 *wl); 122void wl1271_event_mbox_config(struct wl1271 *wl);
123int wl1271_event_handle(struct wl1271 *wl, u8 mbox); 123int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
124void wl1271_pspoll_work(struct work_struct *work);
124 125
125#endif 126#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/wl1271_ini.h
new file mode 100644
index 000000000000..2313047d4015
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_ini.h
@@ -0,0 +1,123 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __WL1271_INI_H__
25#define __WL1271_INI_H__
26
27#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
28
29struct wl1271_ini_general_params {
30 u8 ref_clock;
31 u8 settling_time;
32 u8 clk_valid_on_wakeup;
33 u8 dc2dc_mode;
34 u8 dual_mode_select;
35 u8 tx_bip_fem_auto_detect;
36 u8 tx_bip_fem_manufacturer;
37 u8 general_settings;
38 u8 sr_state;
39 u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM];
40 u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM];
41 u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
42} __packed;
43
44#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15
45
46struct wl1271_ini_band_params_2 {
47 u8 rx_trace_insertion_loss;
48 u8 tx_trace_loss;
49 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
50} __packed;
51
52#define WL1271_INI_RATE_GROUP_COUNT 6
53#define WL1271_INI_CHANNEL_COUNT_2 14
54
55struct wl1271_ini_fem_params_2 {
56 __le16 tx_bip_ref_pd_voltage;
57 u8 tx_bip_ref_power;
58 u8 tx_bip_ref_offset;
59 u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT];
60 u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT];
61 u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT];
62 u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2];
63 u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2];
64 u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT];
65 u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT];
66 u8 rx_fem_insertion_loss;
67 u8 degraded_low_to_normal_thr;
68 u8 normal_to_degraded_high_thr;
69} __packed;
70
71#define WL1271_INI_CHANNEL_COUNT_5 35
72#define WL1271_INI_SUB_BAND_COUNT_5 7
73
74struct wl1271_ini_band_params_5 {
75 u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
76 u8 tx_trace_loss[WL1271_INI_SUB_BAND_COUNT_5];
77 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
78} __packed;
79
80struct wl1271_ini_fem_params_5 {
81 __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
82 u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5];
83 u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5];
84 u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT];
85 u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT];
86 u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT];
87 u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5];
88 u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT];
89 u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT];
90 u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
91 u8 degraded_low_to_normal_thr;
92 u8 normal_to_degraded_high_thr;
93} __packed;
94
95
96/* NVS data structure */
97#define WL1271_INI_NVS_SECTION_SIZE 468
98#define WL1271_INI_FEM_MODULE_COUNT 2
99
100#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
101
102struct wl1271_nvs_file {
103 /* NVS section */
104 u8 nvs[WL1271_INI_NVS_SECTION_SIZE];
105
106 /* INI section */
107 struct wl1271_ini_general_params general_params;
108 u8 padding1;
109 struct wl1271_ini_band_params_2 stat_radio_params_2;
110 u8 padding2;
111 struct {
112 struct wl1271_ini_fem_params_2 params;
113 u8 padding;
114 } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
115 struct wl1271_ini_band_params_5 stat_radio_params_5;
116 u8 padding3;
117 struct {
118 struct wl1271_ini_fem_params_5 params;
119 u8 padding;
120 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
121} __packed;
122
123#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b7d9137851ac..9d68f0012f05 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -28,7 +28,6 @@
28#include <linux/crc32.h> 28#include <linux/crc32.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
31#include <linux/inetdevice.h>
32#include <linux/platform_device.h> 31#include <linux/platform_device.h>
33#include <linux/slab.h> 32#include <linux/slab.h>
34 33
@@ -45,6 +44,7 @@
45#include "wl1271_cmd.h" 44#include "wl1271_cmd.h"
46#include "wl1271_boot.h" 45#include "wl1271_boot.h"
47#include "wl1271_testmode.h" 46#include "wl1271_testmode.h"
47#include "wl1271_scan.h"
48 48
49#define WL1271_BOOT_RETRIES 3 49#define WL1271_BOOT_RETRIES 3
50 50
@@ -55,7 +55,7 @@ static struct conf_drv_settings default_conf = {
55 [CONF_SG_HV3_MAX_OVERRIDE] = 0, 55 [CONF_SG_HV3_MAX_OVERRIDE] = 0,
56 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400, 56 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
57 [CONF_SG_BT_LOAD_RATIO] = 50, 57 [CONF_SG_BT_LOAD_RATIO] = 50,
58 [CONF_SG_AUTO_PS_MODE] = 0, 58 [CONF_SG_AUTO_PS_MODE] = 1,
59 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, 59 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
60 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, 60 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
61 [CONF_SG_ANTENNA_CONFIGURATION] = 0, 61 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
@@ -234,18 +234,14 @@ static struct conf_drv_settings default_conf = {
234 .beacon_rx_timeout = 10000, 234 .beacon_rx_timeout = 10000,
235 .broadcast_timeout = 20000, 235 .broadcast_timeout = 20000,
236 .rx_broadcast_in_ps = 1, 236 .rx_broadcast_in_ps = 1,
237 .ps_poll_threshold = 20, 237 .ps_poll_threshold = 10,
238 .ps_poll_recovery_period = 700,
238 .bet_enable = CONF_BET_MODE_ENABLE, 239 .bet_enable = CONF_BET_MODE_ENABLE,
239 .bet_max_consecutive = 10, 240 .bet_max_consecutive = 10,
240 .psm_entry_retries = 3, 241 .psm_entry_retries = 3,
241 .keep_alive_interval = 55000, 242 .keep_alive_interval = 55000,
242 .max_listen_interval = 20, 243 .max_listen_interval = 20,
243 }, 244 },
244 .init = {
245 .radioparam = {
246 .fem = 1,
247 }
248 },
249 .itrim = { 245 .itrim = {
250 .enable = false, 246 .enable = false,
251 .timeout = 50000, 247 .timeout = 50000,
@@ -566,14 +562,21 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
566 return ret; 562 return ret;
567 } 563 }
568 564
569 if (fw->size != sizeof(struct wl1271_nvs_file)) { 565 /*
566 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
567 * configurations) can be removed when those NVS files stop floating
568 * around.
569 */
570 if (fw->size != sizeof(struct wl1271_nvs_file) &&
571 (fw->size != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
572 wl1271_11a_enabled())) {
570 wl1271_error("nvs size is not as expected: %zu != %zu", 573 wl1271_error("nvs size is not as expected: %zu != %zu",
571 fw->size, sizeof(struct wl1271_nvs_file)); 574 fw->size, sizeof(struct wl1271_nvs_file));
572 ret = -EILSEQ; 575 ret = -EILSEQ;
573 goto out; 576 goto out;
574 } 577 }
575 578
576 wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); 579 wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL);
577 580
578 if (!wl->nvs) { 581 if (!wl->nvs) {
579 wl1271_error("could not allocate memory for the nvs file"); 582 wl1271_error("could not allocate memory for the nvs file");
@@ -581,8 +584,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
581 goto out; 584 goto out;
582 } 585 }
583 586
584 memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
585
586out: 587out:
587 release_firmware(fw); 588 release_firmware(fw);
588 589
@@ -811,93 +812,6 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
811 return NETDEV_TX_OK; 812 return NETDEV_TX_OK;
812} 813}
813 814
814static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
815 void *arg)
816{
817 struct net_device *dev;
818 struct wireless_dev *wdev;
819 struct wiphy *wiphy;
820 struct ieee80211_hw *hw;
821 struct wl1271 *wl;
822 struct wl1271 *wl_temp;
823 struct in_device *idev;
824 struct in_ifaddr *ifa = arg;
825 int ret = 0;
826
827 /* FIXME: this ugly function should probably be implemented in the
828 * mac80211, and here should only be a simple callback handling actual
829 * setting of the filters. Now we need to dig up references to
830 * various structures to gain access to what we need.
831 * Also, because of this, there is no "initial" setting of the filter
832 * in "op_start", because we don't want to dig up struct net_device
833 * there - the filter will be set upon first change of the interface
834 * IP address. */
835
836 dev = ifa->ifa_dev->dev;
837
838 wdev = dev->ieee80211_ptr;
839 if (wdev == NULL)
840 return NOTIFY_DONE;
841
842 wiphy = wdev->wiphy;
843 if (wiphy == NULL)
844 return NOTIFY_DONE;
845
846 hw = wiphy_priv(wiphy);
847 if (hw == NULL)
848 return NOTIFY_DONE;
849
850 /* Check that the interface is one supported by this driver. */
851 wl_temp = hw->priv;
852 list_for_each_entry(wl, &wl_list, list) {
853 if (wl == wl_temp)
854 break;
855 }
856 if (wl != wl_temp)
857 return NOTIFY_DONE;
858
859 /* Get the interface IP address for the device. "ifa" will become
860 NULL if:
861 - there is no IPV4 protocol address configured
862 - there are multiple (virtual) IPV4 addresses configured
863 When "ifa" is NULL, filtering will be disabled.
864 */
865 ifa = NULL;
866 idev = dev->ip_ptr;
867 if (idev)
868 ifa = idev->ifa_list;
869
870 if (ifa && ifa->ifa_next)
871 ifa = NULL;
872
873 mutex_lock(&wl->mutex);
874
875 if (wl->state == WL1271_STATE_OFF)
876 goto out;
877
878 ret = wl1271_ps_elp_wakeup(wl, false);
879 if (ret < 0)
880 goto out;
881 if (ifa)
882 ret = wl1271_acx_arp_ip_filter(wl, true,
883 (u8 *)&ifa->ifa_address,
884 ACX_IPV4_VERSION);
885 else
886 ret = wl1271_acx_arp_ip_filter(wl, false, NULL,
887 ACX_IPV4_VERSION);
888 wl1271_ps_elp_sleep(wl);
889
890out:
891 mutex_unlock(&wl->mutex);
892
893 return NOTIFY_OK;
894}
895
896static struct notifier_block wl1271_dev_notifier = {
897 .notifier_call = wl1271_dev_notify,
898};
899
900
901static int wl1271_op_start(struct ieee80211_hw *hw) 815static int wl1271_op_start(struct ieee80211_hw *hw)
902{ 816{
903 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 817 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -925,6 +839,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
925 struct ieee80211_vif *vif) 839 struct ieee80211_vif *vif)
926{ 840{
927 struct wl1271 *wl = hw->priv; 841 struct wl1271 *wl = hw->priv;
842 struct wiphy *wiphy = hw->wiphy;
928 int retries = WL1271_BOOT_RETRIES; 843 int retries = WL1271_BOOT_RETRIES;
929 int ret = 0; 844 int ret = 0;
930 845
@@ -978,6 +893,12 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
978 893
979 wl->state = WL1271_STATE_ON; 894 wl->state = WL1271_STATE_ON;
980 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 895 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
896
897 /* update hw/fw version info in wiphy struct */
898 wiphy->hw_version = wl->chip.id;
899 strncpy(wiphy->fw_version, wl->chip.fw_ver,
900 sizeof(wiphy->fw_version));
901
981 goto out; 902 goto out;
982 903
983irq_disable: 904irq_disable:
@@ -1001,10 +922,8 @@ power_off:
1001out: 922out:
1002 mutex_unlock(&wl->mutex); 923 mutex_unlock(&wl->mutex);
1003 924
1004 if (!ret) { 925 if (!ret)
1005 list_add(&wl->list, &wl_list); 926 list_add(&wl->list, &wl_list);
1006 register_inetaddr_notifier(&wl1271_dev_notifier);
1007 }
1008 927
1009 return ret; 928 return ret;
1010} 929}
@@ -1015,8 +934,6 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1015 struct wl1271 *wl = hw->priv; 934 struct wl1271 *wl = hw->priv;
1016 int i; 935 int i;
1017 936
1018 unregister_inetaddr_notifier(&wl1271_dev_notifier);
1019
1020 mutex_lock(&wl->mutex); 937 mutex_lock(&wl->mutex);
1021 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 938 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1022 939
@@ -1026,10 +943,17 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1026 943
1027 WARN_ON(wl->state != WL1271_STATE_ON); 944 WARN_ON(wl->state != WL1271_STATE_ON);
1028 945
1029 if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) { 946 /* enable dyn ps just in case (if left on due to fw crash etc) */
947 if (wl->bss_type == BSS_TYPE_STA_BSS)
948 ieee80211_enable_dyn_ps(wl->vif);
949
950 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
1030 mutex_unlock(&wl->mutex); 951 mutex_unlock(&wl->mutex);
1031 ieee80211_scan_completed(wl->hw, true); 952 ieee80211_scan_completed(wl->hw, true);
1032 mutex_lock(&wl->mutex); 953 mutex_lock(&wl->mutex);
954 wl->scan.state = WL1271_SCAN_STATE_IDLE;
955 kfree(wl->scan.scanned_ch);
956 wl->scan.scanned_ch = NULL;
1033 } 957 }
1034 958
1035 wl->state = WL1271_STATE_OFF; 959 wl->state = WL1271_STATE_OFF;
@@ -1040,11 +964,12 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1040 964
1041 cancel_work_sync(&wl->irq_work); 965 cancel_work_sync(&wl->irq_work);
1042 cancel_work_sync(&wl->tx_work); 966 cancel_work_sync(&wl->tx_work);
967 cancel_delayed_work_sync(&wl->pspoll_work);
1043 968
1044 mutex_lock(&wl->mutex); 969 mutex_lock(&wl->mutex);
1045 970
1046 /* let's notify MAC80211 about the remaining pending TX frames */ 971 /* let's notify MAC80211 about the remaining pending TX frames */
1047 wl1271_tx_flush(wl); 972 wl1271_tx_reset(wl);
1048 wl1271_power_off(wl); 973 wl1271_power_off(wl);
1049 974
1050 memset(wl->bssid, 0, ETH_ALEN); 975 memset(wl->bssid, 0, ETH_ALEN);
@@ -1241,6 +1166,42 @@ static u32 wl1271_min_rate_get(struct wl1271 *wl)
1241 return rate; 1166 return rate;
1242} 1167}
1243 1168
1169static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
1170{
1171 int ret;
1172
1173 if (idle) {
1174 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1175 ret = wl1271_unjoin(wl);
1176 if (ret < 0)
1177 goto out;
1178 }
1179 wl->rate_set = wl1271_min_rate_get(wl);
1180 wl->sta_rate_set = 0;
1181 ret = wl1271_acx_rate_policies(wl);
1182 if (ret < 0)
1183 goto out;
1184 ret = wl1271_acx_keep_alive_config(
1185 wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1186 ACX_KEEP_ALIVE_TPL_INVALID);
1187 if (ret < 0)
1188 goto out;
1189 set_bit(WL1271_FLAG_IDLE, &wl->flags);
1190 } else {
1191 /* increment the session counter */
1192 wl->session_counter++;
1193 if (wl->session_counter >= SESSION_COUNTER_MAX)
1194 wl->session_counter = 0;
1195 ret = wl1271_dummy_join(wl);
1196 if (ret < 0)
1197 goto out;
1198 clear_bit(WL1271_FLAG_IDLE, &wl->flags);
1199 }
1200
1201out:
1202 return ret;
1203}
1204
1244static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 1205static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1245{ 1206{
1246 struct wl1271 *wl = hw->priv; 1207 struct wl1271 *wl = hw->priv;
@@ -1255,6 +1216,15 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1255 conf->power_level, 1216 conf->power_level,
1256 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use"); 1217 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
1257 1218
1219 /*
1220 * mac80211 will go to idle nearly immediately after transmitting some
1221 * frames, such as the deauth. To make sure those frames reach the air,
1222 * wait here until the TX queue is fully flushed.
1223 */
1224 if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
1225 (conf->flags & IEEE80211_CONF_IDLE))
1226 wl1271_tx_flush(wl);
1227
1258 mutex_lock(&wl->mutex); 1228 mutex_lock(&wl->mutex);
1259 1229
1260 if (unlikely(wl->state == WL1271_STATE_OFF)) 1230 if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -1295,24 +1265,18 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1295 } 1265 }
1296 1266
1297 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1267 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1298 if (conf->flags & IEEE80211_CONF_IDLE && 1268 ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
1299 test_bit(WL1271_FLAG_JOINED, &wl->flags)) 1269 if (ret < 0)
1300 wl1271_unjoin(wl); 1270 wl1271_warning("idle mode change failed %d", ret);
1301 else if (!(conf->flags & IEEE80211_CONF_IDLE))
1302 wl1271_dummy_join(wl);
1303
1304 if (conf->flags & IEEE80211_CONF_IDLE) {
1305 wl->rate_set = wl1271_min_rate_get(wl);
1306 wl->sta_rate_set = 0;
1307 wl1271_acx_rate_policies(wl);
1308 wl1271_acx_keep_alive_config(
1309 wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1310 ACX_KEEP_ALIVE_TPL_INVALID);
1311 set_bit(WL1271_FLAG_IDLE, &wl->flags);
1312 } else
1313 clear_bit(WL1271_FLAG_IDLE, &wl->flags);
1314 } 1271 }
1315 1272
1273 /*
1274 * if mac80211 changes the PSM mode, make sure the mode is not
1275 * incorrectly changed after the pspoll failure active window.
1276 */
1277 if (changed & IEEE80211_CONF_CHANGE_PS)
1278 clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
1279
1316 if (conf->flags & IEEE80211_CONF_PS && 1280 if (conf->flags & IEEE80211_CONF_PS &&
1317 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 1281 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1318 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); 1282 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1595,13 +1559,9 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1595 goto out; 1559 goto out;
1596 1560
1597 if (wl1271_11a_enabled()) 1561 if (wl1271_11a_enabled())
1598 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1562 ret = wl1271_scan(hw->priv, ssid, len, req);
1599 req->ie, req->ie_len, 1, 0,
1600 WL1271_SCAN_BAND_DUAL, 3);
1601 else 1563 else
1602 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1564 ret = wl1271_scan(hw->priv, ssid, len, req);
1603 req->ie, req->ie_len, 1, 0,
1604 WL1271_SCAN_BAND_2_4_GHZ, 3);
1605 1565
1606 wl1271_ps_elp_sleep(wl); 1566 wl1271_ps_elp_sleep(wl);
1607 1567
@@ -1774,6 +1734,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1774 wl->aid = bss_conf->aid; 1734 wl->aid = bss_conf->aid;
1775 set_assoc = true; 1735 set_assoc = true;
1776 1736
1737 wl->ps_poll_failures = 0;
1738
1777 /* 1739 /*
1778 * use basic rates from AP, and determine lowest rate 1740 * use basic rates from AP, and determine lowest rate
1779 * to use with control frames. 1741 * to use with control frames.
@@ -1823,6 +1785,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1823 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 1785 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1824 wl->aid = 0; 1786 wl->aid = 0;
1825 1787
1788 /* re-enable dynamic ps - just in case */
1789 ieee80211_enable_dyn_ps(wl->vif);
1790
1826 /* revert back to minimum rates for the current band */ 1791 /* revert back to minimum rates for the current band */
1827 wl1271_set_band_rate(wl); 1792 wl1271_set_band_rate(wl);
1828 wl->basic_rate = wl1271_min_rate_get(wl); 1793 wl->basic_rate = wl1271_min_rate_get(wl);
@@ -1871,6 +1836,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1871 } 1836 }
1872 } 1837 }
1873 1838
1839 if (changed & BSS_CHANGED_ARP_FILTER) {
1840 __be32 addr = bss_conf->arp_addr_list[0];
1841 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
1842
1843 if (bss_conf->arp_addr_cnt == 1 && bss_conf->arp_filter_enabled)
1844 ret = wl1271_acx_arp_ip_filter(wl, true, addr);
1845 else
1846 ret = wl1271_acx_arp_ip_filter(wl, false, addr);
1847
1848 if (ret < 0)
1849 goto out_sleep;
1850 }
1851
1874 if (do_join) { 1852 if (do_join) {
1875 ret = wl1271_join(wl, set_assoc); 1853 ret = wl1271_join(wl, set_assoc);
1876 if (ret < 0) { 1854 if (ret < 0) {
@@ -1929,6 +1907,48 @@ out:
1929 return ret; 1907 return ret;
1930} 1908}
1931 1909
1910static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
1911{
1912
1913 struct wl1271 *wl = hw->priv;
1914 u64 mactime = ULLONG_MAX;
1915 int ret;
1916
1917 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
1918
1919 mutex_lock(&wl->mutex);
1920
1921 ret = wl1271_ps_elp_wakeup(wl, false);
1922 if (ret < 0)
1923 goto out;
1924
1925 ret = wl1271_acx_tsf_info(wl, &mactime);
1926 if (ret < 0)
1927 goto out_sleep;
1928
1929out_sleep:
1930 wl1271_ps_elp_sleep(wl);
1931
1932out:
1933 mutex_unlock(&wl->mutex);
1934 return mactime;
1935}
1936
1937static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
1938 struct survey_info *survey)
1939{
1940 struct wl1271 *wl = hw->priv;
1941 struct ieee80211_conf *conf = &hw->conf;
1942
1943 if (idx != 0)
1944 return -ENOENT;
1945
1946 survey->channel = conf->channel;
1947 survey->filled = SURVEY_INFO_NOISE_DBM;
1948 survey->noise = wl->noise;
1949
1950 return 0;
1951}
1932 1952
1933/* can't be const, mac80211 writes to this */ 1953/* can't be const, mac80211 writes to this */
1934static struct ieee80211_rate wl1271_rates[] = { 1954static struct ieee80211_rate wl1271_rates[] = {
@@ -1991,7 +2011,7 @@ static struct ieee80211_channel wl1271_channels[] = {
1991}; 2011};
1992 2012
1993/* mapping to indexes for wl1271_rates */ 2013/* mapping to indexes for wl1271_rates */
1994const static u8 wl1271_rate_to_idx_2ghz[] = { 2014static const u8 wl1271_rate_to_idx_2ghz[] = {
1995 /* MCS rates are used only with 11n */ 2015 /* MCS rates are used only with 11n */
1996 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ 2016 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
1997 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ 2017 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
@@ -2103,7 +2123,7 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
2103}; 2123};
2104 2124
2105/* mapping to indexes for wl1271_rates_5ghz */ 2125/* mapping to indexes for wl1271_rates_5ghz */
2106const static u8 wl1271_rate_to_idx_5ghz[] = { 2126static const u8 wl1271_rate_to_idx_5ghz[] = {
2107 /* MCS rates are used only with 11n */ 2127 /* MCS rates are used only with 11n */
2108 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ 2128 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
2109 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ 2129 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
@@ -2139,7 +2159,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
2139 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 2159 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
2140}; 2160};
2141 2161
2142const static u8 *wl1271_band_rate_to_idx[] = { 2162static const u8 *wl1271_band_rate_to_idx[] = {
2143 [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz, 2163 [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
2144 [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz 2164 [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
2145}; 2165};
@@ -2158,6 +2178,8 @@ static const struct ieee80211_ops wl1271_ops = {
2158 .bss_info_changed = wl1271_op_bss_info_changed, 2178 .bss_info_changed = wl1271_op_bss_info_changed,
2159 .set_rts_threshold = wl1271_op_set_rts_threshold, 2179 .set_rts_threshold = wl1271_op_set_rts_threshold,
2160 .conf_tx = wl1271_op_conf_tx, 2180 .conf_tx = wl1271_op_conf_tx,
2181 .get_tsf = wl1271_op_get_tsf,
2182 .get_survey = wl1271_op_get_survey,
2161 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 2183 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
2162}; 2184};
2163 2185
@@ -2350,15 +2372,13 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2350 goto err_hw_alloc; 2372 goto err_hw_alloc;
2351 } 2373 }
2352 2374
2353 plat_dev = kmalloc(sizeof(wl1271_device), GFP_KERNEL); 2375 plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
2354 if (!plat_dev) { 2376 if (!plat_dev) {
2355 wl1271_error("could not allocate platform_device"); 2377 wl1271_error("could not allocate platform_device");
2356 ret = -ENOMEM; 2378 ret = -ENOMEM;
2357 goto err_plat_alloc; 2379 goto err_plat_alloc;
2358 } 2380 }
2359 2381
2360 memcpy(plat_dev, &wl1271_device, sizeof(wl1271_device));
2361
2362 wl = hw->priv; 2382 wl = hw->priv;
2363 memset(wl, 0, sizeof(*wl)); 2383 memset(wl, 0, sizeof(*wl));
2364 2384
@@ -2370,6 +2390,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2370 skb_queue_head_init(&wl->tx_queue); 2390 skb_queue_head_init(&wl->tx_queue);
2371 2391
2372 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 2392 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
2393 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
2373 wl->channel = WL1271_DEFAULT_CHANNEL; 2394 wl->channel = WL1271_DEFAULT_CHANNEL;
2374 wl->beacon_int = WL1271_DEFAULT_BEACON_INT; 2395 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
2375 wl->default_key = 0; 2396 wl->default_key = 0;
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index b98fb643fab0..019aa79cd9df 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -53,13 +53,14 @@ static void wl1271_rx_status(struct wl1271 *wl,
53 status->band = wl->band; 53 status->band = wl->band;
54 status->rate_idx = wl1271_rate_to_idx(wl, desc->rate); 54 status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
55 55
56 status->signal = desc->rssi;
57
56 /* 58 /*
57 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the 59 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
58 * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we 60 * need to divide by two for now, but TI has been discussing about
59 * only need the mactime for monitor mode. For now the mactime is 61 * changing it. This needs to be rechecked.
60 * not valid, so RX_FLAG_TSFT should not be set
61 */ 62 */
62 status->signal = desc->rssi; 63 wl->noise = desc->rssi - (desc->snr >> 1);
63 64
64 status->freq = ieee80211_channel_to_frequency(desc->channel); 65 status->freq = ieee80211_channel_to_frequency(desc->channel);
65 66
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index b89be4758e78..13a232333b13 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -113,7 +113,7 @@ struct wl1271_rx_descriptor {
113 u8 process_id; 113 u8 process_id;
114 u8 pad_len; 114 u8 pad_len;
115 u8 reserved; 115 u8 reserved;
116} __attribute__ ((packed)); 116} __packed;
117 117
118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); 119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/wl1271_scan.c
new file mode 100644
index 000000000000..fec43eed8c55
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.c
@@ -0,0 +1,257 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/ieee80211.h>
25
26#include "wl1271.h"
27#include "wl1271_cmd.h"
28#include "wl1271_scan.h"
29#include "wl1271_acx.h"
30
31static int wl1271_get_scan_channels(struct wl1271 *wl,
32 struct cfg80211_scan_request *req,
33 struct basic_scan_channel_params *channels,
34 enum ieee80211_band band, bool passive)
35{
36 int i, j;
37 u32 flags;
38
39 for (i = 0, j = 0;
40 i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
41 i++) {
42
43 flags = req->channels[i]->flags;
44
45 if (!wl->scan.scanned_ch[i] &&
46 !(flags & IEEE80211_CHAN_DISABLED) &&
47 ((!!(flags & IEEE80211_CHAN_PASSIVE_SCAN)) == passive) &&
48 (req->channels[i]->band == band)) {
49
50 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
51 req->channels[i]->band,
52 req->channels[i]->center_freq);
53 wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
54 req->channels[i]->hw_value,
55 req->channels[i]->flags);
56 wl1271_debug(DEBUG_SCAN,
57 "max_antenna_gain %d, max_power %d",
58 req->channels[i]->max_antenna_gain,
59 req->channels[i]->max_power);
60 wl1271_debug(DEBUG_SCAN, "beacon_found %d",
61 req->channels[i]->beacon_found);
62
63 channels[j].min_duration =
64 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
65 channels[j].max_duration =
66 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
67 channels[j].early_termination = 0;
68 channels[j].tx_power_att = req->channels[i]->max_power;
69 channels[j].channel = req->channels[i]->hw_value;
70
71 memset(&channels[j].bssid_lsb, 0xff, 4);
72 memset(&channels[j].bssid_msb, 0xff, 2);
73
74 /* Mark the channels we already used */
75 wl->scan.scanned_ch[i] = true;
76
77 j++;
78 }
79 }
80
81 return j;
82}
83
84#define WL1271_NOTHING_TO_SCAN 1
85
86static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
87 bool passive, u32 basic_rate)
88{
89 struct wl1271_cmd_scan *cmd;
90 struct wl1271_cmd_trigger_scan_to *trigger;
91 int ret;
92 u16 scan_options = 0;
93
94 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
95 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
96 if (!cmd || !trigger) {
97 ret = -ENOMEM;
98 goto out;
99 }
100
101 /* We always use high priority scans */
102 scan_options = WL1271_SCAN_OPT_PRIORITY_HIGH;
103 if(passive)
104 scan_options |= WL1271_SCAN_OPT_PASSIVE;
105 cmd->params.scan_options = cpu_to_le16(scan_options);
106
107 cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
108 cmd->channels,
109 band, passive);
110 if (cmd->params.n_ch == 0) {
111 ret = WL1271_NOTHING_TO_SCAN;
112 goto out;
113 }
114
115 cmd->params.tx_rate = cpu_to_le32(basic_rate);
116 cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
117 cmd->params.rx_filter_options =
118 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
119
120 cmd->params.n_probe_reqs = WL1271_SCAN_PROBE_REQS;
121 cmd->params.tx_rate = cpu_to_le32(basic_rate);
122 cmd->params.tid_trigger = 0;
123 cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
124
125 if (band == IEEE80211_BAND_2GHZ)
126 cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
127 else
128 cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
129
130 if (wl->scan.ssid_len && wl->scan.ssid) {
131 cmd->params.ssid_len = wl->scan.ssid_len;
132 memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
133 }
134
135 ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
136 wl->scan.req->ie, wl->scan.req->ie_len,
137 band);
138 if (ret < 0) {
139 wl1271_error("PROBE request template failed");
140 goto out;
141 }
142
143 /* disable the timeout */
144 trigger->timeout = 0;
145 ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
146 sizeof(*trigger), 0);
147 if (ret < 0) {
148 wl1271_error("trigger scan to failed for hw scan");
149 goto out;
150 }
151
152 wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
153
154 ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
155 if (ret < 0) {
156 wl1271_error("SCAN failed");
157 goto out;
158 }
159
160out:
161 kfree(cmd);
162 kfree(trigger);
163 return ret;
164}
165
/*
 * Scan state machine.  Each invocation issues the scan pass for the
 * current state; when a pass has nothing to scan, the function advances
 * wl->scan.state and recurses into itself (depth is bounded by the
 * number of states, so at most a handful of frames).
 *
 * Order of states: 2.4 GHz active -> 2.4 GHz passive -> (if 11a is
 * enabled) 5 GHz active -> 5 GHz passive -> DONE -> IDLE.
 *
 * Must be called with wl->mutex held.  In the DONE state the mutex is
 * dropped around ieee80211_scan_completed() and re-taken afterwards.
 * Negative errors from wl1271_scan_send() are ignored here; only the
 * WL1271_NOTHING_TO_SCAN sentinel drives state transitions.
 */
void wl1271_scan_stm(struct wl1271 *wl)
{
	int ret;

	switch (wl->scan.state) {
	case WL1271_SCAN_STATE_IDLE:
		/* Nothing in progress. */
		break;

	case WL1271_SCAN_STATE_2GHZ_ACTIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, false,
				       wl->conf.tx.basic_rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_2GHZ_PASSIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, true,
				       wl->conf.tx.basic_rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			/* Skip the 5 GHz states when 11a is not available. */
			if (wl1271_11a_enabled())
				wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
			else
				wl->scan.state = WL1271_SCAN_STATE_DONE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_5GHZ_ACTIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, false,
				       wl->conf.tx.basic_rate_5);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_5GHZ_PASSIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, true,
				       wl->conf.tx.basic_rate_5);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_DONE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_DONE:
		/* mac80211 may call back into the driver from
		 * ieee80211_scan_completed(), so drop the mutex first. */
		mutex_unlock(&wl->mutex);
		ieee80211_scan_completed(wl->hw, false);
		mutex_lock(&wl->mutex);

		kfree(wl->scan.scanned_ch);
		wl->scan.scanned_ch = NULL;

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		break;

	default:
		wl1271_error("invalid scan state");
		break;
	}
}
233
234int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
235 struct cfg80211_scan_request *req)
236{
237 if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
238 return -EBUSY;
239
240 wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
241
242 if (ssid_len && ssid) {
243 wl->scan.ssid_len = ssid_len;
244 memcpy(wl->scan.ssid, ssid, ssid_len);
245 } else {
246 wl->scan.ssid_len = 0;
247 }
248
249 wl->scan.req = req;
250
251 wl->scan.scanned_ch = kzalloc(req->n_channels *
252 sizeof(*wl->scan.scanned_ch),
253 GFP_KERNEL);
254 wl1271_scan_stm(wl);
255
256 return 0;
257}
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.h b/drivers/net/wireless/wl12xx/wl1271_scan.h
new file mode 100644
index 000000000000..f1815700f5f9
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.h
@@ -0,0 +1,109 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
#ifndef __WL1271_SCAN_H__
#define __WL1271_SCAN_H__

#include "wl1271.h"

/* Start a hw scan for @req; returns 0, -EBUSY or a negative error. */
int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
		struct cfg80211_scan_request *req);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
				const u8 *ssid, size_t ssid_len,
				const u8 *ie, size_t ie_len, u8 band);
/* Advance the scan state machine; caller must hold wl->mutex. */
void wl1271_scan_stm(struct wl1271 *wl);

#define WL1271_SCAN_MAX_CHANNELS 24
#define WL1271_SCAN_DEFAULT_TAG  1
#define WL1271_SCAN_CURRENT_TX_PWR 0
#define WL1271_SCAN_OPT_ACTIVE 0
#define WL1271_SCAN_OPT_PASSIVE	 1
#define WL1271_SCAN_OPT_PRIORITY_HIGH  4
#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */
#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */
#define WL1271_SCAN_BAND_2_4_GHZ 0
#define WL1271_SCAN_BAND_5_GHZ 1
#define WL1271_SCAN_PROBE_REQS 3

/* Scan state machine states, in traversal order (see wl1271_scan_stm). */
enum {
	WL1271_SCAN_STATE_IDLE,
	WL1271_SCAN_STATE_2GHZ_ACTIVE,
	WL1271_SCAN_STATE_2GHZ_PASSIVE,
	WL1271_SCAN_STATE_5GHZ_ACTIVE,
	WL1271_SCAN_STATE_5GHZ_PASSIVE,
	WL1271_SCAN_STATE_DONE
};

/* Firmware wire format — field order, widths and padding must not change. */
struct basic_scan_params {
	__le32 rx_config_options;
	__le32 rx_filter_options;
	/* Scan option flags (WL1271_SCAN_OPT_*) */
	__le16 scan_options;
	/* Number of scan channels in the list (maximum 30) */
	u8 n_ch;
	/* This field indicates the number of probe requests to send
	   per channel for an active scan */
	u8 n_probe_reqs;
	/* Rate bit field for sending the probes */
	__le32 tx_rate;
	u8 tid_trigger;
	u8 ssid_len;
	/* in order to align */
	u8 padding1[2];
	u8 ssid[IW_ESSID_MAX_SIZE];
	/* Band to scan */
	u8 band;
	u8 use_ssid_list;
	u8 scan_tag;
	u8 padding2;
} __packed;

/* Per-channel parameters within a scan command (firmware wire format). */
struct basic_scan_channel_params {
	/* Duration in TU to wait for frames on a channel for active scan */
	__le32 min_duration;
	__le32 max_duration;
	__le32 bssid_lsb;
	__le16 bssid_msb;
	u8 early_termination;
	u8 tx_power_att;
	u8 channel;
	/* FW internal use only! */
	u8 dfs_candidate;
	u8 activity_detected;
	u8 pad;
} __packed;

/* CMD_SCAN payload: scan parameters plus the channel list. */
struct wl1271_cmd_scan {
	struct wl1271_cmd_header header;

	struct basic_scan_params params;
	struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
} __packed;

/* CMD_TRIGGER_SCAN_TO payload: scan trigger timeout (0 disables it). */
struct wl1271_cmd_trigger_scan_to {
	struct wl1271_cmd_header header;

	__le32 timeout;
} __packed;

#endif /* __WL1271_SCAN_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
index d3d6f302f705..7059b5cccf0f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -28,7 +28,7 @@
28#include <linux/mmc/sdio_func.h> 28#include <linux/mmc/sdio_func.h>
29#include <linux/mmc/sdio_ids.h> 29#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h> 30#include <linux/mmc/card.h>
31#include <plat/gpio.h> 31#include <linux/gpio.h>
32 32
33#include "wl1271.h" 33#include "wl1271.h"
34#include "wl12xx_80211.h" 34#include "wl12xx_80211.h"
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 5189b812f939..96d25fb50495 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -461,3 +461,4 @@ MODULE_LICENSE("GPL");
461MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 461MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
462MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 462MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
463MODULE_FIRMWARE(WL1271_FW_NAME); 463MODULE_FIRMWARE(WL1271_FW_NAME);
464MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 554deb4d024e..6e0952f79e9a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -199,7 +199,14 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
199 buf = nla_data(tb[WL1271_TM_ATTR_DATA]); 199 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
200 len = nla_len(tb[WL1271_TM_ATTR_DATA]); 200 len = nla_len(tb[WL1271_TM_ATTR_DATA]);
201 201
202 if (len != sizeof(struct wl1271_nvs_file)) { 202 /*
203 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
204 * configurations) can be removed when those NVS files stop floating
205 * around.
206 */
207 if (len != sizeof(struct wl1271_nvs_file) &&
208 (len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
209 wl1271_11a_enabled())) {
203 wl1271_error("nvs size is not as expected: %zu != %zu", 210 wl1271_error("nvs size is not as expected: %zu != %zu",
204 len, sizeof(struct wl1271_nvs_file)); 211 len, sizeof(struct wl1271_nvs_file));
205 return -EMSGSIZE; 212 return -EMSGSIZE;
@@ -209,7 +216,7 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
209 216
210 kfree(wl->nvs); 217 kfree(wl->nvs);
211 218
212 wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); 219 wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
213 if (!wl->nvs) { 220 if (!wl->nvs) {
214 wl1271_error("could not allocate memory for the nvs file"); 221 wl1271_error("could not allocate memory for the nvs file");
215 ret = -ENOMEM; 222 ret = -ENOMEM;
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 62db79508ddf..c592cc2e9fe8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -36,6 +36,7 @@ static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
37 if (wl->tx_frames[i] == NULL) { 37 if (wl->tx_frames[i] == NULL) {
38 wl->tx_frames[i] = skb; 38 wl->tx_frames[i] = skb;
39 wl->tx_frames_cnt++;
39 return i; 40 return i;
40 } 41 }
41 42
@@ -73,8 +74,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
73 wl1271_debug(DEBUG_TX, 74 wl1271_debug(DEBUG_TX,
74 "tx_allocate: size: %d, blocks: %d, id: %d", 75 "tx_allocate: size: %d, blocks: %d, id: %d",
75 total_len, total_blocks, id); 76 total_len, total_blocks, id);
76 } else 77 } else {
77 wl->tx_frames[id] = NULL; 78 wl->tx_frames[id] = NULL;
79 wl->tx_frames_cnt--;
80 }
78 81
79 return ret; 82 return ret;
80} 83}
@@ -358,6 +361,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
358 /* return the packet to the stack */ 361 /* return the packet to the stack */
359 ieee80211_tx_status(wl->hw, skb); 362 ieee80211_tx_status(wl->hw, skb);
360 wl->tx_frames[result->id] = NULL; 363 wl->tx_frames[result->id] = NULL;
364 wl->tx_frames_cnt--;
361} 365}
362 366
363/* Called upon reception of a TX complete interrupt */ 367/* Called upon reception of a TX complete interrupt */
@@ -412,7 +416,7 @@ void wl1271_tx_complete(struct wl1271 *wl)
412} 416}
413 417
414/* caller must hold wl->mutex */ 418/* caller must hold wl->mutex */
415void wl1271_tx_flush(struct wl1271 *wl) 419void wl1271_tx_reset(struct wl1271 *wl)
416{ 420{
417 int i; 421 int i;
418 struct sk_buff *skb; 422 struct sk_buff *skb;
@@ -421,7 +425,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
421/* control->flags = 0; FIXME */ 425/* control->flags = 0; FIXME */
422 426
423 while ((skb = skb_dequeue(&wl->tx_queue))) { 427 while ((skb = skb_dequeue(&wl->tx_queue))) {
424 wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb); 428 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
425 ieee80211_tx_status(wl->hw, skb); 429 ieee80211_tx_status(wl->hw, skb);
426 } 430 }
427 431
@@ -429,6 +433,32 @@ void wl1271_tx_flush(struct wl1271 *wl)
429 if (wl->tx_frames[i] != NULL) { 433 if (wl->tx_frames[i] != NULL) {
430 skb = wl->tx_frames[i]; 434 skb = wl->tx_frames[i];
431 wl->tx_frames[i] = NULL; 435 wl->tx_frames[i] = NULL;
436 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
432 ieee80211_tx_status(wl->hw, skb); 437 ieee80211_tx_status(wl->hw, skb);
433 } 438 }
439 wl->tx_frames_cnt = 0;
440}
441
/* Flush timeout in microseconds (500 ms). */
#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout;
	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* Poll until both the in-flight frame table (tx_frames_cnt) and the
	 * pending tx queue drain, or the timeout expires.  The mutex is
	 * held only around each check so the tx path can make progress
	 * between iterations. */
	while (!time_after(jiffies, timeout)) {
		mutex_lock(&wl->mutex);
		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
			     wl->tx_frames_cnt);
		if ((wl->tx_frames_cnt == 0) &&
		    skb_queue_empty(&wl->tx_queue)) {
			mutex_unlock(&wl->mutex);
			return;
		}
		mutex_unlock(&wl->mutex);
		msleep(1);
	}

	/* Best effort only: warn and return, leaving remaining frames to
	 * the normal completion/reset paths. */
	wl1271_warning("Unable to flush all TX buffers, timed out.");
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 3b8b7ac253fd..48bf92621c03 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -80,7 +80,7 @@ struct wl1271_tx_hw_descr {
80 /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ 80 /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
81 u8 aid; 81 u8 aid;
82 u8 reserved; 82 u8 reserved;
83} __attribute__ ((packed)); 83} __packed;
84 84
85enum wl1271_tx_hw_res_status { 85enum wl1271_tx_hw_res_status {
86 TX_SUCCESS = 0, 86 TX_SUCCESS = 0,
@@ -115,13 +115,13 @@ struct wl1271_tx_hw_res_descr {
115 u8 rate_class_index; 115 u8 rate_class_index;
116 /* for 4-byte alignment. */ 116 /* for 4-byte alignment. */
117 u8 spare; 117 u8 spare;
118} __attribute__ ((packed)); 118} __packed;
119 119
120struct wl1271_tx_hw_res_if { 120struct wl1271_tx_hw_res_if {
121 __le32 tx_result_fw_counter; 121 __le32 tx_result_fw_counter;
122 __le32 tx_result_host_counter; 122 __le32 tx_result_host_counter;
123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
124} __attribute__ ((packed)); 124} __packed;
125 125
126static inline int wl1271_tx_get_queue(int queue) 126static inline int wl1271_tx_get_queue(int queue)
127{ 127{
@@ -158,6 +158,7 @@ static inline int wl1271_tx_ac_to_tid(int ac)
158 158
159void wl1271_tx_work(struct work_struct *work); 159void wl1271_tx_work(struct work_struct *work);
160void wl1271_tx_complete(struct wl1271 *wl); 160void wl1271_tx_complete(struct wl1271 *wl);
161void wl1271_tx_reset(struct wl1271 *wl);
161void wl1271_tx_flush(struct wl1271 *wl); 162void wl1271_tx_flush(struct wl1271 *wl);
162u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); 163u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
163u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 164u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 055d7bc6f592..184628027213 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -66,41 +66,41 @@ struct ieee80211_header {
66 u8 bssid[ETH_ALEN]; 66 u8 bssid[ETH_ALEN];
67 __le16 seq_ctl; 67 __le16 seq_ctl;
68 u8 payload[0]; 68 u8 payload[0];
69} __attribute__ ((packed)); 69} __packed;
70 70
71struct wl12xx_ie_header { 71struct wl12xx_ie_header {
72 u8 id; 72 u8 id;
73 u8 len; 73 u8 len;
74} __attribute__ ((packed)); 74} __packed;
75 75
76/* IEs */ 76/* IEs */
77 77
78struct wl12xx_ie_ssid { 78struct wl12xx_ie_ssid {
79 struct wl12xx_ie_header header; 79 struct wl12xx_ie_header header;
80 char ssid[IW_ESSID_MAX_SIZE]; 80 char ssid[IW_ESSID_MAX_SIZE];
81} __attribute__ ((packed)); 81} __packed;
82 82
83struct wl12xx_ie_rates { 83struct wl12xx_ie_rates {
84 struct wl12xx_ie_header header; 84 struct wl12xx_ie_header header;
85 u8 rates[MAX_SUPPORTED_RATES]; 85 u8 rates[MAX_SUPPORTED_RATES];
86} __attribute__ ((packed)); 86} __packed;
87 87
88struct wl12xx_ie_ds_params { 88struct wl12xx_ie_ds_params {
89 struct wl12xx_ie_header header; 89 struct wl12xx_ie_header header;
90 u8 channel; 90 u8 channel;
91} __attribute__ ((packed)); 91} __packed;
92 92
93struct country_triplet { 93struct country_triplet {
94 u8 channel; 94 u8 channel;
95 u8 num_channels; 95 u8 num_channels;
96 u8 max_tx_power; 96 u8 max_tx_power;
97} __attribute__ ((packed)); 97} __packed;
98 98
99struct wl12xx_ie_country { 99struct wl12xx_ie_country {
100 struct wl12xx_ie_header header; 100 struct wl12xx_ie_header header;
101 u8 country_string[COUNTRY_STRING_LEN]; 101 u8 country_string[COUNTRY_STRING_LEN];
102 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; 102 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
103} __attribute__ ((packed)); 103} __packed;
104 104
105 105
106/* Templates */ 106/* Templates */
@@ -115,30 +115,30 @@ struct wl12xx_beacon_template {
115 struct wl12xx_ie_rates ext_rates; 115 struct wl12xx_ie_rates ext_rates;
116 struct wl12xx_ie_ds_params ds_params; 116 struct wl12xx_ie_ds_params ds_params;
117 struct wl12xx_ie_country country; 117 struct wl12xx_ie_country country;
118} __attribute__ ((packed)); 118} __packed;
119 119
120struct wl12xx_null_data_template { 120struct wl12xx_null_data_template {
121 struct ieee80211_header header; 121 struct ieee80211_header header;
122} __attribute__ ((packed)); 122} __packed;
123 123
124struct wl12xx_ps_poll_template { 124struct wl12xx_ps_poll_template {
125 __le16 fc; 125 __le16 fc;
126 __le16 aid; 126 __le16 aid;
127 u8 bssid[ETH_ALEN]; 127 u8 bssid[ETH_ALEN];
128 u8 ta[ETH_ALEN]; 128 u8 ta[ETH_ALEN];
129} __attribute__ ((packed)); 129} __packed;
130 130
131struct wl12xx_qos_null_data_template { 131struct wl12xx_qos_null_data_template {
132 struct ieee80211_header header; 132 struct ieee80211_header header;
133 __le16 qos_ctl; 133 __le16 qos_ctl;
134} __attribute__ ((packed)); 134} __packed;
135 135
136struct wl12xx_probe_req_template { 136struct wl12xx_probe_req_template {
137 struct ieee80211_header header; 137 struct ieee80211_header header;
138 struct wl12xx_ie_ssid ssid; 138 struct wl12xx_ie_ssid ssid;
139 struct wl12xx_ie_rates rates; 139 struct wl12xx_ie_rates rates;
140 struct wl12xx_ie_rates ext_rates; 140 struct wl12xx_ie_rates ext_rates;
141} __attribute__ ((packed)); 141} __packed;
142 142
143 143
144struct wl12xx_probe_resp_template { 144struct wl12xx_probe_resp_template {
@@ -151,6 +151,6 @@ struct wl12xx_probe_resp_template {
151 struct wl12xx_ie_rates ext_rates; 151 struct wl12xx_ie_rates ext_rates;
152 struct wl12xx_ie_ds_params ds_params; 152 struct wl12xx_ie_ds_params ds_params;
153 struct wl12xx_ie_country country; 153 struct wl12xx_ie_country country;
154} __attribute__ ((packed)); 154} __packed;
155 155
156#endif 156#endif
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8816e371fd0e..3fbfd19818f1 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -231,12 +231,12 @@ struct iw_mgmt_info_element {
231 but sizeof(enum) > sizeof(u8) :-( */ 231 but sizeof(enum) > sizeof(u8) :-( */
232 u8 len; 232 u8 len;
233 u8 data[0]; 233 u8 data[0];
234} __attribute__ ((packed)); 234} __packed;
235 235
236struct iw_mgmt_essid_pset { 236struct iw_mgmt_essid_pset {
237 struct iw_mgmt_info_element el; 237 struct iw_mgmt_info_element el;
238 u8 essid[IW_ESSID_MAX_SIZE]; 238 u8 essid[IW_ESSID_MAX_SIZE];
239} __attribute__ ((packed)); 239} __packed;
240 240
241/* 241/*
242 * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly 242 * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly
@@ -247,12 +247,12 @@ struct iw_mgmt_essid_pset {
247struct iw_mgmt_data_rset { 247struct iw_mgmt_data_rset {
248 struct iw_mgmt_info_element el; 248 struct iw_mgmt_info_element el;
249 u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS]; 249 u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS];
250} __attribute__ ((packed)); 250} __packed;
251 251
252struct iw_mgmt_ds_pset { 252struct iw_mgmt_ds_pset {
253 struct iw_mgmt_info_element el; 253 struct iw_mgmt_info_element el;
254 u8 chan; 254 u8 chan;
255} __attribute__ ((packed)); 255} __packed;
256 256
257struct iw_mgmt_cf_pset { 257struct iw_mgmt_cf_pset {
258 struct iw_mgmt_info_element el; 258 struct iw_mgmt_info_element el;
@@ -260,12 +260,12 @@ struct iw_mgmt_cf_pset {
260 u8 cfp_period; 260 u8 cfp_period;
261 u16 cfp_max_duration; 261 u16 cfp_max_duration;
262 u16 cfp_dur_remaining; 262 u16 cfp_dur_remaining;
263} __attribute__ ((packed)); 263} __packed;
264 264
265struct iw_mgmt_ibss_pset { 265struct iw_mgmt_ibss_pset {
266 struct iw_mgmt_info_element el; 266 struct iw_mgmt_info_element el;
267 u16 atim_window; 267 u16 atim_window;
268} __attribute__ ((packed)); 268} __packed;
269 269
270struct wl3501_tx_hdr { 270struct wl3501_tx_hdr {
271 u16 tx_cnt; 271 u16 tx_cnt;
@@ -544,12 +544,12 @@ struct wl3501_80211_tx_plcp_hdr {
544 u8 service; 544 u8 service;
545 u16 len; 545 u16 len;
546 u16 crc16; 546 u16 crc16;
547} __attribute__ ((packed)); 547} __packed;
548 548
549struct wl3501_80211_tx_hdr { 549struct wl3501_80211_tx_hdr {
550 struct wl3501_80211_tx_plcp_hdr pclp_hdr; 550 struct wl3501_80211_tx_plcp_hdr pclp_hdr;
551 struct ieee80211_hdr mac_hdr; 551 struct ieee80211_hdr mac_hdr;
552} __attribute__ ((packed)); 552} __packed;
553 553
554/* 554/*
555 Reserve the beginning Tx space for descriptor use. 555 Reserve the beginning Tx space for descriptor use.
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index b0b666019a93..43307bd42a69 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -42,7 +42,8 @@ static struct zd_reg_alpha2_map reg_alpha2_map[] = {
42 { ZD_REGDOMAIN_IC, "CA" }, 42 { ZD_REGDOMAIN_IC, "CA" },
43 { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */ 43 { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
44 { ZD_REGDOMAIN_JAPAN, "JP" }, 44 { ZD_REGDOMAIN_JAPAN, "JP" },
45 { ZD_REGDOMAIN_JAPAN_ADD, "JP" }, 45 { ZD_REGDOMAIN_JAPAN_2, "JP" },
46 { ZD_REGDOMAIN_JAPAN_3, "JP" },
46 { ZD_REGDOMAIN_SPAIN, "ES" }, 47 { ZD_REGDOMAIN_SPAIN, "ES" },
47 { ZD_REGDOMAIN_FRANCE, "FR" }, 48 { ZD_REGDOMAIN_FRANCE, "FR" },
48}; 49};
@@ -855,7 +856,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
855 if (skb == NULL) 856 if (skb == NULL)
856 return -ENOMEM; 857 return -ENOMEM;
857 if (need_padding) { 858 if (need_padding) {
858 /* Make sure the the payload data is 4 byte aligned. */ 859 /* Make sure the payload data is 4 byte aligned. */
859 skb_reserve(skb, 2); 860 skb_reserve(skb, 2);
860 } 861 }
861 862
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 630c298a730e..a6d86b996c79 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -35,7 +35,7 @@ struct zd_ctrlset {
35 __le16 current_length; 35 __le16 current_length;
36 u8 service; 36 u8 service;
37 __le16 next_frame_length; 37 __le16 next_frame_length;
38} __attribute__((packed)); 38} __packed;
39 39
40#define ZD_CS_RESERVED_SIZE 25 40#define ZD_CS_RESERVED_SIZE 25
41 41
@@ -106,7 +106,7 @@ struct zd_ctrlset {
106struct rx_length_info { 106struct rx_length_info {
107 __le16 length[3]; 107 __le16 length[3];
108 __le16 tag; 108 __le16 tag;
109} __attribute__((packed)); 109} __packed;
110 110
111#define RX_LENGTH_INFO_TAG 0x697e 111#define RX_LENGTH_INFO_TAG 0x697e
112 112
@@ -117,7 +117,7 @@ struct rx_status {
117 u8 signal_quality_ofdm; 117 u8 signal_quality_ofdm;
118 u8 decryption_type; 118 u8 decryption_type;
119 u8 frame_status; 119 u8 frame_status;
120} __attribute__((packed)); 120} __packed;
121 121
122/* rx_status field decryption_type */ 122/* rx_status field decryption_type */
123#define ZD_RX_NO_WEP 0 123#define ZD_RX_NO_WEP 0
@@ -153,7 +153,7 @@ struct tx_status {
153 u8 mac[ETH_ALEN]; 153 u8 mac[ETH_ALEN];
154 u8 retry; 154 u8 retry;
155 u8 failure; 155 u8 failure;
156} __attribute__((packed)); 156} __packed;
157 157
158enum mac_flags { 158enum mac_flags {
159 MAC_FIXED_CHANNEL = 0x01, 159 MAC_FIXED_CHANNEL = 0x01,
@@ -212,8 +212,9 @@ struct zd_mac {
212#define ZD_REGDOMAIN_ETSI 0x30 212#define ZD_REGDOMAIN_ETSI 0x30
213#define ZD_REGDOMAIN_SPAIN 0x31 213#define ZD_REGDOMAIN_SPAIN 0x31
214#define ZD_REGDOMAIN_FRANCE 0x32 214#define ZD_REGDOMAIN_FRANCE 0x32
215#define ZD_REGDOMAIN_JAPAN_ADD 0x40 215#define ZD_REGDOMAIN_JAPAN_2 0x40
216#define ZD_REGDOMAIN_JAPAN 0x41 216#define ZD_REGDOMAIN_JAPAN 0x41
217#define ZD_REGDOMAIN_JAPAN_3 0x49
217 218
218enum { 219enum {
219 MIN_CHANNEL24 = 1, 220 MIN_CHANNEL24 = 1,
@@ -225,7 +226,7 @@ enum {
225struct ofdm_plcp_header { 226struct ofdm_plcp_header {
226 u8 prefix[3]; 227 u8 prefix[3];
227 __le16 service; 228 __le16 service;
228} __attribute__((packed)); 229} __packed;
229 230
230static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header) 231static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
231{ 232{
@@ -252,7 +253,7 @@ struct cck_plcp_header {
252 u8 service; 253 u8 service;
253 __le16 length; 254 __le16 length;
254 __le16 crc16; 255 __le16 crc16;
255} __attribute__((packed)); 256} __packed;
256 257
257static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header) 258static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
258{ 259{
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index c257940b71b6..818e1480ca93 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -844,7 +844,7 @@ out:
844 * @usb: a &struct zd_usb pointer 844 * @usb: a &struct zd_usb pointer
845 * @urb: URB to be freed 845 * @urb: URB to be freed
846 * 846 *
847 * Frees the the transmission URB, which means to put it on the free URB 847 * Frees the transmission URB, which means to put it on the free URB
848 * list. 848 * list.
849 */ 849 */
850static void free_tx_urb(struct zd_usb *usb, struct urb *urb) 850static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 049f8b91f020..1b1655cb7cb4 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -79,17 +79,17 @@ enum control_requests {
79struct usb_req_read_regs { 79struct usb_req_read_regs {
80 __le16 id; 80 __le16 id;
81 __le16 addr[0]; 81 __le16 addr[0];
82} __attribute__((packed)); 82} __packed;
83 83
84struct reg_data { 84struct reg_data {
85 __le16 addr; 85 __le16 addr;
86 __le16 value; 86 __le16 value;
87} __attribute__((packed)); 87} __packed;
88 88
89struct usb_req_write_regs { 89struct usb_req_write_regs {
90 __le16 id; 90 __le16 id;
91 struct reg_data reg_writes[0]; 91 struct reg_data reg_writes[0];
92} __attribute__((packed)); 92} __packed;
93 93
94enum { 94enum {
95 RF_IF_LE = 0x02, 95 RF_IF_LE = 0x02,
@@ -106,7 +106,7 @@ struct usb_req_rfwrite {
106 /* RF2595: 24 */ 106 /* RF2595: 24 */
107 __le16 bit_values[0]; 107 __le16 bit_values[0];
108 /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */ 108 /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
109} __attribute__((packed)); 109} __packed;
110 110
111/* USB interrupt */ 111/* USB interrupt */
112 112
@@ -123,12 +123,12 @@ enum usb_int_flags {
123struct usb_int_header { 123struct usb_int_header {
124 u8 type; /* must always be 1 */ 124 u8 type; /* must always be 1 */
125 u8 id; 125 u8 id;
126} __attribute__((packed)); 126} __packed;
127 127
128struct usb_int_regs { 128struct usb_int_regs {
129 struct usb_int_header hdr; 129 struct usb_int_header hdr;
130 struct reg_data regs[0]; 130 struct reg_data regs[0];
131} __attribute__((packed)); 131} __packed;
132 132
133struct usb_int_retry_fail { 133struct usb_int_retry_fail {
134 struct usb_int_header hdr; 134 struct usb_int_header hdr;
@@ -136,7 +136,7 @@ struct usb_int_retry_fail {
136 u8 _dummy; 136 u8 _dummy;
137 u8 addr[ETH_ALEN]; 137 u8 addr[ETH_ALEN];
138 u8 ibss_wakeup_dest; 138 u8 ibss_wakeup_dest;
139} __attribute__((packed)); 139} __packed;
140 140
141struct read_regs_int { 141struct read_regs_int {
142 struct completion completion; 142 struct completion completion;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d504e2b60257..b50fedcef8ac 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev,
1621 if (xennet_connect(netdev) != 0) 1621 if (xennet_connect(netdev) != 0)
1622 break; 1622 break;
1623 xenbus_switch_state(dev, XenbusStateConnected); 1623 xenbus_switch_state(dev, XenbusStateConnected);
1624 netif_notify_peers(netdev);
1624 break; 1625 break;
1625 1626
1626 case XenbusStateClosing: 1627 case XenbusStateClosing:
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index d04c5b262050..b2c2f391b29d 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -20,7 +20,7 @@
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23#include <linux/of_address.h>
24#include <linux/of_device.h> 24#include <linux/of_device.h>
25#include <linux/of_platform.h> 25#include <linux/of_platform.h>
26#include <linux/of_mdio.h> 26#include <linux/of_mdio.h>
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index fdba9cb3a599..9f12026d98e7 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -93,12 +93,20 @@ static unsigned short known_revisions[] =
93 93
94static int xtsonic_open(struct net_device *dev) 94static int xtsonic_open(struct net_device *dev)
95{ 95{
96 if (request_irq(dev->irq,sonic_interrupt,IRQF_DISABLED,"sonic",dev)) { 96 int retval;
97
98 retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
99 "sonic", dev);
100 if (retval) {
97 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 101 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
98 dev->name, dev->irq); 102 dev->name, dev->irq);
99 return -EAGAIN; 103 return -EAGAIN;
100 } 104 }
101 return sonic_open(dev); 105
106 retval = sonic_open(dev);
107 if (retval)
108 free_irq(dev->irq, dev);
109 return retval;
102} 110}
103 111
104static int xtsonic_close(struct net_device *dev) 112static int xtsonic_close(struct net_device *dev)