aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/atm/fore200e.c11
-rw-r--r--drivers/atm/idt77252.c5
-rw-r--r--drivers/atm/lanai.c14
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/firmware/iscsi_ibft.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/isdn/capi/capi.c99
-rw-r--r--drivers/isdn/capi/capidrv.c55
-rw-r--r--drivers/isdn/capi/kcapi.c8
-rw-r--r--drivers/isdn/gigaset/capi.c75
-rw-r--r--drivers/isdn/hardware/avm/avmcard.h6
-rw-r--r--drivers/isdn/hardware/avm/b1.c54
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c71
-rw-r--r--drivers/isdn/hardware/avm/b1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/b1pci.c4
-rw-r--r--drivers/isdn/hardware/avm/b1pcmcia.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c53
-rw-r--r--drivers/isdn/hardware/avm/t1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c2
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c40
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c45
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c48
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c198
-rw-r--r--drivers/isdn/hysdn/hycapi.c56
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c7
-rw-r--r--drivers/message/i2o/i2o_proc.c11
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c50
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h4
-rw-r--r--drivers/misc/iwmc3200top/log.h31
-rw-r--r--drivers/misc/iwmc3200top/main.c59
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig29
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/acenic.c2
-rw-r--r--drivers/net/amd8111e.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/ep93xx_eth.c140
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_main.c7
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/b44.c2
-rw-r--r--drivers/net/benet/be_cmds.c30
-rw-r--r--drivers/net/benet/be_cmds.h17
-rw-r--r--drivers/net/benet/be_ethtool.c63
-rw-r--r--drivers/net/bnx2.c133
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/bnx2x_main.c6
-rw-r--r--drivers/net/bonding/bond_main.c23
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/mcp251x.c17
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c248
-rw-r--r--drivers/net/can/mscan/mscan.c58
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/ems_pci.c2
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/ti_hecc.c5
-rw-r--r--drivers/net/can/usb/ems_usb.c4
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/defxx.c9
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000_main.c6
-rw-r--r--drivers/net/e1000e/82571.c68
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h17
-rw-r--r--drivers/net/e1000e/es2lan.c32
-rw-r--r--drivers/net/e1000e/hw.h12
-rw-r--r--drivers/net/e1000e/ich8lan.c1
-rw-r--r--drivers/net/e1000e/lib.c230
-rw-r--r--drivers/net/e1000e/netdev.c24
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_main.c194
-rw-r--r--drivers/net/enic/enic_res.c16
-rw-r--r--drivers/net/enic/vnic_dev.c1
-rw-r--r--drivers/net/enic/vnic_enet.h5
-rw-r--r--drivers/net/enic/vnic_intr.c8
-rw-r--r--drivers/net/enic/vnic_intr.h3
-rw-r--r--drivers/net/enic/vnic_nic.h12
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/fealnx.c2
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hp100.c2
-rw-r--r--drivers/net/igb/igb_main.c18
-rw-r--r--drivers/net/igbvf/netdev.c15
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/ipg.c2
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/via-ircc.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/ixgbe.h30
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c132
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c19
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c11
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c308
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h47
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h57
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c716
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3578
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h168
-rw-r--r--drivers/net/jme.c2
-rw-r--r--drivers/net/lib82596.c8
-rw-r--r--drivers/net/mac8390.c632
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/meth.c3
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mv643xx_eth.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/myri_sbus.c6
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netxen/Makefile2
-rw-r--r--drivers/net/netxen/netxen_nic.h8
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c2
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h5
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c19
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c213
-rw-r--r--drivers/net/niu.c6
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c3
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/phy/marvell.c38
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/ppp_generic.c122
-rw-r--r--drivers/net/qla3xxx.c3
-rw-r--r--drivers/net/qlcnic/Makefile8
-rw-r--r--drivers/net/qlcnic/qlcnic.h1106
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c536
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c870
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h937
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1201
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c1466
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c2604
-rw-r--r--drivers/net/qlge/qlge.h434
-rw-r--r--drivers/net/qlge/qlge_dbg.c1171
-rw-r--r--drivers/net/qlge/qlge_main.c353
-rw-r--r--drivers/net/qlge/qlge_mpi.c165
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c11
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sc92031.c2
-rw-r--r--drivers/net/sfc/efx.c2
-rw-r--r--drivers/net/sh_eth.c10
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/skfp/skfddi.c21
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sky2.c96
-rw-r--r--drivers/net/smc911x.c6
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/spider_net.c2
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/stmmac/Kconfig8
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h277
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)202
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c245
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)347
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c9
-rw-r--r--drivers/net/stmmac/stmmac_main.c436
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sungem.c2
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/sunvnet.c5
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tehuti.c2
-rw-r--r--drivers/net/tg3.c92
-rw-r--r--drivers/net/tg3.h25
-rw-r--r--drivers/net/tlan.c30
-rw-r--r--drivers/net/tlan.h3
-rw-r--r--drivers/net/tokenring/3c359.c2
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tulip/de2104x.c2
-rw-r--r--drivers/net/tulip/dmfe.c2
-rw-r--r--drivers/net/tulip/tulip_core.c2
-rw-r--r--drivers/net/tulip/uli526x.c2
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/tun.c101
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/ucc_geth.c23
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/rtl8150.c7
-rw-r--r--drivers/net/via-rhine.c2
-rw-r--r--drivers/net/via-velocity.c8
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vxge/vxge-main.c8
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c17
-rw-r--r--drivers/net/wimax/i2400m/fw.c11
-rw-r--r--drivers/net/wireless/adm8211.c14
-rw-r--r--drivers/net/wireless/airo.c2
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h9
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c110
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c42
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h71
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c156
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c428
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c150
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c861
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1375
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c60
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig17
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h21
-rw-r--r--drivers/net/wireless/b43/dma.c2
-rw-r--r--drivers/net/wireless/b43/main.c43
-rw-r--r--drivers/net/wireless/b43/phy_lp.c24
-rw-r--r--drivers/net/wireless/b43/phy_n.c1795
-rw-r--r--drivers/net/wireless/b43/phy_n.h87
-rw-r--r--drivers/net/wireless/b43/pio.h40
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c577
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h71
-rw-r--r--drivers/net/wireless/b43legacy/main.c35
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c256
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c154
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c101
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c71
-rw-r--r--drivers/net/wireless/libertas/Kconfig6
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/assoc.c17
-rw-r--r--drivers/net/wireless/libertas/cmd.c22
-rw-r--r--drivers/net/wireless/libertas/cmd.h12
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c16
-rw-r--r--drivers/net/wireless/libertas/defs.h7
-rw-r--r--drivers/net/wireless/libertas/dev.h7
-rw-r--r--drivers/net/wireless/libertas/ethtool.c2
-rw-r--r--drivers/net/wireless/libertas/main.c42
-rw-r--r--drivers/net/wireless/libertas/mesh.c29
-rw-r--r--drivers/net/wireless/libertas/mesh.h32
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c26
-rw-r--r--drivers/net/wireless/libertas_tf/main.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c114
-rw-r--r--drivers/net/wireless/mwl8k.c2111
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/main.c12
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c42
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c38
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c117
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c57
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h34
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c79
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c37
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c28
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c15
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h87
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c83
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c23
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c351
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h35
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c134
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h33
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h100
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c62
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c20
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c492
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c15
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c43
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c10
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c14
-rw-r--r--drivers/net/xilinx_emaclite.c6
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c169
-rw-r--r--drivers/s390/net/qeth_core_mpc.h44
-rw-r--r--drivers/s390/net/qeth_core_sys.c14
-rw-r--r--drivers/s390/net/qeth_l2_main.c30
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c176
-rw-r--r--drivers/s390/net/qeth_l3_sys.c56
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h9
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c24
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c6
-rw-r--r--drivers/vhost/Kconfig11
-rw-r--r--drivers/vhost/Makefile2
-rw-r--r--drivers/vhost/net.c661
-rw-r--r--drivers/vhost/vhost.c1098
-rw-r--r--drivers/vhost/vhost.h161
408 files changed, 32594 insertions, 7042 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a..81e36596b1e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID) += hid/
106obj-$(CONFIG_PPC_PS3) += ps3/ 106obj-$(CONFIG_PPC_PS3) += ps3/
107obj-$(CONFIG_OF) += of/ 107obj-$(CONFIG_OF) += of/
108obj-$(CONFIG_SSB) += ssb/ 108obj-$(CONFIG_SSB) += ssb/
109obj-$(CONFIG_VHOST_NET) += vhost/
109obj-$(CONFIG_VIRTIO) += virtio/ 110obj-$(CONFIG_VIRTIO) += virtio/
110obj-$(CONFIG_VLYNQ) += vlynq/ 111obj-$(CONFIG_VLYNQ) += vlynq/
111obj-$(CONFIG_STAGING) += staging/ 112obj-$(CONFIG_STAGING) += staging/
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc53fed89b1..f7d6ebaa041 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2064,12 +2064,10 @@ fore200e_get_esi(struct fore200e* fore200e)
2064 return -EBUSY; 2064 return -EBUSY;
2065 } 2065 }
2066 2066
2067 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n", 2067 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
2068 fore200e->name, 2068 fore200e->name,
2069 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */ 2069 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2070 prom->serial_number & 0xFFFF, 2070 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
2071 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2072 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2073 2071
2074 for (i = 0; i < ESI_LEN; i++) { 2072 for (i = 0; i < ESI_LEN; i++) {
2075 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ]; 2073 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
@@ -2845,13 +2843,12 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2845 " interrupt line:\t\t%s\n" 2843 " interrupt line:\t\t%s\n"
2846 " physical base address:\t0x%p\n" 2844 " physical base address:\t0x%p\n"
2847 " virtual base address:\t0x%p\n" 2845 " virtual base address:\t0x%p\n"
2848 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n" 2846 " factory address (ESI):\t%pM\n"
2849 " board serial number:\t\t%d\n\n", 2847 " board serial number:\t\t%d\n\n",
2850 fore200e_irq_itoa(fore200e->irq), 2848 fore200e_irq_itoa(fore200e->irq),
2851 (void*)fore200e->phys_base, 2849 (void*)fore200e->phys_base,
2852 fore200e->virt_base, 2850 fore200e->virt_base,
2853 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2], 2851 fore200e->esi,
2854 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2855 fore200e->esi[4] * 256 + fore200e->esi[5]); 2852 fore200e->esi[4] * 256 + fore200e->esi[5]);
2856 2853
2857 return len; 2854 return len;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e33ae0025b1..01f36c08cb5 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3557,10 +3557,7 @@ init_card(struct atm_dev *dev)
3557 if (tmp) { 3557 if (tmp) {
3558 memcpy(card->atmdev->esi, tmp->dev_addr, 6); 3558 memcpy(card->atmdev->esi, tmp->dev_addr, 6);
3559 3559
3560 printk("%s: ESI %02x:%02x:%02x:%02x:%02x:%02x\n", 3560 printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
3561 card->name, card->atmdev->esi[0], card->atmdev->esi[1],
3562 card->atmdev->esi[2], card->atmdev->esi[3],
3563 card->atmdev->esi[4], card->atmdev->esi[5]);
3564 } 3561 }
3565 /* 3562 /*
3566 * XXX: </hack> 3563 * XXX: </hack>
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cf97c34cbaf..7fe7c324e7e 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -998,9 +998,7 @@ static int __devinit eeprom_validate(struct lanai_dev *lanai)
998 (unsigned int) e[EEPROM_MAC_REV + i]); 998 (unsigned int) e[EEPROM_MAC_REV + i]);
999 return -EIO; 999 return -EIO;
1000 } 1000 }
1001 DPRINTK("eeprom: MAC address = %02X:%02X:%02X:%02X:%02X:%02X\n", 1001 DPRINTK("eeprom: MAC address = %pM\n", &e[EEPROM_MAC]);
1002 e[EEPROM_MAC + 0], e[EEPROM_MAC + 1], e[EEPROM_MAC + 2],
1003 e[EEPROM_MAC + 3], e[EEPROM_MAC + 4], e[EEPROM_MAC + 5]);
1004 /* Verify serial number */ 1002 /* Verify serial number */
1005 lanai->serialno = eeprom_be4(lanai, EEPROM_SERIAL); 1003 lanai->serialno = eeprom_be4(lanai, EEPROM_SERIAL);
1006 v = eeprom_be4(lanai, EEPROM_SERIAL_REV); 1004 v = eeprom_be4(lanai, EEPROM_SERIAL_REV);
@@ -2483,14 +2481,8 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
2483 return sprintf(page, "revision: board=%d, pci_if=%d\n", 2481 return sprintf(page, "revision: board=%d, pci_if=%d\n",
2484 lanai->board_rev, (int) lanai->pci->revision); 2482 lanai->board_rev, (int) lanai->pci->revision);
2485 if (left-- == 0) 2483 if (left-- == 0)
2486 return sprintf(page, "EEPROM ESI: " 2484 return sprintf(page, "EEPROM ESI: %pM\n",
2487 "%02X:%02X:%02X:%02X:%02X:%02X\n", 2485 &lanai->eeprom[EEPROM_MAC]);
2488 lanai->eeprom[EEPROM_MAC + 0],
2489 lanai->eeprom[EEPROM_MAC + 1],
2490 lanai->eeprom[EEPROM_MAC + 2],
2491 lanai->eeprom[EEPROM_MAC + 3],
2492 lanai->eeprom[EEPROM_MAC + 4],
2493 lanai->eeprom[EEPROM_MAC + 5]);
2494 if (left-- == 0) 2486 if (left-- == 0)
2495 return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, " 2487 return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, "
2496 "GPIN=%d\n", (lanai->status & STATUS_SOOL) ? 1 : 0, 2488 "GPIN=%d\n", (lanai->status & STATUS_SOOL) ? 1 : 0,
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 3da804b1627..50838407b11 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -807,9 +807,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
807 } 807 }
808 } 808 }
809 809
810 printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i, 810 printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
811 card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
812 card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
813 811
814 card->atmdev->dev_data = card; 812 card->atmdev->dev_data = card;
815 card->atmdev->ci_range.vpi_bits = card->vpibits; 813 card->atmdev->ci_range.vpi_bits = card->vpibits;
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 051d1ebbd28..5aeb3b541c8 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -380,7 +380,6 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
380 struct ibft_nic *nic = entry->nic; 380 struct ibft_nic *nic = entry->nic;
381 void *ibft_loc = entry->header; 381 void *ibft_loc = entry->header;
382 char *str = buf; 382 char *str = buf;
383 char *mac;
384 int val; 383 int val;
385 384
386 if (!nic) 385 if (!nic)
@@ -421,10 +420,7 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
421 str += sprintf(str, "%d\n", nic->vlan); 420 str += sprintf(str, "%d\n", nic->vlan);
422 break; 421 break;
423 case ibft_eth_mac: 422 case ibft_eth_mac:
424 mac = nic->mac; 423 str += sprintf(str, "%pM\n", nic->mac);
425 str += sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x\n",
426 (u8)mac[0], (u8)mac[1], (u8)mac[2],
427 (u8)mac[3], (u8)mac[4], (u8)mac[5]);
428 break; 424 break;
429 case ibft_eth_hostname: 425 case ibft_eth_hostname:
430 str += sprintf_string(str, nic->hostname_len, 426 str += sprintf_string(str, nic->hostname_len,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 66b41351910..d94388b81a4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1371,15 +1371,8 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1371 tim.mac_addr = req->dst_mac; 1371 tim.mac_addr = req->dst_mac;
1372 tim.vlan_tag = ntohs(req->vlan_tag); 1372 tim.vlan_tag = ntohs(req->vlan_tag);
1373 if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) { 1373 if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
1374 printk(KERN_ERR 1374 printk(KERN_ERR "%s bad dst mac %pM\n",
1375 "%s bad dst mac %02x %02x %02x %02x %02x %02x\n", 1375 __func__, req->dst_mac);
1376 __func__,
1377 req->dst_mac[0],
1378 req->dst_mac[1],
1379 req->dst_mac[2],
1380 req->dst_mac[3],
1381 req->dst_mac[4],
1382 req->dst_mac[5]);
1383 goto reject; 1376 goto reject;
1384 } 1377 }
1385 1378
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 65bf91e16a4..79f9364aded 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -33,6 +33,7 @@
33#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */ 33#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
36#include <linux/poll.h> 37#include <linux/poll.h>
37#include <linux/capi.h> 38#include <linux/capi.h>
38#include <linux/kernelcapi.h> 39#include <linux/kernelcapi.h>
@@ -1407,114 +1408,84 @@ static void capinc_tty_exit(void)
1407 * /proc/capi/capi20: 1408 * /proc/capi/capi20:
1408 * minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt 1409 * minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
1409 */ 1410 */
1410static int proc_capidev_read_proc(char *page, char **start, off_t off, 1411static int capi20_proc_show(struct seq_file *m, void *v)
1411 int count, int *eof, void *data)
1412{ 1412{
1413 struct capidev *cdev; 1413 struct capidev *cdev;
1414 struct list_head *l; 1414 struct list_head *l;
1415 int len = 0;
1416 1415
1417 read_lock(&capidev_list_lock); 1416 read_lock(&capidev_list_lock);
1418 list_for_each(l, &capidev_list) { 1417 list_for_each(l, &capidev_list) {
1419 cdev = list_entry(l, struct capidev, list); 1418 cdev = list_entry(l, struct capidev, list);
1420 len += sprintf(page+len, "0 %d %lu %lu %lu %lu\n", 1419 seq_printf(m, "0 %d %lu %lu %lu %lu\n",
1421 cdev->ap.applid, 1420 cdev->ap.applid,
1422 cdev->ap.nrecvctlpkt, 1421 cdev->ap.nrecvctlpkt,
1423 cdev->ap.nrecvdatapkt, 1422 cdev->ap.nrecvdatapkt,
1424 cdev->ap.nsentctlpkt, 1423 cdev->ap.nsentctlpkt,
1425 cdev->ap.nsentdatapkt); 1424 cdev->ap.nsentdatapkt);
1426 if (len <= off) {
1427 off -= len;
1428 len = 0;
1429 } else {
1430 if (len-off > count)
1431 goto endloop;
1432 }
1433 } 1425 }
1434
1435endloop:
1436 read_unlock(&capidev_list_lock); 1426 read_unlock(&capidev_list_lock);
1437 if (len < count) 1427 return 0;
1438 *eof = 1;
1439 if (len > count) len = count;
1440 if (len < 0) len = 0;
1441 return len;
1442} 1428}
1443 1429
1430static int capi20_proc_open(struct inode *inode, struct file *file)
1431{
1432 return single_open(file, capi20_proc_show, NULL);
1433}
1434
1435static const struct file_operations capi20_proc_fops = {
1436 .owner = THIS_MODULE,
1437 .open = capi20_proc_open,
1438 .read = seq_read,
1439 .llseek = seq_lseek,
1440 .release = single_release,
1441};
1442
1444/* 1443/*
1445 * /proc/capi/capi20ncci: 1444 * /proc/capi/capi20ncci:
1446 * applid ncci 1445 * applid ncci
1447 */ 1446 */
1448static int proc_capincci_read_proc(char *page, char **start, off_t off, 1447static int capi20ncci_proc_show(struct seq_file *m, void *v)
1449 int count, int *eof, void *data)
1450{ 1448{
1451 struct capidev *cdev; 1449 struct capidev *cdev;
1452 struct capincci *np; 1450 struct capincci *np;
1453 struct list_head *l; 1451 struct list_head *l;
1454 int len = 0;
1455 1452
1456 read_lock(&capidev_list_lock); 1453 read_lock(&capidev_list_lock);
1457 list_for_each(l, &capidev_list) { 1454 list_for_each(l, &capidev_list) {
1458 cdev = list_entry(l, struct capidev, list); 1455 cdev = list_entry(l, struct capidev, list);
1459 for (np=cdev->nccis; np; np = np->next) { 1456 for (np=cdev->nccis; np; np = np->next) {
1460 len += sprintf(page+len, "%d 0x%x\n", 1457 seq_printf(m, "%d 0x%x\n",
1461 cdev->ap.applid, 1458 cdev->ap.applid,
1462 np->ncci); 1459 np->ncci);
1463 if (len <= off) {
1464 off -= len;
1465 len = 0;
1466 } else {
1467 if (len-off > count)
1468 goto endloop;
1469 }
1470 } 1460 }
1471 } 1461 }
1472endloop:
1473 read_unlock(&capidev_list_lock); 1462 read_unlock(&capidev_list_lock);
1474 *start = page+off; 1463 return 0;
1475 if (len < count)
1476 *eof = 1;
1477 if (len>count) len = count;
1478 if (len<0) len = 0;
1479 return len;
1480} 1464}
1481 1465
1482static struct procfsentries { 1466static int capi20ncci_proc_open(struct inode *inode, struct file *file)
1483 char *name; 1467{
1484 mode_t mode; 1468 return single_open(file, capi20ncci_proc_show, NULL);
1485 int (*read_proc)(char *page, char **start, off_t off, 1469}
1486 int count, int *eof, void *data); 1470
1487 struct proc_dir_entry *procent; 1471static const struct file_operations capi20ncci_proc_fops = {
1488} procfsentries[] = { 1472 .owner = THIS_MODULE,
1489 /* { "capi", S_IFDIR, 0 }, */ 1473 .open = capi20ncci_proc_open,
1490 { "capi/capi20", 0 , proc_capidev_read_proc }, 1474 .read = seq_read,
1491 { "capi/capi20ncci", 0 , proc_capincci_read_proc }, 1475 .llseek = seq_lseek,
1476 .release = single_release,
1492}; 1477};
1493 1478
1494static void __init proc_init(void) 1479static void __init proc_init(void)
1495{ 1480{
1496 int nelem = ARRAY_SIZE(procfsentries); 1481 proc_create("capi/capi20", 0, NULL, &capi20_proc_fops);
1497 int i; 1482 proc_create("capi/capi20ncci", 0, NULL, &capi20ncci_proc_fops);
1498
1499 for (i=0; i < nelem; i++) {
1500 struct procfsentries *p = procfsentries + i;
1501 p->procent = create_proc_entry(p->name, p->mode, NULL);
1502 if (p->procent) p->procent->read_proc = p->read_proc;
1503 }
1504} 1483}
1505 1484
1506static void __exit proc_exit(void) 1485static void __exit proc_exit(void)
1507{ 1486{
1508 int nelem = ARRAY_SIZE(procfsentries); 1487 remove_proc_entry("capi/capi20", NULL);
1509 int i; 1488 remove_proc_entry("capi/capi20ncci", NULL);
1510
1511 for (i=nelem-1; i >= 0; i--) {
1512 struct procfsentries *p = procfsentries + i;
1513 if (p->procent) {
1514 remove_proc_entry(p->name, NULL);
1515 p->procent = NULL;
1516 }
1517 }
1518} 1489}
1519 1490
1520/* -------- init function and module interface ---------------------- */ 1491/* -------- init function and module interface ---------------------- */
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 66b7d7a8647..bb450152fb7 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -24,6 +24,7 @@
24#include <linux/isdn.h> 24#include <linux/isdn.h>
25#include <linux/isdnif.h> 25#include <linux/isdnif.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
27#include <linux/capi.h> 28#include <linux/capi.h>
28#include <linux/kernelcapi.h> 29#include <linux/kernelcapi.h>
29#include <linux/ctype.h> 30#include <linux/ctype.h>
@@ -2229,59 +2230,37 @@ static void lower_callback(unsigned int cmd, u32 contr, void *data)
2229 * /proc/capi/capidrv: 2230 * /proc/capi/capidrv:
2230 * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt 2231 * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
2231 */ 2232 */
2232static int proc_capidrv_read_proc(char *page, char **start, off_t off, 2233static int capidrv_proc_show(struct seq_file *m, void *v)
2233 int count, int *eof, void *data)
2234{ 2234{
2235 int len = 0; 2235 seq_printf(m, "%lu %lu %lu %lu\n",
2236
2237 len += sprintf(page+len, "%lu %lu %lu %lu\n",
2238 global.ap.nrecvctlpkt, 2236 global.ap.nrecvctlpkt,
2239 global.ap.nrecvdatapkt, 2237 global.ap.nrecvdatapkt,
2240 global.ap.nsentctlpkt, 2238 global.ap.nsentctlpkt,
2241 global.ap.nsentdatapkt); 2239 global.ap.nsentdatapkt);
2242 if (off+count >= len) 2240 return 0;
2243 *eof = 1; 2241}
2244 if (len < off) 2242
2245 return 0; 2243static int capidrv_proc_open(struct inode *inode, struct file *file)
2246 *start = page + off; 2244{
2247 return ((count < len-off) ? count : len-off); 2245 return single_open(file, capidrv_proc_show, NULL);
2248} 2246}
2249 2247
2250static struct procfsentries { 2248static const struct file_operations capidrv_proc_fops = {
2251 char *name; 2249 .owner = THIS_MODULE,
2252 mode_t mode; 2250 .open = capidrv_proc_open,
2253 int (*read_proc)(char *page, char **start, off_t off, 2251 .read = seq_read,
2254 int count, int *eof, void *data); 2252 .llseek = seq_lseek,
2255 struct proc_dir_entry *procent; 2253 .release = single_release,
2256} procfsentries[] = {
2257 /* { "capi", S_IFDIR, 0 }, */
2258 { "capi/capidrv", 0 , proc_capidrv_read_proc },
2259}; 2254};
2260 2255
2261static void __init proc_init(void) 2256static void __init proc_init(void)
2262{ 2257{
2263 int nelem = ARRAY_SIZE(procfsentries); 2258 proc_create("capi/capidrv", 0, NULL, &capidrv_proc_fops);
2264 int i;
2265
2266 for (i=0; i < nelem; i++) {
2267 struct procfsentries *p = procfsentries + i;
2268 p->procent = create_proc_entry(p->name, p->mode, NULL);
2269 if (p->procent) p->procent->read_proc = p->read_proc;
2270 }
2271} 2259}
2272 2260
2273static void __exit proc_exit(void) 2261static void __exit proc_exit(void)
2274{ 2262{
2275 int nelem = ARRAY_SIZE(procfsentries); 2263 remove_proc_entry("capi/capidrv", NULL);
2276 int i;
2277
2278 for (i=nelem-1; i >= 0; i--) {
2279 struct procfsentries *p = procfsentries + i;
2280 if (p->procent) {
2281 remove_proc_entry(p->name, NULL);
2282 p->procent = NULL;
2283 }
2284 }
2285} 2264}
2286 2265
2287static int __init capidrv_init(void) 2266static int __init capidrv_init(void)
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index dc506ab99ca..b0bacf377c1 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -490,13 +490,7 @@ attach_capi_ctr(struct capi_ctr *card)
490 card->traceflag = showcapimsgs; 490 card->traceflag = showcapimsgs;
491 491
492 sprintf(card->procfn, "capi/controllers/%d", card->cnr); 492 sprintf(card->procfn, "capi/controllers/%d", card->cnr);
493 card->procent = create_proc_entry(card->procfn, 0, NULL); 493 card->procent = proc_create_data(card->procfn, 0, NULL, card->proc_fops, card);
494 if (card->procent) {
495 card->procent->read_proc =
496 (int (*)(char *,char **,off_t,int,int *,void *))
497 card->ctr_read_proc;
498 card->procent->data = card;
499 }
500 494
501 ncards++; 495 ncards++;
502 printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n", 496 printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n",
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 3f5cd06af10..6f0ae32906b 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -13,6 +13,8 @@
13 13
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/ctype.h> 15#include <linux/ctype.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
16#include <linux/isdn/capilli.h> 18#include <linux/isdn/capilli.h>
17#include <linux/isdn/capicmd.h> 19#include <linux/isdn/capicmd.h>
18#include <linux/isdn/capiutil.h> 20#include <linux/isdn/capiutil.h>
@@ -2106,35 +2108,22 @@ static char *gigaset_procinfo(struct capi_ctr *ctr)
2106 return ctr->name; /* ToDo: more? */ 2108 return ctr->name; /* ToDo: more? */
2107} 2109}
2108 2110
2109/** 2111static int gigaset_proc_show(struct seq_file *m, void *v)
2110 * gigaset_ctr_read_proc() - build controller proc file entry
2111 * @page: buffer of PAGE_SIZE bytes for receiving the entry.
2112 * @start: unused.
2113 * @off: unused.
2114 * @count: unused.
2115 * @eof: unused.
2116 * @ctr: controller descriptor structure.
2117 *
2118 * Return value: length of generated entry
2119 */
2120static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2121 int count, int *eof, struct capi_ctr *ctr)
2122{ 2112{
2113 struct capi_ctr *ctr = m->private;
2123 struct cardstate *cs = ctr->driverdata; 2114 struct cardstate *cs = ctr->driverdata;
2124 char *s; 2115 char *s;
2125 int i; 2116 int i;
2126 int len = 0; 2117
2127 len += sprintf(page+len, "%-16s %s\n", "name", ctr->name); 2118 seq_printf(m, "%-16s %s\n", "name", ctr->name);
2128 len += sprintf(page+len, "%-16s %s %s\n", "dev", 2119 seq_printf(m, "%-16s %s %s\n", "dev",
2129 dev_driver_string(cs->dev), dev_name(cs->dev)); 2120 dev_driver_string(cs->dev), dev_name(cs->dev));
2130 len += sprintf(page+len, "%-16s %d\n", "id", cs->myid); 2121 seq_printf(m, "%-16s %d\n", "id", cs->myid);
2131 if (cs->gotfwver) 2122 if (cs->gotfwver)
2132 len += sprintf(page+len, "%-16s %d.%d.%d.%d\n", "firmware", 2123 seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
2133 cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]); 2124 cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
2134 len += sprintf(page+len, "%-16s %d\n", "channels", 2125 seq_printf(m, "%-16s %d\n", "channels", cs->channels);
2135 cs->channels); 2126 seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
2136 len += sprintf(page+len, "%-16s %s\n", "onechannel",
2137 cs->onechannel ? "yes" : "no");
2138 2127
2139 switch (cs->mode) { 2128 switch (cs->mode) {
2140 case M_UNKNOWN: 2129 case M_UNKNOWN:
@@ -2152,7 +2141,7 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2152 default: 2141 default:
2153 s = "??"; 2142 s = "??";
2154 } 2143 }
2155 len += sprintf(page+len, "%-16s %s\n", "mode", s); 2144 seq_printf(m, "%-16s %s\n", "mode", s);
2156 2145
2157 switch (cs->mstate) { 2146 switch (cs->mstate) {
2158 case MS_UNINITIALIZED: 2147 case MS_UNINITIALIZED:
@@ -2176,25 +2165,21 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2176 default: 2165 default:
2177 s = "??"; 2166 s = "??";
2178 } 2167 }
2179 len += sprintf(page+len, "%-16s %s\n", "mstate", s); 2168 seq_printf(m, "%-16s %s\n", "mstate", s);
2180 2169
2181 len += sprintf(page+len, "%-16s %s\n", "running", 2170 seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
2182 cs->running ? "yes" : "no"); 2171 seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
2183 len += sprintf(page+len, "%-16s %s\n", "connected", 2172 seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
2184 cs->connected ? "yes" : "no"); 2173 seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
2185 len += sprintf(page+len, "%-16s %s\n", "isdn_up",
2186 cs->isdn_up ? "yes" : "no");
2187 len += sprintf(page+len, "%-16s %s\n", "cidmode",
2188 cs->cidmode ? "yes" : "no");
2189 2174
2190 for (i = 0; i < cs->channels; i++) { 2175 for (i = 0; i < cs->channels; i++) {
2191 len += sprintf(page+len, "[%d]%-13s %d\n", i, "corrupted", 2176 seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
2192 cs->bcs[i].corrupted); 2177 cs->bcs[i].corrupted);
2193 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_down", 2178 seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
2194 cs->bcs[i].trans_down); 2179 cs->bcs[i].trans_down);
2195 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_up", 2180 seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
2196 cs->bcs[i].trans_up); 2181 cs->bcs[i].trans_up);
2197 len += sprintf(page+len, "[%d]%-13s %d\n", i, "chstate", 2182 seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
2198 cs->bcs[i].chstate); 2183 cs->bcs[i].chstate);
2199 switch (cs->bcs[i].proto2) { 2184 switch (cs->bcs[i].proto2) {
2200 case L2_BITSYNC: 2185 case L2_BITSYNC:
@@ -2209,11 +2194,23 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2209 default: 2194 default:
2210 s = "??"; 2195 s = "??";
2211 } 2196 }
2212 len += sprintf(page+len, "[%d]%-13s %s\n", i, "proto2", s); 2197 seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
2213 } 2198 }
2214 return len; 2199 return 0;
2215} 2200}
2216 2201
2202static int gigaset_proc_open(struct inode *inode, struct file *file)
2203{
2204 return single_open(file, gigaset_proc_show, PDE(inode)->data);
2205}
2206
2207static const struct file_operations gigaset_proc_fops = {
2208 .owner = THIS_MODULE,
2209 .open = gigaset_proc_open,
2210 .read = seq_read,
2211 .llseek = seq_lseek,
2212 .release = single_release,
2213};
2217 2214
2218static struct capi_driver capi_driver_gigaset = { 2215static struct capi_driver capi_driver_gigaset = {
2219 .name = "gigaset", 2216 .name = "gigaset",
@@ -2256,7 +2253,7 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
2256 iif->ctr.release_appl = gigaset_release_appl; 2253 iif->ctr.release_appl = gigaset_release_appl;
2257 iif->ctr.send_message = gigaset_send_message; 2254 iif->ctr.send_message = gigaset_send_message;
2258 iif->ctr.procinfo = gigaset_procinfo; 2255 iif->ctr.procinfo = gigaset_procinfo;
2259 iif->ctr.ctr_read_proc = gigaset_ctr_read_proc; 2256 iif->ctr.proc_fops = &gigaset_proc_fops;
2260 INIT_LIST_HEAD(&iif->appls); 2257 INIT_LIST_HEAD(&iif->appls);
2261 skb_queue_head_init(&iif->sendqueue); 2258 skb_queue_head_init(&iif->sendqueue);
2262 atomic_set(&iif->sendqlen, 0); 2259 atomic_set(&iif->sendqlen, 0);
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h
index d964f07e4a5..a70e8854461 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/isdn/hardware/avm/avmcard.h
@@ -556,8 +556,7 @@ u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
556void b1_parse_version(avmctrl_info *card); 556void b1_parse_version(avmctrl_info *card);
557irqreturn_t b1_interrupt(int interrupt, void *devptr); 557irqreturn_t b1_interrupt(int interrupt, void *devptr);
558 558
559int b1ctl_read_proc(char *page, char **start, off_t off, 559extern const struct file_operations b1ctl_proc_fops;
560 int count, int *eof, struct capi_ctr *ctrl);
561 560
562avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *, 561avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
563 long rsize, long ssize); 562 long rsize, long ssize);
@@ -577,7 +576,6 @@ void b1dma_register_appl(struct capi_ctr *ctrl,
577 capi_register_params *rp); 576 capi_register_params *rp);
578void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl); 577void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
579u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb); 578u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
580int b1dmactl_read_proc(char *page, char **start, off_t off, 579extern const struct file_operations b1dmactl_proc_fops;
581 int count, int *eof, struct capi_ctr *ctrl);
582 580
583#endif /* _AVMCARD_H_ */ 581#endif /* _AVMCARD_H_ */
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index a7c0083e78a..c38fa0f4c72 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -12,6 +12,8 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
15#include <linux/skbuff.h> 17#include <linux/skbuff.h>
16#include <linux/delay.h> 18#include <linux/delay.h>
17#include <linux/mm.h> 19#include <linux/mm.h>
@@ -634,18 +636,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
634} 636}
635 637
636/* ------------------------------------------------------------- */ 638/* ------------------------------------------------------------- */
637int b1ctl_read_proc(char *page, char **start, off_t off, 639static int b1ctl_proc_show(struct seq_file *m, void *v)
638 int count, int *eof, struct capi_ctr *ctrl)
639{ 640{
641 struct capi_ctr *ctrl = m->private;
640 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); 642 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
641 avmcard *card = cinfo->card; 643 avmcard *card = cinfo->card;
642 u8 flag; 644 u8 flag;
643 int len = 0;
644 char *s; 645 char *s;
645 646
646 len += sprintf(page+len, "%-16s %s\n", "name", card->name); 647 seq_printf(m, "%-16s %s\n", "name", card->name);
647 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port); 648 seq_printf(m, "%-16s 0x%x\n", "io", card->port);
648 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq); 649 seq_printf(m, "%-16s %d\n", "irq", card->irq);
649 switch (card->cardtype) { 650 switch (card->cardtype) {
650 case avm_b1isa: s = "B1 ISA"; break; 651 case avm_b1isa: s = "B1 ISA"; break;
651 case avm_b1pci: s = "B1 PCI"; break; 652 case avm_b1pci: s = "B1 PCI"; break;
@@ -658,20 +659,20 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
658 case avm_c2: s = "C2"; break; 659 case avm_c2: s = "C2"; break;
659 default: s = "???"; break; 660 default: s = "???"; break;
660 } 661 }
661 len += sprintf(page+len, "%-16s %s\n", "type", s); 662 seq_printf(m, "%-16s %s\n", "type", s);
662 if (card->cardtype == avm_t1isa) 663 if (card->cardtype == avm_t1isa)
663 len += sprintf(page+len, "%-16s %d\n", "cardnr", card->cardnr); 664 seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
664 if ((s = cinfo->version[VER_DRIVER]) != NULL) 665 if ((s = cinfo->version[VER_DRIVER]) != NULL)
665 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 666 seq_printf(m, "%-16s %s\n", "ver_driver", s);
666 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 667 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
667 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 668 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
668 if ((s = cinfo->version[VER_SERIAL]) != NULL) 669 if ((s = cinfo->version[VER_SERIAL]) != NULL)
669 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 670 seq_printf(m, "%-16s %s\n", "ver_serial", s);
670 671
671 if (card->cardtype != avm_m1) { 672 if (card->cardtype != avm_m1) {
672 flag = ((u8 *)(ctrl->profile.manu))[3]; 673 flag = ((u8 *)(ctrl->profile.manu))[3];
673 if (flag) 674 if (flag)
674 len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n", 675 seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
675 "protocol", 676 "protocol",
676 (flag & 0x01) ? " DSS1" : "", 677 (flag & 0x01) ? " DSS1" : "",
677 (flag & 0x02) ? " CT1" : "", 678 (flag & 0x02) ? " CT1" : "",
@@ -685,7 +686,7 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
685 if (card->cardtype != avm_m1) { 686 if (card->cardtype != avm_m1) {
686 flag = ((u8 *)(ctrl->profile.manu))[5]; 687 flag = ((u8 *)(ctrl->profile.manu))[5];
687 if (flag) 688 if (flag)
688 len += sprintf(page+len, "%-16s%s%s%s%s\n", 689 seq_printf(m, "%-16s%s%s%s%s\n",
689 "linetype", 690 "linetype",
690 (flag & 0x01) ? " point to point" : "", 691 (flag & 0x01) ? " point to point" : "",
691 (flag & 0x02) ? " point to multipoint" : "", 692 (flag & 0x02) ? " point to multipoint" : "",
@@ -693,16 +694,25 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
693 (flag & 0x04) ? " leased line with D-channel" : "" 694 (flag & 0x04) ? " leased line with D-channel" : ""
694 ); 695 );
695 } 696 }
696 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 697 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
697 698
698 if (off+count >= len) 699 return 0;
699 *eof = 1; 700}
700 if (len < off) 701
701 return 0; 702static int b1ctl_proc_open(struct inode *inode, struct file *file)
702 *start = page + off; 703{
703 return ((count < len-off) ? count : len-off); 704 return single_open(file, b1ctl_proc_show, PDE(inode)->data);
704} 705}
705 706
707const struct file_operations b1ctl_proc_fops = {
708 .owner = THIS_MODULE,
709 .open = b1ctl_proc_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713};
714EXPORT_SYMBOL(b1ctl_proc_fops);
715
706/* ------------------------------------------------------------- */ 716/* ------------------------------------------------------------- */
707 717
708#ifdef CONFIG_PCI 718#ifdef CONFIG_PCI
@@ -781,8 +791,6 @@ EXPORT_SYMBOL(b1_send_message);
781EXPORT_SYMBOL(b1_parse_version); 791EXPORT_SYMBOL(b1_parse_version);
782EXPORT_SYMBOL(b1_interrupt); 792EXPORT_SYMBOL(b1_interrupt);
783 793
784EXPORT_SYMBOL(b1ctl_read_proc);
785
786static int __init b1_init(void) 794static int __init b1_init(void)
787{ 795{
788 char *p; 796 char *p;
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 0e84aaae43f..124550d0dbf 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -11,6 +11,8 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
15#include <linux/delay.h> 17#include <linux/delay.h>
16#include <linux/mm.h> 18#include <linux/mm.h>
@@ -855,21 +857,20 @@ u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
855 857
856/* ------------------------------------------------------------- */ 858/* ------------------------------------------------------------- */
857 859
858int b1dmactl_read_proc(char *page, char **start, off_t off, 860static int b1dmactl_proc_show(struct seq_file *m, void *v)
859 int count, int *eof, struct capi_ctr *ctrl)
860{ 861{
862 struct capi_ctr *ctrl = m->private;
861 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); 863 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
862 avmcard *card = cinfo->card; 864 avmcard *card = cinfo->card;
863 u8 flag; 865 u8 flag;
864 int len = 0;
865 char *s; 866 char *s;
866 u32 txoff, txlen, rxoff, rxlen, csr; 867 u32 txoff, txlen, rxoff, rxlen, csr;
867 unsigned long flags; 868 unsigned long flags;
868 869
869 len += sprintf(page+len, "%-16s %s\n", "name", card->name); 870 seq_printf(m, "%-16s %s\n", "name", card->name);
870 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port); 871 seq_printf(m, "%-16s 0x%x\n", "io", card->port);
871 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq); 872 seq_printf(m, "%-16s %d\n", "irq", card->irq);
872 len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase); 873 seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
873 switch (card->cardtype) { 874 switch (card->cardtype) {
874 case avm_b1isa: s = "B1 ISA"; break; 875 case avm_b1isa: s = "B1 ISA"; break;
875 case avm_b1pci: s = "B1 PCI"; break; 876 case avm_b1pci: s = "B1 PCI"; break;
@@ -882,18 +883,18 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
882 case avm_c2: s = "C2"; break; 883 case avm_c2: s = "C2"; break;
883 default: s = "???"; break; 884 default: s = "???"; break;
884 } 885 }
885 len += sprintf(page+len, "%-16s %s\n", "type", s); 886 seq_printf(m, "%-16s %s\n", "type", s);
886 if ((s = cinfo->version[VER_DRIVER]) != NULL) 887 if ((s = cinfo->version[VER_DRIVER]) != NULL)
887 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 888 seq_printf(m, "%-16s %s\n", "ver_driver", s);
888 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 889 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
889 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 890 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
890 if ((s = cinfo->version[VER_SERIAL]) != NULL) 891 if ((s = cinfo->version[VER_SERIAL]) != NULL)
891 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 892 seq_printf(m, "%-16s %s\n", "ver_serial", s);
892 893
893 if (card->cardtype != avm_m1) { 894 if (card->cardtype != avm_m1) {
894 flag = ((u8 *)(ctrl->profile.manu))[3]; 895 flag = ((u8 *)(ctrl->profile.manu))[3];
895 if (flag) 896 if (flag)
896 len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n", 897 seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
897 "protocol", 898 "protocol",
898 (flag & 0x01) ? " DSS1" : "", 899 (flag & 0x01) ? " DSS1" : "",
899 (flag & 0x02) ? " CT1" : "", 900 (flag & 0x02) ? " CT1" : "",
@@ -907,7 +908,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
907 if (card->cardtype != avm_m1) { 908 if (card->cardtype != avm_m1) {
908 flag = ((u8 *)(ctrl->profile.manu))[5]; 909 flag = ((u8 *)(ctrl->profile.manu))[5];
909 if (flag) 910 if (flag)
910 len += sprintf(page+len, "%-16s%s%s%s%s\n", 911 seq_printf(m, "%-16s%s%s%s%s\n",
911 "linetype", 912 "linetype",
912 (flag & 0x01) ? " point to point" : "", 913 (flag & 0x01) ? " point to point" : "",
913 (flag & 0x02) ? " point to multipoint" : "", 914 (flag & 0x02) ? " point to multipoint" : "",
@@ -915,7 +916,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
915 (flag & 0x04) ? " leased line with D-channel" : "" 916 (flag & 0x04) ? " leased line with D-channel" : ""
916 ); 917 );
917 } 918 }
918 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 919 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
919 920
920 921
921 spin_lock_irqsave(&card->lock, flags); 922 spin_lock_irqsave(&card->lock, flags);
@@ -930,27 +931,30 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
930 931
931 spin_unlock_irqrestore(&card->lock, flags); 932 spin_unlock_irqrestore(&card->lock, flags);
932 933
933 len += sprintf(page+len, "%-16s 0x%lx\n", 934 seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
934 "csr (cached)", (unsigned long)card->csr); 935 seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
935 len += sprintf(page+len, "%-16s 0x%lx\n", 936 seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
936 "csr", (unsigned long)csr); 937 seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
937 len += sprintf(page+len, "%-16s %lu\n", 938 seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
938 "txoff", (unsigned long)txoff); 939 seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
939 len += sprintf(page+len, "%-16s %lu\n", 940
940 "txlen", (unsigned long)txlen); 941 return 0;
941 len += sprintf(page+len, "%-16s %lu\n", 942}
942 "rxoff", (unsigned long)rxoff); 943
943 len += sprintf(page+len, "%-16s %lu\n", 944static int b1dmactl_proc_open(struct inode *inode, struct file *file)
944 "rxlen", (unsigned long)rxlen); 945{
945 946 return single_open(file, b1dmactl_proc_show, PDE(inode)->data);
946 if (off+count >= len)
947 *eof = 1;
948 if (len < off)
949 return 0;
950 *start = page + off;
951 return ((count < len-off) ? count : len-off);
952} 947}
953 948
949const struct file_operations b1dmactl_proc_fops = {
950 .owner = THIS_MODULE,
951 .open = b1dmactl_proc_open,
952 .read = seq_read,
953 .llseek = seq_lseek,
954 .release = single_release,
955};
956EXPORT_SYMBOL(b1dmactl_proc_fops);
957
954/* ------------------------------------------------------------- */ 958/* ------------------------------------------------------------- */
955 959
956EXPORT_SYMBOL(b1dma_reset); 960EXPORT_SYMBOL(b1dma_reset);
@@ -963,7 +967,6 @@ EXPORT_SYMBOL(b1dma_reset_ctr);
963EXPORT_SYMBOL(b1dma_register_appl); 967EXPORT_SYMBOL(b1dma_register_appl);
964EXPORT_SYMBOL(b1dma_release_appl); 968EXPORT_SYMBOL(b1dma_release_appl);
965EXPORT_SYMBOL(b1dma_send_message); 969EXPORT_SYMBOL(b1dma_send_message);
966EXPORT_SYMBOL(b1dmactl_read_proc);
967 970
968static int __init b1dma_init(void) 971static int __init b1dma_init(void)
969{ 972{
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 6461a32bc83..ff5390546f9 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -121,7 +121,7 @@ static int b1isa_probe(struct pci_dev *pdev)
121 cinfo->capi_ctrl.load_firmware = b1_load_firmware; 121 cinfo->capi_ctrl.load_firmware = b1_load_firmware;
122 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; 122 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
123 cinfo->capi_ctrl.procinfo = b1isa_procinfo; 123 cinfo->capi_ctrl.procinfo = b1isa_procinfo;
124 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 124 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
125 strcpy(cinfo->capi_ctrl.name, card->name); 125 strcpy(cinfo->capi_ctrl.name, card->name);
126 126
127 retval = attach_capi_ctr(&cinfo->capi_ctrl); 127 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/isdn/hardware/avm/b1pci.c
index 5b314a2c404..c97e4315079 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/isdn/hardware/avm/b1pci.c
@@ -112,7 +112,7 @@ static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
112 cinfo->capi_ctrl.load_firmware = b1_load_firmware; 112 cinfo->capi_ctrl.load_firmware = b1_load_firmware;
113 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; 113 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
114 cinfo->capi_ctrl.procinfo = b1pci_procinfo; 114 cinfo->capi_ctrl.procinfo = b1pci_procinfo;
115 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 115 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
116 strcpy(cinfo->capi_ctrl.name, card->name); 116 strcpy(cinfo->capi_ctrl.name, card->name);
117 cinfo->capi_ctrl.owner = THIS_MODULE; 117 cinfo->capi_ctrl.owner = THIS_MODULE;
118 118
@@ -251,7 +251,7 @@ static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
251 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware; 251 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
252 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr; 252 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
253 cinfo->capi_ctrl.procinfo = b1pciv4_procinfo; 253 cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
254 cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc; 254 cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
255 strcpy(cinfo->capi_ctrl.name, card->name); 255 strcpy(cinfo->capi_ctrl.name, card->name);
256 256
257 retval = attach_capi_ctr(&cinfo->capi_ctrl); 257 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pcmcia.c b/drivers/isdn/hardware/avm/b1pcmcia.c
index 7740403b40e..d6391e0afee 100644
--- a/drivers/isdn/hardware/avm/b1pcmcia.c
+++ b/drivers/isdn/hardware/avm/b1pcmcia.c
@@ -108,7 +108,7 @@ static int b1pcmcia_add_card(unsigned int port, unsigned irq,
108 cinfo->capi_ctrl.load_firmware = b1_load_firmware; 108 cinfo->capi_ctrl.load_firmware = b1_load_firmware;
109 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; 109 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
110 cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo; 110 cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo;
111 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 111 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
112 strcpy(cinfo->capi_ctrl.name, card->name); 112 strcpy(cinfo->capi_ctrl.name, card->name);
113 113
114 retval = attach_capi_ctr(&cinfo->capi_ctrl); 114 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 6833301a45f..de6e6b31181 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -11,6 +11,8 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
15#include <linux/delay.h> 17#include <linux/delay.h>
16#include <linux/mm.h> 18#include <linux/mm.h>
@@ -1062,19 +1064,18 @@ static char *c4_procinfo(struct capi_ctr *ctrl)
1062 return cinfo->infobuf; 1064 return cinfo->infobuf;
1063} 1065}
1064 1066
1065static int c4_read_proc(char *page, char **start, off_t off, 1067static int c4_proc_show(struct seq_file *m, void *v)
1066 int count, int *eof, struct capi_ctr *ctrl)
1067{ 1068{
1069 struct capi_ctr *ctrl = m->private;
1068 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); 1070 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
1069 avmcard *card = cinfo->card; 1071 avmcard *card = cinfo->card;
1070 u8 flag; 1072 u8 flag;
1071 int len = 0;
1072 char *s; 1073 char *s;
1073 1074
1074 len += sprintf(page+len, "%-16s %s\n", "name", card->name); 1075 seq_printf(m, "%-16s %s\n", "name", card->name);
1075 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port); 1076 seq_printf(m, "%-16s 0x%x\n", "io", card->port);
1076 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq); 1077 seq_printf(m, "%-16s %d\n", "irq", card->irq);
1077 len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase); 1078 seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
1078 switch (card->cardtype) { 1079 switch (card->cardtype) {
1079 case avm_b1isa: s = "B1 ISA"; break; 1080 case avm_b1isa: s = "B1 ISA"; break;
1080 case avm_b1pci: s = "B1 PCI"; break; 1081 case avm_b1pci: s = "B1 PCI"; break;
@@ -1087,18 +1088,18 @@ static int c4_read_proc(char *page, char **start, off_t off,
1087 case avm_c2: s = "C2"; break; 1088 case avm_c2: s = "C2"; break;
1088 default: s = "???"; break; 1089 default: s = "???"; break;
1089 } 1090 }
1090 len += sprintf(page+len, "%-16s %s\n", "type", s); 1091 seq_printf(m, "%-16s %s\n", "type", s);
1091 if ((s = cinfo->version[VER_DRIVER]) != NULL) 1092 if ((s = cinfo->version[VER_DRIVER]) != NULL)
1092 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 1093 seq_printf(m, "%-16s %s\n", "ver_driver", s);
1093 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 1094 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
1094 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 1095 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
1095 if ((s = cinfo->version[VER_SERIAL]) != NULL) 1096 if ((s = cinfo->version[VER_SERIAL]) != NULL)
1096 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 1097 seq_printf(m, "%-16s %s\n", "ver_serial", s);
1097 1098
1098 if (card->cardtype != avm_m1) { 1099 if (card->cardtype != avm_m1) {
1099 flag = ((u8 *)(ctrl->profile.manu))[3]; 1100 flag = ((u8 *)(ctrl->profile.manu))[3];
1100 if (flag) 1101 if (flag)
1101 len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n", 1102 seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
1102 "protocol", 1103 "protocol",
1103 (flag & 0x01) ? " DSS1" : "", 1104 (flag & 0x01) ? " DSS1" : "",
1104 (flag & 0x02) ? " CT1" : "", 1105 (flag & 0x02) ? " CT1" : "",
@@ -1112,7 +1113,7 @@ static int c4_read_proc(char *page, char **start, off_t off,
1112 if (card->cardtype != avm_m1) { 1113 if (card->cardtype != avm_m1) {
1113 flag = ((u8 *)(ctrl->profile.manu))[5]; 1114 flag = ((u8 *)(ctrl->profile.manu))[5];
1114 if (flag) 1115 if (flag)
1115 len += sprintf(page+len, "%-16s%s%s%s%s\n", 1116 seq_printf(m, "%-16s%s%s%s%s\n",
1116 "linetype", 1117 "linetype",
1117 (flag & 0x01) ? " point to point" : "", 1118 (flag & 0x01) ? " point to point" : "",
1118 (flag & 0x02) ? " point to multipoint" : "", 1119 (flag & 0x02) ? " point to multipoint" : "",
@@ -1120,16 +1121,24 @@ static int c4_read_proc(char *page, char **start, off_t off,
1120 (flag & 0x04) ? " leased line with D-channel" : "" 1121 (flag & 0x04) ? " leased line with D-channel" : ""
1121 ); 1122 );
1122 } 1123 }
1123 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 1124 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
1124 1125
1125 if (off+count >= len) 1126 return 0;
1126 *eof = 1;
1127 if (len < off)
1128 return 0;
1129 *start = page + off;
1130 return ((count < len-off) ? count : len-off);
1131} 1127}
1132 1128
1129static int c4_proc_open(struct inode *inode, struct file *file)
1130{
1131 return single_open(file, c4_proc_show, PDE(inode)->data);
1132}
1133
1134static const struct file_operations c4_proc_fops = {
1135 .owner = THIS_MODULE,
1136 .open = c4_proc_open,
1137 .read = seq_read,
1138 .llseek = seq_lseek,
1139 .release = single_release,
1140};
1141
1133/* ------------------------------------------------------------- */ 1142/* ------------------------------------------------------------- */
1134 1143
1135static int c4_add_card(struct capicardparams *p, struct pci_dev *dev, 1144static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
@@ -1201,7 +1210,7 @@ static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
1201 cinfo->capi_ctrl.load_firmware = c4_load_firmware; 1210 cinfo->capi_ctrl.load_firmware = c4_load_firmware;
1202 cinfo->capi_ctrl.reset_ctr = c4_reset_ctr; 1211 cinfo->capi_ctrl.reset_ctr = c4_reset_ctr;
1203 cinfo->capi_ctrl.procinfo = c4_procinfo; 1212 cinfo->capi_ctrl.procinfo = c4_procinfo;
1204 cinfo->capi_ctrl.ctr_read_proc = c4_read_proc; 1213 cinfo->capi_ctrl.proc_fops = &c4_proc_fops;
1205 strcpy(cinfo->capi_ctrl.name, card->name); 1214 strcpy(cinfo->capi_ctrl.name, card->name);
1206 1215
1207 retval = attach_capi_ctr(&cinfo->capi_ctrl); 1216 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c
index 1c53fd49adb..baeeb3c2a3e 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/isdn/hardware/avm/t1isa.c
@@ -429,7 +429,7 @@ static int t1isa_probe(struct pci_dev *pdev, int cardnr)
429 cinfo->capi_ctrl.load_firmware = t1isa_load_firmware; 429 cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
430 cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr; 430 cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr;
431 cinfo->capi_ctrl.procinfo = t1isa_procinfo; 431 cinfo->capi_ctrl.procinfo = t1isa_procinfo;
432 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 432 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
433 strcpy(cinfo->capi_ctrl.name, card->name); 433 strcpy(cinfo->capi_ctrl.name, card->name);
434 434
435 retval = attach_capi_ctr(&cinfo->capi_ctrl); 435 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index e6d298d7514..5a3f8309801 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -119,7 +119,7 @@ static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
119 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware; 119 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
120 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr; 120 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
121 cinfo->capi_ctrl.procinfo = t1pci_procinfo; 121 cinfo->capi_ctrl.procinfo = t1pci_procinfo;
122 cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc; 122 cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
123 strcpy(cinfo->capi_ctrl.name, card->name); 123 strcpy(cinfo->capi_ctrl.name, card->name);
124 124
125 retval = attach_capi_ctr(&cinfo->capi_ctrl); 125 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index 98fcdfc7ca5..0f073cd7376 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <linux/seq_file.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17 18
18#include "os_capi.h" 19#include "os_capi.h"
@@ -75,25 +76,32 @@ void diva_os_free_message_buffer(diva_os_message_buffer_s * dmb)
75/* 76/*
76 * proc function for controller info 77 * proc function for controller info
77 */ 78 */
78static int diva_ctl_read_proc(char *page, char **start, off_t off, 79static int diva_ctl_proc_show(struct seq_file *m, void *v)
79 int count, int *eof, struct capi_ctr *ctrl)
80{ 80{
81 struct capi_ctr *ctrl = m->private;
81 diva_card *card = (diva_card *) ctrl->driverdata; 82 diva_card *card = (diva_card *) ctrl->driverdata;
82 int len = 0; 83
83 84 seq_printf(m, "%s\n", ctrl->name);
84 len += sprintf(page + len, "%s\n", ctrl->name); 85 seq_printf(m, "Serial No. : %s\n", ctrl->serial);
85 len += sprintf(page + len, "Serial No. : %s\n", ctrl->serial); 86 seq_printf(m, "Id : %d\n", card->Id);
86 len += sprintf(page + len, "Id : %d\n", card->Id); 87 seq_printf(m, "Channels : %d\n", card->d.channels);
87 len += sprintf(page + len, "Channels : %d\n", card->d.channels); 88
88 89 return 0;
89 if (off + count >= len) 90}
90 *eof = 1; 91
91 if (len < off) 92static int diva_ctl_proc_open(struct inode *inode, struct file *file)
92 return 0; 93{
93 *start = page + off; 94 return single_open(file, diva_ctl_proc_show, NULL);
94 return ((count < len - off) ? count : len - off);
95} 95}
96 96
97static const struct file_operations diva_ctl_proc_fops = {
98 .owner = THIS_MODULE,
99 .open = diva_ctl_proc_open,
100 .read = seq_read,
101 .llseek = seq_lseek,
102 .release = single_release,
103};
104
97/* 105/*
98 * set additional os settings in capi_ctr struct 106 * set additional os settings in capi_ctr struct
99 */ 107 */
@@ -102,7 +110,7 @@ void diva_os_set_controller_struct(struct capi_ctr *ctrl)
102 ctrl->driver_name = DRIVERLNAME; 110 ctrl->driver_name = DRIVERLNAME;
103 ctrl->load_firmware = NULL; 111 ctrl->load_firmware = NULL;
104 ctrl->reset_ctr = NULL; 112 ctrl->reset_ctr = NULL;
105 ctrl->ctr_read_proc = diva_ctl_read_proc; 113 ctrl->proc_fops = &diva_ctl_proc_fops;
106 ctrl->owner = THIS_MODULE; 114 ctrl->owner = THIS_MODULE;
107} 115}
108 116
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index 993b14cf177..5d06a743782 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19 20
20#include "platform.h" 21#include "platform.h"
@@ -62,39 +63,41 @@ static char *getrev(const char *revision)
62 return rev; 63 return rev;
63} 64}
64 65
65static int 66static int divadidd_proc_show(struct seq_file *m, void *v)
66proc_read(char *page, char **start, off_t off, int count, int *eof,
67 void *data)
68{ 67{
69 int len = 0;
70 char tmprev[32]; 68 char tmprev[32];
71 69
72 strcpy(tmprev, main_revision); 70 strcpy(tmprev, main_revision);
73 len += sprintf(page + len, "%s\n", DRIVERNAME); 71 seq_printf(m, "%s\n", DRIVERNAME);
74 len += sprintf(page + len, "name : %s\n", DRIVERLNAME); 72 seq_printf(m, "name : %s\n", DRIVERLNAME);
75 len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_DIDD); 73 seq_printf(m, "release : %s\n", DRIVERRELEASE_DIDD);
76 len += sprintf(page + len, "build : %s(%s)\n", 74 seq_printf(m, "build : %s(%s)\n",
77 diva_didd_common_code_build, DIVA_BUILD); 75 diva_didd_common_code_build, DIVA_BUILD);
78 len += sprintf(page + len, "revision : %s\n", getrev(tmprev)); 76 seq_printf(m, "revision : %s\n", getrev(tmprev));
79 77
80 if (off + count >= len) 78 return 0;
81 *eof = 1;
82 if (len < off)
83 return 0;
84 *start = page + off;
85 return ((count < len - off) ? count : len - off);
86} 79}
87 80
81static int divadidd_proc_open(struct inode *inode, struct file *file)
82{
83 return single_open(file, divadidd_proc_show, NULL);
84}
85
86static const struct file_operations divadidd_proc_fops = {
87 .owner = THIS_MODULE,
88 .open = divadidd_proc_open,
89 .read = seq_read,
90 .llseek = seq_lseek,
91 .release = single_release,
92};
93
88static int DIVA_INIT_FUNCTION create_proc(void) 94static int DIVA_INIT_FUNCTION create_proc(void)
89{ 95{
90 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net); 96 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
91 97
92 if (proc_net_eicon) { 98 if (proc_net_eicon) {
93 if ((proc_didd = 99 proc_didd = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
94 create_proc_entry(DRIVERLNAME, S_IFREG | S_IRUGO, 100 &divadidd_proc_fops);
95 proc_net_eicon))) {
96 proc_didd->read_proc = proc_read;
97 }
98 return (1); 101 return (1);
99 } 102 }
100 return (0); 103 return (0);
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 69e71ebe784..f577719ab3f 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -17,6 +17,7 @@
17#include <linux/poll.h> 17#include <linux/poll.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19#include <linux/skbuff.h> 19#include <linux/skbuff.h>
20#include <linux/seq_file.h>
20#include <linux/smp_lock.h> 21#include <linux/smp_lock.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
22 23
@@ -86,39 +87,40 @@ static void diva_um_timer_function(unsigned long data);
86extern struct proc_dir_entry *proc_net_eicon; 87extern struct proc_dir_entry *proc_net_eicon;
87static struct proc_dir_entry *um_idi_proc_entry = NULL; 88static struct proc_dir_entry *um_idi_proc_entry = NULL;
88 89
89static int 90static int um_idi_proc_show(struct seq_file *m, void *v)
90um_idi_proc_read(char *page, char **start, off_t off, int count, int *eof,
91 void *data)
92{ 91{
93 int len = 0;
94 char tmprev[32]; 92 char tmprev[32];
95 93
96 len += sprintf(page + len, "%s\n", DRIVERNAME); 94 seq_printf(m, "%s\n", DRIVERNAME);
97 len += sprintf(page + len, "name : %s\n", DRIVERLNAME); 95 seq_printf(m, "name : %s\n", DRIVERLNAME);
98 len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_IDI); 96 seq_printf(m, "release : %s\n", DRIVERRELEASE_IDI);
99 strcpy(tmprev, main_revision); 97 strcpy(tmprev, main_revision);
100 len += sprintf(page + len, "revision : %s\n", getrev(tmprev)); 98 seq_printf(m, "revision : %s\n", getrev(tmprev));
101 len += sprintf(page + len, "build : %s\n", DIVA_BUILD); 99 seq_printf(m, "build : %s\n", DIVA_BUILD);
102 len += sprintf(page + len, "major : %d\n", major); 100 seq_printf(m, "major : %d\n", major);
103 101
104 if (off + count >= len) 102 return 0;
105 *eof = 1; 103}
106 if (len < off) 104
107 return 0; 105static int um_idi_proc_open(struct inode *inode, struct file *file)
108 *start = page + off; 106{
109 return ((count < len - off) ? count : len - off); 107 return single_open(file, um_idi_proc_show, NULL);
110} 108}
111 109
110static const struct file_operations um_idi_proc_fops = {
111 .owner = THIS_MODULE,
112 .open = um_idi_proc_open,
113 .read = seq_read,
114 .llseek = seq_lseek,
115 .release = single_release,
116};
117
112static int DIVA_INIT_FUNCTION create_um_idi_proc(void) 118static int DIVA_INIT_FUNCTION create_um_idi_proc(void)
113{ 119{
114 um_idi_proc_entry = create_proc_entry(DRIVERLNAME, 120 um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
115 S_IFREG | S_IRUGO | S_IWUSR, 121 &um_idi_proc_fops);
116 proc_net_eicon);
117 if (!um_idi_proc_entry) 122 if (!um_idi_proc_entry)
118 return (0); 123 return (0);
119
120 um_idi_proc_entry->read_proc = um_idi_proc_read;
121
122 return (1); 124 return (1);
123} 125}
124 126
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 040827288ec..46d44a94262 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/proc_fs.h> 16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
17#include <linux/list.h> 18#include <linux/list.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19 20
@@ -141,14 +142,10 @@ void remove_divas_proc(void)
141 } 142 }
142} 143}
143 144
144/* 145static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
145** write group_optimization 146 size_t count, loff_t *pos)
146*/
147static int
148write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
149 void *data)
150{ 147{
151 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 148 diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
152 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 149 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
153 150
154 if ((count == 1) || (count == 2)) { 151 if ((count == 1) || (count == 2)) {
@@ -172,14 +169,10 @@ write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
172 return (-EINVAL); 169 return (-EINVAL);
173} 170}
174 171
175/* 172static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
176** write dynamic_l1_down 173 size_t count, loff_t *pos)
177*/
178static int
179write_d_l1_down(struct file *file, const char __user *buffer, unsigned long count,
180 void *data)
181{ 174{
182 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 175 diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
183 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 176 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
184 177
185 if ((count == 1) || (count == 2)) { 178 if ((count == 1) || (count == 2)) {
@@ -203,63 +196,62 @@ write_d_l1_down(struct file *file, const char __user *buffer, unsigned long coun
203 return (-EINVAL); 196 return (-EINVAL);
204} 197}
205 198
206 199static int d_l1_down_proc_show(struct seq_file *m, void *v)
207/*
208** read dynamic_l1_down
209*/
210static int
211read_d_l1_down(char *page, char **start, off_t off, int count, int *eof,
212 void *data)
213{ 200{
214 int len = 0; 201 diva_os_xdi_adapter_t *a = m->private;
215 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
216 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 202 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
217 203
218 len += sprintf(page + len, "%s\n", 204 seq_printf(m, "%s\n",
219 (IoAdapter->capi_cfg. 205 (IoAdapter->capi_cfg.
220 cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" : 206 cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" :
221 "0"); 207 "0");
208 return 0;
209}
222 210
223 if (off + count >= len) 211static int d_l1_down_proc_open(struct inode *inode, struct file *file)
224 *eof = 1; 212{
225 if (len < off) 213 return single_open(file, d_l1_down_proc_show, PDE(inode)->data);
226 return 0;
227 *start = page + off;
228 return ((count < len - off) ? count : len - off);
229} 214}
230 215
231/* 216static const struct file_operations d_l1_down_proc_fops = {
232** read group_optimization 217 .owner = THIS_MODULE,
233*/ 218 .open = d_l1_down_proc_open,
234static int 219 .read = seq_read,
235read_grp_opt(char *page, char **start, off_t off, int count, int *eof, 220 .llseek = seq_lseek,
236 void *data) 221 .release = single_release,
222 .write = d_l1_down_proc_write,
223};
224
225static int grp_opt_proc_show(struct seq_file *m, void *v)
237{ 226{
238 int len = 0; 227 diva_os_xdi_adapter_t *a = m->private;
239 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
240 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 228 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
241 229
242 len += sprintf(page + len, "%s\n", 230 seq_printf(m, "%s\n",
243 (IoAdapter->capi_cfg. 231 (IoAdapter->capi_cfg.
244 cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) 232 cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON)
245 ? "1" : "0"); 233 ? "1" : "0");
234 return 0;
235}
246 236
247 if (off + count >= len) 237static int grp_opt_proc_open(struct inode *inode, struct file *file)
248 *eof = 1; 238{
249 if (len < off) 239 return single_open(file, grp_opt_proc_show, PDE(inode)->data);
250 return 0;
251 *start = page + off;
252 return ((count < len - off) ? count : len - off);
253} 240}
254 241
255/* 242static const struct file_operations grp_opt_proc_fops = {
256** info write 243 .owner = THIS_MODULE,
257*/ 244 .open = grp_opt_proc_open,
258static int 245 .read = seq_read,
259info_write(struct file *file, const char __user *buffer, unsigned long count, 246 .llseek = seq_lseek,
260 void *data) 247 .release = single_release,
248 .write = grp_opt_proc_write,
249};
250
251static ssize_t info_proc_write(struct file *file, const char __user *buffer,
252 size_t count, loff_t *pos)
261{ 253{
262 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 254 diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
263 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 255 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
264 char c[4]; 256 char c[4];
265 257
@@ -277,63 +269,46 @@ info_write(struct file *file, const char __user *buffer, unsigned long count,
277 return (-EINVAL); 269 return (-EINVAL);
278} 270}
279 271
280/* 272static int info_proc_show(struct seq_file *m, void *v)
281** info read
282*/
283static int
284info_read(char *page, char **start, off_t off, int count, int *eof,
285 void *data)
286{ 273{
287 int i = 0; 274 int i = 0;
288 int len = 0;
289 char *p; 275 char *p;
290 char tmpser[16]; 276 char tmpser[16];
291 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 277 diva_os_xdi_adapter_t *a = m->private;
292 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 278 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
293 279
294 len += 280 seq_printf(m, "Name : %s\n", IoAdapter->Properties.Name);
295 sprintf(page + len, "Name : %s\n", 281 seq_printf(m, "DSP state : %08x\n", a->dsp_mask);
296 IoAdapter->Properties.Name); 282 seq_printf(m, "Channels : %02d\n", IoAdapter->Properties.Channels);
297 len += sprintf(page + len, "DSP state : %08x\n", a->dsp_mask); 283 seq_printf(m, "E. max/used : %03d/%03d\n",
298 len += sprintf(page + len, "Channels : %02d\n",
299 IoAdapter->Properties.Channels);
300 len += sprintf(page + len, "E. max/used : %03d/%03d\n",
301 IoAdapter->e_max, IoAdapter->e_count); 284 IoAdapter->e_max, IoAdapter->e_count);
302 diva_get_vserial_number(IoAdapter, tmpser); 285 diva_get_vserial_number(IoAdapter, tmpser);
303 len += sprintf(page + len, "Serial : %s\n", tmpser); 286 seq_printf(m, "Serial : %s\n", tmpser);
304 len += 287 seq_printf(m, "IRQ : %d\n", IoAdapter->irq_info.irq_nr);
305 sprintf(page + len, "IRQ : %d\n", 288 seq_printf(m, "CardIndex : %d\n", a->CardIndex);
306 IoAdapter->irq_info.irq_nr); 289 seq_printf(m, "CardOrdinal : %d\n", a->CardOrdinal);
307 len += sprintf(page + len, "CardIndex : %d\n", a->CardIndex); 290 seq_printf(m, "Controller : %d\n", a->controller);
308 len += sprintf(page + len, "CardOrdinal : %d\n", a->CardOrdinal); 291 seq_printf(m, "Bus-Type : %s\n",
309 len += sprintf(page + len, "Controller : %d\n", a->controller);
310 len += sprintf(page + len, "Bus-Type : %s\n",
311 (a->Bus == 292 (a->Bus ==
312 DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI"); 293 DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI");
313 len += sprintf(page + len, "Port-Name : %s\n", a->port_name); 294 seq_printf(m, "Port-Name : %s\n", a->port_name);
314 if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) { 295 if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) {
315 len += 296 seq_printf(m, "PCI-bus : %d\n", a->resources.pci.bus);
316 sprintf(page + len, "PCI-bus : %d\n", 297 seq_printf(m, "PCI-func : %d\n", a->resources.pci.func);
317 a->resources.pci.bus);
318 len +=
319 sprintf(page + len, "PCI-func : %d\n",
320 a->resources.pci.func);
321 for (i = 0; i < 8; i++) { 298 for (i = 0; i < 8; i++) {
322 if (a->resources.pci.bar[i]) { 299 if (a->resources.pci.bar[i]) {
323 len += 300 seq_printf(m,
324 sprintf(page + len,
325 "Mem / I/O %d : 0x%x / mapped : 0x%lx", 301 "Mem / I/O %d : 0x%x / mapped : 0x%lx",
326 i, a->resources.pci.bar[i], 302 i, a->resources.pci.bar[i],
327 (unsigned long) a->resources. 303 (unsigned long) a->resources.
328 pci.addr[i]); 304 pci.addr[i]);
329 if (a->resources.pci.length[i]) { 305 if (a->resources.pci.length[i]) {
330 len += 306 seq_printf(m,
331 sprintf(page + len,
332 " / length : %d", 307 " / length : %d",
333 a->resources.pci. 308 a->resources.pci.
334 length[i]); 309 length[i]);
335 } 310 }
336 len += sprintf(page + len, "\n"); 311 seq_putc(m, '\n');
337 } 312 }
338 } 313 }
339 } 314 }
@@ -353,16 +328,25 @@ info_read(char *page, char **start, off_t off, int count, int *eof,
353 } else { 328 } else {
354 p = "ready"; 329 p = "ready";
355 } 330 }
356 len += sprintf(page + len, "State : %s\n", p); 331 seq_printf(m, "State : %s\n", p);
357 332
358 if (off + count >= len) 333 return 0;
359 *eof = 1; 334}
360 if (len < off) 335
361 return 0; 336static int info_proc_open(struct inode *inode, struct file *file)
362 *start = page + off; 337{
363 return ((count < len - off) ? count : len - off); 338 return single_open(file, info_proc_show, PDE(inode)->data);
364} 339}
365 340
341static const struct file_operations info_proc_fops = {
342 .owner = THIS_MODULE,
343 .open = info_proc_open,
344 .read = seq_read,
345 .llseek = seq_lseek,
346 .release = single_release,
347 .write = info_proc_write,
348};
349
366/* 350/*
367** adapter proc init/de-init 351** adapter proc init/de-init
368*/ 352*/
@@ -380,28 +364,20 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a)
380 return (0); 364 return (0);
381 a->proc_adapter_dir = (void *) de; 365 a->proc_adapter_dir = (void *) de;
382 366
383 if (!(pe = 367 pe = proc_create_data(info_proc_name, S_IRUGO | S_IWUSR, de,
384 create_proc_entry(info_proc_name, S_IFREG | S_IRUGO | S_IWUSR, de))) 368 &info_proc_fops, a);
369 if (!pe)
385 return (0); 370 return (0);
386 a->proc_info = (void *) pe; 371 a->proc_info = (void *) pe;
387 pe->write_proc = info_write;
388 pe->read_proc = info_read;
389 pe->data = a;
390 372
391 if ((pe = create_proc_entry(grp_opt_proc_name, 373 pe = proc_create_data(grp_opt_proc_name, S_IRUGO | S_IWUSR, de,
392 S_IFREG | S_IRUGO | S_IWUSR, de))) { 374 &grp_opt_proc_fops, a);
375 if (pe)
393 a->proc_grp_opt = (void *) pe; 376 a->proc_grp_opt = (void *) pe;
394 pe->write_proc = write_grp_opt; 377 pe = proc_create_data(d_l1_down_proc_name, S_IRUGO | S_IWUSR, de,
395 pe->read_proc = read_grp_opt; 378 &d_l1_down_proc_fops, a);
396 pe->data = a; 379 if (pe)
397 }
398 if ((pe = create_proc_entry(d_l1_down_proc_name,
399 S_IFREG | S_IRUGO | S_IWUSR, de))) {
400 a->proc_d_l1_down = (void *) pe; 380 a->proc_d_l1_down = (void *) pe;
401 pe->write_proc = write_d_l1_down;
402 pe->read_proc = read_d_l1_down;
403 pe->data = a;
404 }
405 381
406 DBG_TRC(("proc entry %s created", tmp)); 382 DBG_TRC(("proc entry %s created", tmp));
407 383
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 4ffaa14b9fc..fe874afa4f8 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -11,6 +11,8 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
14#include <linux/signal.h> 16#include <linux/signal.h>
15#include <linux/kernel.h> 17#include <linux/kernel.h>
16#include <linux/skbuff.h> 18#include <linux/skbuff.h>
@@ -432,26 +434,16 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
432 return retval; 434 return retval;
433} 435}
434 436
435/********************************************************************* 437static int hycapi_proc_show(struct seq_file *m, void *v)
436hycapi_read_proc
437
438Informations provided in the /proc/capi-entries.
439
440*********************************************************************/
441
442static int hycapi_read_proc(char *page, char **start, off_t off,
443 int count, int *eof, struct capi_ctr *ctrl)
444{ 438{
439 struct capi_ctr *ctrl = m->private;
445 hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); 440 hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
446 hysdn_card *card = cinfo->card; 441 hysdn_card *card = cinfo->card;
447 int len = 0;
448 char *s; 442 char *s;
449#ifdef HYCAPI_PRINTFNAMES 443
450 printk(KERN_NOTICE "hycapi_read_proc\n"); 444 seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
451#endif 445 seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
452 len += sprintf(page+len, "%-16s %s\n", "name", cinfo->cardname); 446 seq_printf(m, "%-16s %d\n", "irq", card->irq);
453 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->iobase);
454 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
455 447
456 switch (card->brdtype) { 448 switch (card->brdtype) {
457 case BD_PCCARD: s = "HYSDN Hycard"; break; 449 case BD_PCCARD: s = "HYSDN Hycard"; break;
@@ -461,24 +453,32 @@ static int hycapi_read_proc(char *page, char **start, off_t off,
461 case BD_PLEXUS: s = "HYSDN Plexus30"; break; 453 case BD_PLEXUS: s = "HYSDN Plexus30"; break;
462 default: s = "???"; break; 454 default: s = "???"; break;
463 } 455 }
464 len += sprintf(page+len, "%-16s %s\n", "type", s); 456 seq_printf(m, "%-16s %s\n", "type", s);
465 if ((s = cinfo->version[VER_DRIVER]) != NULL) 457 if ((s = cinfo->version[VER_DRIVER]) != NULL)
466 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 458 seq_printf(m, "%-16s %s\n", "ver_driver", s);
467 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 459 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
468 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 460 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
469 if ((s = cinfo->version[VER_SERIAL]) != NULL) 461 if ((s = cinfo->version[VER_SERIAL]) != NULL)
470 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 462 seq_printf(m, "%-16s %s\n", "ver_serial", s);
471 463
472 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 464 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
473 465
474 if (off+count >= len) 466 return 0;
475 *eof = 1; 467}
476 if (len < off) 468
477 return 0; 469static int hycapi_proc_open(struct inode *inode, struct file *file)
478 *start = page + off; 470{
479 return ((count < len-off) ? count : len-off); 471 return single_open(file, hycapi_proc_show, PDE(inode)->data);
480} 472}
481 473
474static const struct file_operations hycapi_proc_fops = {
475 .owner = THIS_MODULE,
476 .open = hycapi_proc_open,
477 .read = seq_read,
478 .llseek = seq_lseek,
479 .release = single_release,
480};
481
482/************************************************************** 482/**************************************************************
483hycapi_load_firmware 483hycapi_load_firmware
484 484
@@ -774,7 +774,7 @@ hycapi_capi_create(hysdn_card *card)
774 ctrl->load_firmware = hycapi_load_firmware; 774 ctrl->load_firmware = hycapi_load_firmware;
775 ctrl->reset_ctr = hycapi_reset_ctr; 775 ctrl->reset_ctr = hycapi_reset_ctr;
776 ctrl->procinfo = hycapi_procinfo; 776 ctrl->procinfo = hycapi_procinfo;
777 ctrl->ctr_read_proc = hycapi_read_proc; 777 ctrl->proc_fops = &hycapi_proc_fops;
778 strcpy(ctrl->name, cinfo->cardname); 778 strcpy(ctrl->name, cinfo->cardname);
779 ctrl->owner = THIS_MODULE; 779 ctrl->owner = THIS_MODULE;
780 780
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8b8558fcb04..da6552d32cf 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -949,11 +949,8 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
949 (*secfilter)->filter_mask[10] = mac_mask[1]; 949 (*secfilter)->filter_mask[10] = mac_mask[1];
950 (*secfilter)->filter_mask[11]=mac_mask[0]; 950 (*secfilter)->filter_mask[11]=mac_mask[0];
951 951
952 dprintk("%s: filter mac=%02x %02x %02x %02x %02x %02x\n", 952 dprintk("%s: filter mac=%pM\n", dev->name, mac);
953 dev->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 953 dprintk("%s: filter mask=%pM\n", dev->name, mac_mask);
954 dprintk("%s: filter mask=%02x %02x %02x %02x %02x %02x\n",
955 dev->name, mac_mask[0], mac_mask[1], mac_mask[2],
956 mac_mask[3], mac_mask[4], mac_mask[5]);
957 954
958 return 0; 955 return 0;
959} 956}
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 7045c45da9b..949a648f8e2 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -111,10 +111,7 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
111 break; 111 break;
112 112
113 case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ 113 case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
114 seq_printf(seq, 114 seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
115 "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X",
116 serialno[2], serialno[3],
117 serialno[4], serialno[5], serialno[6], serialno[7]);
118 break; 115 break;
119 116
120 case I2O_SNFORMAT_WAN: /* WAN MAC Address */ 117 case I2O_SNFORMAT_WAN: /* WAN MAC Address */
@@ -126,10 +123,8 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
126 case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ 123 case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
127 /* FIXME: Figure out what a LAN-64 address really looks like?? */ 124 /* FIXME: Figure out what a LAN-64 address really looks like?? */
128 seq_printf(seq, 125 seq_printf(seq,
129 "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X", 126 "LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
130 serialno[8], serialno[9], 127 serialno[8], serialno[9], &serialno[2]);
131 serialno[2], serialno[3],
132 serialno[4], serialno[5], serialno[6], serialno[7]);
133 break; 128 break;
134 129
135 case I2O_SNFORMAT_DDM: /* I2O DDM */ 130 case I2O_SNFORMAT_DDM: /* I2O DDM */
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
index 50d431e469f..9dbaeb574e6 100644
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -43,15 +43,14 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
43 struct iwmct_parser *parser = &priv->parser; 43 struct iwmct_parser *parser = &priv->parser;
44 struct iwmct_fw_hdr *fw_hdr = &parser->versions; 44 struct iwmct_fw_hdr *fw_hdr = &parser->versions;
45 45
46 LOG_INFOEX(priv, INIT, "-->\n"); 46 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
47 47
48 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size); 48 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
49 49
50 parser->file = file; 50 parser->file = file;
51 parser->file_size = file_size; 51 parser->file_size = file_size;
52 parser->cur_pos = 0; 52 parser->cur_pos = 0;
53 parser->buf = NULL; 53 parser->entry_point = 0;
54
55 parser->buf = kzalloc(block_size, GFP_KERNEL); 54 parser->buf = kzalloc(block_size, GFP_KERNEL);
56 if (!parser->buf) { 55 if (!parser->buf) {
57 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n"); 56 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
@@ -70,7 +69,7 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
70 69
71 parser->cur_pos += sizeof(struct iwmct_fw_hdr); 70 parser->cur_pos += sizeof(struct iwmct_fw_hdr);
72 71
73 LOG_INFOEX(priv, INIT, "<--\n"); 72 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
74 return 0; 73 return 0;
75} 74}
76 75
@@ -113,7 +112,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
113 struct iwmct_dbg *dbg = &priv->dbg; 112 struct iwmct_dbg *dbg = &priv->dbg;
114 struct iwmct_fw_sec_hdr *sec_hdr; 113 struct iwmct_fw_sec_hdr *sec_hdr;
115 114
116 LOG_INFOEX(priv, INIT, "-->\n"); 115 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
117 116
118 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr) 117 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
119 <= parser->file_size) { 118 <= parser->file_size) {
@@ -152,7 +151,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
152 "finished with section cur_pos=%zd\n", parser->cur_pos); 151 "finished with section cur_pos=%zd\n", parser->cur_pos);
153 } 152 }
154 153
 155 LOG_INFOEX(priv, INIT, "<--\n"); 154 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
156 return 0; 155 return 0;
157} 156}
158 157
@@ -167,7 +166,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
167 int ret = 0; 166 int ret = 0;
168 u32 cmd = 0; 167 u32 cmd = 0;
169 168
170 LOG_INFOEX(priv, INIT, "-->\n"); 169 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
171 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n", 170 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
172 addr, sec_size); 171 addr, sec_size);
173 172
@@ -229,7 +228,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
229 hdr->cmd = cpu_to_le32(cmd); 228 hdr->cmd = cpu_to_le32(cmd);
230 /* send it down */ 229 /* send it down */
231 /* TODO: add more proper sending and error checking */ 230 /* TODO: add more proper sending and error checking */
232 ret = iwmct_tx(priv, 0, parser->buf, trans_size); 231 ret = iwmct_tx(priv, parser->buf, trans_size);
233 if (ret != 0) { 232 if (ret != 0) {
234 LOG_INFO(priv, FW_DOWNLOAD, 233 LOG_INFO(priv, FW_DOWNLOAD,
235 "iwmct_tx returned %d\n", ret); 234 "iwmct_tx returned %d\n", ret);
@@ -251,7 +250,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
251 if (sent < sec_size) 250 if (sent < sec_size)
252 ret = -EINVAL; 251 ret = -EINVAL;
253exit: 252exit:
254 LOG_INFOEX(priv, INIT, "<--\n"); 253 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
255 return ret; 254 return ret;
256} 255}
257 256
@@ -262,7 +261,7 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
262 int ret; 261 int ret;
263 u32 cmd; 262 u32 cmd;
264 263
265 LOG_INFOEX(priv, INIT, "-->\n"); 264 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
266 265
267 memset(parser->buf, 0, parser->buf_size); 266 memset(parser->buf, 0, parser->buf_size);
268 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS; 267 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
@@ -281,11 +280,11 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
281 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr)); 280 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
282 /* send it down */ 281 /* send it down */
283 /* TODO: add more proper sending and error checking */ 282 /* TODO: add more proper sending and error checking */
284 ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE); 283 ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
285 if (ret) 284 if (ret)
286 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret); 285 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
287 286
288 LOG_INFOEX(priv, INIT, "<--\n"); 287 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
289 return 0; 288 return 0;
290} 289}
291 290
@@ -298,8 +297,16 @@ int iwmct_fw_load(struct iwmct_priv *priv)
298 __le32 addr; 297 __le32 addr;
299 int ret; 298 int ret;
300 299
301 /* clear parser struct */ 300
302 memset(&priv->parser, 0, sizeof(struct iwmct_parser)); 301 LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
302 priv->barker);
303 LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
304 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
305 LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
306 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
307 LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
308 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
309
303 310
304 /* get the firmware */ 311 /* get the firmware */
305 ret = request_firmware(&raw, fw_name, &priv->func->dev); 312 ret = request_firmware(&raw, fw_name, &priv->func->dev);
@@ -317,6 +324,7 @@ int iwmct_fw_load(struct iwmct_priv *priv)
317 324
318 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name); 325 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
319 326
327 /* clear parser struct */
320 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len); 328 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
321 if (ret < 0) { 329 if (ret < 0) {
322 LOG_ERROR(priv, FW_DOWNLOAD, 330 LOG_ERROR(priv, FW_DOWNLOAD,
@@ -324,7 +332,6 @@ int iwmct_fw_load(struct iwmct_priv *priv)
324 goto exit; 332 goto exit;
325 } 333 }
326 334
327 /* checksum */
328 if (!iwmct_checksum(priv)) { 335 if (!iwmct_checksum(priv)) {
329 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n"); 336 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
330 ret = -EINVAL; 337 ret = -EINVAL;
@@ -333,23 +340,18 @@ int iwmct_fw_load(struct iwmct_priv *priv)
333 340
334 /* download firmware to device */ 341 /* download firmware to device */
335 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) { 342 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
336 if (iwmct_download_section(priv, pdata, len, addr)) { 343 ret = iwmct_download_section(priv, pdata, len, addr);
344 if (ret) {
337 LOG_ERROR(priv, FW_DOWNLOAD, 345 LOG_ERROR(priv, FW_DOWNLOAD,
338 "%s download section failed\n", fw_name); 346 "%s download section failed\n", fw_name);
339 ret = -EIO;
340 goto exit; 347 goto exit;
341 } 348 }
342 } 349 }
343 350
344 iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK)); 351 ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
345 352
346exit: 353exit:
347 kfree(priv->parser.buf); 354 kfree(priv->parser.buf);
348 355 release_firmware(raw);
349 if (raw)
350 release_firmware(raw);
351
352 raw = NULL;
353
354 return ret; 356 return ret;
355} 357}
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 43bd510e187..740ff0738ea 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -196,9 +196,7 @@ struct iwmct_priv {
196 struct list_head read_req_list; 196 struct list_head read_req_list;
197}; 197};
198 198
199extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr, 199extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
200 void *src, int count);
201
202extern int iwmct_fw_load(struct iwmct_priv *priv); 200extern int iwmct_fw_load(struct iwmct_priv *priv);
203 201
204extern void iwmct_dbg_init_params(struct iwmct_priv *drv); 202extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
index aba8121f978..4434bb16cea 100644
--- a/drivers/misc/iwmc3200top/log.h
+++ b/drivers/misc/iwmc3200top/log.h
@@ -37,13 +37,26 @@
37#define LOG_SEV_INFO 3 37#define LOG_SEV_INFO 3
38#define LOG_SEV_INFOEX 4 38#define LOG_SEV_INFOEX 4
39 39
40#define LOG_SEV_FILTER_ALL \ 40/* Log levels not defined for FW */
41 (BIT(LOG_SEV_CRITICAL) | \ 41#define LOG_SEV_TRACE 5
42 BIT(LOG_SEV_ERROR) | \ 42#define LOG_SEV_DUMP 6
43 BIT(LOG_SEV_WARNING) | \ 43
44 BIT(LOG_SEV_INFO) | \ 44#define LOG_SEV_FW_FILTER_ALL \
45 (BIT(LOG_SEV_CRITICAL) | \
46 BIT(LOG_SEV_ERROR) | \
47 BIT(LOG_SEV_WARNING) | \
48 BIT(LOG_SEV_INFO) | \
45 BIT(LOG_SEV_INFOEX)) 49 BIT(LOG_SEV_INFOEX))
46 50
51#define LOG_SEV_FILTER_ALL \
52 (BIT(LOG_SEV_CRITICAL) | \
53 BIT(LOG_SEV_ERROR) | \
54 BIT(LOG_SEV_WARNING) | \
55 BIT(LOG_SEV_INFO) | \
56 BIT(LOG_SEV_INFOEX) | \
57 BIT(LOG_SEV_TRACE) | \
58 BIT(LOG_SEV_DUMP))
59
47/* log source */ 60/* log source */
48#define LOG_SRC_INIT 0 61#define LOG_SRC_INIT 0
49#define LOG_SRC_DEBUGFS 1 62#define LOG_SRC_DEBUGFS 1
@@ -104,16 +117,16 @@ do { \
104 __func__, __LINE__, ##args); \ 117 __func__, __LINE__, ##args); \
105} while (0) 118} while (0)
106 119
107#define LOG_INFOEX(priv, src, fmt, args...) \ 120#define LOG_TRACE(priv, src, fmt, args...) \
108do { \ 121do { \
109 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \ 122 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
110 dev_dbg(priv2dev(priv), "%s %d: " fmt, \ 123 dev_dbg(priv2dev(priv), "%s %d: " fmt, \
111 __func__, __LINE__, ##args); \ 124 __func__, __LINE__, ##args); \
112} while (0) 125} while (0)
113 126
114#define LOG_HEXDUMP(src, ptr, len) \ 127#define LOG_HEXDUMP(src, ptr, len) \
115do { \ 128do { \
116 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \ 129 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
117 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \ 130 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
118 16, 1, ptr, len, false); \ 131 16, 1, ptr, len, false); \
119} while (0) 132} while (0)
@@ -142,7 +155,7 @@ ssize_t store_iwmct_log_level_fw(struct device *d,
142#define LOG_ERROR(priv, src, fmt, args...) 155#define LOG_ERROR(priv, src, fmt, args...)
143#define LOG_WARNING(priv, src, fmt, args...) 156#define LOG_WARNING(priv, src, fmt, args...)
144#define LOG_INFO(priv, src, fmt, args...) 157#define LOG_INFO(priv, src, fmt, args...)
145#define LOG_INFOEX(priv, src, fmt, args...) 158#define LOG_TRACE(priv, src, fmt, args...)
146#define LOG_HEXDUMP(src, ptr, len) 159#define LOG_HEXDUMP(src, ptr, len)
147 160
148static inline void iwmct_log_top_message(struct iwmct_priv *priv, 161static inline void iwmct_log_top_message(struct iwmct_priv *priv,
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index fafcaa481d7..dd0a3913bf6 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -49,6 +49,20 @@ MODULE_LICENSE("GPL");
49MODULE_AUTHOR(DRIVER_COPYRIGHT); 49MODULE_AUTHOR(DRIVER_COPYRIGHT);
50MODULE_FIRMWARE(FW_NAME(FW_API_VER)); 50MODULE_FIRMWARE(FW_NAME(FW_API_VER));
51 51
52
53static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
54{
55 return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
56
57}
58int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
59{
60 int ret;
61 sdio_claim_host(priv->func);
62 ret = __iwmct_tx(priv, src, count);
63 sdio_release_host(priv->func);
64 return ret;
65}
52/* 66/*
53 * This workers main task is to wait for OP_OPR_ALIVE 67 * This workers main task is to wait for OP_OPR_ALIVE
54 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed. 68 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
@@ -66,7 +80,7 @@ static void iwmct_rescan_worker(struct work_struct *ws)
66 80
67 ret = bus_rescan_devices(priv->func->dev.bus); 81 ret = bus_rescan_devices(priv->func->dev.bus);
68 if (ret < 0) 82 if (ret < 0)
69 LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n"); 83 LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
70} 84}
71 85
72static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg) 86static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
@@ -137,7 +151,7 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
137 int ret; 151 int ret;
138 u8 *buf; 152 u8 *buf;
139 153
140 LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n"); 154 LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
141 155
142 /* add padding to 256 for IWMC */ 156 /* add padding to 256 for IWMC */
143 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256; 157 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
@@ -158,27 +172,12 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
158 } 172 }
159 173
160 memcpy(buf, cmd, len); 174 memcpy(buf, cmd, len);
161 175 ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
162 sdio_claim_host(priv->func);
163 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
164 FW_HCMD_BLOCK_SIZE);
165 sdio_release_host(priv->func);
166 176
167 kfree(buf); 177 kfree(buf);
168 return ret; 178 return ret;
169} 179}
170 180
171int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
172 void *src, int count)
173{
174 int ret;
175
176 sdio_claim_host(priv->func);
177 ret = sdio_memcpy_toio(priv->func, addr, src, count);
178 sdio_release_host(priv->func);
179
180 return ret;
181}
182 181
183static void iwmct_irq_read_worker(struct work_struct *ws) 182static void iwmct_irq_read_worker(struct work_struct *ws)
184{ 183{
@@ -192,7 +191,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
192 191
193 priv = container_of(ws, struct iwmct_priv, isr_worker); 192 priv = container_of(ws, struct iwmct_priv, isr_worker);
194 193
195 LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws); 194 LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
196 195
197 /* --------------------- Handshake with device -------------------- */ 196 /* --------------------- Handshake with device -------------------- */
198 sdio_claim_host(priv->func); 197 sdio_claim_host(priv->func);
@@ -273,8 +272,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
273 272
274 if (barker & BARKER_DNLOAD_SYNC_MSK) { 273 if (barker & BARKER_DNLOAD_SYNC_MSK) {
275 /* Send the same barker back */ 274 /* Send the same barker back */
276 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, 275 ret = __iwmct_tx(priv, buf, iosize);
277 buf, iosize);
278 if (ret) { 276 if (ret) {
279 LOG_ERROR(priv, IRQ, 277 LOG_ERROR(priv, IRQ,
280 "error %d echoing barker\n", ret); 278 "error %d echoing barker\n", ret);
@@ -292,15 +290,6 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
292 290
293 sdio_release_host(priv->func); 291 sdio_release_host(priv->func);
294 292
295
296 LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
297 LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
298 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
299 LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
300 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
301 LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
302 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
303
304 if (priv->dbg.fw_download) 293 if (priv->dbg.fw_download)
305 iwmct_fw_load(priv); 294 iwmct_fw_load(priv);
306 else 295 else
@@ -312,7 +301,7 @@ exit_release:
312 sdio_release_host(priv->func); 301 sdio_release_host(priv->func);
313exit: 302exit:
314 kfree(buf); 303 kfree(buf);
315 LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n"); 304 LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
316} 305}
317 306
318static void iwmct_irq(struct sdio_func *func) 307static void iwmct_irq(struct sdio_func *func)
@@ -325,12 +314,12 @@ static void iwmct_irq(struct sdio_func *func)
325 314
326 priv = sdio_get_drvdata(func); 315 priv = sdio_get_drvdata(func);
327 316
328 LOG_INFO(priv, IRQ, "enter iwmct_irq\n"); 317 LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
329 318
330 /* read the function's status register */ 319 /* read the function's status register */
331 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret); 320 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
332 321
333 LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret); 322 LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
334 323
335 if (!val) { 324 if (!val) {
336 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n"); 325 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
@@ -372,7 +361,7 @@ static void iwmct_irq(struct sdio_func *func)
372 361
373 queue_work(priv->wq, &priv->isr_worker); 362 queue_work(priv->wq, &priv->isr_worker);
374 363
375 LOG_INFO(priv, IRQ, "exit iwmct_irq\n"); 364 LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
376 365
377 return; 366 return;
378 367
@@ -660,7 +649,7 @@ static int __init iwmct_init(void)
660 649
661 /* Default log filter settings */ 650 /* Default log filter settings */
662 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME); 651 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
663 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL); 652 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
664 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME); 653 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
665 654
666 rc = sdio_register_driver(&iwmct_driver); 655 rc = sdio_register_driver(&iwmct_driver);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815..5df46c230b0 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
375}; 375};
376 376
377 377
378static struct pci_device_id vortex_pci_tbl[] = { 378static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
379 { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, 379 { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
380 { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, 380 { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
381 { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, 381 { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9..9d59654748b 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -394,7 +394,7 @@ static int cp_get_eeprom(struct net_device *dev,
394static int cp_set_eeprom(struct net_device *dev, 394static int cp_set_eeprom(struct net_device *dev,
395 struct ethtool_eeprom *eeprom, u8 *data); 395 struct ethtool_eeprom *eeprom, u8 *data);
396 396
397static struct pci_device_id cp_pci_tbl[] = { 397static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
398 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), }, 398 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
399 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), }, 399 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
400 { }, 400 { },
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daab..321e73aabb2 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -231,7 +231,7 @@ static const struct {
231}; 231};
232 232
233 233
234static struct pci_device_id rtl8139_pci_tbl[] = { 234static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
235 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 235 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
236 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 236 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
237 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 237 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dd9a09c72df..cb0e534418e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2618,6 +2618,28 @@ config IXGBE_DCB
2618 2618
2619 If unsure, say N. 2619 If unsure, say N.
2620 2620
2621config IXGBEVF
2622 tristate "Intel(R) 82599 Virtual Function Ethernet support"
2623 depends on PCI_MSI
2624 ---help---
2625 This driver supports Intel(R) 82599 virtual functions. For more
2626 information on how to identify your adapter, go to the Adapter &
2627 Driver ID Guide at:
2628
2629 <http://support.intel.com/support/network/sb/CS-008441.htm>
2630
2631 For general information and support, go to the Intel support
2632 website at:
2633
2634 <http://support.intel.com>
2635
2636 More specific information on configuring the driver is in
2637 <file:Documentation/networking/ixgbevf.txt>.
2638
2639 To compile this driver as a module, choose M here. The module
2640 will be called ixgbevf. MSI-X interrupt support is required
2641 for this driver to work correctly.
2642
2621config IXGB 2643config IXGB
2622 tristate "Intel(R) PRO/10GbE support" 2644 tristate "Intel(R) PRO/10GbE support"
2623 depends on PCI 2645 depends on PCI
@@ -2756,6 +2778,13 @@ config BNX2X
2756 To compile this driver as a module, choose M here: the module 2778 To compile this driver as a module, choose M here: the module
2757 will be called bnx2x. This is recommended. 2779 will be called bnx2x. This is recommended.
2758 2780
2781config QLCNIC
2782 tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
2783 depends on PCI
2784 help
2785 This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
2786 devices.
2787
2759config QLGE 2788config QLGE
2760 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 2789 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
2761 depends on PCI 2790 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da..0b763cbe9b1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
14obj-$(CONFIG_IGB) += igb/ 14obj-$(CONFIG_IGB) += igb/
15obj-$(CONFIG_IGBVF) += igbvf/ 15obj-$(CONFIG_IGBVF) += igbvf/
16obj-$(CONFIG_IXGBE) += ixgbe/ 16obj-$(CONFIG_IXGBE) += ixgbe/
17obj-$(CONFIG_IXGBEVF) += ixgbevf/
17obj-$(CONFIG_IXGB) += ixgb/ 18obj-$(CONFIG_IXGB) += ixgb/
18obj-$(CONFIG_IP1000) += ipg.o 19obj-$(CONFIG_IP1000) += ipg.o
19obj-$(CONFIG_CHELSIO_T1) += chelsio/ 20obj-$(CONFIG_CHELSIO_T1) += chelsio/
@@ -148,6 +149,7 @@ ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
148obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o 149obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
149obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o 150obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
150obj-$(CONFIG_QLA3XXX) += qla3xxx.o 151obj-$(CONFIG_QLA3XXX) += qla3xxx.o
152obj-$(CONFIG_QLCNIC) += qlcnic/
151obj-$(CONFIG_QLGE) += qlge/ 153obj-$(CONFIG_QLGE) += qlge/
152 154
153obj-$(CONFIG_PPP) += ppp_generic.o 155obj-$(CONFIG_PPP) += ppp_generic.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a99475..ec624ab03e8 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -134,7 +134,7 @@
134#define PCI_DEVICE_ID_SGI_ACENIC 0x0009 134#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
135#endif 135#endif
136 136
137static struct pci_device_id acenic_pci_tbl[] = { 137static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
138 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE, 138 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
139 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, 139 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
140 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER, 140 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc7..545c791f477 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
113module_param_array(dynamic_ipg, bool, NULL, 0); 113module_param_array(dynamic_ipg, bool, NULL, 0);
114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); 114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
115 115
116static struct pci_device_id amd8111e_pci_tbl[] = { 116static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
117 117
118 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462, 118 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754..b68e1eb405f 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -144,7 +144,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
144 free_netdev(dev); 144 free_netdev(dev);
145} 145}
146 146
147static struct pci_device_id com20020pci_id_table[] = { 147static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
148 { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 148 { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
149 { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 149 { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
150 { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 150 { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index c35af3e106b..e2c202493fa 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -123,9 +123,7 @@ static void ariadne_reset(struct net_device *dev);
123static irqreturn_t ariadne_interrupt(int irq, void *data); 123static irqreturn_t ariadne_interrupt(int irq, void *data);
124static int ariadne_close(struct net_device *dev); 124static int ariadne_close(struct net_device *dev);
125static struct net_device_stats *ariadne_get_stats(struct net_device *dev); 125static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
126#ifdef HAVE_MULTICAST
127static void set_multicast_list(struct net_device *dev); 126static void set_multicast_list(struct net_device *dev);
128#endif
129 127
130 128
131static void memcpyw(volatile u_short *dest, u_short *src, int len) 129static void memcpyw(volatile u_short *dest, u_short *src, int len)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895..bf72d57a0af 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
9 * (at your option) any later version. 9 * (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
13
12#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -20,9 +22,9 @@
20#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
21#include <linux/platform_device.h> 23#include <linux/platform_device.h>
22#include <linux/delay.h> 24#include <linux/delay.h>
23#include <mach/ep93xx-regs.h> 25#include <linux/io.h>
24#include <mach/platform.h> 26
25#include <asm/io.h> 27#include <mach/hardware.h>
26 28
27#define DRV_MODULE_NAME "ep93xx-eth" 29#define DRV_MODULE_NAME "ep93xx-eth"
28#define DRV_MODULE_VERSION "0.1" 30#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +187,47 @@ struct ep93xx_priv
185#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off)) 187#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
186#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off)) 188#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
187 189
188static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg); 190static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
191{
192 struct ep93xx_priv *ep = netdev_priv(dev);
193 int data;
194 int i;
195
196 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
197
198 for (i = 0; i < 10; i++) {
199 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
200 break;
201 msleep(1);
202 }
203
204 if (i == 10) {
205 pr_info("mdio read timed out\n");
206 data = 0xffff;
207 } else {
208 data = rdl(ep, REG_MIIDATA);
209 }
210
211 return data;
212}
213
214static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
215{
216 struct ep93xx_priv *ep = netdev_priv(dev);
217 int i;
218
219 wrl(ep, REG_MIIDATA, data);
220 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
221
222 for (i = 0; i < 10; i++) {
223 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
224 break;
225 msleep(1);
226 }
227
228 if (i == 10)
229 pr_info("mdio write timed out\n");
230}
189 231
190static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) 232static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
191{ 233{
@@ -217,14 +259,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
217 rstat->rstat1 = 0; 259 rstat->rstat1 = 0;
218 260
219 if (!(rstat0 & RSTAT0_EOF)) 261 if (!(rstat0 & RSTAT0_EOF))
220 printk(KERN_CRIT "ep93xx_rx: not end-of-frame " 262 pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
221 " %.8x %.8x\n", rstat0, rstat1);
222 if (!(rstat0 & RSTAT0_EOB)) 263 if (!(rstat0 & RSTAT0_EOB))
223 printk(KERN_CRIT "ep93xx_rx: not end-of-buffer " 264 pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
224 " %.8x %.8x\n", rstat0, rstat1);
225 if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry) 265 if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
226 printk(KERN_CRIT "ep93xx_rx: entry mismatch " 266 pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
227 " %.8x %.8x\n", rstat0, rstat1);
228 267
229 if (!(rstat0 & RSTAT0_RWE)) { 268 if (!(rstat0 & RSTAT0_RWE)) {
230 ep->stats.rx_errors++; 269 ep->stats.rx_errors++;
@@ -241,8 +280,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
241 280
242 length = rstat1 & RSTAT1_FRAME_LENGTH; 281 length = rstat1 & RSTAT1_FRAME_LENGTH;
243 if (length > MAX_PKT_SIZE) { 282 if (length > MAX_PKT_SIZE) {
244 printk(KERN_NOTICE "ep93xx_rx: invalid length " 283 pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
245 " %.8x %.8x\n", rstat0, rstat1);
246 goto err; 284 goto err;
247 } 285 }
248 286
@@ -371,11 +409,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
371 tstat->tstat0 = 0; 409 tstat->tstat0 = 0;
372 410
373 if (tstat0 & TSTAT0_FA) 411 if (tstat0 & TSTAT0_FA)
374 printk(KERN_CRIT "ep93xx_tx_complete: frame aborted " 412 pr_crit("frame aborted %.8x\n", tstat0);
375 " %.8x\n", tstat0);
376 if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry) 413 if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
377 printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch " 414 pr_crit("entry mismatch %.8x\n", tstat0);
378 " %.8x\n", tstat0);
379 415
380 if (tstat0 & TSTAT0_TXWE) { 416 if (tstat0 & TSTAT0_TXWE) {
381 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; 417 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +572,7 @@ static int ep93xx_start_hw(struct net_device *dev)
536 } 572 }
537 573
538 if (i == 10) { 574 if (i == 10) {
539 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); 575 pr_crit("hw failed to reset\n");
540 return 1; 576 return 1;
541 } 577 }
542 578
@@ -581,7 +617,7 @@ static int ep93xx_start_hw(struct net_device *dev)
581 } 617 }
582 618
583 if (i == 10) { 619 if (i == 10) {
584 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n"); 620 pr_crit("hw failed to start\n");
585 return 1; 621 return 1;
586 } 622 }
587 623
@@ -617,7 +653,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
617 } 653 }
618 654
619 if (i == 10) 655 if (i == 10)
620 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); 656 pr_crit("hw failed to reset\n");
621} 657}
622 658
623static int ep93xx_open(struct net_device *dev) 659static int ep93xx_open(struct net_device *dev)
@@ -681,48 +717,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
681 return generic_mii_ioctl(&ep->mii, data, cmd, NULL); 717 return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
682} 718}
683 719
684static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
685{
686 struct ep93xx_priv *ep = netdev_priv(dev);
687 int data;
688 int i;
689
690 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
691
692 for (i = 0; i < 10; i++) {
693 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
694 break;
695 msleep(1);
696 }
697
698 if (i == 10) {
699 printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
700 data = 0xffff;
701 } else {
702 data = rdl(ep, REG_MIIDATA);
703 }
704
705 return data;
706}
707
708static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
709{
710 struct ep93xx_priv *ep = netdev_priv(dev);
711 int i;
712
713 wrl(ep, REG_MIIDATA, data);
714 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
715
716 for (i = 0; i < 10; i++) {
717 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
718 break;
719 msleep(1);
720 }
721
722 if (i == 10)
723 printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
724}
725
726static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 720static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
727{ 721{
728 strcpy(info->driver, DRV_MODULE_NAME); 722 strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +819,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
825 struct ep93xx_eth_data *data; 819 struct ep93xx_eth_data *data;
826 struct net_device *dev; 820 struct net_device *dev;
827 struct ep93xx_priv *ep; 821 struct ep93xx_priv *ep;
822 struct resource *mem;
823 int irq;
828 int err; 824 int err;
829 825
830 if (pdev == NULL) 826 if (pdev == NULL)
831 return -ENODEV; 827 return -ENODEV;
832 data = pdev->dev.platform_data; 828 data = pdev->dev.platform_data;
833 829
830 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
831 irq = platform_get_irq(pdev, 0);
832 if (!mem || irq < 0)
833 return -ENXIO;
834
834 dev = ep93xx_dev_alloc(data); 835 dev = ep93xx_dev_alloc(data);
835 if (dev == NULL) { 836 if (dev == NULL) {
836 err = -ENOMEM; 837 err = -ENOMEM;
@@ -842,23 +843,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
842 843
843 platform_set_drvdata(pdev, dev); 844 platform_set_drvdata(pdev, dev);
844 845
845 ep->res = request_mem_region(pdev->resource[0].start, 846 ep->res = request_mem_region(mem->start, resource_size(mem),
846 pdev->resource[0].end - pdev->resource[0].start + 1, 847 dev_name(&pdev->dev));
847 dev_name(&pdev->dev));
848 if (ep->res == NULL) { 848 if (ep->res == NULL) {
849 dev_err(&pdev->dev, "Could not reserve memory region\n"); 849 dev_err(&pdev->dev, "Could not reserve memory region\n");
850 err = -ENOMEM; 850 err = -ENOMEM;
851 goto err_out; 851 goto err_out;
852 } 852 }
853 853
854 ep->base_addr = ioremap(pdev->resource[0].start, 854 ep->base_addr = ioremap(mem->start, resource_size(mem));
855 pdev->resource[0].end - pdev->resource[0].start);
856 if (ep->base_addr == NULL) { 855 if (ep->base_addr == NULL) {
857 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 856 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
858 err = -EIO; 857 err = -EIO;
859 goto err_out; 858 goto err_out;
860 } 859 }
861 ep->irq = pdev->resource[1].start; 860 ep->irq = irq;
862 861
863 ep->mii.phy_id = data->phy_id; 862 ep->mii.phy_id = data->phy_id;
864 ep->mii.phy_id_mask = 0x1f; 863 ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +876,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
877 goto err_out; 876 goto err_out;
878 } 877 }
879 878
880 printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, " 879 printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
881 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name, 880 dev->name, ep->irq, dev->dev_addr);
882 ep->irq, data->dev_addr[0], data->dev_addr[1],
883 data->dev_addr[2], data->dev_addr[3],
884 data->dev_addr[4], data->dev_addr[5]);
885 881
886 return 0; 882 return 0;
887 883
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0..d98095df05b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -35,7 +35,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) } 36 * Class, Class Mask, private data (not used) }
37 */ 37 */
38static struct pci_device_id atl1c_pci_tbl[] = { 38static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
41 /* required last entry */ 41 /* required last entry */
@@ -2596,11 +2596,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2596 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 2596 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2597 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 2597 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2598 if (netif_msg_probe(adapter)) 2598 if (netif_msg_probe(adapter))
2599 dev_dbg(&pdev->dev, 2599 dev_dbg(&pdev->dev, "mac address : %pM\n",
2600 "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n", 2600 adapter->hw.mac_addr);
2601 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2602 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2603 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2604 2601
2605 atl1c_hw_set_mac_addr(&adapter->hw); 2602 atl1c_hw_set_mac_addr(&adapter->hw);
2606 INIT_WORK(&adapter->common_task, atl1c_common_task); 2603 INIT_WORK(&adapter->common_task, atl1c_common_task);
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9..d59f8e89c65 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) } 36 * Class, Class Mask, private data (not used) }
37 */ 37 */
38static struct pci_device_id atl1e_pci_tbl[] = { 38static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
41 /* required last entry */ 41 /* required last entry */
@@ -2378,10 +2378,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2378 2378
2379 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 2379 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2380 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 2380 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2381 dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n", 2381 dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr);
2382 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2383 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2384 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2385 2382
2386 INIT_WORK(&adapter->reset_task, atl1e_reset_task); 2383 INIT_WORK(&adapter->reset_task, atl1e_reset_task);
2387 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); 2384 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127..9ba547069db 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
232/* 232/*
233 * atl1_pci_tbl - PCI Device ID Table 233 * atl1_pci_tbl - PCI Device ID Table
234 */ 234 */
235static const struct pci_device_id atl1_pci_tbl[] = { 235static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
236 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, 236 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
237 /* required last entry */ 237 /* required last entry */
238 {0,} 238 {0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ec52529394a..40cf9e5cb9e 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -63,7 +63,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
63/* 63/*
64 * atl2_pci_tbl - PCI Device ID Table 64 * atl2_pci_tbl - PCI Device ID Table
65 */ 65 */
66static struct pci_device_id atl2_pci_tbl[] = { 66static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)}, 67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
68 /* required last entry */ 68 /* required last entry */
69 {0,} 69 {0,}
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb6958..44b66be3813 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -102,7 +102,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
102 102
103 103
104#ifdef CONFIG_B44_PCI 104#ifdef CONFIG_B44_PCI
105static const struct pci_device_id b44_pci_tbl[] = { 105static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
106 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, 106 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, 107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, 108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index fee6eee7ae5..3227b11131c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1607,3 +1607,33 @@ err:
1607 spin_unlock_bh(&adapter->mcc_lock); 1607 spin_unlock_bh(&adapter->mcc_lock);
1608 return status; 1608 return status;
1609} 1609}
1610
1611extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1612 struct be_dma_mem *nonemb_cmd)
1613{
1614 struct be_mcc_wrb *wrb;
1615 struct be_cmd_req_seeprom_read *req;
1616 struct be_sge *sge;
1617 int status;
1618
1619 spin_lock_bh(&adapter->mcc_lock);
1620
1621 wrb = wrb_from_mccq(adapter);
1622 req = nonemb_cmd->va;
1623 sge = nonembedded_sgl(wrb);
1624
1625 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1626 OPCODE_COMMON_SEEPROM_READ);
1627
1628 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1629 OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1630
1631 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1632 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1633 sge->len = cpu_to_le32(nonemb_cmd->size);
1634
1635 status = be_mcc_notify_wait(adapter);
1636
1637 spin_unlock_bh(&adapter->mcc_lock);
1638 return status;
1639}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 13b33c84108..c622a968c37 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
124#define OPCODE_COMMON_CQ_CREATE 12 124#define OPCODE_COMMON_CQ_CREATE 12
125#define OPCODE_COMMON_EQ_CREATE 13 125#define OPCODE_COMMON_EQ_CREATE 13
126#define OPCODE_COMMON_MCC_CREATE 21 126#define OPCODE_COMMON_MCC_CREATE 21
127#define OPCODE_COMMON_SEEPROM_READ 30
127#define OPCODE_COMMON_NTWK_RX_FILTER 34 128#define OPCODE_COMMON_NTWK_RX_FILTER 34
128#define OPCODE_COMMON_GET_FW_VERSION 35 129#define OPCODE_COMMON_GET_FW_VERSION 35
129#define OPCODE_COMMON_SET_FLOW_CONTROL 36 130#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -855,6 +856,19 @@ struct be_cmd_resp_ddrdma_test {
855 u8 rcv_buff[4096]; 856 u8 rcv_buff[4096];
856}; 857};
857 858
859/*********************** SEEPROM Read ***********************/
860
861#define BE_READ_SEEPROM_LEN 1024
862struct be_cmd_req_seeprom_read {
863 struct be_cmd_req_hdr hdr;
864 u8 rsvd0[BE_READ_SEEPROM_LEN];
865};
866
867struct be_cmd_resp_seeprom_read {
868 struct be_cmd_req_hdr hdr;
869 u8 seeprom_data[BE_READ_SEEPROM_LEN];
870};
871
858extern int be_pci_fnum_get(struct be_adapter *adapter); 872extern int be_pci_fnum_get(struct be_adapter *adapter);
859extern int be_cmd_POST(struct be_adapter *adapter); 873extern int be_cmd_POST(struct be_adapter *adapter);
860extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 874extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -927,5 +941,8 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
927 u32 num_pkts, u64 pattern); 941 u32 num_pkts, u64 pattern);
928extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 942extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
929 u32 byte_cnt, struct be_dma_mem *cmd); 943 u32 byte_cnt, struct be_dma_mem *cmd);
944extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
945 struct be_dma_mem *nonemb_cmd);
930extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 946extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
931 u8 loopback_type, u8 enable); 947 u8 loopback_type, u8 enable);
948
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 5d001c4deac..09d8899b2de 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -112,6 +112,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
112 "PHY Loopback test", 112 "PHY Loopback test",
113 "External Loopback test", 113 "External Loopback test",
114 "DDR DMA test" 114 "DDR DMA test"
115 "Link test"
115}; 116};
116 117
117#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests) 118#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
@@ -529,6 +530,9 @@ static void
529be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 530be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
530{ 531{
531 struct be_adapter *adapter = netdev_priv(netdev); 532 struct be_adapter *adapter = netdev_priv(netdev);
533 bool link_up;
534 u8 mac_speed = 0;
535 u16 qos_link_speed = 0;
532 536
533 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 537 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
534 538
@@ -545,12 +549,20 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
545 &data[2]) != 0) { 549 &data[2]) != 0) {
546 test->flags |= ETH_TEST_FL_FAILED; 550 test->flags |= ETH_TEST_FL_FAILED;
547 } 551 }
552 }
548 553
549 data[3] = be_test_ddr_dma(adapter); 554 if (be_test_ddr_dma(adapter) != 0) {
550 if (data[3] != 0) 555 data[3] = 1;
551 test->flags |= ETH_TEST_FL_FAILED; 556 test->flags |= ETH_TEST_FL_FAILED;
552 } 557 }
553 558
559 if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
560 &qos_link_speed) != 0) {
561 test->flags |= ETH_TEST_FL_FAILED;
562 data[4] = -1;
563 } else if (mac_speed) {
564 data[4] = 1;
565 }
554} 566}
555 567
556static int 568static int
@@ -567,12 +579,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
567 return be_load_fw(adapter, file_name); 579 return be_load_fw(adapter, file_name);
568} 580}
569 581
582static int
583be_get_eeprom_len(struct net_device *netdev)
584{
585 return BE_READ_SEEPROM_LEN;
586}
587
588static int
589be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
590 uint8_t *data)
591{
592 struct be_adapter *adapter = netdev_priv(netdev);
593 struct be_dma_mem eeprom_cmd;
594 struct be_cmd_resp_seeprom_read *resp;
595 int status;
596
597 if (!eeprom->len)
598 return -EINVAL;
599
600 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
601
602 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
603 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
604 eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
605 &eeprom_cmd.dma);
606
607 if (!eeprom_cmd.va) {
608 dev_err(&adapter->pdev->dev,
609 "Memory allocation failure. Could not read eeprom\n");
610 return -ENOMEM;
611 }
612
613 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
614
615 if (!status) {
616 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
617 memcpy(data, resp->seeprom_data, eeprom->len);
618 }
619 pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
620 eeprom_cmd.dma);
621
622 return status;
623}
624
570const struct ethtool_ops be_ethtool_ops = { 625const struct ethtool_ops be_ethtool_ops = {
571 .get_settings = be_get_settings, 626 .get_settings = be_get_settings,
572 .get_drvinfo = be_get_drvinfo, 627 .get_drvinfo = be_get_drvinfo,
573 .get_wol = be_get_wol, 628 .get_wol = be_get_wol,
574 .set_wol = be_set_wol, 629 .set_wol = be_set_wol,
575 .get_link = ethtool_op_get_link, 630 .get_link = ethtool_op_get_link,
631 .get_eeprom_len = be_get_eeprom_len,
632 .get_eeprom = be_read_eeprom,
576 .get_coalesce = be_get_coalesce, 633 .get_coalesce = be_get_coalesce,
577 .set_coalesce = be_set_coalesce, 634 .set_coalesce = be_set_coalesce,
578 .get_ringparam = be_get_ringparam, 635 .get_ringparam = be_get_ringparam,
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e..5917b941aca 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -48,7 +48,6 @@
48#include <linux/cache.h> 48#include <linux/cache.h>
49#include <linux/firmware.h> 49#include <linux/firmware.h>
50#include <linux/log2.h> 50#include <linux/log2.h>
51#include <linux/list.h>
52 51
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 52#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1 53#define BCM_CNIC 1
@@ -3579,14 +3578,14 @@ bnx2_set_rx_mode(struct net_device *dev)
3579 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; 3578 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3580 } 3579 }
3581 3580
3582 if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) { 3581 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3583 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; 3582 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3584 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | 3583 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3585 BNX2_RPM_SORT_USER0_PROM_VLAN; 3584 BNX2_RPM_SORT_USER0_PROM_VLAN;
3586 } else if (!(dev->flags & IFF_PROMISC)) { 3585 } else if (!(dev->flags & IFF_PROMISC)) {
3587 /* Add all entries into to the match filter list */ 3586 /* Add all entries into to the match filter list */
3588 i = 0; 3587 i = 0;
3589 list_for_each_entry(ha, &dev->uc.list, list) { 3588 netdev_for_each_uc_addr(ha, dev) {
3590 bnx2_set_mac_addr(bp, ha->addr, 3589 bnx2_set_mac_addr(bp, ha->addr,
3591 i + BNX2_START_UNICAST_ADDRESS_INDEX); 3590 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3592 sort_mode |= (1 << 3591 sort_mode |= (1 <<
@@ -6145,6 +6144,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6145 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); 6144 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6146 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); 6145 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6147 6146
6147 /* Need to flush the previous three writes to ensure MSI-X
6148 * is setup properly */
6149 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6150
6148 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 6151 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6149 msix_ent[i].entry = i; 6152 msix_ent[i].entry = i;
6150 msix_ent[i].vector = 0; 6153 msix_ent[i].vector = 0;
@@ -6227,6 +6230,8 @@ bnx2_open(struct net_device *dev)
6227 6230
6228 atomic_set(&bp->intr_sem, 0); 6231 atomic_set(&bp->intr_sem, 0);
6229 6232
6233 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6234
6230 bnx2_enable_int(bp); 6235 bnx2_enable_int(bp);
6231 6236
6232 if (bp->flags & BNX2_FLAG_USING_MSI) { 6237 if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6538,92 +6543,121 @@ bnx2_close(struct net_device *dev)
6538 return 0; 6543 return 0;
6539} 6544}
6540 6545
6541#define GET_NET_STATS64(ctr) \ 6546static void
6547bnx2_save_stats(struct bnx2 *bp)
6548{
6549 u32 *hw_stats = (u32 *) bp->stats_blk;
6550 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6551 int i;
6552
6553 /* The 1st 10 counters are 64-bit counters */
6554 for (i = 0; i < 20; i += 2) {
6555 u32 hi;
6556 u64 lo;
6557
6558 hi = *(temp_stats + i) + *(hw_stats + i);
6559 lo = *(temp_stats + i + 1) + *(hw_stats + i + 1);
6560 if (lo > 0xffffffff)
6561 hi++;
6562 *(temp_stats + i) = hi;
6563 *(temp_stats + i + 1) = lo & 0xffffffff;
6564 }
6565
6566 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6567 *(temp_stats + i) = *(temp_stats + i) + *(hw_stats + i);
6568}
6569
6570#define GET_64BIT_NET_STATS64(ctr) \
6542 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \ 6571 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6543 (unsigned long) (ctr##_lo) 6572 (unsigned long) (ctr##_lo)
6544 6573
6545#define GET_NET_STATS32(ctr) \ 6574#define GET_64BIT_NET_STATS32(ctr) \
6546 (ctr##_lo) 6575 (ctr##_lo)
6547 6576
6548#if (BITS_PER_LONG == 64) 6577#if (BITS_PER_LONG == 64)
6549#define GET_NET_STATS GET_NET_STATS64 6578#define GET_64BIT_NET_STATS(ctr) \
6579 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6580 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6550#else 6581#else
6551#define GET_NET_STATS GET_NET_STATS32 6582#define GET_64BIT_NET_STATS(ctr) \
6583 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6584 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6552#endif 6585#endif
6553 6586
6587#define GET_32BIT_NET_STATS(ctr) \
6588 (unsigned long) (bp->stats_blk->ctr + \
6589 bp->temp_stats_blk->ctr)
6590
6554static struct net_device_stats * 6591static struct net_device_stats *
6555bnx2_get_stats(struct net_device *dev) 6592bnx2_get_stats(struct net_device *dev)
6556{ 6593{
6557 struct bnx2 *bp = netdev_priv(dev); 6594 struct bnx2 *bp = netdev_priv(dev);
6558 struct statistics_block *stats_blk = bp->stats_blk;
6559 struct net_device_stats *net_stats = &dev->stats; 6595 struct net_device_stats *net_stats = &dev->stats;
6560 6596
6561 if (bp->stats_blk == NULL) { 6597 if (bp->stats_blk == NULL) {
6562 return net_stats; 6598 return net_stats;
6563 } 6599 }
6564 net_stats->rx_packets = 6600 net_stats->rx_packets =
6565 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) + 6601 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6566 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) + 6602 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6567 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts); 6603 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6568 6604
6569 net_stats->tx_packets = 6605 net_stats->tx_packets =
6570 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) + 6606 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6571 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) + 6607 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6572 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts); 6608 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6573 6609
6574 net_stats->rx_bytes = 6610 net_stats->rx_bytes =
6575 GET_NET_STATS(stats_blk->stat_IfHCInOctets); 6611 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6576 6612
6577 net_stats->tx_bytes = 6613 net_stats->tx_bytes =
6578 GET_NET_STATS(stats_blk->stat_IfHCOutOctets); 6614 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6579 6615
6580 net_stats->multicast = 6616 net_stats->multicast =
6581 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts); 6617 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6582 6618
6583 net_stats->collisions = 6619 net_stats->collisions =
6584 (unsigned long) stats_blk->stat_EtherStatsCollisions; 6620 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6585 6621
6586 net_stats->rx_length_errors = 6622 net_stats->rx_length_errors =
6587 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts + 6623 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6588 stats_blk->stat_EtherStatsOverrsizePkts); 6624 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6589 6625
6590 net_stats->rx_over_errors = 6626 net_stats->rx_over_errors =
6591 (unsigned long) (stats_blk->stat_IfInFTQDiscards + 6627 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6592 stats_blk->stat_IfInMBUFDiscards); 6628 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6593 6629
6594 net_stats->rx_frame_errors = 6630 net_stats->rx_frame_errors =
6595 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors; 6631 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6596 6632
6597 net_stats->rx_crc_errors = 6633 net_stats->rx_crc_errors =
6598 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors; 6634 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6599 6635
6600 net_stats->rx_errors = net_stats->rx_length_errors + 6636 net_stats->rx_errors = net_stats->rx_length_errors +
6601 net_stats->rx_over_errors + net_stats->rx_frame_errors + 6637 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6602 net_stats->rx_crc_errors; 6638 net_stats->rx_crc_errors;
6603 6639
6604 net_stats->tx_aborted_errors = 6640 net_stats->tx_aborted_errors =
6605 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions + 6641 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6606 stats_blk->stat_Dot3StatsLateCollisions); 6642 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6607 6643
6608 if ((CHIP_NUM(bp) == CHIP_NUM_5706) || 6644 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6609 (CHIP_ID(bp) == CHIP_ID_5708_A0)) 6645 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6610 net_stats->tx_carrier_errors = 0; 6646 net_stats->tx_carrier_errors = 0;
6611 else { 6647 else {
6612 net_stats->tx_carrier_errors = 6648 net_stats->tx_carrier_errors =
6613 (unsigned long) 6649 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6614 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6615 } 6650 }
6616 6651
6617 net_stats->tx_errors = 6652 net_stats->tx_errors =
6618 (unsigned long) 6653 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6619 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6620 +
6621 net_stats->tx_aborted_errors + 6654 net_stats->tx_aborted_errors +
6622 net_stats->tx_carrier_errors; 6655 net_stats->tx_carrier_errors;
6623 6656
6624 net_stats->rx_missed_errors = 6657 net_stats->rx_missed_errors =
6625 (unsigned long) (stats_blk->stat_IfInFTQDiscards + 6658 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6626 stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop); 6659 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6660 GET_32BIT_NET_STATS(stat_FwRxDrop);
6627 6661
6628 return net_stats; 6662 return net_stats;
6629} 6663}
@@ -7083,6 +7117,9 @@ static int
7083bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) 7117bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7084{ 7118{
7085 if (netif_running(bp->dev)) { 7119 if (netif_running(bp->dev)) {
7120 /* Reset will erase chipset stats; save them */
7121 bnx2_save_stats(bp);
7122
7086 bnx2_netif_stop(bp); 7123 bnx2_netif_stop(bp);
7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 7124 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7088 bnx2_free_skbs(bp); 7125 bnx2_free_skbs(bp);
@@ -7427,6 +7464,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7427 struct bnx2 *bp = netdev_priv(dev); 7464 struct bnx2 *bp = netdev_priv(dev);
7428 int i; 7465 int i;
7429 u32 *hw_stats = (u32 *) bp->stats_blk; 7466 u32 *hw_stats = (u32 *) bp->stats_blk;
7467 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7430 u8 *stats_len_arr = NULL; 7468 u8 *stats_len_arr = NULL;
7431 7469
7432 if (hw_stats == NULL) { 7470 if (hw_stats == NULL) {
@@ -7443,21 +7481,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7443 stats_len_arr = bnx2_5708_stats_len_arr; 7481 stats_len_arr = bnx2_5708_stats_len_arr;
7444 7482
7445 for (i = 0; i < BNX2_NUM_STATS; i++) { 7483 for (i = 0; i < BNX2_NUM_STATS; i++) {
7484 unsigned long offset;
7485
7446 if (stats_len_arr[i] == 0) { 7486 if (stats_len_arr[i] == 0) {
7447 /* skip this counter */ 7487 /* skip this counter */
7448 buf[i] = 0; 7488 buf[i] = 0;
7449 continue; 7489 continue;
7450 } 7490 }
7491
7492 offset = bnx2_stats_offset_arr[i];
7451 if (stats_len_arr[i] == 4) { 7493 if (stats_len_arr[i] == 4) {
7452 /* 4-byte counter */ 7494 /* 4-byte counter */
7453 buf[i] = (u64) 7495 buf[i] = (u64) *(hw_stats + offset) +
7454 *(hw_stats + bnx2_stats_offset_arr[i]); 7496 *(temp_stats + offset);
7455 continue; 7497 continue;
7456 } 7498 }
7457 /* 8-byte counter */ 7499 /* 8-byte counter */
7458 buf[i] = (((u64) *(hw_stats + 7500 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7459 bnx2_stats_offset_arr[i])) << 32) + 7501 *(hw_stats + offset + 1) +
7460 *(hw_stats + bnx2_stats_offset_arr[i] + 1); 7502 (((u64) *(temp_stats + offset)) << 32) +
7503 *(temp_stats + offset + 1);
7461 } 7504 }
7462} 7505}
7463 7506
@@ -7625,7 +7668,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
7625 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size)); 7668 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7626} 7669}
7627 7670
7628#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 7671#ifdef CONFIG_NET_POLL_CONTROLLER
7629static void 7672static void
7630poll_bnx2(struct net_device *dev) 7673poll_bnx2(struct net_device *dev)
7631{ 7674{
@@ -7825,6 +7868,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7825 bp->flags = 0; 7868 bp->flags = 0;
7826 bp->phy_flags = 0; 7869 bp->phy_flags = 0;
7827 7870
7871 bp->temp_stats_blk =
7872 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7873
7874 if (bp->temp_stats_blk == NULL) {
7875 rc = -ENOMEM;
7876 goto err_out;
7877 }
7878
7828 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 7879 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7829 rc = pci_enable_device(pdev); 7880 rc = pci_enable_device(pdev);
7830 if (rc) { 7881 if (rc) {
@@ -8229,7 +8280,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8229#ifdef BCM_VLAN 8280#ifdef BCM_VLAN
8230 .ndo_vlan_rx_register = bnx2_vlan_rx_register, 8281 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8231#endif 8282#endif
8232#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 8283#ifdef CONFIG_NET_POLL_CONTROLLER
8233 .ndo_poll_controller = poll_bnx2, 8284 .ndo_poll_controller = poll_bnx2,
8234#endif 8285#endif
8235}; 8286};
@@ -8346,6 +8397,8 @@ bnx2_remove_one(struct pci_dev *pdev)
8346 if (bp->regview) 8397 if (bp->regview)
8347 iounmap(bp->regview); 8398 iounmap(bp->regview);
8348 8399
8400 kfree(bp->temp_stats_blk);
8401
8349 free_netdev(dev); 8402 free_netdev(dev);
8350 pci_release_regions(pdev); 8403 pci_release_regions(pdev);
8351 pci_disable_device(pdev); 8404 pci_disable_device(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 939dc44d50a..b860fbbff35 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6851,6 +6851,7 @@ struct bnx2 {
6851 dma_addr_t status_blk_mapping; 6851 dma_addr_t status_blk_mapping;
6852 6852
6853 struct statistics_block *stats_blk; 6853 struct statistics_block *stats_blk;
6854 struct statistics_block *temp_stats_blk;
6854 dma_addr_t stats_blk_mapping; 6855 dma_addr_t stats_blk_mapping;
6855 6856
6856 int ctx_pages; 6857 int ctx_pages;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 306c2b8165e..ffc7381969a 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -140,7 +140,7 @@ static struct {
140}; 140};
141 141
142 142
143static const struct pci_device_id bnx2x_pci_tbl[] = { 143static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
@@ -11731,7 +11731,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
11731 11731
11732#endif 11732#endif
11733 11733
11734#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 11734#ifdef CONFIG_NET_POLL_CONTROLLER
11735static void poll_bnx2x(struct net_device *dev) 11735static void poll_bnx2x(struct net_device *dev)
11736{ 11736{
11737 struct bnx2x *bp = netdev_priv(dev); 11737 struct bnx2x *bp = netdev_priv(dev);
@@ -11755,7 +11755,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11755#ifdef BCM_VLAN 11755#ifdef BCM_VLAN
11756 .ndo_vlan_rx_register = bnx2x_vlan_rx_register, 11756 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11757#endif 11757#endif
11758#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 11758#ifdef CONFIG_NET_POLL_CONTROLLER
11759 .ndo_poll_controller = poll_bnx2x, 11759 .ndo_poll_controller = poll_bnx2x,
11760#endif 11760#endif
11761}; 11761};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index efa0e41bf3e..6221936e957 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2615,6 +2615,17 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2615 unsigned char *arp_ptr; 2615 unsigned char *arp_ptr;
2616 __be32 sip, tip; 2616 __be32 sip, tip;
2617 2617
2618 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2619 /*
2620 * When using VLANS and bonding, dev and oriv_dev may be
2621 * incorrect if the physical interface supports VLAN
2622 * acceleration. With this change ARP validation now
2623 * works for hosts only reachable on the VLAN interface.
2624 */
2625 dev = vlan_dev_real_dev(dev);
2626 orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif);
2627 }
2628
2618 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2629 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2619 goto out; 2630 goto out;
2620 2631
@@ -3296,7 +3307,7 @@ static void bond_remove_proc_entry(struct bonding *bond)
3296/* Create the bonding directory under /proc/net, if doesn't exist yet. 3307/* Create the bonding directory under /proc/net, if doesn't exist yet.
3297 * Caller must hold rtnl_lock. 3308 * Caller must hold rtnl_lock.
3298 */ 3309 */
3299static void bond_create_proc_dir(struct bond_net *bn) 3310static void __net_init bond_create_proc_dir(struct bond_net *bn)
3300{ 3311{
3301 if (!bn->proc_dir) { 3312 if (!bn->proc_dir) {
3302 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); 3313 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
@@ -3309,7 +3320,7 @@ static void bond_create_proc_dir(struct bond_net *bn)
3309/* Destroy the bonding directory under /proc/net, if empty. 3320/* Destroy the bonding directory under /proc/net, if empty.
3310 * Caller must hold rtnl_lock. 3321 * Caller must hold rtnl_lock.
3311 */ 3322 */
3312static void bond_destroy_proc_dir(struct bond_net *bn) 3323static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
3313{ 3324{
3314 if (bn->proc_dir) { 3325 if (bn->proc_dir) {
3315 remove_proc_entry(DRV_NAME, bn->net->proc_net); 3326 remove_proc_entry(DRV_NAME, bn->net->proc_net);
@@ -3327,11 +3338,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3327{ 3338{
3328} 3339}
3329 3340
3330static void bond_create_proc_dir(struct bond_net *bn) 3341static inline void bond_create_proc_dir(struct bond_net *bn)
3331{ 3342{
3332} 3343}
3333 3344
3334static void bond_destroy_proc_dir(struct bond_net *bn) 3345static inline void bond_destroy_proc_dir(struct bond_net *bn)
3335{ 3346{
3336} 3347}
3337 3348
@@ -4944,7 +4955,7 @@ out_netdev:
4944 goto out; 4955 goto out;
4945} 4956}
4946 4957
4947static int bond_net_init(struct net *net) 4958static int __net_init bond_net_init(struct net *net)
4948{ 4959{
4949 struct bond_net *bn = net_generic(net, bond_net_id); 4960 struct bond_net *bn = net_generic(net, bond_net_id);
4950 4961
@@ -4956,7 +4967,7 @@ static int bond_net_init(struct net *net)
4956 return 0; 4967 return 0;
4957} 4968}
4958 4969
4959static void bond_net_exit(struct net *net) 4970static void __net_exit bond_net_exit(struct net *net)
4960{ 4971{
4961 struct bond_net *bn = net_generic(net, bond_net_id); 4972 struct bond_net *bn = net_generic(net, bond_net_id);
4962 4973
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c..a2f29a38798 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
342 unsigned int mb, prio; 342 unsigned int mb, prio;
343 u32 reg_mid, reg_mcr; 343 u32 reg_mid, reg_mcr;
344 344
345 if (can_dropped_invalid_skb(dev, skb))
346 return NETDEV_TX_OK;
347
345 mb = get_tx_next_mb(priv); 348 mb = get_tx_next_mb(priv);
346 prio = get_tx_next_prio(priv); 349 prio = get_tx_next_prio(priv);
347 350
@@ -1070,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1070 priv->can.bittiming_const = &at91_bittiming_const; 1073 priv->can.bittiming_const = &at91_bittiming_const;
1071 priv->can.do_set_bittiming = at91_set_bittiming; 1074 priv->can.do_set_bittiming = at91_set_bittiming;
1072 priv->can.do_set_mode = at91_set_mode; 1075 priv->can.do_set_mode = at91_set_mode;
1076 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1073 priv->reg_base = addr; 1077 priv->reg_base = addr;
1074 priv->dev = dev; 1078 priv->dev = dev;
1075 priv->clk = clk; 1079 priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523c..bf7f9ba2d90 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -318,6 +318,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
318 u16 val; 318 u16 val;
319 int i; 319 int i;
320 320
321 if (can_dropped_invalid_skb(dev, skb))
322 return NETDEV_TX_OK;
323
321 netif_stop_queue(dev); 324 netif_stop_queue(dev);
322 325
323 /* fill id */ 326 /* fill id */
@@ -600,6 +603,7 @@ struct net_device *alloc_bfin_candev(void)
600 priv->can.bittiming_const = &bfin_can_bittiming_const; 603 priv->can.bittiming_const = &bfin_can_bittiming_const;
601 priv->can.do_set_bittiming = bfin_can_set_bittiming; 604 priv->can.do_set_bittiming = bfin_can_set_bittiming;
602 priv->can.do_set_mode = bfin_can_set_mode; 605 priv->can.do_set_mode = bfin_can_set_mode;
606 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
603 607
604 return dev; 608 return dev;
605} 609}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1bb29f0322..f08f1202ff0 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -592,6 +592,8 @@ static int can_changelink(struct net_device *dev,
592 if (dev->flags & IFF_UP) 592 if (dev->flags & IFF_UP)
593 return -EBUSY; 593 return -EBUSY;
594 cm = nla_data(data[IFLA_CAN_CTRLMODE]); 594 cm = nla_data(data[IFLA_CAN_CTRLMODE]);
595 if (cm->flags & ~priv->ctrlmode_supported)
596 return -EOPNOTSUPP;
595 priv->ctrlmode &= ~cm->mask; 597 priv->ctrlmode &= ~cm->mask;
596 priv->ctrlmode |= cm->flags; 598 priv->ctrlmode |= cm->flags;
597 } 599 }
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 1a72ca066a1..bbe186b5a0e 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -494,12 +494,8 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
494 return NETDEV_TX_BUSY; 494 return NETDEV_TX_BUSY;
495 } 495 }
496 496
497 if (skb->len != sizeof(struct can_frame)) { 497 if (can_dropped_invalid_skb(net, skb))
498 dev_err(&spi->dev, "dropping packet - bad length\n");
499 dev_kfree_skb(skb);
500 net->stats.tx_dropped++;
501 return NETDEV_TX_OK; 498 return NETDEV_TX_OK;
502 }
503 499
504 netif_stop_queue(net); 500 netif_stop_queue(net);
505 priv->tx_skb = skb; 501 priv->tx_skb = skb;
@@ -543,9 +539,14 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
543 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 539 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
544 /* Put device into loopback mode */ 540 /* Put device into loopback mode */
545 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK); 541 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
542 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
543 /* Put device into listen-only mode */
544 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
546 } else { 545 } else {
547 /* Put device into normal mode */ 546 /* Put device into normal mode */
548 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL); 547 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL |
548 (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
549 CANCTRL_OSM : 0));
549 550
550 /* Wait for the device to enter normal mode */ 551 /* Wait for the device to enter normal mode */
551 timeout = jiffies + HZ; 552 timeout = jiffies + HZ;
@@ -952,6 +953,10 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
952 priv->can.bittiming_const = &mcp251x_bittiming_const; 953 priv->can.bittiming_const = &mcp251x_bittiming_const;
953 priv->can.do_set_mode = mcp251x_do_set_mode; 954 priv->can.do_set_mode = mcp251x_do_set_mode;
954 priv->can.clock.freq = pdata->oscillator_frequency / 2; 955 priv->can.clock.freq = pdata->oscillator_frequency / 2;
956 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
957 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
958 if (pdata->model == CAN_MCP251X_MCP2515)
959 priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
955 priv->net = net; 960 priv->net = net;
956 dev_set_drvdata(&spi->dev, priv); 961 dev_set_drvdata(&spi->dev, priv);
957 962
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375..27d1d398e25 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
11 11
12config CAN_MPC5XXX 12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller" 13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on PPC_MPC52xx 14 depends on (PPC_MPC52xx || PPC_MPC512x)
15 ---help--- 15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx 16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller. 17 onboard CAN controller. Currently, the MPC5200, MPC5200B and
18 MPC5121 (Rev. 2 and later) are supported.
18 19
19 This driver can also be built as a module. If so, the module 20 This driver can also be built as a module. If so, the module
20 will be called mscan-mpc5xxx.ko. 21 will be called mscan-mpc5xxx.ko.
21 22
22endif 23endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b1..03e7c48465a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
29#include <linux/can/dev.h> 29#include <linux/can/dev.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h> 31#include <sysdev/fsl_soc.h>
32#include <linux/clk.h>
32#include <linux/io.h> 33#include <linux/io.h>
33#include <asm/mpc52xx.h> 34#include <asm/mpc52xx.h>
34 35
@@ -36,22 +37,21 @@
36 37
37#define DRV_NAME "mpc5xxx_can" 38#define DRV_NAME "mpc5xxx_can"
38 39
39static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = { 40struct mpc5xxx_can_data {
41 unsigned int type;
42 u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
43 int *mscan_clksrc);
44};
45
46#ifdef CONFIG_PPC_MPC52xx
47static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
40 { .compatible = "fsl,mpc5200-cdm", }, 48 { .compatible = "fsl,mpc5200-cdm", },
41 {} 49 {}
42}; 50};
43 51
44/* 52static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
45 * Get frequency of the MSCAN clock source 53 const char *clock_name,
46 * 54 int *mscan_clksrc)
47 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
48 * can be selected. According to the MPC5200 user's manual, the oscillator
49 * clock is the better choice as it has less jitter but due to a hardware
50 * bug, it can not be selected for the old MPC5200 Rev. A chips.
51 */
52
53static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
54 int clock_src)
55{ 55{
56 unsigned int pvr; 56 unsigned int pvr;
57 struct mpc52xx_cdm __iomem *cdm; 57 struct mpc52xx_cdm __iomem *cdm;
@@ -61,21 +61,33 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
61 61
62 pvr = mfspr(SPRN_PVR); 62 pvr = mfspr(SPRN_PVR);
63 63
64 freq = mpc5xxx_get_bus_frequency(of->node); 64 /*
65 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
66 * (IP_CLK) can be selected as MSCAN clock source. According to
67 * the MPC5200 user's manual, the oscillator clock is the better
68 * choice as it has less jitter. For this reason, it is selected
69 * by default. Unfortunately, it can not be selected for the old
70 * MPC5200 Rev. A chips due to a hardware bug (check errata).
71 */
72 if (clock_name && strcmp(clock_name, "ip") == 0)
73 *mscan_clksrc = MSCAN_CLKSRC_BUS;
74 else
75 *mscan_clksrc = MSCAN_CLKSRC_XTAL;
76
77 freq = mpc5xxx_get_bus_frequency(ofdev->node);
65 if (!freq) 78 if (!freq)
66 return 0; 79 return 0;
67 80
68 if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011) 81 if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
69 return freq; 82 return freq;
70 83
71 /* Determine SYS_XTAL_IN frequency from the clock domain settings */ 84 /* Determine SYS_XTAL_IN frequency from the clock domain settings */
72 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids); 85 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
73 if (!np_cdm) { 86 if (!np_cdm) {
74 dev_err(&of->dev, "can't get clock node!\n"); 87 dev_err(&ofdev->dev, "can't get clock node!\n");
75 return 0; 88 return 0;
76 } 89 }
77 cdm = of_iomap(np_cdm, 0); 90 cdm = of_iomap(np_cdm, 0);
78 of_node_put(np_cdm);
79 91
80 if (in_8(&cdm->ipb_clk_sel) & 0x1) 92 if (in_8(&cdm->ipb_clk_sel) & 0x1)
81 freq *= 2; 93 freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
84 freq *= (val & (1 << 5)) ? 8 : 4; 96 freq *= (val & (1 << 5)) ? 8 : 4;
85 freq /= (val & (1 << 6)) ? 12 : 16; 97 freq /= (val & (1 << 6)) ? 12 : 16;
86 98
99 of_node_put(np_cdm);
87 iounmap(cdm); 100 iounmap(cdm);
88 101
89 return freq; 102 return freq;
90} 103}
104#else /* !CONFIG_PPC_MPC52xx */
105static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
106 const char *clock_name,
107 int *mscan_clksrc)
108{
109 return 0;
110}
111#endif /* CONFIG_PPC_MPC52xx */
112
113#ifdef CONFIG_PPC_MPC512x
114struct mpc512x_clockctl {
115 u32 spmr; /* System PLL Mode Reg */
116 u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
117 u32 scfr1; /* System Clk Freq Reg 1 */
118 u32 scfr2; /* System Clk Freq Reg 2 */
119 u32 reserved;
120 u32 bcr; /* Bread Crumb Reg */
121 u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
122 u32 spccr; /* SPDIF Clk Ctrl Reg */
123 u32 cccr; /* CFM Clk Ctrl Reg */
124 u32 dccr; /* DIU Clk Cnfg Reg */
125 u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
126};
127
128static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
129 { .compatible = "fsl,mpc5121-clock", },
130 {}
131};
132
133static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
134 const char *clock_name,
135 int *mscan_clksrc)
136{
137 struct mpc512x_clockctl __iomem *clockctl;
138 struct device_node *np_clock;
139 struct clk *sys_clk, *ref_clk;
140 int plen, clockidx, clocksrc = -1;
141 u32 sys_freq, val, clockdiv = 1, freq = 0;
142 const u32 *pval;
143
144 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
145 if (!np_clock) {
146 dev_err(&ofdev->dev, "couldn't find clock node\n");
147 return -ENODEV;
148 }
149 clockctl = of_iomap(np_clock, 0);
150 if (!clockctl) {
151 dev_err(&ofdev->dev, "couldn't map clock registers\n");
152 return 0;
153 }
154
155 /* Determine the MSCAN device index from the physical address */
156 pval = of_get_property(ofdev->node, "reg", &plen);
157 BUG_ON(!pval || plen < sizeof(*pval));
158 clockidx = (*pval & 0x80) ? 1 : 0;
159 if (*pval & 0x2000)
160 clockidx += 2;
161
162 /*
163 * Clock source and divider selection: 3 different clock sources
164 * can be selected: "ip", "ref" or "sys". For the latter two, a
165 * clock divider can be defined as well. If the clock source is
166 * not specified by the device tree, we first try to find an
167 * optimal CAN source clock based on the system clock. If that
168 * is not posslible, the reference clock will be used.
169 */
170 if (clock_name && !strcmp(clock_name, "ip")) {
171 *mscan_clksrc = MSCAN_CLKSRC_IPS;
172 freq = mpc5xxx_get_bus_frequency(ofdev->node);
173 } else {
174 *mscan_clksrc = MSCAN_CLKSRC_BUS;
175
176 pval = of_get_property(ofdev->node,
177 "fsl,mscan-clock-divider", &plen);
178 if (pval && plen == sizeof(*pval))
179 clockdiv = *pval;
180 if (!clockdiv)
181 clockdiv = 1;
182
183 if (!clock_name || !strcmp(clock_name, "sys")) {
184 sys_clk = clk_get(&ofdev->dev, "sys_clk");
185 if (!sys_clk) {
186 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
187 goto exit_unmap;
188 }
189 /* Get and round up/down sys clock rate */
190 sys_freq = 1000000 *
191 ((clk_get_rate(sys_clk) + 499999) / 1000000);
192
193 if (!clock_name) {
194 /* A multiple of 16 MHz would be optimal */
195 if ((sys_freq % 16000000) == 0) {
196 clocksrc = 0;
197 clockdiv = sys_freq / 16000000;
198 freq = sys_freq / clockdiv;
199 }
200 } else {
201 clocksrc = 0;
202 freq = sys_freq / clockdiv;
203 }
204 }
205
206 if (clocksrc < 0) {
207 ref_clk = clk_get(&ofdev->dev, "ref_clk");
208 if (!ref_clk) {
209 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
210 goto exit_unmap;
211 }
212 clocksrc = 1;
213 freq = clk_get_rate(ref_clk) / clockdiv;
214 }
215 }
216
217 /* Disable clock */
218 out_be32(&clockctl->mccr[clockidx], 0x0);
219 if (clocksrc >= 0) {
220 /* Set source and divider */
221 val = (clocksrc << 14) | ((clockdiv - 1) << 17);
222 out_be32(&clockctl->mccr[clockidx], val);
223 /* Enable clock */
224 out_be32(&clockctl->mccr[clockidx], val | 0x10000);
225 }
226
227 /* Enable MSCAN clock domain */
228 val = in_be32(&clockctl->sccr[1]);
229 if (!(val & (1 << 25)))
230 out_be32(&clockctl->sccr[1], val | (1 << 25));
231
232 dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
233 *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
234 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
235
236exit_unmap:
237 of_node_put(np_clock);
238 iounmap(clockctl);
239
240 return freq;
241}
242#else /* !CONFIG_PPC_MPC512x */
243static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
244 const char *clock_name,
245 int *mscan_clksrc)
246{
247 return 0;
248}
249#endif /* CONFIG_PPC_MPC512x */
91 250
92static int __devinit mpc5xxx_can_probe(struct of_device *ofdev, 251static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
93 const struct of_device_id *id) 252 const struct of_device_id *id)
94{ 253{
254 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
95 struct device_node *np = ofdev->node; 255 struct device_node *np = ofdev->node;
96 struct net_device *dev; 256 struct net_device *dev;
97 struct mscan_priv *priv; 257 struct mscan_priv *priv;
98 void __iomem *base; 258 void __iomem *base;
99 const char *clk_src; 259 const char *clock_name = NULL;
100 int err, irq, clock_src; 260 int irq, mscan_clksrc = 0;
261 int err = -ENOMEM;
101 262
102 base = of_iomap(ofdev->node, 0); 263 base = of_iomap(np, 0);
103 if (!base) { 264 if (!base) {
104 dev_err(&ofdev->dev, "couldn't ioremap\n"); 265 dev_err(&ofdev->dev, "couldn't ioremap\n");
105 err = -ENOMEM; 266 return err;
106 goto exit_release_mem;
107 } 267 }
108 268
109 irq = irq_of_parse_and_map(np, 0); 269 irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
114 } 274 }
115 275
116 dev = alloc_mscandev(); 276 dev = alloc_mscandev();
117 if (!dev) { 277 if (!dev)
118 err = -ENOMEM;
119 goto exit_dispose_irq; 278 goto exit_dispose_irq;
120 }
121 279
122 priv = netdev_priv(dev); 280 priv = netdev_priv(dev);
123 priv->reg_base = base; 281 priv->reg_base = base;
124 dev->irq = irq; 282 dev->irq = irq;
125 283
126 /* 284 clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
127 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock 285
128 * (IP_CLK) can be selected as MSCAN clock source. According to 286 BUG_ON(!data);
129 * the MPC5200 user's manual, the oscillator clock is the better 287 priv->type = data->type;
130 * choice as it has less jitter. For this reason, it is selected 288 priv->can.clock.freq = data->get_clock(ofdev, clock_name,
131 * by default. 289 &mscan_clksrc);
132 */
133 clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
134 if (clk_src && strcmp(clk_src, "ip") == 0)
135 clock_src = MSCAN_CLKSRC_BUS;
136 else
137 clock_src = MSCAN_CLKSRC_XTAL;
138 priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
139 if (!priv->can.clock.freq) { 290 if (!priv->can.clock.freq) {
140 dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n"); 291 dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
141 err = -ENODEV;
142 goto exit_free_mscan; 292 goto exit_free_mscan;
143 } 293 }
144 294
145 SET_NETDEV_DEV(dev, &ofdev->dev); 295 SET_NETDEV_DEV(dev, &ofdev->dev);
146 296
147 err = register_mscandev(dev, clock_src); 297 err = register_mscandev(dev, mscan_clksrc);
148 if (err) { 298 if (err) {
149 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", 299 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
150 DRV_NAME, err); 300 DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
164 irq_dispose_mapping(irq); 314 irq_dispose_mapping(irq);
165exit_unmap_mem: 315exit_unmap_mem:
166 iounmap(base); 316 iounmap(base);
167exit_release_mem: 317
168 return err; 318 return err;
169} 319}
170 320
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
225} 375}
226#endif 376#endif
227 377
378static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
379 .type = MSCAN_TYPE_MPC5200,
380 .get_clock = mpc52xx_can_get_clock,
381};
382
383static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
384 .type = MSCAN_TYPE_MPC5121,
385 .get_clock = mpc512x_can_get_clock,
386};
387
228static struct of_device_id __devinitdata mpc5xxx_can_table[] = { 388static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
229 {.compatible = "fsl,mpc5200-mscan"}, 389 { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
390 /* Note that only MPC5121 Rev. 2 (and later) is supported */
391 { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
230 {}, 392 {},
231}; 393};
232 394
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
255module_exit(mpc5xxx_can_exit); 417module_exit(mpc5xxx_can_exit);
256 418
257MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); 419MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
258MODULE_DESCRIPTION("Freescale MPC5200 CAN driver"); 420MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
259MODULE_LICENSE("GPL v2"); 421MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca..6b7dd578d41 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>, 4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy 5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> 6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de> 7 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License 10 * it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
152 priv->shadow_canrier = 0; 152 priv->shadow_canrier = 0;
153 priv->flags = 0; 153 priv->flags = 0;
154 154
155 if (priv->type == MSCAN_TYPE_MPC5121) {
156 /* Clear pending bus-off condition */
157 if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
158 out_8(&regs->canmisc, MSCAN_BOHOLD);
159 }
160
155 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE); 161 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
156 if (err) 162 if (err)
157 return err; 163 return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
163 out_8(&regs->cantier, 0); 169 out_8(&regs->cantier, 0);
164 170
165 /* Enable receive interrupts. */ 171 /* Enable receive interrupts. */
166 out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | 172 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
167 MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0); 173
174 return 0;
175}
176
177static int mscan_restart(struct net_device *dev)
178{
179 struct mscan_priv *priv = netdev_priv(dev);
180
181 if (priv->type == MSCAN_TYPE_MPC5121) {
182 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
183
184 priv->can.state = CAN_STATE_ERROR_ACTIVE;
185 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
186 "bus-off state expected");
187 out_8(&regs->canmisc, MSCAN_BOHOLD);
188 /* Re-enable receive interrupts. */
189 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
190 } else {
191 if (priv->can.state <= CAN_STATE_BUS_OFF)
192 mscan_set_mode(dev, MSCAN_INIT_MODE);
193 return mscan_start(dev);
194 }
168 195
169 return 0; 196 return 0;
170} 197}
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
177 int i, rtr, buf_id; 204 int i, rtr, buf_id;
178 u32 can_id; 205 u32 can_id;
179 206
180 if (frame->can_dlc > 8) 207 if (can_dropped_invalid_skb(dev, skb))
181 return -EINVAL; 208 return NETDEV_TX_OK;
182 209
183 out_8(&regs->cantier, 0); 210 out_8(&regs->cantier, 0);
184 211
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
359 * automatically. To avoid that we stop the chip doing 386 * automatically. To avoid that we stop the chip doing
360 * a light-weight stop (we are in irq-context). 387 * a light-weight stop (we are in irq-context).
361 */ 388 */
362 out_8(&regs->cantier, 0); 389 if (priv->type != MSCAN_TYPE_MPC5121) {
363 out_8(&regs->canrier, 0); 390 out_8(&regs->cantier, 0);
364 setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); 391 out_8(&regs->canrier, 0);
392 setbits8(&regs->canctl0,
393 MSCAN_SLPRQ | MSCAN_INITRQ);
394 }
365 can_bus_off(dev); 395 can_bus_off(dev);
366 break; 396 break;
367 default: 397 default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
491 521
492 switch (mode) { 522 switch (mode) {
493 case CAN_MODE_START: 523 case CAN_MODE_START:
494 if (priv->can.state <= CAN_STATE_BUS_OFF) 524 ret = mscan_restart(dev);
495 mscan_set_mode(dev, MSCAN_INIT_MODE);
496 ret = mscan_start(dev);
497 if (ret) 525 if (ret)
498 break; 526 break;
499 if (netif_queue_stopped(dev)) 527 if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
592 .ndo_start_xmit = mscan_start_xmit, 620 .ndo_start_xmit = mscan_start_xmit,
593}; 621};
594 622
595int register_mscandev(struct net_device *dev, int clock_src) 623int register_mscandev(struct net_device *dev, int mscan_clksrc)
596{ 624{
597 struct mscan_priv *priv = netdev_priv(dev); 625 struct mscan_priv *priv = netdev_priv(dev);
598 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; 626 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
599 u8 ctl1; 627 u8 ctl1;
600 628
601 ctl1 = in_8(&regs->canctl1); 629 ctl1 = in_8(&regs->canctl1);
602 if (clock_src) 630 if (mscan_clksrc)
603 ctl1 |= MSCAN_CLKSRC; 631 ctl1 |= MSCAN_CLKSRC;
604 else 632 else
605 ctl1 &= ~MSCAN_CLKSRC; 633 ctl1 &= ~MSCAN_CLKSRC;
606 634
635 if (priv->type == MSCAN_TYPE_MPC5121)
636 ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
637
607 ctl1 |= MSCAN_CANE; 638 ctl1 |= MSCAN_CANE;
608 out_8(&regs->canctl1, ctl1); 639 out_8(&regs->canctl1, ctl1);
609 udelay(100); 640 udelay(100);
@@ -655,6 +686,7 @@ struct net_device *alloc_mscandev(void)
655 priv->can.bittiming_const = &mscan_bittiming_const; 686 priv->can.bittiming_const = &mscan_bittiming_const;
656 priv->can.do_set_bittiming = mscan_do_set_bittiming; 687 priv->can.do_set_bittiming = mscan_do_set_bittiming;
657 priv->can.do_set_mode = mscan_do_set_mode; 688 priv->can.do_set_mode = mscan_do_set_mode;
689 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
658 690
659 for (i = 0; i < TX_QUEUE_SIZE; i++) { 691 for (i = 0; i < TX_QUEUE_SIZE; i++) {
660 priv->tx_queue[i].id = i; 692 priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed..4ff966473bc 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
38#define MSCAN_CLKSRC 0x40 38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20 39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10 40#define MSCAN_LISTEN 0x10
41#define MSCAN_BORM 0x08
41#define MSCAN_WUPM 0x04 42#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02 43#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01 44#define MSCAN_INITAK 0x01
44 45
45/* Use the MPC5200 MSCAN variant? */ 46/* Use the MPC5XXX MSCAN variant? */
46#ifdef CONFIG_PPC 47#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200 48#define MSCAN_FOR_MPC5XXX
48#endif 49#endif
49 50
50#ifdef MSCAN_FOR_MPC5200 51#ifdef MSCAN_FOR_MPC5XXX
51#define MSCAN_CLKSRC_BUS 0 52#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC 53#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
54#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
53#else 55#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC 56#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0 57#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
136#define MSCAN_EFF_RTR_SHIFT 0 138#define MSCAN_EFF_RTR_SHIFT 0
137#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */ 139#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
138 140
139#ifdef MSCAN_FOR_MPC5200 141#ifdef MSCAN_FOR_MPC5XXX
140#define _MSCAN_RESERVED_(n, num) u8 _res##n[num] 142#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
141#define _MSCAN_RESERVED_DSR_SIZE 2 143#define _MSCAN_RESERVED_DSR_SIZE 2
142#else 144#else
@@ -165,67 +167,66 @@ struct mscan_regs {
165 u8 cantbsel; /* + 0x14 0x0a */ 167 u8 cantbsel; /* + 0x14 0x0a */
166 u8 canidac; /* + 0x15 0x0b */ 168 u8 canidac; /* + 0x15 0x0b */
167 u8 reserved; /* + 0x16 0x0c */ 169 u8 reserved; /* + 0x16 0x0c */
168 _MSCAN_RESERVED_(6, 5); /* + 0x17 */ 170 _MSCAN_RESERVED_(6, 2); /* + 0x17 */
169#ifndef MSCAN_FOR_MPC5200 171 u8 canmisc; /* + 0x19 0x0d */
170 u8 canmisc; /* 0x0d */ 172 _MSCAN_RESERVED_(7, 2); /* + 0x1a */
171#endif
172 u8 canrxerr; /* + 0x1c 0x0e */ 173 u8 canrxerr; /* + 0x1c 0x0e */
173 u8 cantxerr; /* + 0x1d 0x0f */ 174 u8 cantxerr; /* + 0x1d 0x0f */
174 _MSCAN_RESERVED_(7, 2); /* + 0x1e */ 175 _MSCAN_RESERVED_(8, 2); /* + 0x1e */
175 u16 canidar1_0; /* + 0x20 0x10 */ 176 u16 canidar1_0; /* + 0x20 0x10 */
176 _MSCAN_RESERVED_(8, 2); /* + 0x22 */ 177 _MSCAN_RESERVED_(9, 2); /* + 0x22 */
177 u16 canidar3_2; /* + 0x24 0x12 */ 178 u16 canidar3_2; /* + 0x24 0x12 */
178 _MSCAN_RESERVED_(9, 2); /* + 0x26 */ 179 _MSCAN_RESERVED_(10, 2); /* + 0x26 */
179 u16 canidmr1_0; /* + 0x28 0x14 */ 180 u16 canidmr1_0; /* + 0x28 0x14 */
180 _MSCAN_RESERVED_(10, 2); /* + 0x2a */ 181 _MSCAN_RESERVED_(11, 2); /* + 0x2a */
181 u16 canidmr3_2; /* + 0x2c 0x16 */ 182 u16 canidmr3_2; /* + 0x2c 0x16 */
182 _MSCAN_RESERVED_(11, 2); /* + 0x2e */ 183 _MSCAN_RESERVED_(12, 2); /* + 0x2e */
183 u16 canidar5_4; /* + 0x30 0x18 */ 184 u16 canidar5_4; /* + 0x30 0x18 */
184 _MSCAN_RESERVED_(12, 2); /* + 0x32 */ 185 _MSCAN_RESERVED_(13, 2); /* + 0x32 */
185 u16 canidar7_6; /* + 0x34 0x1a */ 186 u16 canidar7_6; /* + 0x34 0x1a */
186 _MSCAN_RESERVED_(13, 2); /* + 0x36 */ 187 _MSCAN_RESERVED_(14, 2); /* + 0x36 */
187 u16 canidmr5_4; /* + 0x38 0x1c */ 188 u16 canidmr5_4; /* + 0x38 0x1c */
188 _MSCAN_RESERVED_(14, 2); /* + 0x3a */ 189 _MSCAN_RESERVED_(15, 2); /* + 0x3a */
189 u16 canidmr7_6; /* + 0x3c 0x1e */ 190 u16 canidmr7_6; /* + 0x3c 0x1e */
190 _MSCAN_RESERVED_(15, 2); /* + 0x3e */ 191 _MSCAN_RESERVED_(16, 2); /* + 0x3e */
191 struct { 192 struct {
192 u16 idr1_0; /* + 0x40 0x20 */ 193 u16 idr1_0; /* + 0x40 0x20 */
193 _MSCAN_RESERVED_(16, 2); /* + 0x42 */ 194 _MSCAN_RESERVED_(17, 2); /* + 0x42 */
194 u16 idr3_2; /* + 0x44 0x22 */ 195 u16 idr3_2; /* + 0x44 0x22 */
195 _MSCAN_RESERVED_(17, 2); /* + 0x46 */ 196 _MSCAN_RESERVED_(18, 2); /* + 0x46 */
196 u16 dsr1_0; /* + 0x48 0x24 */ 197 u16 dsr1_0; /* + 0x48 0x24 */
197 _MSCAN_RESERVED_(18, 2); /* + 0x4a */ 198 _MSCAN_RESERVED_(19, 2); /* + 0x4a */
198 u16 dsr3_2; /* + 0x4c 0x26 */ 199 u16 dsr3_2; /* + 0x4c 0x26 */
199 _MSCAN_RESERVED_(19, 2); /* + 0x4e */ 200 _MSCAN_RESERVED_(20, 2); /* + 0x4e */
200 u16 dsr5_4; /* + 0x50 0x28 */ 201 u16 dsr5_4; /* + 0x50 0x28 */
201 _MSCAN_RESERVED_(20, 2); /* + 0x52 */ 202 _MSCAN_RESERVED_(21, 2); /* + 0x52 */
202 u16 dsr7_6; /* + 0x54 0x2a */ 203 u16 dsr7_6; /* + 0x54 0x2a */
203 _MSCAN_RESERVED_(21, 2); /* + 0x56 */ 204 _MSCAN_RESERVED_(22, 2); /* + 0x56 */
204 u8 dlr; /* + 0x58 0x2c */ 205 u8 dlr; /* + 0x58 0x2c */
205 u8:8; /* + 0x59 0x2d */ 206 u8 reserved; /* + 0x59 0x2d */
206 _MSCAN_RESERVED_(22, 2); /* + 0x5a */ 207 _MSCAN_RESERVED_(23, 2); /* + 0x5a */
207 u16 time; /* + 0x5c 0x2e */ 208 u16 time; /* + 0x5c 0x2e */
208 } rx; 209 } rx;
209 _MSCAN_RESERVED_(23, 2); /* + 0x5e */ 210 _MSCAN_RESERVED_(24, 2); /* + 0x5e */
210 struct { 211 struct {
211 u16 idr1_0; /* + 0x60 0x30 */ 212 u16 idr1_0; /* + 0x60 0x30 */
212 _MSCAN_RESERVED_(24, 2); /* + 0x62 */ 213 _MSCAN_RESERVED_(25, 2); /* + 0x62 */
213 u16 idr3_2; /* + 0x64 0x32 */ 214 u16 idr3_2; /* + 0x64 0x32 */
214 _MSCAN_RESERVED_(25, 2); /* + 0x66 */ 215 _MSCAN_RESERVED_(26, 2); /* + 0x66 */
215 u16 dsr1_0; /* + 0x68 0x34 */ 216 u16 dsr1_0; /* + 0x68 0x34 */
216 _MSCAN_RESERVED_(26, 2); /* + 0x6a */ 217 _MSCAN_RESERVED_(27, 2); /* + 0x6a */
217 u16 dsr3_2; /* + 0x6c 0x36 */ 218 u16 dsr3_2; /* + 0x6c 0x36 */
218 _MSCAN_RESERVED_(27, 2); /* + 0x6e */ 219 _MSCAN_RESERVED_(28, 2); /* + 0x6e */
219 u16 dsr5_4; /* + 0x70 0x38 */ 220 u16 dsr5_4; /* + 0x70 0x38 */
220 _MSCAN_RESERVED_(28, 2); /* + 0x72 */ 221 _MSCAN_RESERVED_(29, 2); /* + 0x72 */
221 u16 dsr7_6; /* + 0x74 0x3a */ 222 u16 dsr7_6; /* + 0x74 0x3a */
222 _MSCAN_RESERVED_(29, 2); /* + 0x76 */ 223 _MSCAN_RESERVED_(30, 2); /* + 0x76 */
223 u8 dlr; /* + 0x78 0x3c */ 224 u8 dlr; /* + 0x78 0x3c */
224 u8 tbpr; /* + 0x79 0x3d */ 225 u8 tbpr; /* + 0x79 0x3d */
225 _MSCAN_RESERVED_(30, 2); /* + 0x7a */ 226 _MSCAN_RESERVED_(31, 2); /* + 0x7a */
226 u16 time; /* + 0x7c 0x3e */ 227 u16 time; /* + 0x7c 0x3e */
227 } tx; 228 } tx;
228 _MSCAN_RESERVED_(31, 2); /* + 0x7e */ 229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */
229} __attribute__ ((packed)); 230} __attribute__ ((packed));
230 231
231#undef _MSCAN_RESERVED_ 232#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
237#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ) 238#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
238#define MSCAN_SET_MODE_RETRIES 255 239#define MSCAN_SET_MODE_RETRIES 255
239#define MSCAN_ECHO_SKB_MAX 3 240#define MSCAN_ECHO_SKB_MAX 3
241#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
242 MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
243 MSCAN_TSTATE1 | MSCAN_TSTATE0)
244
245/* MSCAN type variants */
246enum {
247 MSCAN_TYPE_MPC5200,
248 MSCAN_TYPE_MPC5121
249};
240 250
241#define BTR0_BRP_MASK 0x3f 251#define BTR0_BRP_MASK 0x3f
242#define BTR0_SJW_SHIFT 6 252#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
270 280
271struct mscan_priv { 281struct mscan_priv {
272 struct can_priv can; /* must be the first member */ 282 struct can_priv can; /* must be the first member */
283 unsigned int type; /* MSCAN type variants */
273 long open_time; 284 long open_time;
274 unsigned long flags; 285 unsigned long flags;
275 void __iomem *reg_base; /* ioremap'ed address to registers */ 286 void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
285}; 296};
286 297
287extern struct net_device *alloc_mscandev(void); 298extern struct net_device *alloc_mscandev(void);
288/* 299extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
289 * clock_src:
290 * 1 = The MSCAN clock source is the onchip Bus Clock.
291 * 0 = The MSCAN clock source is the chip Oscillator Clock.
292 */
293extern int register_mscandev(struct net_device *dev, int clock_src);
294extern void unregister_mscandev(struct net_device *dev); 300extern void unregister_mscandev(struct net_device *dev);
295 301
296#endif /* __MSCAN_H__ */ 302#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d337..87300606abb 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -102,7 +102,7 @@ struct ems_pci_card {
102 102
103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ 103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
104 104
105static struct pci_device_id ems_pci_tbl[] = { 105static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
106 /* CPC-PCI v1 */ 106 /* CPC-PCI v1 */
107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, 107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
108 /* CPC-PCI v2 */ 108 /* CPC-PCI v2 */
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b971..441e776a7f5 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */ 109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
110#define KVASER_PCI_DEVICE_ID2 0x0008 110#define KVASER_PCI_DEVICE_ID2 0x0008
111 111
112static struct pci_device_id kvaser_pci_tbl[] = { 112static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,}, 113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,}, 114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
115 { 0,} 115 { 0,}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b..ace103a4483 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -249,6 +249,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
249 uint8_t dreg; 249 uint8_t dreg;
250 int i; 250 int i;
251 251
252 if (can_dropped_invalid_skb(dev, skb))
253 return NETDEV_TX_OK;
254
252 netif_stop_queue(dev); 255 netif_stop_queue(dev);
253 256
254 fi = dlc = cf->can_dlc; 257 fi = dlc = cf->can_dlc;
@@ -564,6 +567,7 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
564 priv->can.bittiming_const = &sja1000_bittiming_const; 567 priv->can.bittiming_const = &sja1000_bittiming_const;
565 priv->can.do_set_bittiming = sja1000_set_bittiming; 568 priv->can.do_set_bittiming = sja1000_set_bittiming;
566 priv->can.do_set_mode = sja1000_set_mode; 569 priv->can.do_set_mode = sja1000_set_mode;
570 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
567 571
568 if (sizeof_priv) 572 if (sizeof_priv)
569 priv->priv = (void *)priv + sizeof(struct sja1000_priv); 573 priv->priv = (void *)priv + sizeof(struct sja1000_priv);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da52..8332e242b0b 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -477,6 +477,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
477 u32 mbxno, mbx_mask, data; 477 u32 mbxno, mbx_mask, data;
478 unsigned long flags; 478 unsigned long flags;
479 479
480 if (can_dropped_invalid_skb(ndev, skb))
481 return NETDEV_TX_OK;
482
480 mbxno = get_tx_head_mb(priv); 483 mbxno = get_tx_head_mb(priv);
481 mbx_mask = BIT(mbxno); 484 mbx_mask = BIT(mbxno);
482 spin_lock_irqsave(&priv->mbx_lock, flags); 485 spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +494,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
491 spin_unlock_irqrestore(&priv->mbx_lock, flags); 494 spin_unlock_irqrestore(&priv->mbx_lock, flags);
492 495
493 /* Prepare mailbox for transmission */ 496 /* Prepare mailbox for transmission */
494 data = min_t(u8, cf->can_dlc, 8);
495 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ 497 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
496 data |= HECC_CANMCF_RTR; 498 data |= HECC_CANMCF_RTR;
497 data |= get_tx_head_prio(priv) << 8; 499 data |= get_tx_head_prio(priv) << 8;
@@ -907,6 +909,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
907 priv->can.bittiming_const = &ti_hecc_bittiming_const; 909 priv->can.bittiming_const = &ti_hecc_bittiming_const;
908 priv->can.do_set_mode = ti_hecc_do_set_mode; 910 priv->can.do_set_mode = ti_hecc_do_set_mode;
909 priv->can.do_get_state = ti_hecc_get_state; 911 priv->can.do_get_state = ti_hecc_get_state;
912 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
910 913
911 ndev->irq = irq->start; 914 ndev->irq = irq->start;
912 ndev->flags |= IFF_ECHO; 915 ndev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf..bfab283ba9b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
767 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN 767 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
768 + sizeof(struct cpc_can_msg); 768 + sizeof(struct cpc_can_msg);
769 769
770 if (can_dropped_invalid_skb(netdev, skb))
771 return NETDEV_TX_OK;
772
770 /* create a URB, and a buffer for it, and copy the data to the URB */ 773 /* create a URB, and a buffer for it, and copy the data to the URB */
771 urb = usb_alloc_urb(0, GFP_ATOMIC); 774 urb = usb_alloc_urb(0, GFP_ATOMIC);
772 if (!urb) { 775 if (!urb) {
@@ -1019,6 +1022,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1019 dev->can.bittiming_const = &ems_usb_bittiming_const; 1022 dev->can.bittiming_const = &ems_usb_bittiming_const;
1020 dev->can.do_set_bittiming = ems_usb_set_bittiming; 1023 dev->can.do_set_bittiming = ems_usb_set_bittiming;
1021 dev->can.do_set_mode = ems_usb_set_mode; 1024 dev->can.do_set_mode = ems_usb_set_mode;
1025 dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1022 1026
1023 netdev->flags |= IFF_ECHO; /* we support local echo */ 1027 netdev->flags |= IFF_ECHO; /* we support local echo */
1024 1028
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac5631398..d124d837ae5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,7 @@
47#include <linux/if_arp.h> 47#include <linux/if_arp.h>
48#include <linux/if_ether.h> 48#include <linux/if_ether.h>
49#include <linux/can.h> 49#include <linux/can.h>
50#include <linux/can/dev.h>
50#include <net/rtnetlink.h> 51#include <net/rtnetlink.h>
51 52
52static __initdata const char banner[] = 53static __initdata const char banner[] =
@@ -70,10 +71,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
70 71
71static void vcan_rx(struct sk_buff *skb, struct net_device *dev) 72static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
72{ 73{
74 struct can_frame *cf = (struct can_frame *)skb->data;
73 struct net_device_stats *stats = &dev->stats; 75 struct net_device_stats *stats = &dev->stats;
74 76
75 stats->rx_packets++; 77 stats->rx_packets++;
76 stats->rx_bytes += skb->len; 78 stats->rx_bytes += cf->can_dlc;
77 79
78 skb->protocol = htons(ETH_P_CAN); 80 skb->protocol = htons(ETH_P_CAN);
79 skb->pkt_type = PACKET_BROADCAST; 81 skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +87,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
85 87
86static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) 88static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
87{ 89{
90 struct can_frame *cf = (struct can_frame *)skb->data;
88 struct net_device_stats *stats = &dev->stats; 91 struct net_device_stats *stats = &dev->stats;
89 int loop; 92 int loop;
90 93
94 if (can_dropped_invalid_skb(dev, skb))
95 return NETDEV_TX_OK;
96
91 stats->tx_packets++; 97 stats->tx_packets++;
92 stats->tx_bytes += skb->len; 98 stats->tx_bytes += cf->can_dlc;
93 99
94 /* set flag whether this packet has to be looped back */ 100 /* set flag whether this packet has to be looped back */
95 loop = skb->pkt_type == PACKET_LOOPBACK; 101 loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +109,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
103 * CAN core already did the echo for us 109 * CAN core already did the echo for us
104 */ 110 */
105 stats->rx_packets++; 111 stats->rx_packets++;
106 stats->rx_bytes += skb->len; 112 stats->rx_bytes += cf->can_dlc;
107 } 113 }
108 kfree_skb(skb); 114 kfree_skb(skb);
109 return NETDEV_TX_OK; 115 return NETDEV_TX_OK;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e48..ad47e5126fd 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -106,7 +106,7 @@
106#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) 106#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
107#define CAS_NCPUS num_online_cpus() 107#define CAS_NCPUS num_online_cpus()
108 108
109#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL) 109#ifdef CONFIG_CASSINI_NAPI
110#define USE_NAPI 110#define USE_NAPI
111#define cas_skb_release(x) netif_receive_skb(x) 111#define cas_skb_release(x) netif_receive_skb(x)
112#else 112#else
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
236 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ 236 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
237}; 237};
238 238
239static struct pci_device_id cas_pci_tbl[] __devinitdata = { 239static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
240 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, 240 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, 242 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe0..f6462b54f82 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -334,7 +334,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
334 return adapter->params.is_asic; 334 return adapter->params.is_asic;
335} 335}
336 336
337extern struct pci_device_id t1_pci_tbl[]; 337extern const struct pci_device_id t1_pci_tbl[];
338 338
339static inline int adapter_matches_type(const adapter_t *adapter, 339static inline int adapter_matches_type(const adapter_t *adapter,
340 int version, int revision) 340 int version, int revision)
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bf..2402d372c88 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
528 528
529}; 529};
530 530
531struct pci_device_id t1_pci_tbl[] = { 531DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
532 CH_DEVICE(8, 0, CH_BRD_T110_1CU), 532 CH_DEVICE(8, 0, CH_BRD_T110_1CU),
533 CH_DEVICE(8, 1, CH_BRD_T110_1CU), 533 CH_DEVICE(8, 1, CH_BRD_T110_1CU),
534 CH_DEVICE(7, 0, CH_BRD_N110_1F), 534 CH_DEVICE(7, 0, CH_BRD_N110_1F),
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c14..73622f5312c 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -80,7 +80,7 @@ enum {
80#define CH_DEVICE(devid, idx) \ 80#define CH_DEVICE(devid, idx) \
81 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } 81 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 82
83static const struct pci_device_id cxgb3_pci_tbl[] = { 83static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
84 CH_DEVICE(0x20, 0), /* PE9000 */ 84 CH_DEVICE(0x20, 0), /* PE9000 */
85 CH_DEVICE(0x21, 1), /* T302E */ 85 CH_DEVICE(0x21, 1), /* T302E */
86 CH_DEVICE(0x22, 2), /* T310E */ 86 CH_DEVICE(0x22, 2), /* T310E */
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 75064eea1d8..9498361119d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1252,7 +1252,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1252 struct mtutab mtutab; 1252 struct mtutab mtutab;
1253 unsigned int l2t_capacity; 1253 unsigned int l2t_capacity;
1254 1254
1255 t = kcalloc(1, sizeof(*t), GFP_KERNEL); 1255 t = kzalloc(sizeof(*t), GFP_KERNEL);
1256 if (!t) 1256 if (!t)
1257 return -ENOMEM; 1257 return -ENOMEM;
1258 1258
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a..98da085445e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1052 board_name = "DEFEA"; 1052 board_name = "DEFEA";
1053 if (dfx_bus_pci) 1053 if (dfx_bus_pci)
1054 board_name = "DEFPA"; 1054 board_name = "DEFPA";
1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, " 1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1056 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
1057 print_name, board_name, dfx_use_mmio ? "" : "I/O ", 1056 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1058 (long long)bar_start, dev->irq, 1057 (long long)bar_start, dev->irq, dev->dev_addr);
1059 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1060 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1061 1058
1062 /* 1059 /*
1063 * Get memory for descriptor block, consumer block, and other buffers 1060 * Get memory for descriptor block, consumer block, and other buffers
@@ -3631,7 +3628,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
3631 const struct pci_device_id *); 3628 const struct pci_device_id *);
3632static void __devexit dfx_pci_unregister(struct pci_dev *); 3629static void __devexit dfx_pci_unregister(struct pci_dev *);
3633 3630
3634static struct pci_device_id dfx_pci_table[] = { 3631static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
3635 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) }, 3632 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3636 { } 3633 { }
3637}; 3634};
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca..7caab3d26a9 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
537 driver_data Data private to the driver. 537 driver_data Data private to the driver.
538*/ 538*/
539 539
540static const struct pci_device_id rio_pci_tbl[] = { 540static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
541 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, 541 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
542 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, 542 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
543 { } 543 { }
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d..5c7a155e849 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } 210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
211static struct pci_device_id e100_id_table[] = { 211static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), 212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), 213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), 214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d29bb532ecc..b608528f26f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
42 * Macro expands to... 42 * Macro expands to...
43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} 43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
44 */ 44 */
45static struct pci_device_id e1000_pci_tbl[] = { 45static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
46 INTEL_E1000_ETHERNET_DEVICE(0x1000), 46 INTEL_E1000_ETHERNET_DEVICE(0x1000),
47 INTEL_E1000_ETHERNET_DEVICE(0x1001), 47 INTEL_E1000_ETHERNET_DEVICE(0x1001),
48 INTEL_E1000_ETHERNET_DEVICE(0x1004), 48 INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -2127,7 +2127,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2127 rctl |= E1000_RCTL_VFE; 2127 rctl |= E1000_RCTL_VFE;
2128 } 2128 }
2129 2129
2130 if (netdev->uc.count > rar_entries - 1) { 2130 if (netdev_uc_count(netdev) > rar_entries - 1) {
2131 rctl |= E1000_RCTL_UPE; 2131 rctl |= E1000_RCTL_UPE;
2132 } else if (!(netdev->flags & IFF_PROMISC)) { 2132 } else if (!(netdev->flags & IFF_PROMISC)) {
2133 rctl &= ~E1000_RCTL_UPE; 2133 rctl &= ~E1000_RCTL_UPE;
@@ -2150,7 +2150,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2150 */ 2150 */
2151 i = 1; 2151 i = 1;
2152 if (use_uc) 2152 if (use_uc)
2153 list_for_each_entry(ha, &netdev->uc.list, list) { 2153 netdev_for_each_uc_addr(ha, netdev) {
2154 if (i == rar_entries) 2154 if (i == rar_entries)
2155 break; 2155 break;
2156 e1000_rar_set(hw, ha->addr, i++); 2156 e1000_rar_set(hw, ha->addr, i++);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 02d67d047d9..3c95acb3a87 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -267,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
267 } 267 }
268 268
269 switch (hw->mac.type) { 269 switch (hw->mac.type) {
270 case e1000_82573:
271 func->set_lan_id = e1000_set_lan_id_single_port;
272 func->check_mng_mode = e1000e_check_mng_mode_generic;
273 func->led_on = e1000e_led_on_generic;
274 break;
270 case e1000_82574: 275 case e1000_82574:
271 case e1000_82583: 276 case e1000_82583:
277 func->set_lan_id = e1000_set_lan_id_single_port;
272 func->check_mng_mode = e1000_check_mng_mode_82574; 278 func->check_mng_mode = e1000_check_mng_mode_82574;
273 func->led_on = e1000_led_on_82574; 279 func->led_on = e1000_led_on_82574;
274 break; 280 break;
@@ -922,9 +928,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
922 ew32(IMC, 0xffffffff); 928 ew32(IMC, 0xffffffff);
923 icr = er32(ICR); 929 icr = er32(ICR);
924 930
925 if (hw->mac.type == e1000_82571 && 931 /* Install any alternate MAC address into RAR0 */
926 hw->dev_spec.e82571.alt_mac_addr_is_present) 932 ret_val = e1000_check_alt_mac_addr_generic(hw);
927 e1000e_set_laa_state_82571(hw, true); 933 if (ret_val)
934 return ret_val;
935
936 e1000e_set_laa_state_82571(hw, true);
928 937
929 /* Reinitialize the 82571 serdes link state machine */ 938 /* Reinitialize the 82571 serdes link state machine */
930 if (hw->phy.media_type == e1000_media_type_internal_serdes) 939 if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1225,32 +1234,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
1225} 1234}
1226 1235
1227/** 1236/**
1228 * e1000_update_mc_addr_list_82571 - Update Multicast addresses
1229 * @hw: pointer to the HW structure
1230 * @mc_addr_list: array of multicast addresses to program
1231 * @mc_addr_count: number of multicast addresses to program
1232 * @rar_used_count: the first RAR register free to program
1233 * @rar_count: total number of supported Receive Address Registers
1234 *
1235 * Updates the Receive Address Registers and Multicast Table Array.
1236 * The caller must have a packed mc_addr_list of multicast addresses.
1237 * The parameter rar_count will usually be hw->mac.rar_entry_count
1238 * unless there are workarounds that change this.
1239 **/
1240static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
1241 u8 *mc_addr_list,
1242 u32 mc_addr_count,
1243 u32 rar_used_count,
1244 u32 rar_count)
1245{
1246 if (e1000e_get_laa_state_82571(hw))
1247 rar_count--;
1248
1249 e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
1250 rar_used_count, rar_count);
1251}
1252
1253/**
1254 * e1000_setup_link_82571 - Setup flow control and link settings 1237 * e1000_setup_link_82571 - Setup flow control and link settings
1255 * @hw: pointer to the HW structure 1238 * @hw: pointer to the HW structure
1256 * 1239 *
@@ -1621,6 +1604,29 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1621} 1604}
1622 1605
1623/** 1606/**
1607 * e1000_read_mac_addr_82571 - Read device MAC address
1608 * @hw: pointer to the HW structure
1609 **/
1610static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
1611{
1612 s32 ret_val = 0;
1613
1614 /*
1615 * If there's an alternate MAC address place it in RAR0
1616 * so that it will override the Si installed default perm
1617 * address.
1618 */
1619 ret_val = e1000_check_alt_mac_addr_generic(hw);
1620 if (ret_val)
1621 goto out;
1622
1623 ret_val = e1000_read_mac_addr_generic(hw);
1624
1625out:
1626 return ret_val;
1627}
1628
1629/**
1624 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down 1630 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
1625 * @hw: pointer to the HW structure 1631 * @hw: pointer to the HW structure
1626 * 1632 *
@@ -1695,10 +1701,11 @@ static struct e1000_mac_operations e82571_mac_ops = {
1695 .cleanup_led = e1000e_cleanup_led_generic, 1701 .cleanup_led = e1000e_cleanup_led_generic,
1696 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, 1702 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
1697 .get_bus_info = e1000e_get_bus_info_pcie, 1703 .get_bus_info = e1000e_get_bus_info_pcie,
1704 .set_lan_id = e1000_set_lan_id_multi_port_pcie,
1698 /* .get_link_up_info: media type dependent */ 1705 /* .get_link_up_info: media type dependent */
1699 /* .led_on: mac type dependent */ 1706 /* .led_on: mac type dependent */
1700 .led_off = e1000e_led_off_generic, 1707 .led_off = e1000e_led_off_generic,
1701 .update_mc_addr_list = e1000_update_mc_addr_list_82571, 1708 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
1702 .write_vfta = e1000_write_vfta_generic, 1709 .write_vfta = e1000_write_vfta_generic,
1703 .clear_vfta = e1000_clear_vfta_82571, 1710 .clear_vfta = e1000_clear_vfta_82571,
1704 .reset_hw = e1000_reset_hw_82571, 1711 .reset_hw = e1000_reset_hw_82571,
@@ -1706,6 +1713,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
1706 .setup_link = e1000_setup_link_82571, 1713 .setup_link = e1000_setup_link_82571,
1707 /* .setup_physical_interface: media type dependent */ 1714 /* .setup_physical_interface: media type dependent */
1708 .setup_led = e1000e_setup_led_generic, 1715 .setup_led = e1000e_setup_led_generic,
1716 .read_mac_addr = e1000_read_mac_addr_82571,
1709}; 1717};
1710 1718
1711static struct e1000_phy_operations e82_phy_ops_igp = { 1719static struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e02e38221ed..db05ec35574 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -460,6 +460,8 @@
460 */ 460 */
461#define E1000_RAR_ENTRIES 15 461#define E1000_RAR_ENTRIES 15
462#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ 462#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
463#define E1000_RAL_MAC_ADDR_LEN 4
464#define E1000_RAH_MAC_ADDR_LEN 2
463 465
464/* Error Codes */ 466/* Error Codes */
465#define E1000_ERR_NVM 1 467#define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d236efaf747..318bdb28a7c 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -503,6 +503,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
503extern s32 e1000e_led_on_generic(struct e1000_hw *hw); 503extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
504extern s32 e1000e_led_off_generic(struct e1000_hw *hw); 504extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
505extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); 505extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
506extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
507extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
506extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); 508extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
507extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); 509extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
508extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); 510extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -517,9 +519,7 @@ extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
517extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 519extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
518extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 520extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
519 u8 *mc_addr_list, 521 u8 *mc_addr_list,
520 u32 mc_addr_count, 522 u32 mc_addr_count);
521 u32 rar_used_count,
522 u32 rar_count);
523extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 523extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
524extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); 524extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
525extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); 525extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -530,6 +530,7 @@ extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
530extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); 530extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
531extern s32 e1000e_blink_led(struct e1000_hw *hw); 531extern s32 e1000e_blink_led(struct e1000_hw *hw);
532extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); 532extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
533extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
533extern void e1000e_reset_adaptive(struct e1000_hw *hw); 534extern void e1000e_reset_adaptive(struct e1000_hw *hw);
534extern void e1000e_update_adaptive(struct e1000_hw *hw); 535extern void e1000e_update_adaptive(struct e1000_hw *hw);
535 536
@@ -629,7 +630,15 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16
629extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); 630extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
630extern void e1000e_release_nvm(struct e1000_hw *hw); 631extern void e1000e_release_nvm(struct e1000_hw *hw);
631extern void e1000e_reload_nvm(struct e1000_hw *hw); 632extern void e1000e_reload_nvm(struct e1000_hw *hw);
632extern s32 e1000e_read_mac_addr(struct e1000_hw *hw); 633extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
634
635static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
636{
637 if (hw->mac.ops.read_mac_addr)
638 return hw->mac.ops.read_mac_addr(hw);
639
640 return e1000_read_mac_addr_generic(hw);
641}
633 642
634static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) 643static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
635{ 644{
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e2aa3b78856..27d21589a69 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -246,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
246 break; 246 break;
247 } 247 }
248 248
249 /* set lan id for port to determine which phy lock to use */
250 hw->mac.ops.set_lan_id(hw);
251
249 return 0; 252 return 0;
250} 253}
251 254
@@ -814,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
814 ew32(IMC, 0xffffffff); 817 ew32(IMC, 0xffffffff);
815 icr = er32(ICR); 818 icr = er32(ICR);
816 819
817 return 0; 820 ret_val = e1000_check_alt_mac_addr_generic(hw);
821
822 return ret_val;
818} 823}
819 824
820/** 825/**
@@ -1340,6 +1345,29 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1340} 1345}
1341 1346
1342/** 1347/**
1348 * e1000_read_mac_addr_80003es2lan - Read device MAC address
1349 * @hw: pointer to the HW structure
1350 **/
1351static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
1352{
1353 s32 ret_val = 0;
1354
1355 /*
1356 * If there's an alternate MAC address place it in RAR0
1357 * so that it will override the Si installed default perm
1358 * address.
1359 */
1360 ret_val = e1000_check_alt_mac_addr_generic(hw);
1361 if (ret_val)
1362 goto out;
1363
1364 ret_val = e1000_read_mac_addr_generic(hw);
1365
1366out:
1367 return ret_val;
1368}
1369
1370/**
1343 * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down 1371 * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
1344 * @hw: pointer to the HW structure 1372 * @hw: pointer to the HW structure
1345 * 1373 *
@@ -1403,12 +1431,14 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1403} 1431}
1404 1432
1405static struct e1000_mac_operations es2_mac_ops = { 1433static struct e1000_mac_operations es2_mac_ops = {
1434 .read_mac_addr = e1000_read_mac_addr_80003es2lan,
1406 .id_led_init = e1000e_id_led_init, 1435 .id_led_init = e1000e_id_led_init,
1407 .check_mng_mode = e1000e_check_mng_mode_generic, 1436 .check_mng_mode = e1000e_check_mng_mode_generic,
1408 /* check_for_link dependent on media type */ 1437 /* check_for_link dependent on media type */
1409 .cleanup_led = e1000e_cleanup_led_generic, 1438 .cleanup_led = e1000e_cleanup_led_generic,
1410 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, 1439 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
1411 .get_bus_info = e1000e_get_bus_info_pcie, 1440 .get_bus_info = e1000e_get_bus_info_pcie,
1441 .set_lan_id = e1000_set_lan_id_multi_port_pcie,
1412 .get_link_up_info = e1000_get_link_up_info_80003es2lan, 1442 .get_link_up_info = e1000_get_link_up_info_80003es2lan,
1413 .led_on = e1000e_led_on_generic, 1443 .led_on = e1000e_led_on_generic,
1414 .led_off = e1000e_led_off_generic, 1444 .led_off = e1000e_led_off_generic,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index eccf29b75c4..8bdcd5f24ef 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -389,6 +389,9 @@ enum e1e_registers {
389 389
390#define E1000_FUNC_1 1 390#define E1000_FUNC_1 1
391 391
392#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
393#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
394
392enum e1000_mac_type { 395enum e1000_mac_type {
393 e1000_82571, 396 e1000_82571,
394 e1000_82572, 397 e1000_82572,
@@ -746,16 +749,18 @@ struct e1000_mac_operations {
746 void (*clear_hw_cntrs)(struct e1000_hw *); 749 void (*clear_hw_cntrs)(struct e1000_hw *);
747 void (*clear_vfta)(struct e1000_hw *); 750 void (*clear_vfta)(struct e1000_hw *);
748 s32 (*get_bus_info)(struct e1000_hw *); 751 s32 (*get_bus_info)(struct e1000_hw *);
752 void (*set_lan_id)(struct e1000_hw *);
749 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); 753 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
750 s32 (*led_on)(struct e1000_hw *); 754 s32 (*led_on)(struct e1000_hw *);
751 s32 (*led_off)(struct e1000_hw *); 755 s32 (*led_off)(struct e1000_hw *);
752 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); 756 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
753 s32 (*reset_hw)(struct e1000_hw *); 757 s32 (*reset_hw)(struct e1000_hw *);
754 s32 (*init_hw)(struct e1000_hw *); 758 s32 (*init_hw)(struct e1000_hw *);
755 s32 (*setup_link)(struct e1000_hw *); 759 s32 (*setup_link)(struct e1000_hw *);
756 s32 (*setup_physical_interface)(struct e1000_hw *); 760 s32 (*setup_physical_interface)(struct e1000_hw *);
757 s32 (*setup_led)(struct e1000_hw *); 761 s32 (*setup_led)(struct e1000_hw *);
758 void (*write_vfta)(struct e1000_hw *, u32, u32); 762 void (*write_vfta)(struct e1000_hw *, u32, u32);
763 s32 (*read_mac_addr)(struct e1000_hw *);
759}; 764};
760 765
761/* Function pointers for the PHY. */ 766/* Function pointers for the PHY. */
@@ -814,6 +819,10 @@ struct e1000_mac_info {
814 u16 ifs_ratio; 819 u16 ifs_ratio;
815 u16 ifs_step_size; 820 u16 ifs_step_size;
816 u16 mta_reg_count; 821 u16 mta_reg_count;
822
823 /* Maximum size of the MTA register table in all supported adapters */
824 #define MAX_MTA_REG 128
825 u32 mta_shadow[MAX_MTA_REG];
817 u16 rar_entry_count; 826 u16 rar_entry_count;
818 827
819 u8 forced_speed_duplex; 828 u8 forced_speed_duplex;
@@ -897,7 +906,6 @@ struct e1000_fc_info {
897 906
898struct e1000_dev_spec_82571 { 907struct e1000_dev_spec_82571 {
899 bool laa_is_present; 908 bool laa_is_present;
900 bool alt_mac_addr_is_present;
901 u32 smb_counter; 909 u32 smb_counter;
902}; 910};
903 911
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b6ecd12788..54d03a0ce3c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -3368,6 +3368,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
3368 /* cleanup_led dependent on mac type */ 3368 /* cleanup_led dependent on mac type */
3369 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 3369 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
3370 .get_bus_info = e1000_get_bus_info_ich8lan, 3370 .get_bus_info = e1000_get_bus_info_ich8lan,
3371 .set_lan_id = e1000_set_lan_id_single_port,
3371 .get_link_up_info = e1000_get_link_up_info_ich8lan, 3372 .get_link_up_info = e1000_get_link_up_info_ich8lan,
3372 /* led_on dependent on mac type */ 3373 /* led_on dependent on mac type */
3373 /* led_off dependent on mac type */ 3374 /* led_off dependent on mac type */
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 2fa9b36a2c5..2425ed11d5c 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -51,10 +51,10 @@ enum e1000_mng_mode {
51 **/ 51 **/
52s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) 52s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
53{ 53{
54 struct e1000_mac_info *mac = &hw->mac;
54 struct e1000_bus_info *bus = &hw->bus; 55 struct e1000_bus_info *bus = &hw->bus;
55 struct e1000_adapter *adapter = hw->adapter; 56 struct e1000_adapter *adapter = hw->adapter;
56 u32 status; 57 u16 pcie_link_status, cap_offset;
57 u16 pcie_link_status, pci_header_type, cap_offset;
58 58
59 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 59 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
60 if (!cap_offset) { 60 if (!cap_offset) {
@@ -68,20 +68,46 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
68 PCIE_LINK_WIDTH_SHIFT); 68 PCIE_LINK_WIDTH_SHIFT);
69 } 69 }
70 70
71 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER, 71 mac->ops.set_lan_id(hw);
72 &pci_header_type);
73 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
74 status = er32(STATUS);
75 bus->func = (status & E1000_STATUS_FUNC_MASK)
76 >> E1000_STATUS_FUNC_SHIFT;
77 } else {
78 bus->func = 0;
79 }
80 72
81 return 0; 73 return 0;
82} 74}
83 75
84/** 76/**
77 * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
78 *
79 * @hw: pointer to the HW structure
80 *
81 * Determines the LAN function id by reading memory-mapped registers
82 * and swaps the port value if requested.
83 **/
84void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
85{
86 struct e1000_bus_info *bus = &hw->bus;
87 u32 reg;
88
89 /*
90 * The status register reports the correct function number
91 * for the device regardless of function swap state.
92 */
93 reg = er32(STATUS);
94 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
95}
96
97/**
98 * e1000_set_lan_id_single_port - Set LAN id for a single port device
99 * @hw: pointer to the HW structure
100 *
101 * Sets the LAN function id to zero for a single port device.
102 **/
103void e1000_set_lan_id_single_port(struct e1000_hw *hw)
104{
105 struct e1000_bus_info *bus = &hw->bus;
106
107 bus->func = 0;
108}
109
110/**
85 * e1000_clear_vfta_generic - Clear VLAN filter table 111 * e1000_clear_vfta_generic - Clear VLAN filter table
86 * @hw: pointer to the HW structure 112 * @hw: pointer to the HW structure
87 * 113 *
@@ -139,6 +165,68 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
139} 165}
140 166
141/** 167/**
168 * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
169 * @hw: pointer to the HW structure
170 *
171 * Checks the nvm for an alternate MAC address. An alternate MAC address
172 * can be setup by pre-boot software and must be treated like a permanent
173 * address and must override the actual permanent MAC address. If an
174 * alternate MAC address is found it is programmed into RAR0, replacing
175 * the permanent address that was installed into RAR0 by the Si on reset.
176 * This function will return SUCCESS unless it encounters an error while
177 * reading the EEPROM.
178 **/
179s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
180{
181 u32 i;
182 s32 ret_val = 0;
183 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
184 u8 alt_mac_addr[ETH_ALEN];
185
186 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
187 &nvm_alt_mac_addr_offset);
188 if (ret_val) {
189 e_dbg("NVM Read Error\n");
190 goto out;
191 }
192
193 if (nvm_alt_mac_addr_offset == 0xFFFF) {
194 /* There is no Alternate MAC Address */
195 goto out;
196 }
197
198 if (hw->bus.func == E1000_FUNC_1)
199 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
200 for (i = 0; i < ETH_ALEN; i += 2) {
201 offset = nvm_alt_mac_addr_offset + (i >> 1);
202 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
203 if (ret_val) {
204 e_dbg("NVM Read Error\n");
205 goto out;
206 }
207
208 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
209 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
210 }
211
212 /* if multicast bit is set, the alternate address will not be used */
213 if (alt_mac_addr[0] & 0x01) {
214 e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
215 goto out;
216 }
217
218 /*
219 * We have a valid alternate MAC address, and we want to treat it the
220 * same as the normal permanent MAC address stored by the HW into the
221 * RAR. Do this by mapping this address into RAR0.
222 */
223 e1000e_rar_set(hw, alt_mac_addr, 0);
224
225out:
226 return ret_val;
227}
228
229/**
142 * e1000e_rar_set - Set receive address register 230 * e1000e_rar_set - Set receive address register
143 * @hw: pointer to the HW structure 231 * @hw: pointer to the HW structure
144 * @addr: pointer to the receive address 232 * @addr: pointer to the receive address
@@ -252,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
252 * @hw: pointer to the HW structure 340 * @hw: pointer to the HW structure
253 * @mc_addr_list: array of multicast addresses to program 341 * @mc_addr_list: array of multicast addresses to program
254 * @mc_addr_count: number of multicast addresses to program 342 * @mc_addr_count: number of multicast addresses to program
255 * @rar_used_count: the first RAR register free to program
256 * @rar_count: total number of supported Receive Address Registers
257 * 343 *
258 * Updates the Receive Address Registers and Multicast Table Array. 344 * Updates entire Multicast Table Array.
259 * The caller must have a packed mc_addr_list of multicast addresses. 345 * The caller must have a packed mc_addr_list of multicast addresses.
260 * The parameter rar_count will usually be hw->mac.rar_entry_count
261 * unless there are workarounds that change this.
262 **/ 346 **/
263void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 347void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
264 u8 *mc_addr_list, u32 mc_addr_count, 348 u8 *mc_addr_list, u32 mc_addr_count)
265 u32 rar_used_count, u32 rar_count)
266{ 349{
267 u32 i; 350 u32 hash_value, hash_bit, hash_reg;
268 u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC); 351 int i;
269 352
270 if (!mcarray) { 353 /* clear mta_shadow */
271 printk(KERN_ERR "multicast array memory allocation failed\n"); 354 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
272 return;
273 }
274 355
275 /* 356 /* update mta_shadow from mc_addr_list */
276 * Load the first set of multicast addresses into the exact 357 for (i = 0; (u32) i < mc_addr_count; i++) {
277 * filters (RAR). If there are not enough to fill the RAR
278 * array, clear the filters.
279 */
280 for (i = rar_used_count; i < rar_count; i++) {
281 if (mc_addr_count) {
282 e1000e_rar_set(hw, mc_addr_list, i);
283 mc_addr_count--;
284 mc_addr_list += ETH_ALEN;
285 } else {
286 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
287 e1e_flush();
288 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
289 e1e_flush();
290 }
291 }
292
293 /* Load any remaining multicast addresses into the hash table. */
294 for (; mc_addr_count > 0; mc_addr_count--) {
295 u32 hash_value, hash_reg, hash_bit, mta;
296 hash_value = e1000_hash_mc_addr(hw, mc_addr_list); 358 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
297 e_dbg("Hash value = 0x%03X\n", hash_value); 359
298 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 360 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
299 hash_bit = hash_value & 0x1F; 361 hash_bit = hash_value & 0x1F;
300 mta = (1 << hash_bit);
301 mcarray[hash_reg] |= mta;
302 mc_addr_list += ETH_ALEN;
303 }
304 362
305 /* write the hash table completely */ 363 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
306 for (i = 0; i < hw->mac.mta_reg_count; i++) 364 mc_addr_list += (ETH_ALEN);
307 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]); 365 }
308 366
367 /* replace the entire MTA table */
368 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
369 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
309 e1e_flush(); 370 e1e_flush();
310 kfree(mcarray);
311} 371}
312 372
313/** 373/**
@@ -2072,67 +2132,27 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2072} 2132}
2073 2133
2074/** 2134/**
2075 * e1000e_read_mac_addr - Read device MAC address 2135 * e1000_read_mac_addr_generic - Read device MAC address
2076 * @hw: pointer to the HW structure 2136 * @hw: pointer to the HW structure
2077 * 2137 *
2078 * Reads the device MAC address from the EEPROM and stores the value. 2138 * Reads the device MAC address from the EEPROM and stores the value.
2079 * Since devices with two ports use the same EEPROM, we increment the 2139 * Since devices with two ports use the same EEPROM, we increment the
2080 * last bit in the MAC address for the second port. 2140 * last bit in the MAC address for the second port.
2081 **/ 2141 **/
2082s32 e1000e_read_mac_addr(struct e1000_hw *hw) 2142s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
2083{ 2143{
2084 s32 ret_val; 2144 u32 rar_high;
2085 u16 offset, nvm_data, i; 2145 u32 rar_low;
2086 u16 mac_addr_offset = 0; 2146 u16 i;
2087
2088 if (hw->mac.type == e1000_82571) {
2089 /* Check for an alternate MAC address. An alternate MAC
2090 * address can be setup by pre-boot software and must be
2091 * treated like a permanent address and must override the
2092 * actual permanent MAC address.*/
2093 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2094 &mac_addr_offset);
2095 if (ret_val) {
2096 e_dbg("NVM Read Error\n");
2097 return ret_val;
2098 }
2099 if (mac_addr_offset == 0xFFFF)
2100 mac_addr_offset = 0;
2101
2102 if (mac_addr_offset) {
2103 if (hw->bus.func == E1000_FUNC_1)
2104 mac_addr_offset += ETH_ALEN/sizeof(u16);
2105
2106 /* make sure we have a valid mac address here
2107 * before using it */
2108 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2109 &nvm_data);
2110 if (ret_val) {
2111 e_dbg("NVM Read Error\n");
2112 return ret_val;
2113 }
2114 if (nvm_data & 0x0001)
2115 mac_addr_offset = 0;
2116 }
2117 2147
2118 if (mac_addr_offset) 2148 rar_high = er32(RAH(0));
2119 hw->dev_spec.e82571.alt_mac_addr_is_present = 1; 2149 rar_low = er32(RAL(0));
2120 }
2121 2150
2122 for (i = 0; i < ETH_ALEN; i += 2) { 2151 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
2123 offset = mac_addr_offset + (i >> 1); 2152 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
2124 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2125 if (ret_val) {
2126 e_dbg("NVM Read Error\n");
2127 return ret_val;
2128 }
2129 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2130 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2131 }
2132 2153
2133 /* Flip last bit of mac address if we're on second port */ 2154 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
2134 if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1) 2155 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
2135 hw->mac.perm_addr[5] ^= 1;
2136 2156
2137 for (i = 0; i < ETH_ALEN; i++) 2157 for (i = 0; i < ETH_ALEN; i++)
2138 hw->mac.addr[i] = hw->mac.perm_addr[i]; 2158 hw->mac.addr[i] = hw->mac.perm_addr[i];
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57f149b75fb..14a80f8f611 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2541,22 +2541,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2541 * @hw: pointer to the HW structure 2541 * @hw: pointer to the HW structure
2542 * @mc_addr_list: array of multicast addresses to program 2542 * @mc_addr_list: array of multicast addresses to program
2543 * @mc_addr_count: number of multicast addresses to program 2543 * @mc_addr_count: number of multicast addresses to program
2544 * @rar_used_count: the first RAR register free to program
2545 * @rar_count: total number of supported Receive Address Registers
2546 * 2544 *
2547 * Updates the Receive Address Registers and Multicast Table Array. 2545 * Updates the Multicast Table Array.
2548 * The caller must have a packed mc_addr_list of multicast addresses. 2546 * The caller must have a packed mc_addr_list of multicast addresses.
2549 * The parameter rar_count will usually be hw->mac.rar_entry_count
2550 * unless there are workarounds that change this. Currently no func pointer
2551 * exists and all implementations are handled in the generic version of this
2552 * function.
2553 **/ 2547 **/
2554static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, 2548static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2555 u32 mc_addr_count, u32 rar_used_count, 2549 u32 mc_addr_count)
2556 u32 rar_count)
2557{ 2550{
2558 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 2551 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2559 rar_used_count, rar_count);
2560} 2552}
2561 2553
2562/** 2554/**
@@ -2572,7 +2564,6 @@ static void e1000_set_multi(struct net_device *netdev)
2572{ 2564{
2573 struct e1000_adapter *adapter = netdev_priv(netdev); 2565 struct e1000_adapter *adapter = netdev_priv(netdev);
2574 struct e1000_hw *hw = &adapter->hw; 2566 struct e1000_hw *hw = &adapter->hw;
2575 struct e1000_mac_info *mac = &hw->mac;
2576 struct dev_mc_list *mc_ptr; 2567 struct dev_mc_list *mc_ptr;
2577 u8 *mta_list; 2568 u8 *mta_list;
2578 u32 rctl; 2569 u32 rctl;
@@ -2614,15 +2605,14 @@ static void e1000_set_multi(struct net_device *netdev)
2614 mc_ptr = mc_ptr->next; 2605 mc_ptr = mc_ptr->next;
2615 } 2606 }
2616 2607
2617 e1000_update_mc_addr_list(hw, mta_list, i, 1, 2608 e1000_update_mc_addr_list(hw, mta_list, i);
2618 mac->rar_entry_count);
2619 kfree(mta_list); 2609 kfree(mta_list);
2620 } else { 2610 } else {
2621 /* 2611 /*
2622 * if we're called from probe, we might not have 2612 * if we're called from probe, we might not have
2623 * anything to do here, so clear out the list 2613 * anything to do here, so clear out the list
2624 */ 2614 */
2625 e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count); 2615 e1000_update_mc_addr_list(hw, NULL, 0);
2626 } 2616 }
2627} 2617}
2628 2618
@@ -5134,7 +5124,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5134 5124
5135 e1000_eeprom_checks(adapter); 5125 e1000_eeprom_checks(adapter);
5136 5126
5137 /* copy the MAC address out of the NVM */ 5127 /* copy the MAC address */
5138 if (e1000e_read_mac_addr(&adapter->hw)) 5128 if (e1000e_read_mac_addr(&adapter->hw))
5139 e_err("NVM Read Error while reading MAC address\n"); 5129 e_err("NVM Read Error while reading MAC address\n");
5140 5130
@@ -5326,7 +5316,7 @@ static struct pci_error_handlers e1000_err_handler = {
5326 .resume = e1000_io_resume, 5316 .resume = e1000_io_resume,
5327}; 5317};
5328 5318
5329static struct pci_device_id e1000_pci_tbl[] = { 5319static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5330 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 5320 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
5331 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 5321 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
5332 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 5322 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1c2076228b..ee01f5a6d0d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -34,7 +34,7 @@
34 34
35#define DRV_NAME "enic" 35#define DRV_NAME "enic"
36#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" 36#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
37#define DRV_VERSION "1.1.0.100" 37#define DRV_VERSION "1.1.0.241a"
38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc" 38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
39#define PFX DRV_NAME ": " 39#define PFX DRV_NAME ": "
40 40
@@ -89,9 +89,12 @@ struct enic {
89 spinlock_t devcmd_lock; 89 spinlock_t devcmd_lock;
90 u8 mac_addr[ETH_ALEN]; 90 u8 mac_addr[ETH_ALEN];
91 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 91 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
92 unsigned int flags;
92 unsigned int mc_count; 93 unsigned int mc_count;
93 int csum_rx_enabled; 94 int csum_rx_enabled;
94 u32 port_mtu; 95 u32 port_mtu;
96 u32 rx_coalesce_usecs;
97 u32 tx_coalesce_usecs;
95 98
96 /* work queue cache line section */ 99 /* work queue cache line section */
97 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; 100 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f875751af15..c81bc4b1816 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
51#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ 51#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
52 52
53/* Supported devices */ 53/* Supported devices */
54static struct pci_device_id enic_id_table[] = { 54static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
55 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 55 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
56 { 0, } /* end of table */ 56 { 0, } /* end of table */
57}; 57};
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
261 enic->msg_enable = value; 261 enic->msg_enable = value;
262} 262}
263 263
264static int enic_get_coalesce(struct net_device *netdev,
265 struct ethtool_coalesce *ecmd)
266{
267 struct enic *enic = netdev_priv(netdev);
268
269 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
270 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
271
272 return 0;
273}
274
275static int enic_set_coalesce(struct net_device *netdev,
276 struct ethtool_coalesce *ecmd)
277{
278 struct enic *enic = netdev_priv(netdev);
279 u32 tx_coalesce_usecs;
280 u32 rx_coalesce_usecs;
281
282 tx_coalesce_usecs = min_t(u32,
283 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
284 ecmd->tx_coalesce_usecs);
285 rx_coalesce_usecs = min_t(u32,
286 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
287 ecmd->rx_coalesce_usecs);
288
289 switch (vnic_dev_get_intr_mode(enic->vdev)) {
290 case VNIC_DEV_INTR_MODE_INTX:
291 if (tx_coalesce_usecs != rx_coalesce_usecs)
292 return -EINVAL;
293
294 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
295 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
296 break;
297 case VNIC_DEV_INTR_MODE_MSI:
298 if (tx_coalesce_usecs != rx_coalesce_usecs)
299 return -EINVAL;
300
301 vnic_intr_coalescing_timer_set(&enic->intr[0],
302 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
303 break;
304 case VNIC_DEV_INTR_MODE_MSIX:
305 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
306 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
307 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
308 INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
309 break;
310 default:
311 break;
312 }
313
314 enic->tx_coalesce_usecs = tx_coalesce_usecs;
315 enic->rx_coalesce_usecs = rx_coalesce_usecs;
316
317 return 0;
318}
319
264static const struct ethtool_ops enic_ethtool_ops = { 320static const struct ethtool_ops enic_ethtool_ops = {
265 .get_settings = enic_get_settings, 321 .get_settings = enic_get_settings,
266 .get_drvinfo = enic_get_drvinfo, 322 .get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
278 .set_sg = ethtool_op_set_sg, 334 .set_sg = ethtool_op_set_sg,
279 .get_tso = ethtool_op_get_tso, 335 .get_tso = ethtool_op_get_tso,
280 .set_tso = enic_set_tso, 336 .set_tso = enic_set_tso,
337 .get_coalesce = enic_get_coalesce,
338 .set_coalesce = enic_set_coalesce,
281 .get_flags = ethtool_op_get_flags, 339 .get_flags = ethtool_op_get_flags,
282 .set_flags = ethtool_op_set_flags, 340 .set_flags = ethtool_op_set_flags,
283}; 341};
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
363 u32 mtu = vnic_dev_mtu(enic->vdev); 421 u32 mtu = vnic_dev_mtu(enic->vdev);
364 422
365 if (mtu && mtu != enic->port_mtu) { 423 if (mtu && mtu != enic->port_mtu) {
424 enic->port_mtu = mtu;
366 if (mtu < enic->netdev->mtu) 425 if (mtu < enic->netdev->mtu)
367 printk(KERN_WARNING PFX 426 printk(KERN_WARNING PFX
368 "%s: interface MTU (%d) set higher " 427 "%s: interface MTU (%d) set higher "
369 "than switch port MTU (%d)\n", 428 "than switch port MTU (%d)\n",
370 enic->netdev->name, enic->netdev->mtu, mtu); 429 enic->netdev->name, enic->netdev->mtu, mtu);
371 enic->port_mtu = mtu;
372 } 430 }
373} 431}
374 432
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
673 731
674/* netif_tx_lock held, process context with BHs disabled, or BH */ 732/* netif_tx_lock held, process context with BHs disabled, or BH */
675static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, 733static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
676 struct net_device *netdev) 734 struct net_device *netdev)
677{ 735{
678 struct enic *enic = netdev_priv(netdev); 736 struct enic *enic = netdev_priv(netdev);
679 struct vnic_wq *wq = &enic->wq[0]; 737 struct vnic_wq *wq = &enic->wq[0];
@@ -771,6 +829,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
771 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; 829 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
772 int allmulti = (netdev->flags & IFF_ALLMULTI) || 830 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
773 (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS); 831 (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
832 unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
774 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 833 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
775 unsigned int mc_count = netdev->mc_count; 834 unsigned int mc_count = netdev->mc_count;
776 unsigned int i, j; 835 unsigned int i, j;
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
780 839
781 spin_lock(&enic->devcmd_lock); 840 spin_lock(&enic->devcmd_lock);
782 841
783 vnic_dev_packet_filter(enic->vdev, directed, 842 if (enic->flags != flags) {
784 multicast, broadcast, promisc, allmulti); 843 enic->flags = flags;
844 vnic_dev_packet_filter(enic->vdev, directed,
845 multicast, broadcast, promisc, allmulti);
846 }
785 847
786 /* Is there an easier way? Trying to minimize to 848 /* Is there an easier way? Trying to minimize to
787 * calls to add/del multicast addrs. We keep the 849 * calls to add/del multicast addrs. We keep the
@@ -1084,34 +1146,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1084 return 0; 1146 return 0;
1085} 1147}
1086 1148
1087static void enic_rq_drop_buf(struct vnic_rq *rq,
1088 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1089 int skipped, void *opaque)
1090{
1091 struct enic *enic = vnic_dev_priv(rq->vdev);
1092 struct sk_buff *skb = buf->os_buf;
1093
1094 if (skipped)
1095 return;
1096
1097 pci_unmap_single(enic->pdev, buf->dma_addr,
1098 buf->len, PCI_DMA_FROMDEVICE);
1099
1100 dev_kfree_skb_any(skb);
1101}
1102
1103static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1104 u8 type, u16 q_number, u16 completed_index, void *opaque)
1105{
1106 struct enic *enic = vnic_dev_priv(vdev);
1107
1108 vnic_rq_service(&enic->rq[q_number], cq_desc,
1109 completed_index, VNIC_RQ_RETURN_DESC,
1110 enic_rq_drop_buf, opaque);
1111
1112 return 0;
1113}
1114
1115static int enic_poll(struct napi_struct *napi, int budget) 1149static int enic_poll(struct napi_struct *napi, int budget)
1116{ 1150{
1117 struct enic *enic = container_of(napi, struct enic, napi); 1151 struct enic *enic = container_of(napi, struct enic, napi);
@@ -1119,6 +1153,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1119 unsigned int rq_work_to_do = budget; 1153 unsigned int rq_work_to_do = budget;
1120 unsigned int wq_work_to_do = -1; /* no limit */ 1154 unsigned int wq_work_to_do = -1; /* no limit */
1121 unsigned int work_done, rq_work_done, wq_work_done; 1155 unsigned int work_done, rq_work_done, wq_work_done;
1156 int err;
1122 1157
1123 /* Service RQ (first) and WQ 1158 /* Service RQ (first) and WQ
1124 */ 1159 */
@@ -1142,16 +1177,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
1142 0 /* don't unmask intr */, 1177 0 /* don't unmask intr */,
1143 0 /* don't reset intr timer */); 1178 0 /* don't reset intr timer */);
1144 1179
1145 if (rq_work_done > 0) { 1180 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1146 1181
1147 /* Replenish RQ 1182 /* Buffer allocation failed. Stay in polling
1148 */ 1183 * mode so we can try to fill the ring again.
1184 */
1149 1185
1150 vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); 1186 if (err)
1187 rq_work_done = rq_work_to_do;
1151 1188
1152 } else { 1189 if (rq_work_done < rq_work_to_do) {
1153 1190
1154 /* If no work done, flush all LROs and exit polling 1191 /* Some work done, but not enough to stay in polling,
1192 * flush all LROs and exit polling
1155 */ 1193 */
1156 1194
1157 if (netdev->features & NETIF_F_LRO) 1195 if (netdev->features & NETIF_F_LRO)
@@ -1170,6 +1208,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1170 struct net_device *netdev = enic->netdev; 1208 struct net_device *netdev = enic->netdev;
1171 unsigned int work_to_do = budget; 1209 unsigned int work_to_do = budget;
1172 unsigned int work_done; 1210 unsigned int work_done;
1211 int err;
1173 1212
1174 /* Service RQ 1213 /* Service RQ
1175 */ 1214 */
@@ -1177,25 +1216,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1177 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], 1216 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1178 work_to_do, enic_rq_service, NULL); 1217 work_to_do, enic_rq_service, NULL);
1179 1218
1180 if (work_done > 0) { 1219 /* Return intr event credits for this polling
1181 1220 * cycle. An intr event is the completion of a
1182 /* Replenish RQ 1221 * RQ packet.
1183 */ 1222 */
1184
1185 vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1186
1187 /* Return intr event credits for this polling
1188 * cycle. An intr event is the completion of a
1189 * RQ packet.
1190 */
1191 1223
1224 if (work_done > 0)
1192 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], 1225 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
1193 work_done, 1226 work_done,
1194 0 /* don't unmask intr */, 1227 0 /* don't unmask intr */,
1195 0 /* don't reset intr timer */); 1228 0 /* don't reset intr timer */);
1196 } else {
1197 1229
1198 /* If no work done, flush all LROs and exit polling 1230 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1231
1232 /* Buffer allocation failed. Stay in polling mode
1233 * so we can try to fill the ring again.
1234 */
1235
1236 if (err)
1237 work_done = work_to_do;
1238
1239 if (work_done < work_to_do) {
1240
1241 /* Some work done, but not enough to stay in polling,
1242 * flush all LROs and exit polling
1199 */ 1243 */
1200 1244
1201 if (netdev->features & NETIF_F_LRO) 1245 if (netdev->features & NETIF_F_LRO)
@@ -1304,6 +1348,24 @@ static int enic_request_intr(struct enic *enic)
1304 return err; 1348 return err;
1305} 1349}
1306 1350
1351static void enic_synchronize_irqs(struct enic *enic)
1352{
1353 unsigned int i;
1354
1355 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1356 case VNIC_DEV_INTR_MODE_INTX:
1357 case VNIC_DEV_INTR_MODE_MSI:
1358 synchronize_irq(enic->pdev->irq);
1359 break;
1360 case VNIC_DEV_INTR_MODE_MSIX:
1361 for (i = 0; i < enic->intr_count; i++)
1362 synchronize_irq(enic->msix_entry[i].vector);
1363 break;
1364 default:
1365 break;
1366 }
1367}
1368
1307static int enic_notify_set(struct enic *enic) 1369static int enic_notify_set(struct enic *enic)
1308{ 1370{
1309 int err; 1371 int err;
@@ -1360,11 +1422,13 @@ static int enic_open(struct net_device *netdev)
1360 } 1422 }
1361 1423
1362 for (i = 0; i < enic->rq_count; i++) { 1424 for (i = 0; i < enic->rq_count; i++) {
1363 err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); 1425 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
1364 if (err) { 1426 /* Need at least one buffer on ring to get going */
1427 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1365 printk(KERN_ERR PFX 1428 printk(KERN_ERR PFX
1366 "%s: Unable to alloc receive buffers.\n", 1429 "%s: Unable to alloc receive buffers.\n",
1367 netdev->name); 1430 netdev->name);
1431 err = -ENOMEM;
1368 goto err_out_notify_unset; 1432 goto err_out_notify_unset;
1369 } 1433 }
1370 } 1434 }
@@ -1409,16 +1473,19 @@ static int enic_stop(struct net_device *netdev)
1409 unsigned int i; 1473 unsigned int i;
1410 int err; 1474 int err;
1411 1475
1476 for (i = 0; i < enic->intr_count; i++)
1477 vnic_intr_mask(&enic->intr[i]);
1478
1479 enic_synchronize_irqs(enic);
1480
1412 del_timer_sync(&enic->notify_timer); 1481 del_timer_sync(&enic->notify_timer);
1413 1482
1414 spin_lock(&enic->devcmd_lock); 1483 spin_lock(&enic->devcmd_lock);
1415 vnic_dev_disable(enic->vdev); 1484 vnic_dev_disable(enic->vdev);
1416 spin_unlock(&enic->devcmd_lock); 1485 spin_unlock(&enic->devcmd_lock);
1417 napi_disable(&enic->napi); 1486 napi_disable(&enic->napi);
1418 netif_stop_queue(netdev); 1487 netif_carrier_off(netdev);
1419 1488 netif_tx_disable(netdev);
1420 for (i = 0; i < enic->intr_count; i++)
1421 vnic_intr_mask(&enic->intr[i]);
1422 1489
1423 for (i = 0; i < enic->wq_count; i++) { 1490 for (i = 0; i < enic->wq_count; i++) {
1424 err = vnic_wq_disable(&enic->wq[i]); 1491 err = vnic_wq_disable(&enic->wq[i]);
@@ -1436,11 +1503,6 @@ static int enic_stop(struct net_device *netdev)
1436 spin_unlock(&enic->devcmd_lock); 1503 spin_unlock(&enic->devcmd_lock);
1437 enic_free_intr(enic); 1504 enic_free_intr(enic);
1438 1505
1439 (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1440 -1, enic_rq_service_drop, NULL);
1441 (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
1442 -1, enic_wq_service, NULL);
1443
1444 for (i = 0; i < enic->wq_count; i++) 1506 for (i = 0; i < enic->wq_count; i++)
1445 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); 1507 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1446 for (i = 0; i < enic->rq_count; i++) 1508 for (i = 0; i < enic->rq_count; i++)
@@ -1762,7 +1824,8 @@ int enic_dev_init(struct enic *enic)
1762 err = enic_set_intr_mode(enic); 1824 err = enic_set_intr_mode(enic);
1763 if (err) { 1825 if (err) {
1764 printk(KERN_ERR PFX 1826 printk(KERN_ERR PFX
1765 "Failed to set intr mode, aborting.\n"); 1827 "Failed to set intr mode based on resource "
1828 "counts and system capabilities, aborting.\n");
1766 return err; 1829 return err;
1767 } 1830 }
1768 1831
@@ -1986,6 +2049,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1986 goto err_out_dev_deinit; 2049 goto err_out_dev_deinit;
1987 } 2050 }
1988 2051
2052 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2053 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2054
1989 netdev->netdev_ops = &enic_netdev_ops; 2055 netdev->netdev_ops = &enic_netdev_ops;
1990 netdev->watchdog_timeo = 2 * HZ; 2056 netdev->watchdog_timeo = 2 * HZ;
1991 netdev->ethtool_ops = &enic_ethtool_ops; 2057 netdev->ethtool_ops = &enic_ethtool_ops;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 32111144efc..02839bf0fe8 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -66,21 +66,21 @@ int enic_get_vnic_config(struct enic *enic)
66 GET_CONFIG(wq_desc_count); 66 GET_CONFIG(wq_desc_count);
67 GET_CONFIG(rq_desc_count); 67 GET_CONFIG(rq_desc_count);
68 GET_CONFIG(mtu); 68 GET_CONFIG(mtu);
69 GET_CONFIG(intr_timer);
70 GET_CONFIG(intr_timer_type); 69 GET_CONFIG(intr_timer_type);
71 GET_CONFIG(intr_mode); 70 GET_CONFIG(intr_mode);
71 GET_CONFIG(intr_timer_usec);
72 72
73 c->wq_desc_count = 73 c->wq_desc_count =
74 min_t(u32, ENIC_MAX_WQ_DESCS, 74 min_t(u32, ENIC_MAX_WQ_DESCS,
75 max_t(u32, ENIC_MIN_WQ_DESCS, 75 max_t(u32, ENIC_MIN_WQ_DESCS,
76 c->wq_desc_count)); 76 c->wq_desc_count));
77 c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ 77 c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
78 78
79 c->rq_desc_count = 79 c->rq_desc_count =
80 min_t(u32, ENIC_MAX_RQ_DESCS, 80 min_t(u32, ENIC_MAX_RQ_DESCS,
81 max_t(u32, ENIC_MIN_RQ_DESCS, 81 max_t(u32, ENIC_MIN_RQ_DESCS,
82 c->rq_desc_count)); 82 c->rq_desc_count));
83 c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ 83 c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
84 84
85 if (c->mtu == 0) 85 if (c->mtu == 0)
86 c->mtu = 1500; 86 c->mtu = 1500;
@@ -88,15 +88,17 @@ int enic_get_vnic_config(struct enic *enic)
88 max_t(u16, ENIC_MIN_MTU, 88 max_t(u16, ENIC_MIN_MTU,
89 c->mtu)); 89 c->mtu));
90 90
91 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); 91 c->intr_timer_usec = min_t(u32,
92 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
93 c->intr_timer_usec);
92 94
93 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n", 95 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
94 enic->mac_addr, c->wq_desc_count, c->rq_desc_count); 96 enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
95 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d " 97 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
96 "intr timer %d\n", 98 "intr timer %d usec\n",
97 c->mtu, ENIC_SETTING(enic, TXCSUM), 99 c->mtu, ENIC_SETTING(enic, TXCSUM),
98 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO), 100 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
99 ENIC_SETTING(enic, LRO), c->intr_timer); 101 ENIC_SETTING(enic, LRO), c->intr_timer_usec);
100 102
101 return 0; 103 return 0;
102} 104}
@@ -303,7 +305,7 @@ void enic_init_vnic_resources(struct enic *enic)
303 305
304 for (i = 0; i < enic->intr_count; i++) { 306 for (i = 0; i < enic->intr_count; i++) {
305 vnic_intr_init(&enic->intr[i], 307 vnic_intr_init(&enic->intr[i],
306 enic->config.intr_timer, 308 INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
307 enic->config.intr_timer_type, 309 enic->config.intr_timer_type,
308 mask_on_assertion); 310 mask_on_assertion);
309 } 311 }
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 29a48e8b59d..69b9b70c7da 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -36,7 +36,6 @@ struct vnic_res {
36}; 36};
37 37
38#define VNIC_DEV_CAP_INIT 0x0001 38#define VNIC_DEV_CAP_INIT 0x0001
39#define VNIC_DEV_CAP_PERBI 0x0002
40 39
41struct vnic_dev { 40struct vnic_dev {
42 void *priv; 41 void *priv;
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 6332ac9391b..8eeb6758491 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,6 +20,10 @@
20#ifndef _VNIC_ENIC_H_ 20#ifndef _VNIC_ENIC_H_
21#define _VNIC_ENIC_H_ 21#define _VNIC_ENIC_H_
22 22
23/* Hardware intr coalesce timer is in units of 1.5us */
24#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
25#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
26
23/* Device-specific region: enet configuration */ 27/* Device-specific region: enet configuration */
24struct vnic_enet_config { 28struct vnic_enet_config {
25 u32 flags; 29 u32 flags;
@@ -30,6 +34,7 @@ struct vnic_enet_config {
30 u8 intr_timer_type; 34 u8 intr_timer_type;
31 u8 intr_mode; 35 u8 intr_mode;
32 char devname[16]; 36 char devname[16];
37 u32 intr_timer_usec;
33}; 38};
34 39
35#define VENETF_TSO 0x1 /* TSO enabled */ 40#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 1f8786d7195..3934309a949 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -50,12 +50,18 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
50void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, 50void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
51 unsigned int coalescing_type, unsigned int mask_on_assertion) 51 unsigned int coalescing_type, unsigned int mask_on_assertion)
52{ 52{
53 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); 53 vnic_intr_coalescing_timer_set(intr, coalescing_timer);
54 iowrite32(coalescing_type, &intr->ctrl->coalescing_type); 54 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
55 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); 55 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
56 iowrite32(0, &intr->ctrl->int_credits); 56 iowrite32(0, &intr->ctrl->int_credits);
57} 57}
58 58
59void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
60 unsigned int coalescing_timer)
61{
62 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
63}
64
59void vnic_intr_clean(struct vnic_intr *intr) 65void vnic_intr_clean(struct vnic_intr *intr)
60{ 66{
61 iowrite32(0, &intr->ctrl->int_credits); 67 iowrite32(0, &intr->ctrl->int_credits);
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 9a53604edce..2fe6c6339e3 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -61,6 +61,7 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
61static inline void vnic_intr_mask(struct vnic_intr *intr) 61static inline void vnic_intr_mask(struct vnic_intr *intr)
62{ 62{
63 iowrite32(1, &intr->ctrl->mask); 63 iowrite32(1, &intr->ctrl->mask);
64 (void)ioread32(&intr->ctrl->mask);
64} 65}
65 66
66static inline void vnic_intr_return_credits(struct vnic_intr *intr, 67static inline void vnic_intr_return_credits(struct vnic_intr *intr,
@@ -101,6 +102,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
101 unsigned int index); 102 unsigned int index);
102void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, 103void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
103 unsigned int coalescing_type, unsigned int mask_on_assertion); 104 unsigned int coalescing_type, unsigned int mask_on_assertion);
105void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
106 unsigned int coalescing_timer);
104void vnic_intr_clean(struct vnic_intr *intr); 107void vnic_intr_clean(struct vnic_intr *intr);
105 108
106#endif /* _VNIC_INTR_H_ */ 109#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index eeaf329945d..cf80ab46d58 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -41,12 +41,12 @@
41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL 41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
43 43
44#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 0) 44#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
45#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 1) 45#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
46#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 2) 46#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
47#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 3) 47#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
48#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 4) 48#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
49#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 5) 49#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
50 50
51static inline void vnic_set_nic_cfg(u32 *nic_cfg, 51static inline void vnic_set_nic_cfg(u32 *nic_cfg,
52 u8 rss_default_cpu, u8 rss_hash_type, 52 u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec..1f8b11449fa 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -167,7 +167,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
167}; 167};
168 168
169 169
170static struct pci_device_id epic_pci_tbl[] = { 170static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
171 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 }, 171 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
172 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 }, 172 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
173 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 173 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index bd1db92aec1..f9d5ca07874 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -904,7 +904,7 @@ static int ethoc_probe(struct platform_device *pdev)
904 } 904 }
905 905
906 mmio = devm_request_mem_region(&pdev->dev, res->start, 906 mmio = devm_request_mem_region(&pdev->dev, res->start,
907 res->end - res->start + 1, res->name); 907 resource_size(res), res->name);
908 if (!mmio) { 908 if (!mmio) {
909 dev_err(&pdev->dev, "cannot request I/O memory space\n"); 909 dev_err(&pdev->dev, "cannot request I/O memory space\n");
910 ret = -ENXIO; 910 ret = -ENXIO;
@@ -917,7 +917,7 @@ static int ethoc_probe(struct platform_device *pdev)
917 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 917 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
918 if (res) { 918 if (res) {
919 mem = devm_request_mem_region(&pdev->dev, res->start, 919 mem = devm_request_mem_region(&pdev->dev, res->start,
920 res->end - res->start + 1, res->name); 920 resource_size(res), res->name);
921 if (!mem) { 921 if (!mem) {
922 dev_err(&pdev->dev, "cannot request memory space\n"); 922 dev_err(&pdev->dev, "cannot request memory space\n");
923 ret = -ENXIO; 923 ret = -ENXIO;
@@ -945,7 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
945 priv->dma_alloc = 0; 945 priv->dma_alloc = 0;
946 946
947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
948 mmio->end - mmio->start + 1); 948 resource_size(mmio));
949 if (!priv->iobase) { 949 if (!priv->iobase) {
950 dev_err(&pdev->dev, "cannot remap I/O memory space\n"); 950 dev_err(&pdev->dev, "cannot remap I/O memory space\n");
951 ret = -ENXIO; 951 ret = -ENXIO;
@@ -954,7 +954,7 @@ static int ethoc_probe(struct platform_device *pdev)
954 954
955 if (netdev->mem_end) { 955 if (netdev->mem_end) {
956 priv->membase = devm_ioremap_nocache(&pdev->dev, 956 priv->membase = devm_ioremap_nocache(&pdev->dev,
957 netdev->mem_start, mem->end - mem->start + 1); 957 netdev->mem_start, resource_size(mem));
958 if (!priv->membase) { 958 if (!priv->membase) {
959 dev_err(&pdev->dev, "cannot remap memory space\n"); 959 dev_err(&pdev->dev, "cannot remap memory space\n");
960 ret = -ENXIO; 960 ret = -ENXIO;
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589..e6a98129d78 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1941,7 +1941,7 @@ static int netdev_close(struct net_device *dev)
1941 return 0; 1941 return 0;
1942} 1942}
1943 1943
1944static struct pci_device_id fealnx_pci_tbl[] = { 1944static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
1945 {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1945 {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1946 {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 1946 {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
1947 {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 1947 {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804..3eb713b014f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -6198,7 +6198,7 @@ static void nv_shutdown(struct pci_dev *pdev)
6198#define nv_resume NULL 6198#define nv_resume NULL
6199#endif /* CONFIG_PM */ 6199#endif /* CONFIG_PM */
6200 6200
6201static struct pci_device_id pci_tbl[] = { 6201static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
6202 { /* nForce Ethernet Controller */ 6202 { /* nForce Ethernet Controller */
6203 PCI_DEVICE(0x10DE, 0x01C3), 6203 PCI_DEVICE(0x10DE, 0x01C3),
6204 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6204 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a..dd72c5025e6 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1990,7 +1990,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
1990 } 1990 }
1991} 1991}
1992 1992
1993static struct pci_device_id hamachi_pci_tbl[] = { 1993static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
1994 { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, }, 1994 { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
1995 { 0, } 1995 { 0, }
1996}; 1996};
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e..0c2f2e8b1c4 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -210,7 +210,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
210#endif 210#endif
211 211
212#ifdef CONFIG_PCI 212#ifdef CONFIG_PCI
213static struct pci_device_id hp100_pci_tbl[] = { 213static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
214 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, 214 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
215 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, 215 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
216 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,}, 216 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 997124d2992..0a064ce3eb4 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -60,7 +60,7 @@ static const struct e1000_info *igb_info_tbl[] = {
60 [board_82575] = &e1000_82575_info, 60 [board_82575] = &e1000_82575_info,
61}; 61};
62 62
63static struct pci_device_id igb_pci_tbl[] = { 63static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -2905,12 +2905,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
2905 int count = 0; 2905 int count = 0;
2906 2906
2907 /* return ENOMEM indicating insufficient memory for addresses */ 2907 /* return ENOMEM indicating insufficient memory for addresses */
2908 if (netdev->uc.count > rar_entries) 2908 if (netdev_uc_count(netdev) > rar_entries)
2909 return -ENOMEM; 2909 return -ENOMEM;
2910 2910
2911 if (netdev->uc.count && rar_entries) { 2911 if (!netdev_uc_empty(netdev) && rar_entries) {
2912 struct netdev_hw_addr *ha; 2912 struct netdev_hw_addr *ha;
2913 list_for_each_entry(ha, &netdev->uc.list, list) { 2913
2914 netdev_for_each_uc_addr(ha, netdev) {
2914 if (!rar_entries) 2915 if (!rar_entries)
2915 break; 2916 break;
2916 igb_rar_set_qsel(adapter, ha->addr, 2917 igb_rar_set_qsel(adapter, ha->addr,
@@ -4105,6 +4106,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4105 u32 icr = rd32(E1000_ICR); 4106 u32 icr = rd32(E1000_ICR);
4106 /* reading ICR causes bit 31 of EICR to be cleared */ 4107 /* reading ICR causes bit 31 of EICR to be cleared */
4107 4108
4109 if (icr & E1000_ICR_DRSTA)
4110 schedule_work(&adapter->reset_task);
4111
4108 if (icr & E1000_ICR_DOUTSYNC) { 4112 if (icr & E1000_ICR_DOUTSYNC) {
4109 /* HW is reporting DMA is out of sync */ 4113 /* HW is reporting DMA is out of sync */
4110 adapter->stats.doosync++; 4114 adapter->stats.doosync++;
@@ -4728,6 +4732,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4728 4732
4729 igb_write_itr(q_vector); 4733 igb_write_itr(q_vector);
4730 4734
4735 if (icr & E1000_ICR_DRSTA)
4736 schedule_work(&adapter->reset_task);
4737
4731 if (icr & E1000_ICR_DOUTSYNC) { 4738 if (icr & E1000_ICR_DOUTSYNC) {
4732 /* HW is reporting DMA is out of sync */ 4739 /* HW is reporting DMA is out of sync */
4733 adapter->stats.doosync++; 4740 adapter->stats.doosync++;
@@ -4767,6 +4774,9 @@ static irqreturn_t igb_intr(int irq, void *data)
4767 if (!(icr & E1000_ICR_INT_ASSERTED)) 4774 if (!(icr & E1000_ICR_INT_ASSERTED))
4768 return IRQ_NONE; 4775 return IRQ_NONE;
4769 4776
4777 if (icr & E1000_ICR_DRSTA)
4778 schedule_work(&adapter->reset_task);
4779
4770 if (icr & E1000_ICR_DOUTSYNC) { 4780 if (icr & E1000_ICR_DOUTSYNC) {
4771 /* HW is reporting DMA is out of sync */ 4781 /* HW is reporting DMA is out of sync */
4772 adapter->stats.doosync++; 4782 adapter->stats.doosync++;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 2aa71a766c3..23ce07d3de0 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2609,11 +2609,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2609 struct pci_dev *pdev = adapter->pdev; 2609 struct pci_dev *pdev = adapter->pdev;
2610 2610
2611 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); 2611 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2612 dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n", 2612 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2613 /* MAC address */
2614 netdev->dev_addr[0], netdev->dev_addr[1],
2615 netdev->dev_addr[2], netdev->dev_addr[3],
2616 netdev->dev_addr[4], netdev->dev_addr[5]);
2617 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); 2613 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
2618} 2614}
2619 2615
@@ -2779,11 +2775,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2779 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 2775 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2780 2776
2781 if (!is_valid_ether_addr(netdev->perm_addr)) { 2777 if (!is_valid_ether_addr(netdev->perm_addr)) {
2782 dev_err(&pdev->dev, "Invalid MAC Address: " 2778 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2783 "%02x:%02x:%02x:%02x:%02x:%02x\n", 2779 netdev->dev_addr);
2784 netdev->dev_addr[0], netdev->dev_addr[1],
2785 netdev->dev_addr[2], netdev->dev_addr[3],
2786 netdev->dev_addr[4], netdev->dev_addr[5]);
2787 err = -EIO; 2780 err = -EIO;
2788 goto err_hw_init; 2781 goto err_hw_init;
2789 } 2782 }
@@ -2885,7 +2878,7 @@ static struct pci_error_handlers igbvf_err_handler = {
2885 .resume = igbvf_io_resume, 2878 .resume = igbvf_io_resume,
2886}; 2879};
2887 2880
2888static struct pci_device_id igbvf_pci_tbl[] = { 2881static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
2889 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, 2882 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2890 { } /* terminate list */ 2883 { } /* terminate list */
2891}; 2884};
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c..81a4c5d3073 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1383,7 +1383,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
1383 */ 1383 */
1384} 1384}
1385 1385
1386static struct pci_device_id ioc3_pci_tbl[] = { 1386static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
1387 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID }, 1387 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
1388 { 0 } 1388 { 0 }
1389}; 1389};
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a..49f35e2ed19 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -92,7 +92,7 @@ static const char *ipg_brand_name[] = {
92 "D-Link NIC IP1000A" 92 "D-Link NIC IP1000A"
93}; 93};
94 94
95static struct pci_device_id ipg_pci_tbl[] __devinitdata = { 95static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d557..b7e6625ca75 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
184#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC 184#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
185#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX 185#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
186 186
187static struct pci_device_id toshoboe_pci_tbl[] = { 187static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
188 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, }, 188 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
189 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, }, 189 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
190 { } /* Terminating entry */ 190 { } /* Terminating entry */
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd545..6533c010cf5 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
121 } 121 }
122} 122}
123 123
124static struct pci_device_id via_pci_tbl[] = { 124static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
125 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 }, 125 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
126 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 }, 126 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
127 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 }, 127 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76..209d4bcface 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
59 59
60static /* const */ char drivername[] = DRIVER_NAME; 60static /* const */ char drivername[] = DRIVER_NAME;
61 61
62static struct pci_device_id vlsi_irda_table [] = { 62static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
63 { 63 {
64 .class = PCI_CLASS_WIRELESS_IRDA << 8, 64 .class = PCI_CLASS_WIRELESS_IRDA << 8,
65 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, 65 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 593d1a4f217..c56ea69762c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
50 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 50 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
51 * Class, Class Mask, private data (not used) } 51 * Class, Class Mask, private data (not used) }
52 */ 52 */
53static struct pci_device_id ixgb_pci_tbl[] = { 53static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
54 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, 54 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
55 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 55 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, 56 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index bfef0ebcba9..8f81efb4916 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,7 +33,8 @@
33obj-$(CONFIG_IXGBE) += ixgbe.o 33obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o
37 38
38ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
39 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 303e7bd39b6..e576fb4740b 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -98,6 +98,22 @@
98 98
99#define IXGBE_MAX_RSC_INT_RATE 162760 99#define IXGBE_MAX_RSC_INT_RATE 162760
100 100
101#define IXGBE_MAX_VF_MC_ENTRIES 30
102#define IXGBE_MAX_VF_FUNCTIONS 64
103#define IXGBE_MAX_VFTA_ENTRIES 128
104#define MAX_EMULATION_MAC_ADDRS 16
105#define VMDQ_P(p) ((p) + adapter->num_vfs)
106
107struct vf_data_storage {
108 unsigned char vf_mac_addresses[ETH_ALEN];
109 u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
110 u16 num_vf_mc_hashes;
111 u16 default_vf_vlan_id;
112 u16 vlans_enabled;
113 bool clear_to_send;
114 int rar;
115};
116
101/* wrapper around a pointer to a socket buffer, 117/* wrapper around a pointer to a socket buffer,
102 * so a DMA handle can be stored along with the buffer */ 118 * so a DMA handle can be stored along with the buffer */
103struct ixgbe_tx_buffer { 119struct ixgbe_tx_buffer {
@@ -171,7 +187,7 @@ struct ixgbe_ring {
171enum ixgbe_ring_f_enum { 187enum ixgbe_ring_f_enum {
172 RING_F_NONE = 0, 188 RING_F_NONE = 0,
173 RING_F_DCB, 189 RING_F_DCB,
174 RING_F_VMDQ, 190 RING_F_VMDQ, /* SR-IOV uses the same ring feature */
175 RING_F_RSS, 191 RING_F_RSS,
176 RING_F_FDIR, 192 RING_F_FDIR,
177#ifdef IXGBE_FCOE 193#ifdef IXGBE_FCOE
@@ -183,7 +199,7 @@ enum ixgbe_ring_f_enum {
183 199
184#define IXGBE_MAX_DCB_INDICES 8 200#define IXGBE_MAX_DCB_INDICES 8
185#define IXGBE_MAX_RSS_INDICES 16 201#define IXGBE_MAX_RSS_INDICES 16
186#define IXGBE_MAX_VMDQ_INDICES 16 202#define IXGBE_MAX_VMDQ_INDICES 64
187#define IXGBE_MAX_FDIR_INDICES 64 203#define IXGBE_MAX_FDIR_INDICES 64
188#ifdef IXGBE_FCOE 204#ifdef IXGBE_FCOE
189#define IXGBE_MAX_FCOE_INDICES 8 205#define IXGBE_MAX_FCOE_INDICES 8
@@ -288,6 +304,8 @@ struct ixgbe_adapter {
288 /* RX */ 304 /* RX */
289 struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */ 305 struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
290 int num_rx_queues; 306 int num_rx_queues;
307 int num_rx_pools; /* == num_rx_queues in 82598 */
308 int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
291 u64 hw_csum_rx_error; 309 u64 hw_csum_rx_error;
292 u64 hw_rx_no_dma_resources; 310 u64 hw_rx_no_dma_resources;
293 u64 non_eop_descs; 311 u64 non_eop_descs;
@@ -330,6 +348,8 @@ struct ixgbe_adapter {
330#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) 348#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
331#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28) 349#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
332#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) 350#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
351#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 30)
352#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 31)
333 353
334 u32 flags2; 354 u32 flags2;
335#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 355#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +399,11 @@ struct ixgbe_adapter {
379 u64 rsc_total_flush; 399 u64 rsc_total_flush;
380 u32 wol; 400 u32 wol;
381 u16 eeprom_version; 401 u16 eeprom_version;
402
403 /* SR-IOV */
404 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
405 unsigned int num_vfs;
406 struct vf_data_storage *vfinfo;
382}; 407};
383 408
384enum ixbge_state_t { 409enum ixbge_state_t {
@@ -440,6 +465,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
440 u16 flex_byte); 465 u16 flex_byte);
441extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, 466extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
442 u8 l4type); 467 u8 l4type);
468extern void ixgbe_set_rx_mode(struct net_device *netdev);
443#ifdef IXGBE_FCOE 469#ifdef IXGBE_FCOE
444extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 470extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
445extern int ixgbe_fso(struct ixgbe_adapter *adapter, 471extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b49bd6b9feb..d4ed6adb797 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -31,6 +31,7 @@
31 31
32#include "ixgbe.h" 32#include "ixgbe.h"
33#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
34#include "ixgbe_mbx.h"
34 35
35#define IXGBE_82599_MAX_TX_QUEUES 128 36#define IXGBE_82599_MAX_TX_QUEUES 128
36#define IXGBE_82599_MAX_RX_QUEUES 128 37#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -889,7 +890,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
889static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 890static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
890{ 891{
891 s32 status = 0; 892 s32 status = 0;
892 u32 ctrl, ctrl_ext; 893 u32 ctrl;
893 u32 i; 894 u32 i;
894 u32 autoc; 895 u32 autoc;
895 u32 autoc2; 896 u32 autoc2;
@@ -944,15 +945,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
944 status = IXGBE_ERR_RESET_FAILED; 945 status = IXGBE_ERR_RESET_FAILED;
945 hw_dbg(hw, "Reset polling failed to complete.\n"); 946 hw_dbg(hw, "Reset polling failed to complete.\n");
946 } 947 }
947 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
948 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
949 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
950 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
951 948
952 msleep(50); 949 msleep(50);
953 950
954
955
956 /* 951 /*
957 * Store the original AUTOC/AUTOC2 values if they have not been 952 * Store the original AUTOC/AUTOC2 values if they have not been
958 * stored off yet. Otherwise restore the stored original 953 * stored off yet. Otherwise restore the stored original
@@ -1095,9 +1090,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1095 bool vlan_on) 1090 bool vlan_on)
1096{ 1091{
1097 u32 regindex; 1092 u32 regindex;
1093 u32 vlvf_index;
1098 u32 bitindex; 1094 u32 bitindex;
1099 u32 bits; 1095 u32 bits;
1100 u32 first_empty_slot; 1096 u32 first_empty_slot;
1097 u32 vt_ctl;
1101 1098
1102 if (vlan > 4095) 1099 if (vlan > 4095)
1103 return IXGBE_ERR_PARAM; 1100 return IXGBE_ERR_PARAM;
@@ -1124,76 +1121,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1124 1121
1125 1122
1126 /* Part 2 1123 /* Part 2
1127 * If the vind is set 1124 * If VT mode is set
1128 * Either vlan_on 1125 * Either vlan_on
1129 * make sure the vlan is in VLVF 1126 * make sure the vlan is in VLVF
1130 * set the vind bit in the matching VLVFB 1127 * set the vind bit in the matching VLVFB
1131 * Or !vlan_on 1128 * Or !vlan_on
1132 * clear the pool bit and possibly the vind 1129 * clear the pool bit and possibly the vind
1133 */ 1130 */
1134 if (vind) { 1131 vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
1135 /* find the vlanid or the first empty slot */ 1132 if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
1136 first_empty_slot = 0; 1133 goto out;
1137
1138 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
1139 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
1140 if (!bits && !first_empty_slot)
1141 first_empty_slot = regindex;
1142 else if ((bits & 0x0FFF) == vlan)
1143 break;
1144 }
1145 1134
1146 if (regindex >= IXGBE_VLVF_ENTRIES) { 1135 /* find the vlanid or the first empty slot */
1147 if (first_empty_slot) 1136 first_empty_slot = 0;
1148 regindex = first_empty_slot; 1137
1149 else { 1138 for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
1150 hw_dbg(hw, "No space in VLVF.\n"); 1139 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
1151 goto out; 1140 if (!bits && !first_empty_slot)
1152 } 1141 first_empty_slot = vlvf_index;
1142 else if ((bits & 0x0FFF) == vlan)
1143 break;
1144 }
1145
1146 if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
1147 if (first_empty_slot)
1148 vlvf_index = first_empty_slot;
1149 else {
1150 hw_dbg(hw, "No space in VLVF.\n");
1151 goto out;
1153 } 1152 }
1153 }
1154 1154
1155 if (vlan_on) { 1155 if (vlan_on) {
1156 /* set the pool bit */ 1156 /* set the pool bit */
1157 if (vind < 32) { 1157 if (vind < 32) {
1158 bits = IXGBE_READ_REG(hw, 1158 bits = IXGBE_READ_REG(hw,
1159 IXGBE_VLVFB(regindex * 2)); 1159 IXGBE_VLVFB(vlvf_index * 2));
1160 bits |= (1 << vind); 1160 bits |= (1 << vind);
1161 IXGBE_WRITE_REG(hw, 1161 IXGBE_WRITE_REG(hw,
1162 IXGBE_VLVFB(regindex * 2), bits); 1162 IXGBE_VLVFB(vlvf_index * 2), bits);
1163 } else {
1164 bits = IXGBE_READ_REG(hw,
1165 IXGBE_VLVFB((regindex * 2) + 1));
1166 bits |= (1 << vind);
1167 IXGBE_WRITE_REG(hw,
1168 IXGBE_VLVFB((regindex * 2) + 1), bits);
1169 }
1170 } else { 1163 } else {
1171 /* clear the pool bit */ 1164 bits = IXGBE_READ_REG(hw,
1172 if (vind < 32) { 1165 IXGBE_VLVFB((vlvf_index * 2) + 1));
1173 bits = IXGBE_READ_REG(hw, 1166 bits |= (1 << (vind - 32));
1174 IXGBE_VLVFB(regindex * 2)); 1167 IXGBE_WRITE_REG(hw,
1168 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1169 }
1170 } else {
1171 /* clear the pool bit */
1172 if (vind < 32) {
1173 bits = IXGBE_READ_REG(hw,
1174 IXGBE_VLVFB(vlvf_index * 2));
1175 bits &= ~(1 << vind); 1175 bits &= ~(1 << vind);
1176 IXGBE_WRITE_REG(hw, 1176 IXGBE_WRITE_REG(hw,
1177 IXGBE_VLVFB(regindex * 2), bits); 1177 IXGBE_VLVFB(vlvf_index * 2), bits);
1178 bits |= IXGBE_READ_REG(hw, 1178 bits |= IXGBE_READ_REG(hw,
1179 IXGBE_VLVFB((regindex * 2) + 1)); 1179 IXGBE_VLVFB((vlvf_index * 2) + 1));
1180 } else { 1180 } else {
1181 bits = IXGBE_READ_REG(hw, 1181 bits = IXGBE_READ_REG(hw,
1182 IXGBE_VLVFB((regindex * 2) + 1)); 1182 IXGBE_VLVFB((vlvf_index * 2) + 1));
1183 bits &= ~(1 << vind); 1183 bits &= ~(1 << (vind - 32));
1184 IXGBE_WRITE_REG(hw, 1184 IXGBE_WRITE_REG(hw,
1185 IXGBE_VLVFB((regindex * 2) + 1), bits); 1185 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1186 bits |= IXGBE_READ_REG(hw, 1186 bits |= IXGBE_READ_REG(hw,
1187 IXGBE_VLVFB(regindex * 2)); 1187 IXGBE_VLVFB(vlvf_index * 2));
1188 }
1189 } 1188 }
1189 }
1190 1190
1191 if (bits) 1191 if (bits) {
1192 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 1192 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
1193 (IXGBE_VLVF_VIEN | vlan)); 1193 (IXGBE_VLVF_VIEN | vlan));
1194 else 1194 /* if bits is non-zero then some pools/VFs are still
1195 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0); 1195 * using this VLAN ID. Force the VFTA entry to on */
1196 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1197 bits |= (1 << bitindex);
1198 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1196 } 1199 }
1200 else
1201 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
1197 1202
1198out: 1203out:
1199 return 0; 1204 return 0;
@@ -2655,4 +2660,5 @@ struct ixgbe_info ixgbe_82599_info = {
2655 .mac_ops = &mac_ops_82599, 2660 .mac_ops = &mac_ops_82599,
2656 .eeprom_ops = &eeprom_ops_82599, 2661 .eeprom_ops = &eeprom_ops_82599,
2657 .phy_ops = &phy_ops_82599, 2662 .phy_ops = &phy_ops_82599,
2663 .mbx_ops = &mbx_ops_82599,
2658}; 2664};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 21f158f79dd..eb49020903c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,7 +28,6 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/list.h>
32#include <linux/netdevice.h> 31#include <linux/netdevice.h>
33 32
34#include "ixgbe.h" 33#include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1278 /* Get the MAC address from the RAR0 for later reference */ 1277 /* Get the MAC address from the RAR0 for later reference */
1279 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1278 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1280 1279
1281 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1280 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1282 hw->mac.addr[0], hw->mac.addr[1],
1283 hw->mac.addr[2]);
1284 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1285 hw->mac.addr[4], hw->mac.addr[5]);
1286 } else { 1281 } else {
1287 /* Setup the receive address. */ 1282 /* Setup the receive address. */
1288 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1283 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1289 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", 1284 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1290 hw->mac.addr[0], hw->mac.addr[1],
1291 hw->mac.addr[2]);
1292 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1293 hw->mac.addr[4], hw->mac.addr[5]);
1294 1285
1295 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1286 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1296 } 1287 }
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1355/** 1346/**
1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1347 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1357 * @hw: pointer to hardware structure 1348 * @hw: pointer to hardware structure
1358 * @uc_list: the list of new addresses 1349 * @netdev: pointer to net device structure
1359 * 1350 *
1360 * The given list replaces any existing list. Clears the secondary addrs from 1351 * The given list replaces any existing list. Clears the secondary addrs from
1361 * receive address registers. Uses unused receive address registers for the 1352 * receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1365 * manually putting the device into promiscuous mode. 1356 * manually putting the device into promiscuous mode.
1366 **/ 1357 **/
1367s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 1358s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1368 struct list_head *uc_list) 1359 struct net_device *netdev)
1369{ 1360{
1370 u32 i; 1361 u32 i;
1371 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 1362 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1389 } 1380 }
1390 1381
1391 /* Add the new addresses */ 1382 /* Add the new addresses */
1392 list_for_each_entry(ha, uc_list, list) { 1383 netdev_for_each_uc_addr(ha, netdev) {
1393 hw_dbg(hw, " Adding the secondary addresses:\n"); 1384 hw_dbg(hw, " Adding the secondary addresses:\n");
1394 ixgbe_add_uc_addr(hw, ha->addr, 0); 1385 ixgbe_add_uc_addr(hw, ha->addr, 0);
1395 } 1386 }
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dfff0ffaa50..13606d4809c 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
60 u32 mc_addr_count, 60 u32 mc_addr_count,
61 ixgbe_mc_addr_itr func); 61 ixgbe_mc_addr_itr func);
62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
63 struct list_head *uc_list); 63 struct net_device *netdev);
64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d77961fc75f..1525c86cbcc 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1867,11 +1867,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
1867 if (ixgbe_intr_test(adapter, &data[2])) 1867 if (ixgbe_intr_test(adapter, &data[2]))
1868 eth_test->flags |= ETH_TEST_FL_FAILED; 1868 eth_test->flags |= ETH_TEST_FL_FAILED;
1869 1869
1870 /* If SRIOV or VMDq is enabled then skip MAC
1871 * loopback diagnostic. */
1872 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1873 IXGBE_FLAG_VMDQ_ENABLED)) {
1874 DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
1875 "mode\n");
1876 data[3] = 0;
1877 goto skip_loopback;
1878 }
1879
1870 ixgbe_reset(adapter); 1880 ixgbe_reset(adapter);
1871 DPRINTK(HW, INFO, "loopback testing starting\n"); 1881 DPRINTK(HW, INFO, "loopback testing starting\n");
1872 if (ixgbe_loopback_test(adapter, &data[3])) 1882 if (ixgbe_loopback_test(adapter, &data[3]))
1873 eth_test->flags |= ETH_TEST_FL_FAILED; 1883 eth_test->flags |= ETH_TEST_FL_FAILED;
1874 1884
1885skip_loopback:
1875 ixgbe_reset(adapter); 1886 ixgbe_reset(adapter);
1876 1887
1877 clear_bit(__IXGBE_TESTING, &adapter->state); 1888 clear_bit(__IXGBE_TESTING, &adapter->state);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b5f64ad6797..636985224af 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,6 +45,7 @@
45#include "ixgbe.h" 45#include "ixgbe.h"
46#include "ixgbe_common.h" 46#include "ixgbe_common.h"
47#include "ixgbe_dcb_82599.h" 47#include "ixgbe_dcb_82599.h"
48#include "ixgbe_sriov.h"
48 49
49char ixgbe_driver_name[] = "ixgbe"; 50char ixgbe_driver_name[] = "ixgbe";
50static const char ixgbe_driver_string[] = 51static const char ixgbe_driver_string[] =
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
67 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 68 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
68 * Class, Class Mask, private data (not used) } 69 * Class, Class Mask, private data (not used) }
69 */ 70 */
70static struct pci_device_id ixgbe_pci_tbl[] = { 71static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), 72 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
72 board_82598 }, 73 board_82598 },
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 74 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
124}; 125};
125#endif 126#endif
126 127
128#ifdef CONFIG_PCI_IOV
129static unsigned int max_vfs;
130module_param(max_vfs, uint, 0);
131MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
132 "per physical function");
133#endif /* CONFIG_PCI_IOV */
134
127MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 135MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
128MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); 136MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
129MODULE_LICENSE("GPL"); 137MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
131 139
132#define DEFAULT_DEBUG_LEVEL_SHIFT 3 140#define DEFAULT_DEBUG_LEVEL_SHIFT 3
133 141
142static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
143{
144 struct ixgbe_hw *hw = &adapter->hw;
145 u32 gcr;
146 u32 gpie;
147 u32 vmdctl;
148
149#ifdef CONFIG_PCI_IOV
150 /* disable iov and allow time for transactions to clear */
151 pci_disable_sriov(adapter->pdev);
152#endif
153
154 /* turn off device IOV mode */
155 gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
156 gcr &= ~(IXGBE_GCR_EXT_SRIOV);
157 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
158 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
159 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
160 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
161
162 /* set default pool back to 0 */
163 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
164 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
165 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
166
167 /* take a breather then clean up driver data */
168 msleep(100);
169 if (adapter->vfinfo)
170 kfree(adapter->vfinfo);
171 adapter->vfinfo = NULL;
172
173 adapter->num_vfs = 0;
174 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
175}
176
134static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 177static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
135{ 178{
136 u32 ctrl_ext; 179 u32 ctrl_ext;
@@ -1025,7 +1068,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1025 1068
1026 /* set up to autoclear timer, and the vectors */ 1069 /* set up to autoclear timer, and the vectors */
1027 mask = IXGBE_EIMS_ENABLE_MASK; 1070 mask = IXGBE_EIMS_ENABLE_MASK;
1028 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 1071 if (adapter->num_vfs)
1072 mask &= ~(IXGBE_EIMS_OTHER |
1073 IXGBE_EIMS_MAILBOX |
1074 IXGBE_EIMS_LSC);
1075 else
1076 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1029 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 1077 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
1030} 1078}
1031 1079
@@ -1254,6 +1302,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1254 if (eicr & IXGBE_EICR_LSC) 1302 if (eicr & IXGBE_EICR_LSC)
1255 ixgbe_check_lsc(adapter); 1303 ixgbe_check_lsc(adapter);
1256 1304
1305 if (eicr & IXGBE_EICR_MAILBOX)
1306 ixgbe_msg_task(adapter);
1307
1257 if (hw->mac.type == ixgbe_mac_82598EB) 1308 if (hw->mac.type == ixgbe_mac_82598EB)
1258 ixgbe_check_fan_failure(adapter, eicr); 1309 ixgbe_check_fan_failure(adapter, eicr);
1259 1310
@@ -1768,6 +1819,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1768 mask |= IXGBE_EIMS_ECC; 1819 mask |= IXGBE_EIMS_ECC;
1769 mask |= IXGBE_EIMS_GPI_SDP1; 1820 mask |= IXGBE_EIMS_GPI_SDP1;
1770 mask |= IXGBE_EIMS_GPI_SDP2; 1821 mask |= IXGBE_EIMS_GPI_SDP2;
1822 if (adapter->num_vfs)
1823 mask |= IXGBE_EIMS_MAILBOX;
1771 } 1824 }
1772 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 1825 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1773 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 1826 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1776,6 +1829,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1776 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1829 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1777 ixgbe_irq_enable_queues(adapter, ~0); 1830 ixgbe_irq_enable_queues(adapter, ~0);
1778 IXGBE_WRITE_FLUSH(&adapter->hw); 1831 IXGBE_WRITE_FLUSH(&adapter->hw);
1832
1833 if (adapter->num_vfs > 32) {
1834 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
1835 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
1836 }
1779} 1837}
1780 1838
1781/** 1839/**
@@ -1905,6 +1963,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1905 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 1963 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 1964 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
1907 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 1965 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1966 if (adapter->num_vfs > 32)
1967 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
1908 } 1968 }
1909 IXGBE_WRITE_FLUSH(&adapter->hw); 1969 IXGBE_WRITE_FLUSH(&adapter->hw);
1910 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1970 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1989,18 +2049,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1989 2049
1990 if (hw->mac.type == ixgbe_mac_82599EB) { 2050 if (hw->mac.type == ixgbe_mac_82599EB) {
1991 u32 rttdcs; 2051 u32 rttdcs;
2052 u32 mask;
1992 2053
1993 /* disable the arbiter while setting MTQC */ 2054 /* disable the arbiter while setting MTQC */
1994 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2055 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
1995 rttdcs |= IXGBE_RTTDCS_ARBDIS; 2056 rttdcs |= IXGBE_RTTDCS_ARBDIS;
1996 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2057 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
1997 2058
1998 /* We enable 8 traffic classes, DCB only */ 2059 /* set transmit pool layout */
1999 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 2060 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2000 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA | 2061 switch (adapter->flags & mask) {
2001 IXGBE_MTQC_8TC_8TQ)); 2062
2002 else 2063 case (IXGBE_FLAG_SRIOV_ENABLED):
2064 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2065 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2066 break;
2067
2068 case (IXGBE_FLAG_DCB_ENABLED):
2069 /* We enable 8 traffic classes, DCB only */
2070 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2071 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2072 break;
2073
2074 default:
2003 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2075 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2076 break;
2077 }
2004 2078
2005 /* re-eable the arbiter */ 2079 /* re-eable the arbiter */
2006 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2080 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2059,12 +2133,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2059#ifdef CONFIG_IXGBE_DCB 2133#ifdef CONFIG_IXGBE_DCB
2060 | IXGBE_FLAG_DCB_ENABLED 2134 | IXGBE_FLAG_DCB_ENABLED
2061#endif 2135#endif
2136 | IXGBE_FLAG_SRIOV_ENABLED
2062 ); 2137 );
2063 2138
2064 switch (mask) { 2139 switch (mask) {
2065 case (IXGBE_FLAG_RSS_ENABLED): 2140 case (IXGBE_FLAG_RSS_ENABLED):
2066 mrqc = IXGBE_MRQC_RSSEN; 2141 mrqc = IXGBE_MRQC_RSSEN;
2067 break; 2142 break;
2143 case (IXGBE_FLAG_SRIOV_ENABLED):
2144 mrqc = IXGBE_MRQC_VMDQEN;
2145 break;
2068#ifdef CONFIG_IXGBE_DCB 2146#ifdef CONFIG_IXGBE_DCB
2069 case (IXGBE_FLAG_DCB_ENABLED): 2147 case (IXGBE_FLAG_DCB_ENABLED):
2070 mrqc = IXGBE_MRQC_RT8TCEN; 2148 mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2145,7 +2223,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2145 int rx_buf_len; 2223 int rx_buf_len;
2146 2224
2147 /* Decide whether to use packet split mode or not */ 2225 /* Decide whether to use packet split mode or not */
2148 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 2226 /* Do not use packet split if we're in SR-IOV Mode */
2227 if (!adapter->num_vfs)
2228 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2149 2229
2150 /* Set the RX buffer length according to the mode */ 2230 /* Set the RX buffer length according to the mode */
2151 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 2231 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2157,7 +2237,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2157 IXGBE_PSRTYPE_IPV4HDR | 2237 IXGBE_PSRTYPE_IPV4HDR |
2158 IXGBE_PSRTYPE_IPV6HDR | 2238 IXGBE_PSRTYPE_IPV6HDR |
2159 IXGBE_PSRTYPE_L2HDR; 2239 IXGBE_PSRTYPE_L2HDR;
2160 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2240 IXGBE_WRITE_REG(hw,
2241 IXGBE_PSRTYPE(adapter->num_vfs),
2242 psrtype);
2161 } 2243 }
2162 } else { 2244 } else {
2163 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 2245 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2243,6 +2325,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2243 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2325 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2244 } 2326 }
2245 2327
2328 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2329 u32 vt_reg_bits;
2330 u32 reg_offset, vf_shift;
2331 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2332 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2333 | IXGBE_VT_CTL_REPLEN;
2334 vt_reg_bits |= (adapter->num_vfs <<
2335 IXGBE_VT_CTL_POOL_SHIFT);
2336 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2337 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2338
2339 vf_shift = adapter->num_vfs % 32;
2340 reg_offset = adapter->num_vfs / 32;
2341 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2342 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2343 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2344 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2345 /* Enable only the PF's pool for Tx/Rx */
2346 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2347 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2348 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2349 ixgbe_set_vmolr(hw, adapter->num_vfs);
2350 }
2351
2246 /* Program MRQC for the distribution of queues */ 2352 /* Program MRQC for the distribution of queues */
2247 mrqc = ixgbe_setup_mrqc(adapter); 2353 mrqc = ixgbe_setup_mrqc(adapter);
2248 2354
@@ -2274,6 +2380,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2274 } 2380 }
2275 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2381 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2276 2382
2383 if (adapter->num_vfs) {
2384 u32 reg;
2385
2386 /* Map PF MAC address in RAR Entry 0 to first pool
2387 * following VFs */
2388 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2389
2390 /* Set up VF register offsets for selected VT Mode, i.e.
2391 * 64 VFs for SR-IOV */
2392 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2393 reg |= IXGBE_GCR_EXT_SRIOV;
2394 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
2395 }
2396
2277 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2397 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2278 2398
2279 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 2399 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2312,15 +2432,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2312{ 2432{
2313 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2433 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2314 struct ixgbe_hw *hw = &adapter->hw; 2434 struct ixgbe_hw *hw = &adapter->hw;
2435 int pool_ndx = adapter->num_vfs;
2315 2436
2316 /* add VID to filter table */ 2437 /* add VID to filter table */
2317 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true); 2438 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
2318} 2439}
2319 2440
2320static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2441static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2321{ 2442{
2322 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2443 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2323 struct ixgbe_hw *hw = &adapter->hw; 2444 struct ixgbe_hw *hw = &adapter->hw;
2445 int pool_ndx = adapter->num_vfs;
2324 2446
2325 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2447 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2326 ixgbe_irq_disable(adapter); 2448 ixgbe_irq_disable(adapter);
@@ -2331,7 +2453,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2331 ixgbe_irq_enable(adapter); 2453 ixgbe_irq_enable(adapter);
2332 2454
2333 /* remove VID from filter table */ 2455 /* remove VID from filter table */
2334 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); 2456 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
2335} 2457}
2336 2458
2337static void ixgbe_vlan_rx_register(struct net_device *netdev, 2459static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2414,7 +2536,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2414 * responsible for configuring the hardware for proper unicast, multicast and 2536 * responsible for configuring the hardware for proper unicast, multicast and
2415 * promiscuous mode. 2537 * promiscuous mode.
2416 **/ 2538 **/
2417static void ixgbe_set_rx_mode(struct net_device *netdev) 2539void ixgbe_set_rx_mode(struct net_device *netdev)
2418{ 2540{
2419 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2541 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2420 struct ixgbe_hw *hw = &adapter->hw; 2542 struct ixgbe_hw *hw = &adapter->hw;
@@ -2446,7 +2568,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
2446 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2568 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2447 2569
2448 /* reprogram secondary unicast list */ 2570 /* reprogram secondary unicast list */
2449 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list); 2571 hw->mac.ops.update_uc_addr_list(hw, netdev);
2450 2572
2451 /* reprogram multicast list */ 2573 /* reprogram multicast list */
2452 addr_count = netdev->mc_count; 2574 addr_count = netdev->mc_count;
@@ -2454,6 +2576,8 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
2454 addr_list = netdev->mc_list->dmi_addr; 2576 addr_list = netdev->mc_list->dmi_addr;
2455 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, 2577 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2456 ixgbe_addr_list_itr); 2578 ixgbe_addr_list_itr);
2579 if (adapter->num_vfs)
2580 ixgbe_restore_vf_multicasts(adapter);
2457} 2581}
2458 2582
2459static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 2583static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2702,6 +2826,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2702 u32 txdctl, rxdctl, mhadd; 2826 u32 txdctl, rxdctl, mhadd;
2703 u32 dmatxctl; 2827 u32 dmatxctl;
2704 u32 gpie; 2828 u32 gpie;
2829 u32 ctrl_ext;
2705 2830
2706 ixgbe_get_hw_control(adapter); 2831 ixgbe_get_hw_control(adapter);
2707 2832
@@ -2714,6 +2839,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2714 /* MSI only */ 2839 /* MSI only */
2715 gpie = 0; 2840 gpie = 0;
2716 } 2841 }
2842 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2843 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
2844 gpie |= IXGBE_GPIE_VTMODE_64;
2845 }
2717 /* XXX: to interrupt immediately for EICS writes, enable this */ 2846 /* XXX: to interrupt immediately for EICS writes, enable this */
2718 /* gpie |= IXGBE_GPIE_EIMEN; */ 2847 /* gpie |= IXGBE_GPIE_EIMEN; */
2719 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2848 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2788,6 +2917,18 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2788 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2917 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2789 txdctl |= IXGBE_TXDCTL_ENABLE; 2918 txdctl |= IXGBE_TXDCTL_ENABLE;
2790 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2919 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2920 if (hw->mac.type == ixgbe_mac_82599EB) {
2921 int wait_loop = 10;
2922 /* poll for Tx Enable ready */
2923 do {
2924 msleep(1);
2925 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2926 } while (--wait_loop &&
2927 !(txdctl & IXGBE_TXDCTL_ENABLE));
2928 if (!wait_loop)
2929 DPRINTK(DRV, ERR, "Could not enable "
2930 "Tx Queue %d\n", j);
2931 }
2791 } 2932 }
2792 2933
2793 for (i = 0; i < num_rx_rings; i++) { 2934 for (i = 0; i < num_rx_rings; i++) {
@@ -2875,6 +3016,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2875 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3016 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2876 adapter->link_check_timeout = jiffies; 3017 adapter->link_check_timeout = jiffies;
2877 mod_timer(&adapter->watchdog_timer, jiffies); 3018 mod_timer(&adapter->watchdog_timer, jiffies);
3019
3020 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3021 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3022 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3023 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3024
2878 return 0; 3025 return 0;
2879} 3026}
2880 3027
@@ -2923,7 +3070,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
2923 } 3070 }
2924 3071
2925 /* reprogram the RAR[0] in case user changed it. */ 3072 /* reprogram the RAR[0] in case user changed it. */
2926 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3073 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3074 IXGBE_RAH_AV);
2927} 3075}
2928 3076
2929/** 3077/**
@@ -3055,6 +3203,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3055 /* signal that we are down to the interrupt handler */ 3203 /* signal that we are down to the interrupt handler */
3056 set_bit(__IXGBE_DOWN, &adapter->state); 3204 set_bit(__IXGBE_DOWN, &adapter->state);
3057 3205
3206 /* disable receive for all VFs and wait one second */
3207 if (adapter->num_vfs) {
3208 for (i = 0 ; i < adapter->num_vfs; i++)
3209 adapter->vfinfo[i].clear_to_send = 0;
3210
3211 /* ping all the active vfs to let them know we are going down */
3212 ixgbe_ping_all_vfs(adapter);
3213 /* Disable all VFTE/VFRE TX/RX */
3214 ixgbe_disable_tx_rx(adapter);
3215 }
3216
3058 /* disable receives */ 3217 /* disable receives */
3059 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3218 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3060 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3219 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3291,6 +3450,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3291} 3450}
3292 3451
3293#endif /* IXGBE_FCOE */ 3452#endif /* IXGBE_FCOE */
3453/**
3454 * ixgbe_set_sriov_queues: Allocate queues for IOV use
3455 * @adapter: board private structure to initialize
3456 *
3457 * IOV doesn't actually use anything, so just NAK the
3458 * request for now and let the other queue routines
3459 * figure out what to do.
3460 */
3461static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
3462{
3463 return false;
3464}
3465
3294/* 3466/*
3295 * ixgbe_set_num_queues: Allocate queues for device, feature dependant 3467 * ixgbe_set_num_queues: Allocate queues for device, feature dependant
3296 * @adapter: board private structure to initialize 3468 * @adapter: board private structure to initialize
@@ -3304,6 +3476,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3304 **/ 3476 **/
3305static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 3477static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3306{ 3478{
3479 /* Start with base case */
3480 adapter->num_rx_queues = 1;
3481 adapter->num_tx_queues = 1;
3482 adapter->num_rx_pools = adapter->num_rx_queues;
3483 adapter->num_rx_queues_per_pool = 1;
3484
3485 if (ixgbe_set_sriov_queues(adapter))
3486 return;
3487
3307#ifdef IXGBE_FCOE 3488#ifdef IXGBE_FCOE
3308 if (ixgbe_set_fcoe_queues(adapter)) 3489 if (ixgbe_set_fcoe_queues(adapter))
3309 goto done; 3490 goto done;
@@ -3575,6 +3756,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3575 3756
3576#endif /* IXGBE_FCOE */ 3757#endif /* IXGBE_FCOE */
3577/** 3758/**
3759 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
3760 * @adapter: board private structure to initialize
3761 *
3762 * SR-IOV doesn't use any descriptor rings but changes the default if
3763 * no other mapping is used.
3764 *
3765 */
3766static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
3767{
3768 adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
3769 adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
3770 if (adapter->num_vfs)
3771 return true;
3772 else
3773 return false;
3774}
3775
3776/**
3578 * ixgbe_cache_ring_register - Descriptor ring to register mapping 3777 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3579 * @adapter: board private structure to initialize 3778 * @adapter: board private structure to initialize
3580 * 3779 *
@@ -3591,6 +3790,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3591 adapter->rx_ring[0].reg_idx = 0; 3790 adapter->rx_ring[0].reg_idx = 0;
3592 adapter->tx_ring[0].reg_idx = 0; 3791 adapter->tx_ring[0].reg_idx = 0;
3593 3792
3793 if (ixgbe_cache_ring_sriov(adapter))
3794 return;
3795
3594#ifdef IXGBE_FCOE 3796#ifdef IXGBE_FCOE
3595 if (ixgbe_cache_ring_fcoe(adapter)) 3797 if (ixgbe_cache_ring_fcoe(adapter))
3596 return; 3798 return;
@@ -3700,6 +3902,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3700 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 3902 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3701 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 3903 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3702 adapter->atr_sample_rate = 0; 3904 adapter->atr_sample_rate = 0;
3905 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3906 ixgbe_disable_sriov(adapter);
3907
3703 ixgbe_set_num_queues(adapter); 3908 ixgbe_set_num_queues(adapter);
3704 3909
3705 err = pci_enable_msi(adapter->pdev); 3910 err = pci_enable_msi(adapter->pdev);
@@ -5484,7 +5689,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
5484 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 5689 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5485 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 5690 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5486 5691
5487 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 5692 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
5693 IXGBE_RAH_AV);
5488 5694
5489 return 0; 5695 return 0;
5490} 5696}
@@ -5621,6 +5827,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5621#endif /* IXGBE_FCOE */ 5827#endif /* IXGBE_FCOE */
5622}; 5828};
5623 5829
5830static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
5831 const struct ixgbe_info *ii)
5832{
5833#ifdef CONFIG_PCI_IOV
5834 struct ixgbe_hw *hw = &adapter->hw;
5835 int err;
5836
5837 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
5838 return;
5839
5840 /* The 82599 supports up to 64 VFs per physical function
5841 * but this implementation limits allocation to 63 so that
5842 * basic networking resources are still available to the
5843 * physical function
5844 */
5845 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
5846 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
5847 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
5848 if (err) {
5849 DPRINTK(PROBE, ERR,
5850 "Failed to enable PCI sriov: %d\n", err);
5851 goto err_novfs;
5852 }
5853 /* If call to enable VFs succeeded then allocate memory
5854 * for per VF control structures.
5855 */
5856 adapter->vfinfo =
5857 kcalloc(adapter->num_vfs,
5858 sizeof(struct vf_data_storage), GFP_KERNEL);
5859 if (adapter->vfinfo) {
5860 /* Now that we're sure SR-IOV is enabled
5861 * and memory allocated set up the mailbox parameters
5862 */
5863 ixgbe_init_mbx_params_pf(hw);
5864 memcpy(&hw->mbx.ops, ii->mbx_ops,
5865 sizeof(hw->mbx.ops));
5866
5867 /* Disable RSC when in SR-IOV mode */
5868 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
5869 IXGBE_FLAG2_RSC_ENABLED);
5870 return;
5871 }
5872
5873 /* Oh oh */
5874 DPRINTK(PROBE, ERR,
5875 "Unable to allocate memory for VF "
5876 "Data Storage - SRIOV disabled\n");
5877 pci_disable_sriov(adapter->pdev);
5878
5879err_novfs:
5880 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
5881 adapter->num_vfs = 0;
5882#endif /* CONFIG_PCI_IOV */
5883}
5884
5624/** 5885/**
5625 * ixgbe_probe - Device Initialization Routine 5886 * ixgbe_probe - Device Initialization Routine
5626 * @pdev: PCI device information struct 5887 * @pdev: PCI device information struct
@@ -5795,6 +6056,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5795 goto err_sw_init; 6056 goto err_sw_init;
5796 } 6057 }
5797 6058
6059 ixgbe_probe_vf(adapter, ii);
6060
5798 netdev->features = NETIF_F_SG | 6061 netdev->features = NETIF_F_SG |
5799 NETIF_F_IP_CSUM | 6062 NETIF_F_IP_CSUM |
5800 NETIF_F_HW_VLAN_TX | 6063 NETIF_F_HW_VLAN_TX |
@@ -5815,6 +6078,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5815 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 6078 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
5816 netdev->vlan_features |= NETIF_F_SG; 6079 netdev->vlan_features |= NETIF_F_SG;
5817 6080
6081 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6082 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6083 IXGBE_FLAG_DCB_ENABLED);
5818 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 6084 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
5819 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 6085 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
5820 6086
@@ -5941,6 +6207,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5941 ixgbe_setup_dca(adapter); 6207 ixgbe_setup_dca(adapter);
5942 } 6208 }
5943#endif 6209#endif
6210 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6211 DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
6212 adapter->num_vfs);
6213 for (i = 0; i < adapter->num_vfs; i++)
6214 ixgbe_vf_configuration(pdev, (i | 0x10000000));
6215 }
6216
5944 /* add san mac addr to netdev */ 6217 /* add san mac addr to netdev */
5945 ixgbe_add_sanmac_netdev(netdev); 6218 ixgbe_add_sanmac_netdev(netdev);
5946 6219
@@ -5953,6 +6226,8 @@ err_register:
5953 ixgbe_clear_interrupt_scheme(adapter); 6226 ixgbe_clear_interrupt_scheme(adapter);
5954err_sw_init: 6227err_sw_init:
5955err_eeprom: 6228err_eeprom:
6229 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6230 ixgbe_disable_sriov(adapter);
5956 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 6231 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5957 del_timer_sync(&adapter->sfp_timer); 6232 del_timer_sync(&adapter->sfp_timer);
5958 cancel_work_sync(&adapter->sfp_task); 6233 cancel_work_sync(&adapter->sfp_task);
@@ -6021,6 +6296,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6021 if (netdev->reg_state == NETREG_REGISTERED) 6296 if (netdev->reg_state == NETREG_REGISTERED)
6022 unregister_netdev(netdev); 6297 unregister_netdev(netdev);
6023 6298
6299 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6300 ixgbe_disable_sriov(adapter);
6301
6024 ixgbe_clear_interrupt_scheme(adapter); 6302 ixgbe_clear_interrupt_scheme(adapter);
6025 6303
6026 ixgbe_release_hw_control(adapter); 6304 ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 00000000000..d75f9148eb1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include "ixgbe_type.h"
31#include "ixgbe_common.h"
32#include "ixgbe_mbx.h"
33
34/**
35 * ixgbe_read_mbx - Reads a message from the mailbox
36 * @hw: pointer to the HW structure
37 * @msg: The message buffer
38 * @size: Length of buffer
39 * @mbx_id: id of mailbox to read
40 *
41 * returns SUCCESS if it successfuly read message from buffer
42 **/
43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
44{
45 struct ixgbe_mbx_info *mbx = &hw->mbx;
46 s32 ret_val = IXGBE_ERR_MBX;
47
48 /* limit read to size of mailbox */
49 if (size > mbx->size)
50 size = mbx->size;
51
52 if (mbx->ops.read)
53 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
54
55 return ret_val;
56}
57
58/**
59 * ixgbe_write_mbx - Write a message to the mailbox
60 * @hw: pointer to the HW structure
61 * @msg: The message buffer
62 * @size: Length of buffer
63 * @mbx_id: id of mailbox to write
64 *
65 * returns SUCCESS if it successfully copied message into the buffer
66 **/
67s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
68{
69 struct ixgbe_mbx_info *mbx = &hw->mbx;
70 s32 ret_val = 0;
71
72 if (size > mbx->size)
73 ret_val = IXGBE_ERR_MBX;
74
75 else if (mbx->ops.write)
76 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
77
78 return ret_val;
79}
80
81/**
82 * ixgbe_check_for_msg - checks to see if someone sent us mail
83 * @hw: pointer to the HW structure
84 * @mbx_id: id of mailbox to check
85 *
86 * returns SUCCESS if the Status bit was found or else ERR_MBX
87 **/
88s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
89{
90 struct ixgbe_mbx_info *mbx = &hw->mbx;
91 s32 ret_val = IXGBE_ERR_MBX;
92
93 if (mbx->ops.check_for_msg)
94 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
95
96 return ret_val;
97}
98
99/**
100 * ixgbe_check_for_ack - checks to see if someone sent us ACK
101 * @hw: pointer to the HW structure
102 * @mbx_id: id of mailbox to check
103 *
104 * returns SUCCESS if the Status bit was found or else ERR_MBX
105 **/
106s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
107{
108 struct ixgbe_mbx_info *mbx = &hw->mbx;
109 s32 ret_val = IXGBE_ERR_MBX;
110
111 if (mbx->ops.check_for_ack)
112 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
113
114 return ret_val;
115}
116
117/**
118 * ixgbe_check_for_rst - checks to see if other side has reset
119 * @hw: pointer to the HW structure
120 * @mbx_id: id of mailbox to check
121 *
122 * returns SUCCESS if the Status bit was found or else ERR_MBX
123 **/
124s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
125{
126 struct ixgbe_mbx_info *mbx = &hw->mbx;
127 s32 ret_val = IXGBE_ERR_MBX;
128
129 if (mbx->ops.check_for_rst)
130 ret_val = mbx->ops.check_for_rst(hw, mbx_id);
131
132 return ret_val;
133}
134
135/**
136 * ixgbe_poll_for_msg - Wait for message notification
137 * @hw: pointer to the HW structure
138 * @mbx_id: id of mailbox to write
139 *
140 * returns SUCCESS if it successfully received a message notification
141 **/
142static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
143{
144 struct ixgbe_mbx_info *mbx = &hw->mbx;
145 int countdown = mbx->timeout;
146
147 if (!countdown || !mbx->ops.check_for_msg)
148 goto out;
149
150 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
151 countdown--;
152 if (!countdown)
153 break;
154 udelay(mbx->usec_delay);
155 }
156
157 /* if we failed, all future posted messages fail until reset */
158 if (!countdown)
159 mbx->timeout = 0;
160out:
161 return countdown ? 0 : IXGBE_ERR_MBX;
162}
163
164/**
165 * ixgbe_poll_for_ack - Wait for message acknowledgement
166 * @hw: pointer to the HW structure
167 * @mbx_id: id of mailbox to write
168 *
169 * returns SUCCESS if it successfully received a message acknowledgement
170 **/
171static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
172{
173 struct ixgbe_mbx_info *mbx = &hw->mbx;
174 int countdown = mbx->timeout;
175
176 if (!countdown || !mbx->ops.check_for_ack)
177 goto out;
178
179 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
180 countdown--;
181 if (!countdown)
182 break;
183 udelay(mbx->usec_delay);
184 }
185
186 /* if we failed, all future posted messages fail until reset */
187 if (!countdown)
188 mbx->timeout = 0;
189out:
190 return countdown ? 0 : IXGBE_ERR_MBX;
191}
192
193/**
194 * ixgbe_read_posted_mbx - Wait for message notification and receive message
195 * @hw: pointer to the HW structure
196 * @msg: The message buffer
197 * @size: Length of buffer
198 * @mbx_id: id of mailbox to write
199 *
200 * returns SUCCESS if it successfully received a message notification and
201 * copied it into the receive buffer.
202 **/
203s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
204{
205 struct ixgbe_mbx_info *mbx = &hw->mbx;
206 s32 ret_val = IXGBE_ERR_MBX;
207
208 if (!mbx->ops.read)
209 goto out;
210
211 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
212
213 /* if ack received read message, otherwise we timed out */
214 if (!ret_val)
215 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
216out:
217 return ret_val;
218}
219
220/**
221 * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
222 * @hw: pointer to the HW structure
223 * @msg: The message buffer
224 * @size: Length of buffer
225 * @mbx_id: id of mailbox to write
226 *
227 * returns SUCCESS if it successfully copied message into the buffer and
228 * received an ack to that message within delay * timeout period
229 **/
230s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
231 u16 mbx_id)
232{
233 struct ixgbe_mbx_info *mbx = &hw->mbx;
234 s32 ret_val = IXGBE_ERR_MBX;
235
236 /* exit if either we can't write or there isn't a defined timeout */
237 if (!mbx->ops.write || !mbx->timeout)
238 goto out;
239
240 /* send msg */
241 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
242
243 /* if msg sent wait until we receive an ack */
244 if (!ret_val)
245 ret_val = ixgbe_poll_for_ack(hw, mbx_id);
246out:
247 return ret_val;
248}
249
250/**
251 * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
252 * @hw: pointer to the HW structure
253 *
254 * Setup the mailbox read and write message function pointers
255 **/
256void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
257{
258 struct ixgbe_mbx_info *mbx = &hw->mbx;
259
260 mbx->ops.read_posted = ixgbe_read_posted_mbx;
261 mbx->ops.write_posted = ixgbe_write_posted_mbx;
262}
263
264static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
265{
266 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
267 s32 ret_val = IXGBE_ERR_MBX;
268
269 if (mbvficr & mask) {
270 ret_val = 0;
271 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
272 }
273
274 return ret_val;
275}
276
277/**
278 * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
279 * @hw: pointer to the HW structure
280 * @vf_number: the VF index
281 *
282 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
283 **/
284static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
285{
286 s32 ret_val = IXGBE_ERR_MBX;
287 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
288 u32 vf_bit = vf_number % 16;
289
290 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
291 index)) {
292 ret_val = 0;
293 hw->mbx.stats.reqs++;
294 }
295
296 return ret_val;
297}
298
299/**
300 * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
301 * @hw: pointer to the HW structure
302 * @vf_number: the VF index
303 *
304 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
305 **/
306static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
307{
308 s32 ret_val = IXGBE_ERR_MBX;
309 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
310 u32 vf_bit = vf_number % 16;
311
312 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
313 index)) {
314 ret_val = 0;
315 hw->mbx.stats.acks++;
316 }
317
318 return ret_val;
319}
320
321/**
322 * ixgbe_check_for_rst_pf - checks to see if the VF has reset
323 * @hw: pointer to the HW structure
324 * @vf_number: the VF index
325 *
326 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
327 **/
328static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
329{
330 u32 reg_offset = (vf_number < 32) ? 0 : 1;
331 u32 vf_shift = vf_number % 32;
332 u32 vflre = 0;
333 s32 ret_val = IXGBE_ERR_MBX;
334
335 if (hw->mac.type == ixgbe_mac_82599EB)
336 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
337
338 if (vflre & (1 << vf_shift)) {
339 ret_val = 0;
340 IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
341 hw->mbx.stats.rsts++;
342 }
343
344 return ret_val;
345}
346
347/**
348 * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
349 * @hw: pointer to the HW structure
350 * @vf_number: the VF index
351 *
352 * return SUCCESS if we obtained the mailbox lock
353 **/
354static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
355{
356 s32 ret_val = IXGBE_ERR_MBX;
357 u32 p2v_mailbox;
358
359 /* Take ownership of the buffer */
360 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
361
362 /* reserve mailbox for vf use */
363 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
364 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
365 ret_val = 0;
366
367 return ret_val;
368}
369
370/**
371 * ixgbe_write_mbx_pf - Places a message in the mailbox
372 * @hw: pointer to the HW structure
373 * @msg: The message buffer
374 * @size: Length of buffer
375 * @vf_number: the VF index
376 *
377 * returns SUCCESS if it successfully copied message into the buffer
378 **/
379static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
380 u16 vf_number)
381{
382 s32 ret_val;
383 u16 i;
384
385 /* lock the mailbox to prevent pf/vf race condition */
386 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
387 if (ret_val)
388 goto out_no_write;
389
390 /* flush msg and acks as we are overwriting the message buffer */
391 ixgbe_check_for_msg_pf(hw, vf_number);
392 ixgbe_check_for_ack_pf(hw, vf_number);
393
394 /* copy the caller specified message to the mailbox memory buffer */
395 for (i = 0; i < size; i++)
396 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
397
398 /* Interrupt VF to tell it a message has been sent and release buffer*/
399 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
400
401 /* update stats */
402 hw->mbx.stats.msgs_tx++;
403
404out_no_write:
405 return ret_val;
406
407}
408
409/**
410 * ixgbe_read_mbx_pf - Read a message from the mailbox
411 * @hw: pointer to the HW structure
412 * @msg: The message buffer
413 * @size: Length of buffer
414 * @vf_number: the VF index
415 *
416 * This function copies a message from the mailbox buffer to the caller's
417 * memory buffer. The presumption is that the caller knows that there was
418 * a message due to a VF request so no polling for message is needed.
419 **/
420static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
421 u16 vf_number)
422{
423 s32 ret_val;
424 u16 i;
425
426 /* lock the mailbox to prevent pf/vf race condition */
427 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
428 if (ret_val)
429 goto out_no_read;
430
431 /* copy the message to the mailbox memory buffer */
432 for (i = 0; i < size; i++)
433 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
434
435 /* Acknowledge the message and release buffer */
436 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
437
438 /* update stats */
439 hw->mbx.stats.msgs_rx++;
440
441out_no_read:
442 return ret_val;
443}
444
445/**
446 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
447 * @hw: pointer to the HW structure
448 *
449 * Initializes the hw->mbx struct to correct values for pf mailbox
450 */
451void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
452{
453 struct ixgbe_mbx_info *mbx = &hw->mbx;
454
455 if (hw->mac.type != ixgbe_mac_82599EB)
456 return;
457
458 mbx->timeout = 0;
459 mbx->usec_delay = 0;
460
461 mbx->size = IXGBE_VFMAILBOX_SIZE;
462
463 mbx->stats.msgs_tx = 0;
464 mbx->stats.msgs_rx = 0;
465 mbx->stats.reqs = 0;
466 mbx->stats.acks = 0;
467 mbx->stats.rsts = 0;
468}
469
470struct ixgbe_mbx_operations mbx_ops_82599 = {
471 .read = ixgbe_read_mbx_pf,
472 .write = ixgbe_write_mbx_pf,
473 .read_posted = ixgbe_read_posted_mbx,
474 .write_posted = ixgbe_write_posted_mbx,
475 .check_for_msg = ixgbe_check_for_msg_pf,
476 .check_for_ack = ixgbe_check_for_ack_pf,
477 .check_for_rst = ixgbe_check_for_rst_pf,
478};
479
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 00000000000..be7ab3309ab
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_MBX_H_
29#define _IXGBE_MBX_H_
30
31#include "ixgbe_type.h"
32
33#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
34#define IXGBE_ERR_MBX -100
35
36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200
38
39#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
40#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
41
42#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
43#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
44#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
45#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
46#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
47
48#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
49#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
50#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
51#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
52
53
54/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
55 * PF. The reverse is true if it is IXGBE_PF_*.
56 * Message ACK's are the value or'd with 0xF0000000
57 */
58#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
59 * this are the ACK */
60#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
61 * this are the NACK */
62#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
63 clear to send requests */
64#define IXGBE_VT_MSGINFO_SHIFT 16
65/* bits 23:16 are used for exra info for certain messages */
66#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
67
68#define IXGBE_VF_RESET 0x01 /* VF requests reset */
69#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
70#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
71#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
72#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
73
74/* length of permanent address message returned from PF */
75#define IXGBE_VF_PERMADDR_MSG_LEN 4
76/* word in permanent address message with the current multicast type */
77#define IXGBE_VF_MC_TYPE_WORD 3
78
79#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
80
81#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
82#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
83
84s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
85s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
87s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
88s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
89s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
90s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
91void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
92void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
93
94extern struct ixgbe_mbx_operations mbx_ops_82599;
95
96#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 00000000000..d4cd20f3019
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,362 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/vmalloc.h>
34#include <linux/string.h>
35#include <linux/in.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/ipv6.h>
39#ifdef NETIF_F_HW_VLAN_TX
40#include <linux/if_vlan.h>
41#endif
42
43#include "ixgbe.h"
44
45#include "ixgbe_sriov.h"
46
47int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
48 int entries, u16 *hash_list, u32 vf)
49{
50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
51 int i;
52
53 /* only so many hash values supported */
54 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
55
56 /*
57 * salt away the number of multi cast addresses assigned
58 * to this VF for later use to restore when the PF multi cast
59 * list changes
60 */
61 vfinfo->num_vf_mc_hashes = entries;
62
63 /*
64 * VFs are limited to using the MTA hash table for their multicast
65 * addresses
66 */
67 for (i = 0; i < entries; i++) {
68 vfinfo->vf_mc_hashes[i] = hash_list[i];;
69 }
70
71 /* Flush and reset the mta with the new values */
72 ixgbe_set_rx_mode(adapter->netdev);
73
74 return 0;
75}
76
77void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
78{
79 struct ixgbe_hw *hw = &adapter->hw;
80 struct vf_data_storage *vfinfo;
81 int i, j;
82 u32 vector_bit;
83 u32 vector_reg;
84 u32 mta_reg;
85
86 for (i = 0; i < adapter->num_vfs; i++) {
87 vfinfo = &adapter->vfinfo[i];
88 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
89 hw->addr_ctrl.mta_in_use++;
90 vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
91 vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
92 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
93 mta_reg |= (1 << vector_bit);
94 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
95 }
96 }
97}
98
99int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
100{
101 u32 ctrl;
102
103 /* Check if global VLAN already set, if not set it */
104 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
105 if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
106 /* enable VLAN tag insert/strip */
107 ctrl |= IXGBE_VLNCTRL_VFE;
108 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
110 }
111
112 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
113}
114
115
116void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
117{
118 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
119 vmolr |= (IXGBE_VMOLR_AUPE |
120 IXGBE_VMOLR_ROMPE |
121 IXGBE_VMOLR_ROPE |
122 IXGBE_VMOLR_BAM);
123 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
124}
125
126inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
127{
128 struct ixgbe_hw *hw = &adapter->hw;
129
130 /* reset offloads to defaults */
131 ixgbe_set_vmolr(hw, vf);
132
133
134 /* reset multicast table array for vf */
135 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
136
137 /* Flush and reset the mta with the new values */
138 ixgbe_set_rx_mode(adapter->netdev);
139
140 if (adapter->vfinfo[vf].rar > 0) {
141 adapter->hw.mac.ops.clear_rar(&adapter->hw,
142 adapter->vfinfo[vf].rar);
143 adapter->vfinfo[vf].rar = -1;
144 }
145}
146
147int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
148 int vf, unsigned char *mac_addr)
149{
150 struct ixgbe_hw *hw = &adapter->hw;
151
152 adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
153 vf, IXGBE_RAH_AV);
154 if (adapter->vfinfo[vf].rar < 0) {
155 DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
156 return -1;
157 }
158
159 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
160
161 return 0;
162}
163
164int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
165{
166 unsigned char vf_mac_addr[6];
167 struct net_device *netdev = pci_get_drvdata(pdev);
168 struct ixgbe_adapter *adapter = netdev_priv(netdev);
169 unsigned int vfn = (event_mask & 0x3f);
170
171 bool enable = ((event_mask & 0x10000000U) != 0);
172
173 if (enable) {
174 random_ether_addr(vf_mac_addr);
175 DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
176 "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
177 vfn,
178 vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
179 vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
180 /*
181 * Store away the VF "permananet" MAC address, it will ask
182 * for it later.
183 */
184 memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
185 }
186
187 return 0;
188}
189
190inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
191{
192 struct ixgbe_hw *hw = &adapter->hw;
193 u32 reg;
194 u32 reg_offset, vf_shift;
195
196 vf_shift = vf % 32;
197 reg_offset = vf / 32;
198
199 /* enable transmit and receive for vf */
200 reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
201 reg |= (reg | (1 << vf_shift));
202 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
203
204 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
205 reg |= (reg | (1 << vf_shift));
206 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
207
208 ixgbe_vf_reset_event(adapter, vf);
209}
210
211static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
212{
213 u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
214 u32 msgbuf[mbx_size];
215 struct ixgbe_hw *hw = &adapter->hw;
216 s32 retval;
217 int entries;
218 u16 *hash_list;
219 int add, vid;
220
221 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
222
223 if (retval)
224 printk(KERN_ERR "Error receiving message from VF\n");
225
226 /* this is a message we already processed, do nothing */
227 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
228 return retval;
229
230 /*
231 * until the vf completes a virtual function reset it should not be
232 * allowed to start any configuration.
233 */
234
235 if (msgbuf[0] == IXGBE_VF_RESET) {
236 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
237 u8 *addr = (u8 *)(&msgbuf[1]);
238 DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
239 adapter->vfinfo[vf].clear_to_send = false;
240 ixgbe_vf_reset_msg(adapter, vf);
241 adapter->vfinfo[vf].clear_to_send = true;
242
243 /* reply to reset with ack and vf mac address */
244 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
245 memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
246 /*
247 * Piggyback the multicast filter type so VF can compute the
248 * correct vectors
249 */
250 msgbuf[3] = hw->mac.mc_filter_type;
251 ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
252
253 return retval;
254 }
255
256 if (!adapter->vfinfo[vf].clear_to_send) {
257 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
258 ixgbe_write_mbx(hw, msgbuf, 1, vf);
259 return retval;
260 }
261
262 switch ((msgbuf[0] & 0xFFFF)) {
263 case IXGBE_VF_SET_MAC_ADDR:
264 {
265 u8 *new_mac = ((u8 *)(&msgbuf[1]));
266 if (is_valid_ether_addr(new_mac))
267 ixgbe_set_vf_mac(adapter, vf, new_mac);
268 else
269 retval = -1;
270 }
271 break;
272 case IXGBE_VF_SET_MULTICAST:
273 entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
274 >> IXGBE_VT_MSGINFO_SHIFT;
275 hash_list = (u16 *)&msgbuf[1];
276 retval = ixgbe_set_vf_multicasts(adapter, entries,
277 hash_list, vf);
278 break;
279 case IXGBE_VF_SET_LPE:
280 WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
281 break;
282 case IXGBE_VF_SET_VLAN:
283 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
284 >> IXGBE_VT_MSGINFO_SHIFT;
285 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
286 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
287 break;
288 default:
289 DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
290 retval = IXGBE_ERR_MBX;
291 break;
292 }
293
294 /* notify the VF of the results of what it sent us */
295 if (retval)
296 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
297 else
298 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
299
300 msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
301
302 ixgbe_write_mbx(hw, msgbuf, 1, vf);
303
304 return retval;
305}
306
307static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
308{
309 struct ixgbe_hw *hw = &adapter->hw;
310 u32 msg = IXGBE_VT_MSGTYPE_NACK;
311
312 /* if device isn't clear to send it shouldn't be reading either */
313 if (!adapter->vfinfo[vf].clear_to_send)
314 ixgbe_write_mbx(hw, &msg, 1, vf);
315}
316
317void ixgbe_msg_task(struct ixgbe_adapter *adapter)
318{
319 struct ixgbe_hw *hw = &adapter->hw;
320 u32 vf;
321
322 for (vf = 0; vf < adapter->num_vfs; vf++) {
323 /* process any reset requests */
324 if (!ixgbe_check_for_rst(hw, vf))
325 ixgbe_vf_reset_event(adapter, vf);
326
327 /* process any messages pending */
328 if (!ixgbe_check_for_msg(hw, vf))
329 ixgbe_rcv_msg_from_vf(adapter, vf);
330
331 /* process any acks */
332 if (!ixgbe_check_for_ack(hw, vf))
333 ixgbe_rcv_ack_from_vf(adapter, vf);
334 }
335}
336
337void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
338{
339 struct ixgbe_hw *hw = &adapter->hw;
340
341 /* disable transmit and receive for all vfs */
342 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
343 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
344
345 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
346 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
347}
348
349void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
350{
351 struct ixgbe_hw *hw = &adapter->hw;
352 u32 ping;
353 int i;
354
355 for (i = 0 ; i < adapter->num_vfs; i++) {
356 ping = IXGBE_PF_CONTROL_MSG;
357 if (adapter->vfinfo[i].clear_to_send)
358 ping |= IXGBE_VT_MSGTYPE_CTS;
359 ixgbe_write_mbx(hw, &ping, 1, i);
360 }
361}
362
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 00000000000..51d1106c45a
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_SRIOV_H_
29#define _IXGBE_SRIOV_H_
30
31int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
40 int vf, unsigned char *mac_addr);
41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
45
46#endif /* _IXGBE_SRIOV_H_ */
47
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9eafddfa1b9..0db67c19b2c 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -30,7 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/mdio.h> 32#include <linux/mdio.h>
33#include <linux/list.h> 33#include <linux/netdevice.h>
34 34
35/* Vendor ID */ 35/* Vendor ID */
36#define IXGBE_INTEL_VENDOR_ID 0x8086 36#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -277,6 +277,7 @@
277#define IXGBE_DTXCTL 0x07E00 277#define IXGBE_DTXCTL 0x07E00
278 278
279#define IXGBE_DMATXCTL 0x04A80 279#define IXGBE_DMATXCTL 0x04A80
280#define IXGBE_PFDTXGSWC 0x08220
280#define IXGBE_DTXMXSZRQ 0x08100 281#define IXGBE_DTXMXSZRQ 0x08100
281#define IXGBE_DTXTCPFLGL 0x04A88 282#define IXGBE_DTXTCPFLGL 0x04A88
282#define IXGBE_DTXTCPFLGH 0x04A8C 283#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
287#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ 288#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
288#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ 289#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
289#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ 290#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
291
292#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
290#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 293#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
291/* Tx DCA Control register : 128 of these (0-127) */ 294/* Tx DCA Control register : 128 of these (0-127) */
292#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) 295#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
497/* DCB registers */ 500/* DCB registers */
498#define IXGBE_RTRPCS 0x02430 501#define IXGBE_RTRPCS 0x02430
499#define IXGBE_RTTDCS 0x04900 502#define IXGBE_RTTDCS 0x04900
503#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
500#define IXGBE_RTTPCS 0x0CD00 504#define IXGBE_RTTPCS 0x0CD00
501#define IXGBE_RTRUP2TC 0x03020 505#define IXGBE_RTRUP2TC 0x03020
502#define IXGBE_RTTUP2TC 0x0C800 506#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
730#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 734#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
731#define IXGBE_GCR_CAP_VER2 0x00040000 735#define IXGBE_GCR_CAP_VER2 0x00040000
732 736
737#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
738#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
739#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
740#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
741#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
742 IXGBE_GCR_EXT_VT_MODE_64)
743
733/* Time Sync Registers */ 744/* Time Sync Registers */
734#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 745#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
735#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ 746#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
1065/* VFRE bitmask */ 1076/* VFRE bitmask */
1066#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF 1077#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
1067 1078
1079#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
1080
1068/* RDHMPN and TDHMPN bitmasks */ 1081/* RDHMPN and TDHMPN bitmasks */
1069#define IXGBE_RDHMPN_RDICADDR 0x007FF800 1082#define IXGBE_RDHMPN_RDICADDR 0x007FF800
1070#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 1083#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1295,6 +1308,7 @@
1295/* VLAN pool filtering masks */ 1308/* VLAN pool filtering masks */
1296#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ 1309#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
1297#define IXGBE_VLVF_ENTRIES 64 1310#define IXGBE_VLVF_ENTRIES 64
1311#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
1298 1312
1299#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 1313#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
1300 1314
@@ -1843,6 +1857,12 @@
1843#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ 1857#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
1844#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 1858#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
1845 1859
1860/* SR-IOV specific macros */
1861#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
1862#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
1863#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
1864#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
1865
1846/* Little Endian defines */ 1866/* Little Endian defines */
1847#ifndef __le32 1867#ifndef __le32
1848#define __le32 u32 1868#define __le32 u32
@@ -2385,7 +2405,7 @@ struct ixgbe_mac_operations {
2385 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2405 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2386 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2406 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2387 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2407 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2388 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *); 2408 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2389 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2409 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
2390 ixgbe_mc_addr_itr); 2410 ixgbe_mc_addr_itr);
2391 s32 (*enable_mc)(struct ixgbe_hw *); 2411 s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2463,6 +2483,37 @@ struct ixgbe_phy_info {
2463 bool multispeed_fiber; 2483 bool multispeed_fiber;
2464}; 2484};
2465 2485
2486#include "ixgbe_mbx.h"
2487
2488struct ixgbe_mbx_operations {
2489 s32 (*init_params)(struct ixgbe_hw *hw);
2490 s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
2491 s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
2492 s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
2493 s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
2494 s32 (*check_for_msg)(struct ixgbe_hw *, u16);
2495 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
2496 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
2497};
2498
2499struct ixgbe_mbx_stats {
2500 u32 msgs_tx;
2501 u32 msgs_rx;
2502
2503 u32 acks;
2504 u32 reqs;
2505 u32 rsts;
2506};
2507
2508struct ixgbe_mbx_info {
2509 struct ixgbe_mbx_operations ops;
2510 struct ixgbe_mbx_stats stats;
2511 u32 timeout;
2512 u32 usec_delay;
2513 u32 v2p_mailbox;
2514 u16 size;
2515};
2516
2466struct ixgbe_hw { 2517struct ixgbe_hw {
2467 u8 __iomem *hw_addr; 2518 u8 __iomem *hw_addr;
2468 void *back; 2519 void *back;
@@ -2472,6 +2523,7 @@ struct ixgbe_hw {
2472 struct ixgbe_phy_info phy; 2523 struct ixgbe_phy_info phy;
2473 struct ixgbe_eeprom_info eeprom; 2524 struct ixgbe_eeprom_info eeprom;
2474 struct ixgbe_bus_info bus; 2525 struct ixgbe_bus_info bus;
2526 struct ixgbe_mbx_info mbx;
2475 u16 device_id; 2527 u16 device_id;
2476 u16 vendor_id; 2528 u16 vendor_id;
2477 u16 subsystem_device_id; 2529 u16 subsystem_device_id;
@@ -2486,6 +2538,7 @@ struct ixgbe_info {
2486 struct ixgbe_mac_operations *mac_ops; 2538 struct ixgbe_mac_operations *mac_ops;
2487 struct ixgbe_eeprom_operations *eeprom_ops; 2539 struct ixgbe_eeprom_operations *eeprom_ops;
2488 struct ixgbe_phy_operations *phy_ops; 2540 struct ixgbe_phy_operations *phy_ops;
2541 struct ixgbe_mbx_operations *mbx_ops;
2489}; 2542};
2490 2543
2491 2544
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 00000000000..dd4e0d27e8c
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
1################################################################################
2#
3# Intel 82599 Virtual Function driver
4# Copyright(c) 1999 - 2009 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27
28#
29# Makefile for the Intel(R) 82599 VF ethernet driver
30#
31
32obj-$(CONFIG_IXGBEVF) += ixgbevf.o
33
34ixgbevf-objs := vf.o \
35 mbx.o \
36 ethtool.o \
37 ixgbevf_main.o
38
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 00000000000..c44fdb05447
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_DEFINES_H_
29#define _IXGBEVF_DEFINES_H_
30
31/* Device IDs */
32#define IXGBE_DEV_ID_82599_VF 0x10ED
33
34#define IXGBE_VF_IRQ_CLEAR_MASK 7
35#define IXGBE_VF_MAX_TX_QUEUES 1
36#define IXGBE_VF_MAX_RX_QUEUES 1
37#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
38
39/* Link speed */
40typedef u32 ixgbe_link_speed;
41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
43
44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
46#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
47#define IXGBE_LINKS_UP 0x40000000
48#define IXGBE_LINKS_SPEED 0x20000000
49
50/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
51#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
52#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
53#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
54
55/* Interrupt Vector Allocation Registers */
56#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
57
58#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
59
60/* Receive Config masks */
61#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
62#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
63#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
64#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
65
66/* DCA Control */
67#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
68
69/* PSRTYPE bit definitions */
70#define IXGBE_PSRTYPE_TCPHDR 0x00000010
71#define IXGBE_PSRTYPE_UDPHDR 0x00000020
72#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
73#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
74#define IXGBE_PSRTYPE_L2HDR 0x00001000
75
76/* SRRCTL bit definitions */
77#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
78#define IXGBE_SRRCTL_RDMTS_SHIFT 22
79#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
80#define IXGBE_SRRCTL_DROP_EN 0x10000000
81#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
82#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
83#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
84#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
85#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
86#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
87#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
88#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
89
90/* Receive Descriptor bit definitions */
91#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
92#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
93#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
94#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
95#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
96#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
97#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
98#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
99#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
100#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
101#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
102#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
103#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
104#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
105#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
106#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
107#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
108#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
109#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
110#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
111#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
112#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
113#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
114#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
115#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
116#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
117#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
118#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
119#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
120#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
121#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
122#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
123#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
124#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
125#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
126#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
127#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
128#define IXGBE_RXD_PRI_SHIFT 13
129#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
130#define IXGBE_RXD_CFI_SHIFT 12
131
132#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
133#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
134#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
135#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
136#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
137#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
138#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
139#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
140#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
141#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
142#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
143
144#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
145#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
146#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
147#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
148#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
149#define IXGBE_RXDADV_RSCCNT_SHIFT 17
150#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
151#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
152#define IXGBE_RXDADV_SPH 0x8000
153
154#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
155 IXGBE_RXD_ERR_CE | \
156 IXGBE_RXD_ERR_LE | \
157 IXGBE_RXD_ERR_PE | \
158 IXGBE_RXD_ERR_OSE | \
159 IXGBE_RXD_ERR_USE)
160
161#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
162 IXGBE_RXDADV_ERR_CE | \
163 IXGBE_RXDADV_ERR_LE | \
164 IXGBE_RXDADV_ERR_PE | \
165 IXGBE_RXDADV_ERR_OSE | \
166 IXGBE_RXDADV_ERR_USE)
167
168#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
169#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
170#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
171#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
172#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
173#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
174#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
175#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
176#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
177
178/* Transmit Descriptor - Advanced */
179union ixgbe_adv_tx_desc {
180 struct {
181 __le64 buffer_addr; /* Address of descriptor's data buf */
182 __le32 cmd_type_len;
183 __le32 olinfo_status;
184 } read;
185 struct {
186 __le64 rsvd; /* Reserved */
187 __le32 nxtseq_seed;
188 __le32 status;
189 } wb;
190};
191
192/* Receive Descriptor - Advanced */
193union ixgbe_adv_rx_desc {
194 struct {
195 __le64 pkt_addr; /* Packet buffer address */
196 __le64 hdr_addr; /* Header buffer address */
197 } read;
198 struct {
199 struct {
200 union {
201 __le32 data;
202 struct {
203 __le16 pkt_info; /* RSS, Pkt type */
204 __le16 hdr_info; /* Splithdr, hdrlen */
205 } hs_rss;
206 } lo_dword;
207 union {
208 __le32 rss; /* RSS Hash */
209 struct {
210 __le16 ip_id; /* IP id */
211 __le16 csum; /* Packet Checksum */
212 } csum_ip;
213 } hi_dword;
214 } lower;
215 struct {
216 __le32 status_error; /* ext status/error */
217 __le16 length; /* Packet length */
218 __le16 vlan; /* VLAN tag */
219 } upper;
220 } wb; /* writeback */
221};
222
223/* Context descriptors */
224struct ixgbe_adv_tx_context_desc {
225 __le32 vlan_macip_lens;
226 __le32 seqnum_seed;
227 __le32 type_tucmd_mlhl;
228 __le32 mss_l4len_idx;
229};
230
231/* Adv Transmit Descriptor Config Masks */
232#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
233#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
234#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
235#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
236#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
237#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
238#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
239#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
240#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
241#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
242#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
243#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
244#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
245#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
246#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
247#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
248#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
249#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
250 IXGBE_ADVTXD_POPTS_SHIFT)
251#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
252 IXGBE_ADVTXD_POPTS_SHIFT)
253#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
254#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
255#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
256#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
257#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
258
259/* Interrupt register bitmasks */
260
261/* Extended Interrupt Cause Read */
262#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
263#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
264#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
265
266/* Extended Interrupt Cause Set */
267#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
268#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
269#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
270
271/* Extended Interrupt Mask Set */
272#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
273#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
274#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
275
276/* Extended Interrupt Mask Clear */
277#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
278#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
279#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
280
281#define IXGBE_EIMS_ENABLE_MASK ( \
282 IXGBE_EIMS_RTX_QUEUE | \
283 IXGBE_EIMS_MAILBOX | \
284 IXGBE_EIMS_OTHER)
285
286#define IXGBE_EITR_CNT_WDIS 0x80000000
287
288/* Error Codes */
289#define IXGBE_ERR_INVALID_MAC_ADDR -1
290#define IXGBE_ERR_RESET_FAILED -2
291
292#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 00000000000..399be0c34c3
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,716 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for ixgbevf */
29
30#include <linux/types.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/ethtool.h>
35#include <linux/vmalloc.h>
36#include <linux/if_vlan.h>
37#include <linux/uaccess.h>
38
39#include "ixgbevf.h"
40
41#define IXGBE_ALL_RAR_ENTRIES 16
42
43#ifdef ETHTOOL_GSTATS
44struct ixgbe_stats {
45 char stat_string[ETH_GSTRING_LEN];
46 int sizeof_stat;
47 int stat_offset;
48 int base_stat_offset;
49};
50
51#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \
52 offsetof(struct ixgbevf_adapter, m), \
53 offsetof(struct ixgbevf_adapter, b)
54static struct ixgbe_stats ixgbe_gstrings_stats[] = {
55 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
56 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
57 {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
58 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
59 {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
60 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
61 {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
62 {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
63 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
64 {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
65};
66
67#define IXGBE_QUEUE_STATS_LEN 0
68#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
69
70#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
71#endif /* ETHTOOL_GSTATS */
72#ifdef ETHTOOL_TEST
73static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
74 "Register test (offline)",
75 "Link test (on/offline)"
76};
77#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
78#endif /* ETHTOOL_TEST */
79
80static int ixgbevf_get_settings(struct net_device *netdev,
81 struct ethtool_cmd *ecmd)
82{
83 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
84 struct ixgbe_hw *hw = &adapter->hw;
85 u32 link_speed = 0;
86 bool link_up;
87
88 ecmd->supported = SUPPORTED_10000baseT_Full;
89 ecmd->autoneg = AUTONEG_DISABLE;
90 ecmd->transceiver = XCVR_DUMMY1;
91 ecmd->port = -1;
92
93 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
94
95 if (link_up) {
96 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
97 SPEED_10000 : SPEED_1000;
98 ecmd->duplex = DUPLEX_FULL;
99 } else {
100 ecmd->speed = -1;
101 ecmd->duplex = -1;
102 }
103
104 return 0;
105}
106
107static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
108{
109 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
110 return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
111}
112
113static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
114{
115 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
116 if (data)
117 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
118 else
119 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
120
121 if (netif_running(netdev)) {
122 if (!adapter->dev_closed)
123 ixgbevf_reinit_locked(adapter);
124 } else {
125 ixgbevf_reset(adapter);
126 }
127
128 return 0;
129}
130
131static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
132{
133 if (data) {
134 netdev->features |= NETIF_F_TSO;
135 netdev->features |= NETIF_F_TSO6;
136 } else {
137 netif_tx_stop_all_queues(netdev);
138 netdev->features &= ~NETIF_F_TSO;
139 netdev->features &= ~NETIF_F_TSO6;
140 netif_tx_start_all_queues(netdev);
141 }
142 return 0;
143}
144
145static u32 ixgbevf_get_msglevel(struct net_device *netdev)
146{
147 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
148 return adapter->msg_enable;
149}
150
151static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
152{
153 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
154 adapter->msg_enable = data;
155}
156
157#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
158
159static char *ixgbevf_reg_names[] = {
160 "IXGBE_VFCTRL",
161 "IXGBE_VFSTATUS",
162 "IXGBE_VFLINKS",
163 "IXGBE_VFRXMEMWRAP",
164 "IXGBE_VFRTIMER",
165 "IXGBE_VTEICR",
166 "IXGBE_VTEICS",
167 "IXGBE_VTEIMS",
168 "IXGBE_VTEIMC",
169 "IXGBE_VTEIAC",
170 "IXGBE_VTEIAM",
171 "IXGBE_VTEITR",
172 "IXGBE_VTIVAR",
173 "IXGBE_VTIVAR_MISC",
174 "IXGBE_VFRDBAL0",
175 "IXGBE_VFRDBAL1",
176 "IXGBE_VFRDBAH0",
177 "IXGBE_VFRDBAH1",
178 "IXGBE_VFRDLEN0",
179 "IXGBE_VFRDLEN1",
180 "IXGBE_VFRDH0",
181 "IXGBE_VFRDH1",
182 "IXGBE_VFRDT0",
183 "IXGBE_VFRDT1",
184 "IXGBE_VFRXDCTL0",
185 "IXGBE_VFRXDCTL1",
186 "IXGBE_VFSRRCTL0",
187 "IXGBE_VFSRRCTL1",
188 "IXGBE_VFPSRTYPE",
189 "IXGBE_VFTDBAL0",
190 "IXGBE_VFTDBAL1",
191 "IXGBE_VFTDBAH0",
192 "IXGBE_VFTDBAH1",
193 "IXGBE_VFTDLEN0",
194 "IXGBE_VFTDLEN1",
195 "IXGBE_VFTDH0",
196 "IXGBE_VFTDH1",
197 "IXGBE_VFTDT0",
198 "IXGBE_VFTDT1",
199 "IXGBE_VFTXDCTL0",
200 "IXGBE_VFTXDCTL1",
201 "IXGBE_VFTDWBAL0",
202 "IXGBE_VFTDWBAL1",
203 "IXGBE_VFTDWBAH0",
204 "IXGBE_VFTDWBAH1"
205};
206
207
208static int ixgbevf_get_regs_len(struct net_device *netdev)
209{
210 return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
211}
212
213static void ixgbevf_get_regs(struct net_device *netdev,
214 struct ethtool_regs *regs,
215 void *p)
216{
217 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
218 struct ixgbe_hw *hw = &adapter->hw;
219 u32 *regs_buff = p;
220 u32 regs_len = ixgbevf_get_regs_len(netdev);
221 u8 i;
222
223 memset(p, 0, regs_len);
224
225 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
226
227 /* General Registers */
228 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
229 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
230 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
231 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
232 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
233
234 /* Interrupt */
235 /* don't read EICR because it can clear interrupt causes, instead
236 * read EICS which is a shadow but doesn't clear EICR */
237 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
238 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
239 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
240 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
241 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
242 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
243 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
244 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
245 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
246
247 /* Receive DMA */
248 for (i = 0; i < 2; i++)
249 regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
250 for (i = 0; i < 2; i++)
251 regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
252 for (i = 0; i < 2; i++)
253 regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
254 for (i = 0; i < 2; i++)
255 regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
256 for (i = 0; i < 2; i++)
257 regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
258 for (i = 0; i < 2; i++)
259 regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
260 for (i = 0; i < 2; i++)
261 regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
262
263 /* Receive */
264 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
265
266 /* Transmit */
267 for (i = 0; i < 2; i++)
268 regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
269 for (i = 0; i < 2; i++)
270 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
271 for (i = 0; i < 2; i++)
272 regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
273 for (i = 0; i < 2; i++)
274 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
275 for (i = 0; i < 2; i++)
276 regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
277 for (i = 0; i < 2; i++)
278 regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
279 for (i = 0; i < 2; i++)
280 regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
281 for (i = 0; i < 2; i++)
282 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
283
284 for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
285 hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
286}
287
288static void ixgbevf_get_drvinfo(struct net_device *netdev,
289 struct ethtool_drvinfo *drvinfo)
290{
291 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
292
293 strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
294 strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
295
296 strlcpy(drvinfo->fw_version, "N/A", 4);
297 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
298}
299
300static void ixgbevf_get_ringparam(struct net_device *netdev,
301 struct ethtool_ringparam *ring)
302{
303 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
304 struct ixgbevf_ring *tx_ring = adapter->tx_ring;
305 struct ixgbevf_ring *rx_ring = adapter->rx_ring;
306
307 ring->rx_max_pending = IXGBEVF_MAX_RXD;
308 ring->tx_max_pending = IXGBEVF_MAX_TXD;
309 ring->rx_mini_max_pending = 0;
310 ring->rx_jumbo_max_pending = 0;
311 ring->rx_pending = rx_ring->count;
312 ring->tx_pending = tx_ring->count;
313 ring->rx_mini_pending = 0;
314 ring->rx_jumbo_pending = 0;
315}
316
317static int ixgbevf_set_ringparam(struct net_device *netdev,
318 struct ethtool_ringparam *ring)
319{
320 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
321 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
322 int i, err;
323 u32 new_rx_count, new_tx_count;
324 bool need_tx_update = false;
325 bool need_rx_update = false;
326
327 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
328 return -EINVAL;
329
330 new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
331 new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
332 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
333
334 new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
335 new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
336 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
337
338 if ((new_tx_count == adapter->tx_ring->count) &&
339 (new_rx_count == adapter->rx_ring->count)) {
340 /* nothing to do */
341 return 0;
342 }
343
344 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
345 msleep(1);
346
347 if (new_tx_count != adapter->tx_ring_count) {
348 tx_ring = kcalloc(adapter->num_tx_queues,
349 sizeof(struct ixgbevf_ring), GFP_KERNEL);
350 if (!tx_ring) {
351 err = -ENOMEM;
352 goto err_setup;
353 }
354 memcpy(tx_ring, adapter->tx_ring,
355 adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
356 for (i = 0; i < adapter->num_tx_queues; i++) {
357 tx_ring[i].count = new_tx_count;
358 err = ixgbevf_setup_tx_resources(adapter,
359 &tx_ring[i]);
360 if (err) {
361 while (i) {
362 i--;
363 ixgbevf_free_tx_resources(adapter,
364 &tx_ring[i]);
365 }
366 kfree(tx_ring);
367 goto err_setup;
368 }
369 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
370 }
371 need_tx_update = true;
372 }
373
374 if (new_rx_count != adapter->rx_ring_count) {
375 rx_ring = kcalloc(adapter->num_rx_queues,
376 sizeof(struct ixgbevf_ring), GFP_KERNEL);
377 if ((!rx_ring) && (need_tx_update)) {
378 err = -ENOMEM;
379 goto err_rx_setup;
380 }
381 memcpy(rx_ring, adapter->rx_ring,
382 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
383 for (i = 0; i < adapter->num_rx_queues; i++) {
384 rx_ring[i].count = new_rx_count;
385 err = ixgbevf_setup_rx_resources(adapter,
386 &rx_ring[i]);
387 if (err) {
388 while (i) {
389 i--;
390 ixgbevf_free_rx_resources(adapter,
391 &rx_ring[i]);
392 }
393 kfree(rx_ring);
394 goto err_rx_setup;
395 }
396 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
397 }
398 need_rx_update = true;
399 }
400
401err_rx_setup:
402 /* if rings need to be updated, here's the place to do it in one shot */
403 if (need_tx_update || need_rx_update) {
404 if (netif_running(netdev))
405 ixgbevf_down(adapter);
406 }
407
408 /* tx */
409 if (need_tx_update) {
410 kfree(adapter->tx_ring);
411 adapter->tx_ring = tx_ring;
412 tx_ring = NULL;
413 adapter->tx_ring_count = new_tx_count;
414 }
415
416 /* rx */
417 if (need_rx_update) {
418 kfree(adapter->rx_ring);
419 adapter->rx_ring = rx_ring;
420 rx_ring = NULL;
421 adapter->rx_ring_count = new_rx_count;
422 }
423
424 /* success! */
425 err = 0;
426 if (netif_running(netdev))
427 ixgbevf_up(adapter);
428
429err_setup:
430 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
431 return err;
432}
433
434static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
435{
436 switch (stringset) {
437 case ETH_SS_TEST:
438 return IXGBE_TEST_LEN;
439 case ETH_SS_STATS:
440 return IXGBE_GLOBAL_STATS_LEN;
441 default:
442 return -EINVAL;
443 }
444}
445
446static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
447 struct ethtool_stats *stats, u64 *data)
448{
449 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
450 int i;
451
452 ixgbevf_update_stats(adapter);
453 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
454 char *p = (char *)adapter +
455 ixgbe_gstrings_stats[i].stat_offset;
456 char *b = (char *)adapter +
457 ixgbe_gstrings_stats[i].base_stat_offset;
458 data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
459 sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
460 ((ixgbe_gstrings_stats[i].sizeof_stat ==
461 sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
462 }
463}
464
465static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
466 u8 *data)
467{
468 char *p = (char *)data;
469 int i;
470
471 switch (stringset) {
472 case ETH_SS_TEST:
473 memcpy(data, *ixgbe_gstrings_test,
474 IXGBE_TEST_LEN * ETH_GSTRING_LEN);
475 break;
476 case ETH_SS_STATS:
477 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
478 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
479 ETH_GSTRING_LEN);
480 p += ETH_GSTRING_LEN;
481 }
482 break;
483 }
484}
485
486static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
487{
488 struct ixgbe_hw *hw = &adapter->hw;
489 bool link_up;
490 u32 link_speed = 0;
491 *data = 0;
492
493 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
494 if (!link_up)
495 *data = 1;
496
497 return *data;
498}
499
500/* ethtool register test data */
501struct ixgbevf_reg_test {
502 u16 reg;
503 u8 array_len;
504 u8 test_type;
505 u32 mask;
506 u32 write;
507};
508
509/* In the hardware, registers are laid out either singly, in arrays
510 * spaced 0x40 bytes apart, or in contiguous tables. We assume
511 * most tests take place on arrays or single registers (handled
512 * as a single-element array) and special-case the tables.
513 * Table tests are always pattern tests.
514 *
515 * We also make provision for some required setup steps by specifying
516 * registers to be written without any read-back testing.
517 */
518
519#define PATTERN_TEST 1
520#define SET_READ_TEST 2
521#define WRITE_NO_TEST 3
522#define TABLE32_TEST 4
523#define TABLE64_TEST_LO 5
524#define TABLE64_TEST_HI 6
525
526/* default VF register test */
527static struct ixgbevf_reg_test reg_test_vf[] = {
528 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
529 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
530 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
531 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
532 { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
533 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
534 { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
535 { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
536 { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
537 { 0, 0, 0, 0 }
538};
539
/*
 * REG_PATTERN_TEST - write/read-back pattern test for one register.
 *
 * Expands inside ixgbevf_reg_test(): it uses that function's local
 * 'adapter' and 'data' variables, and on mismatch it stores the failing
 * register offset in *data and RETURNS 1 from the *enclosing function*
 * (hidden control flow — beware when reading the call site).
 * The register's original value is restored after every pattern.
 */
#define REG_PATTERN_TEST(R, M, W) \
{ \
	u32 pat, val, before; \
	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
		before = readl(adapter->hw.hw_addr + R); \
		writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
		val = readl(adapter->hw.hw_addr + R); \
		if (val != (_test[pat] & W & M)) { \
			hw_dbg(&adapter->hw, \
			       "pattern test reg %04X failed: got " \
			       "0x%08X expected 0x%08X\n", \
			       R, val, (_test[pat] & W & M)); \
			*data = R; \
			writel(before, adapter->hw.hw_addr + R); \
			return 1; \
		} \
		writel(before, adapter->hw.hw_addr + R); \
	} \
}
560
/*
 * REG_SET_AND_CHECK - masked set/read-back test for one register.
 *
 * Like REG_PATTERN_TEST, this expands inside ixgbevf_reg_test() and on
 * failure writes the register offset to *data and RETURNS 1 from the
 * enclosing function (hidden control flow).  The original register value
 * is restored on both the pass and fail paths.
 */
#define REG_SET_AND_CHECK(R, M, W) \
{ \
	u32 val, before; \
	before = readl(adapter->hw.hw_addr + R); \
	writel((W & M), (adapter->hw.hw_addr + R)); \
	val = readl(adapter->hw.hw_addr + R); \
	if ((W & M) != (val & M)) { \
		printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
		       "expected 0x%08X\n", R, (val & M), (W & M)); \
		*data = R; \
		writel(before, (adapter->hw.hw_addr + R)); \
		return 1; \
	} \
	writel(before, (adapter->hw.hw_addr + R)); \
}
576
577static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
578{
579 struct ixgbevf_reg_test *test;
580 u32 i;
581
582 test = reg_test_vf;
583
584 /*
585 * Perform the register test, looping through the test table
586 * until we either fail or reach the null entry.
587 */
588 while (test->reg) {
589 for (i = 0; i < test->array_len; i++) {
590 switch (test->test_type) {
591 case PATTERN_TEST:
592 REG_PATTERN_TEST(test->reg + (i * 0x40),
593 test->mask,
594 test->write);
595 break;
596 case SET_READ_TEST:
597 REG_SET_AND_CHECK(test->reg + (i * 0x40),
598 test->mask,
599 test->write);
600 break;
601 case WRITE_NO_TEST:
602 writel(test->write,
603 (adapter->hw.hw_addr + test->reg)
604 + (i * 0x40));
605 break;
606 case TABLE32_TEST:
607 REG_PATTERN_TEST(test->reg + (i * 4),
608 test->mask,
609 test->write);
610 break;
611 case TABLE64_TEST_LO:
612 REG_PATTERN_TEST(test->reg + (i * 8),
613 test->mask,
614 test->write);
615 break;
616 case TABLE64_TEST_HI:
617 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
618 test->mask,
619 test->write);
620 break;
621 }
622 }
623 test++;
624 }
625
626 *data = 0;
627 return *data;
628}
629
630static void ixgbevf_diag_test(struct net_device *netdev,
631 struct ethtool_test *eth_test, u64 *data)
632{
633 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
634 bool if_running = netif_running(netdev);
635
636 set_bit(__IXGBEVF_TESTING, &adapter->state);
637 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
638 /* Offline tests */
639
640 hw_dbg(&adapter->hw, "offline testing starting\n");
641
642 /* Link test performed before hardware reset so autoneg doesn't
643 * interfere with test result */
644 if (ixgbevf_link_test(adapter, &data[1]))
645 eth_test->flags |= ETH_TEST_FL_FAILED;
646
647 if (if_running)
648 /* indicate we're in test mode */
649 dev_close(netdev);
650 else
651 ixgbevf_reset(adapter);
652
653 hw_dbg(&adapter->hw, "register testing starting\n");
654 if (ixgbevf_reg_test(adapter, &data[0]))
655 eth_test->flags |= ETH_TEST_FL_FAILED;
656
657 ixgbevf_reset(adapter);
658
659 clear_bit(__IXGBEVF_TESTING, &adapter->state);
660 if (if_running)
661 dev_open(netdev);
662 } else {
663 hw_dbg(&adapter->hw, "online testing starting\n");
664 /* Online tests */
665 if (ixgbevf_link_test(adapter, &data[1]))
666 eth_test->flags |= ETH_TEST_FL_FAILED;
667
668 /* Online tests aren't run; pass by default */
669 data[0] = 0;
670
671 clear_bit(__IXGBEVF_TESTING, &adapter->state);
672 }
673 msleep_interruptible(4 * 1000);
674}
675
676static int ixgbevf_nway_reset(struct net_device *netdev)
677{
678 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
679
680 if (netif_running(netdev)) {
681 if (!adapter->dev_closed)
682 ixgbevf_reinit_locked(adapter);
683 }
684
685 return 0;
686}
687
688static struct ethtool_ops ixgbevf_ethtool_ops = {
689 .get_settings = ixgbevf_get_settings,
690 .get_drvinfo = ixgbevf_get_drvinfo,
691 .get_regs_len = ixgbevf_get_regs_len,
692 .get_regs = ixgbevf_get_regs,
693 .nway_reset = ixgbevf_nway_reset,
694 .get_link = ethtool_op_get_link,
695 .get_ringparam = ixgbevf_get_ringparam,
696 .set_ringparam = ixgbevf_set_ringparam,
697 .get_rx_csum = ixgbevf_get_rx_csum,
698 .set_rx_csum = ixgbevf_set_rx_csum,
699 .get_tx_csum = ethtool_op_get_tx_csum,
700 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
701 .get_sg = ethtool_op_get_sg,
702 .set_sg = ethtool_op_set_sg,
703 .get_msglevel = ixgbevf_get_msglevel,
704 .set_msglevel = ixgbevf_set_msglevel,
705 .get_tso = ethtool_op_get_tso,
706 .set_tso = ixgbevf_set_tso,
707 .self_test = ixgbevf_diag_test,
708 .get_sset_count = ixgbevf_get_sset_count,
709 .get_strings = ixgbevf_get_strings,
710 .get_ethtool_stats = ixgbevf_get_ethtool_stats,
711};
712
/* Attach the ixgbevf ethtool operations to a freshly created netdev;
 * called from the probe path before register_netdev(). */
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 00000000000..f7015efbff0
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_H_
29#define _IXGBEVF_H_
30
31#include <linux/types.h>
32#include <linux/timer.h>
33#include <linux/io.h>
34#include <linux/netdevice.h>
35
36#include "vf.h"
37
38/* wrapper around a pointer to a socket buffer,
39 * so a DMA handle can be stored along with the buffer */
40struct ixgbevf_tx_buffer {
41 struct sk_buff *skb;
42 dma_addr_t dma;
43 unsigned long time_stamp;
44 u16 length;
45 u16 next_to_watch;
46 u16 mapped_as_page;
47};
48
49struct ixgbevf_rx_buffer {
50 struct sk_buff *skb;
51 dma_addr_t dma;
52 struct page *page;
53 dma_addr_t page_dma;
54 unsigned int page_offset;
55};
56
57struct ixgbevf_ring {
58 struct ixgbevf_adapter *adapter; /* backlink */
59 void *desc; /* descriptor ring memory */
60 dma_addr_t dma; /* phys. address of descriptor ring */
61 unsigned int size; /* length in bytes */
62 unsigned int count; /* amount of descriptors */
63 unsigned int next_to_use;
64 unsigned int next_to_clean;
65
66 int queue_index; /* needed for multiqueue queue management */
67 union {
68 struct ixgbevf_tx_buffer *tx_buffer_info;
69 struct ixgbevf_rx_buffer *rx_buffer_info;
70 };
71
72 u16 head;
73 u16 tail;
74
75 unsigned int total_bytes;
76 unsigned int total_packets;
77
78 u16 reg_idx; /* holds the special value that gets the hardware register
79 * offset associated with this ring, which is different
80 * for DCB and RSS modes */
81
82#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
83 /* cpu for tx queue */
84 int cpu;
85#endif
86
87 u64 v_idx; /* maps directly to the index for this ring in the hardware
88 * vector array, can also be used for finding the bit in EICR
89 * and friends that represents the vector for this ring */
90
91 u16 work_limit; /* max work per interrupt */
92 u16 rx_buf_len;
93};
94
95enum ixgbevf_ring_f_enum {
96 RING_F_NONE = 0,
97 RING_F_ARRAY_SIZE /* must be last in enum set */
98};
99
100struct ixgbevf_ring_feature {
101 int indices;
102 int mask;
103};
104
105/* How many Rx Buffers do we bundle into one write to the hardware ? */
106#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
107
108#define MAX_RX_QUEUES 1
109#define MAX_TX_QUEUES 1
110
111#define IXGBEVF_DEFAULT_TXD 1024
112#define IXGBEVF_DEFAULT_RXD 512
113#define IXGBEVF_MAX_TXD 4096
114#define IXGBEVF_MIN_TXD 64
115#define IXGBEVF_MAX_RXD 4096
116#define IXGBEVF_MIN_RXD 64
117
118/* Supported Rx Buffer Sizes */
119#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
120#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
121#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
122#define IXGBEVF_RXBUFFER_2048 2048
123#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
124
125#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
126
127#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
128
129#define IXGBE_TX_FLAGS_CSUM (u32)(1)
130#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
131#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
132#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
133#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
134#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
135#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
136#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
137#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
138
139/* MAX_MSIX_Q_VECTORS of these are allocated,
140 * but we only use one per queue-specific vector.
141 */
142struct ixgbevf_q_vector {
143 struct ixgbevf_adapter *adapter;
144 struct napi_struct napi;
145 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
146 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
147 u8 rxr_count; /* Rx ring count assigned to this vector */
148 u8 txr_count; /* Tx ring count assigned to this vector */
149 u8 tx_itr;
150 u8 rx_itr;
151 u32 eitr;
152 int v_idx; /* vector index in list */
153};
154
155/* Helper macros to switch between ints/sec and what the register uses.
156 * And yes, it's the same math going both ways. The lowest value
157 * supported by all of the ixgbe hardware is 8.
158 */
159#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
160 ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
161#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
162
163#define IXGBE_DESC_UNUSED(R) \
164 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
165 (R)->next_to_clean - (R)->next_to_use - 1)
166
167#define IXGBE_RX_DESC_ADV(R, i) \
168 (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
169#define IXGBE_TX_DESC_ADV(R, i) \
170 (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
171#define IXGBE_TX_CTXTDESC_ADV(R, i) \
172 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
173
174#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
175
176#define OTHER_VECTOR 1
177#define NON_Q_VECTORS (OTHER_VECTOR)
178
179#define MAX_MSIX_Q_VECTORS 2
180#define MAX_MSIX_COUNT 2
181
182#define MIN_MSIX_Q_VECTORS 2
183#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
184
185/* board specific private data structure */
186struct ixgbevf_adapter {
187 struct timer_list watchdog_timer;
188#ifdef NETIF_F_HW_VLAN_TX
189 struct vlan_group *vlgrp;
190#endif
191 u16 bd_number;
192 struct work_struct reset_task;
193 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
194 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
195
196 /* Interrupt Throttle Rate */
197 u32 itr_setting;
198 u16 eitr_low;
199 u16 eitr_high;
200
201 /* TX */
202 struct ixgbevf_ring *tx_ring; /* One per active queue */
203 int num_tx_queues;
204 u64 restart_queue;
205 u64 hw_csum_tx_good;
206 u64 lsc_int;
207 u64 hw_tso_ctxt;
208 u64 hw_tso6_ctxt;
209 u32 tx_timeout_count;
210 bool detect_tx_hung;
211
212 /* RX */
213 struct ixgbevf_ring *rx_ring; /* One per active queue */
214 int num_rx_queues;
215 int num_rx_pools; /* == num_rx_queues in 82598 */
216 int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
217 u64 hw_csum_rx_error;
218 u64 hw_rx_no_dma_resources;
219 u64 hw_csum_rx_good;
220 u64 non_eop_descs;
221 int num_msix_vectors;
222 int max_msix_q_vectors; /* true count of q_vectors for device */
223 struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
224 struct msix_entry *msix_entries;
225
226 u64 rx_hdr_split;
227 u32 alloc_rx_page_failed;
228 u32 alloc_rx_buff_failed;
229
230 /* Some features need tri-state capability,
231 * thus the additional *_CAPABLE flags.
232 */
233 u32 flags;
234#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
235#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
236#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
237#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
238#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
239#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
240#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
241#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
242#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
243 /* OS defined structs */
244 struct net_device *netdev;
245 struct pci_dev *pdev;
246 struct net_device_stats net_stats;
247
248 /* structs defined in ixgbe_vf.h */
249 struct ixgbe_hw hw;
250 u16 msg_enable;
251 struct ixgbevf_hw_stats stats;
252 u64 zero_base;
253 /* Interrupt Throttle Rate */
254 u32 eitr_param;
255
256 unsigned long state;
257 u32 *config_space;
258 u64 tx_busy;
259 unsigned int tx_ring_count;
260 unsigned int rx_ring_count;
261
262 u32 link_speed;
263 bool link_up;
264 unsigned long link_check_timeout;
265
266 struct work_struct watchdog_task;
267 bool netdev_registered;
268 bool dev_closed;
269};
270
271enum ixbgevf_state_t {
272 __IXGBEVF_TESTING,
273 __IXGBEVF_RESETTING,
274 __IXGBEVF_DOWN
275};
276
277enum ixgbevf_boards {
278 board_82599_vf,
279};
280
281extern struct ixgbevf_info ixgbevf_vf_info;
282extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
283
284/* needed by ethtool.c */
285extern char ixgbevf_driver_name[];
286extern const char ixgbevf_driver_version[];
287
288extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
289extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
290extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
291extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
292extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
293extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
294 struct ixgbevf_ring *);
295extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
296 struct ixgbevf_ring *);
297extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
298 struct ixgbevf_ring *);
299extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
300 struct ixgbevf_ring *);
301extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
302
303#ifdef ETHTOOL_OPS_COMPAT
304extern int ethtool_ioctl(struct ifreq *ifr);
305
306#endif
307extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
308extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
309
310#ifdef DEBUG
311extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
312#define hw_dbg(hw, format, arg...) \
313 printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
314#else
315#define hw_dbg(hw, format, arg...) do {} while (0)
316#endif
317
318#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 00000000000..b9f10d05049
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3578 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
32#include <linux/types.h>
33#include <linux/module.h>
34#include <linux/pci.h>
35#include <linux/netdevice.h>
36#include <linux/vmalloc.h>
37#include <linux/string.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41#include <linux/ipv6.h>
42#include <net/checksum.h>
43#include <net/ip6_checksum.h>
44#include <linux/ethtool.h>
45#include <linux/if_vlan.h>
46
47#include "ixgbevf.h"
48
49char ixgbevf_driver_name[] = "ixgbevf";
50static const char ixgbevf_driver_string[] =
51 "Intel(R) 82599 Virtual Function";
52
53#define DRV_VERSION "1.0.0-k0"
54const char ixgbevf_driver_version[] = DRV_VERSION;
55static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
56
57static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
58 [board_82599_vf] = &ixgbevf_vf_info,
59};
60
61/* ixgbevf_pci_tbl - PCI Device ID Table
62 *
63 * Wildcard entries (PCI_ANY_ID) should come last
64 * Last entry must be all 0s
65 *
66 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
67 * Class, Class Mask, private data (not used) }
68 */
69static struct pci_device_id ixgbevf_pci_tbl[] = {
70 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
71 board_82599_vf},
72
73 /* required last entry */
74 {0, }
75};
76MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
77
78MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
79MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
80MODULE_LICENSE("GPL");
81MODULE_VERSION(DRV_VERSION);
82
83#define DEFAULT_DEBUG_LEVEL_SHIFT 3
84
85/* forward decls */
86static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
87static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
88 u32 itr_reg);
89
/* Advance the VFRDT (Rx descriptor tail) register of @rx_ring to @val,
 * publishing freshly initialized descriptors to the hardware. */
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
103
104/*
105 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
106 * @adapter: pointer to adapter struct
107 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
108 * @queue: queue to map the corresponding interrupt to
109 * @msix_vector: the vector to map to the corresponding queue
110 *
111 */
112static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
113 u8 queue, u8 msix_vector)
114{
115 u32 ivar, index;
116 struct ixgbe_hw *hw = &adapter->hw;
117 if (direction == -1) {
118 /* other causes */
119 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
120 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
121 ivar &= ~0xFF;
122 ivar |= msix_vector;
123 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
124 } else {
125 /* tx or rx causes */
126 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
127 index = ((16 * (queue & 1)) + (8 * direction));
128 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
129 ivar &= ~(0xFF << index);
130 ivar |= (msix_vector << index);
131 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
132 }
133}
134
135static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
136 struct ixgbevf_tx_buffer
137 *tx_buffer_info)
138{
139 if (tx_buffer_info->dma) {
140 if (tx_buffer_info->mapped_as_page)
141 pci_unmap_page(adapter->pdev,
142 tx_buffer_info->dma,
143 tx_buffer_info->length,
144 PCI_DMA_TODEVICE);
145 else
146 pci_unmap_single(adapter->pdev,
147 tx_buffer_info->dma,
148 tx_buffer_info->length,
149 PCI_DMA_TODEVICE);
150 tx_buffer_info->dma = 0;
151 }
152 if (tx_buffer_info->skb) {
153 dev_kfree_skb_any(tx_buffer_info->skb);
154 tx_buffer_info->skb = NULL;
155 }
156 tx_buffer_info->time_stamp = 0;
157 /* tx_buffer_info must be completely set up in the transmit path */
158}
159
160static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
161 struct ixgbevf_ring *tx_ring,
162 unsigned int eop)
163{
164 struct ixgbe_hw *hw = &adapter->hw;
165 u32 head, tail;
166
167 /* Detect a transmit hang in hardware, this serializes the
168 * check with the clearing of time_stamp and movement of eop */
169 head = readl(hw->hw_addr + tx_ring->head);
170 tail = readl(hw->hw_addr + tx_ring->tail);
171 adapter->detect_tx_hung = false;
172 if ((head != tail) &&
173 tx_ring->tx_buffer_info[eop].time_stamp &&
174 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
175 /* detected Tx unit hang */
176 union ixgbe_adv_tx_desc *tx_desc;
177 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
178 printk(KERN_ERR "Detected Tx Unit Hang\n"
179 " Tx Queue <%d>\n"
180 " TDH, TDT <%x>, <%x>\n"
181 " next_to_use <%x>\n"
182 " next_to_clean <%x>\n"
183 "tx_buffer_info[next_to_clean]\n"
184 " time_stamp <%lx>\n"
185 " jiffies <%lx>\n",
186 tx_ring->queue_index,
187 head, tail,
188 tx_ring->next_to_use, eop,
189 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
190 return true;
191 }
192
193 return false;
194}
195
196#define IXGBE_MAX_TXD_PWR 14
197#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
198
199/* Tx Descriptors needed, worst case */
200#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
201 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
202#ifdef MAX_SKB_FRAGS
203#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
204 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
205#else
206#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
207#endif
208
209static void ixgbevf_tx_timeout(struct net_device *netdev);
210
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 *
 * Walks the ring from next_to_clean, freeing the skb and DMA mapping of
 * every descriptor belonging to frames the hardware has completed (DD
 * set on the end-of-packet descriptor), bounded by tx_ring->work_limit.
 * Wakes the stopped transmit queue when enough descriptors become free,
 * triggers the tx-timeout path when a hang is detected, and re-arms the
 * interrupt if the work limit was hit with work remaining.
 *
 * Returns true when the ring was fully cleaned (budget not exhausted).
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	/* outer loop: one iteration per completed frame */
	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		/* inner loop: every descriptor of the frame up to eop */
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			/* clear DD so this slot is not mistaken for done
			 * before hardware rewrites it */
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		/* advance to the next frame's end-of-packet descriptor */
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	if (adapter->detect_tx_hung) {
		if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			printk(KERN_INFO
			       "tx hang %d detected, resetting adapter\n",
			       adapter->tx_timeout_count + 1);
			ixgbevf_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;

	return (count < tx_ring->work_limit);
}
314
315/**
316 * ixgbevf_receive_skb - Send a completed packet up the stack
317 * @q_vector: structure containing interrupt and ring information
318 * @skb: packet to send up
319 * @status: hardware indication of status of receive
320 * @rx_ring: rx descriptor ring (for a specific queue) to setup
321 * @rx_desc: rx descriptor
322 **/
323static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
324 struct sk_buff *skb, u8 status,
325 struct ixgbevf_ring *ring,
326 union ixgbe_adv_rx_desc *rx_desc)
327{
328 struct ixgbevf_adapter *adapter = q_vector->adapter;
329 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
330 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
331 int ret;
332
333 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
334 if (adapter->vlgrp && is_vlan)
335 vlan_gro_receive(&q_vector->napi,
336 adapter->vlgrp,
337 tag, skb);
338 else
339 napi_gro_receive(&q_vector->napi, skb);
340 } else {
341 if (adapter->vlgrp && is_vlan)
342 ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
343 else
344 ret = netif_rx(skb);
345 }
346}
347
348/**
349 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
350 * @adapter: address of board private structure
351 * @status_err: hardware indication of status of receive
352 * @skb: skb currently being received and modified
353 **/
354static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
355 u32 status_err, struct sk_buff *skb)
356{
357 skb->ip_summed = CHECKSUM_NONE;
358
359 /* Rx csum disabled */
360 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
361 return;
362
363 /* if IP and error */
364 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
365 (status_err & IXGBE_RXDADV_ERR_IPE)) {
366 adapter->hw_csum_rx_error++;
367 return;
368 }
369
370 if (!(status_err & IXGBE_RXD_STAT_L4CS))
371 return;
372
373 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
374 adapter->hw_csum_rx_error++;
375 return;
376 }
377
378 /* It must be a TCP or UDP packet with a valid checksum */
379 skb->ip_summed = CHECKSUM_UNNECESSARY;
380 adapter->hw_csum_rx_good++;
381}
382
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to replenish
 * @cleaned_count: number of descriptors to (re)populate
 *
 * For each descriptor: in packet-split mode a half-page DMA mapping is
 * (re)established for the data portion, alternating between the two
 * halves of a cached page; an skb is allocated and mapped for the
 * header (or the whole buffer in non-split mode). Finally the hardware
 * tail pointer is advanced to the last initialized descriptor.
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(adapter->netdev);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			/* NOTE(review): pci_map_page/pci_map_single results
			 * are not checked for mapping errors here — confirm
			 * whether that is acceptable for this hardware */
			bi->page_dma = pci_map_page(pdev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    PCI_DMA_FROMDEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = pci_map_single(pdev, skb->data,
						 rx_ring->rx_buf_len,
						 PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		/* tail must point at the last valid descriptor — the one
		 * before next_to_use, with wraparound */
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
472
473static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
474 u64 qmask)
475{
476 u32 mask;
477 struct ixgbe_hw *hw = &adapter->hw;
478
479 mask = (qmask & 0xFFFFFFFF);
480 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
481}
482
483static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
484{
485 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
486}
487
488static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
489{
490 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
491}
492
/**
 * ixgbevf_clean_rx_irq - reclaim completed Rx descriptors
 * @q_vector: vector servicing this ring (supplies the napi context)
 * @rx_ring: ring to clean
 * @work_done: cumulative packet count, updated in place
 * @work_to_do: budget; processing stops once *work_done reaches it
 *
 * Walks descriptors with DD set, unmaps their buffers, chains non-EOP
 * fragments, validates checksum offload results and passes completed
 * skbs up the stack. Buffers are replenished in batches during the
 * walk and once more at the end.
 *
 * Returns true if at least one descriptor was processed.
 **/
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			/* packet split: header length from hdr_info, data
			 * (page) length from the descriptor */
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			/* attach the half-page data buffer as a fragment */
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			/* keep the page for its other half unless someone
			 * else still holds it or the buffer needs all of it */
			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			/* multi-descriptor frame: carry the in-progress skb
			 * forward to the next descriptor's slot */
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb->len - skb->data_len;
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
		adapter->netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}
645
646/**
647 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
648 * @napi: napi struct with our devices info in it
649 * @budget: amount of work driver is allowed to do this pass, in packets
650 *
651 * This function is optimized for cleaning one queue only on a single
652 * q_vector!!!
653 **/
654static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
655{
656 struct ixgbevf_q_vector *q_vector =
657 container_of(napi, struct ixgbevf_q_vector, napi);
658 struct ixgbevf_adapter *adapter = q_vector->adapter;
659 struct ixgbevf_ring *rx_ring = NULL;
660 int work_done = 0;
661 long r_idx;
662
663 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
664 rx_ring = &(adapter->rx_ring[r_idx]);
665
666 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
667
668 /* If all Rx work done, exit the polling mode */
669 if (work_done < budget) {
670 napi_complete(napi);
671 if (adapter->itr_setting & 1)
672 ixgbevf_set_itr_msix(q_vector);
673 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
674 ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
675 }
676
677 return work_done;
678}
679
/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		/* NOTE(review): clean_rx_irq compares the *cumulative*
		 * work_done against the divided budget, so later rings in
		 * the walk may receive little or none of it — confirm this
		 * is the intended fairness behavior */
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		/* accumulate the EIMS bits of every ring serviced */
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}
730
731
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts: each Rx/Tx queue attached to a q_vector gets an IVAR
 * entry pointing at that vector, each vector receives its initial ITR,
 * the "other causes" (mailbox) interrupt is routed to the vector after
 * the queue vectors, and autoclear is enabled for the queue vectors.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		/* map every Rx queue bound to this vector */
		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		/* map every Tx queue bound to this vector */
		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	/* v_idx == q_vectors here: other/mailbox causes go to the vector
	 * just past the queue vectors */
	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}
793
/* Coarse traffic classes used by the adaptive (dynamic) ITR algorithm. */
enum latency_range {
	lowest_latency = 0,	/* light traffic: interrupt quickly */
	low_latency = 1,	/* moderate throughput */
	bulk_latency = 2,	/* bulk transfers: batch interrupts */
	latency_invalid = 255
};
800
801/**
802 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
803 * @adapter: pointer to adapter
804 * @eitr: eitr setting (ints per sec) to give last timeslice
805 * @itr_setting: current throttle rate in ints/second
806 * @packets: the number of packets during this measurement interval
807 * @bytes: the number of bytes during this measurement interval
808 *
809 * Stores a new ITR value based on packets and byte
810 * counts during the last interrupt. The advantage of per interrupt
811 * computation is faster updates and more accurate ITR for the current
812 * traffic pattern. Constants in this function were computed
813 * based on theoretical maximum wire speed and thresholds were set based
814 * on testing data as well as attempting to minimize response time
815 * while increasing bulk throughput.
816 **/
817static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
818 u32 eitr, u8 itr_setting,
819 int packets, int bytes)
820{
821 unsigned int retval = itr_setting;
822 u32 timepassed_us;
823 u64 bytes_perint;
824
825 if (packets == 0)
826 goto update_itr_done;
827
828
829 /* simple throttlerate management
830 * 0-20MB/s lowest (100000 ints/s)
831 * 20-100MB/s low (20000 ints/s)
832 * 100-1249MB/s bulk (8000 ints/s)
833 */
834 /* what was last interrupt timeslice? */
835 timepassed_us = 1000000/eitr;
836 bytes_perint = bytes / timepassed_us; /* bytes/usec */
837
838 switch (itr_setting) {
839 case lowest_latency:
840 if (bytes_perint > adapter->eitr_low)
841 retval = low_latency;
842 break;
843 case low_latency:
844 if (bytes_perint > adapter->eitr_high)
845 retval = bulk_latency;
846 else if (bytes_perint <= adapter->eitr_low)
847 retval = lowest_latency;
848 break;
849 case bulk_latency:
850 if (bytes_perint <= adapter->eitr_high)
851 retval = low_latency;
852 break;
853 }
854
855update_itr_done:
856 return retval;
857}
858
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new interrupt rate; EITR_INTS_PER_SEC_TO_REG() is applied
 *	below, so callers should pass ints/s, not register format.
 *	NOTE(review): the original comment claimed "*register* format,
 *	not ints/s", contradicting the conversion performed here —
 *	verify no caller pre-converts (that would double-convert).
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime. Hardware
 * specific quirks/differences are taken care of here.
 */
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* convert from ints/s to the hardware register encoding */
	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
884
885static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
886{
887 struct ixgbevf_adapter *adapter = q_vector->adapter;
888 u32 new_itr;
889 u8 current_itr, ret_itr;
890 int i, r_idx, v_idx = q_vector->v_idx;
891 struct ixgbevf_ring *rx_ring, *tx_ring;
892
893 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
894 for (i = 0; i < q_vector->txr_count; i++) {
895 tx_ring = &(adapter->tx_ring[r_idx]);
896 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
897 q_vector->tx_itr,
898 tx_ring->total_packets,
899 tx_ring->total_bytes);
900 /* if the result for this queue would decrease interrupt
901 * rate for this vector then use that result */
902 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
903 q_vector->tx_itr - 1 : ret_itr);
904 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
905 r_idx + 1);
906 }
907
908 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
909 for (i = 0; i < q_vector->rxr_count; i++) {
910 rx_ring = &(adapter->rx_ring[r_idx]);
911 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
912 q_vector->rx_itr,
913 rx_ring->total_packets,
914 rx_ring->total_bytes);
915 /* if the result for this queue would decrease interrupt
916 * rate for this vector then use that result */
917 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
918 q_vector->rx_itr - 1 : ret_itr);
919 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
920 r_idx + 1);
921 }
922
923 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
924
925 switch (current_itr) {
926 /* counts and packets in update_itr are dependent on these numbers */
927 case lowest_latency:
928 new_itr = 100000;
929 break;
930 case low_latency:
931 new_itr = 20000; /* aka hwitr = ~200 */
932 break;
933 case bulk_latency:
934 default:
935 new_itr = 8000;
936 break;
937 }
938
939 if (new_itr != q_vector->eitr) {
940 u32 itr_reg;
941
942 /* save the algorithm value here, not the smoothed one */
943 q_vector->eitr = new_itr;
944 /* do an exponential smoothing */
945 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
946 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
947 ixgbevf_write_eitr(adapter, v_idx, itr_reg);
948 }
949
950 return;
951}
952
/* MSI-X mailbox/other-causes interrupt handler: acknowledges the cause
 * bits and, on a control message from the PF, kicks the watchdog so
 * state is re-evaluated almost immediately. */
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;

	/* read the pending cause bits and write them back to EICR to
	 * acknowledge them */
	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	hw->mbx.ops.read(hw, &msg, 1);

	/* a PF control message triggers a near-immediate watchdog run */
	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 10));

	return IRQ_HANDLED;
}
972
973static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
974{
975 struct ixgbevf_q_vector *q_vector = data;
976 struct ixgbevf_adapter *adapter = q_vector->adapter;
977 struct ixgbevf_ring *tx_ring;
978 int i, r_idx;
979
980 if (!q_vector->txr_count)
981 return IRQ_HANDLED;
982
983 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
984 for (i = 0; i < q_vector->txr_count; i++) {
985 tx_ring = &(adapter->tx_ring[r_idx]);
986 tx_ring->total_bytes = 0;
987 tx_ring->total_packets = 0;
988 ixgbevf_clean_tx_irq(adapter, tx_ring);
989 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
990 r_idx + 1);
991 }
992
993 if (adapter->itr_setting & 1)
994 ixgbevf_set_itr_msix(q_vector);
995
996 return IRQ_HANDLED;
997}
998
999/**
1000 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
1001 * @irq: unused
1002 * @data: pointer to our q_vector struct for this interrupt vector
1003 **/
1004static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
1005{
1006 struct ixgbevf_q_vector *q_vector = data;
1007 struct ixgbevf_adapter *adapter = q_vector->adapter;
1008 struct ixgbe_hw *hw = &adapter->hw;
1009 struct ixgbevf_ring *rx_ring;
1010 int r_idx;
1011 int i;
1012
1013 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1014 for (i = 0; i < q_vector->rxr_count; i++) {
1015 rx_ring = &(adapter->rx_ring[r_idx]);
1016 rx_ring->total_bytes = 0;
1017 rx_ring->total_packets = 0;
1018 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1019 r_idx + 1);
1020 }
1021
1022 if (!q_vector->rxr_count)
1023 return IRQ_HANDLED;
1024
1025 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1026 rx_ring = &(adapter->rx_ring[r_idx]);
1027 /* disable interrupts on this vector only */
1028 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
1029 napi_schedule(&q_vector->napi);
1030
1031
1032 return IRQ_HANDLED;
1033}
1034
/* Handler for a vector that services both Rx and Tx rings: run the Rx
 * side (which schedules NAPI) first, then reclaim completed Tx work. */
static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}
1042
1043static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1044 int r_idx)
1045{
1046 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1047
1048 set_bit(r_idx, q_vector->rxr_idx);
1049 q_vector->rxr_count++;
1050 a->rx_ring[r_idx].v_idx = 1 << v_idx;
1051}
1052
1053static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1054 int t_idx)
1055{
1056 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1057
1058 set_bit(t_idx, q_vector->txr_idx);
1059 q_vector->txr_count++;
1060 a->tx_ring[t_idx].v_idx = 1 << v_idx;
1061}
1062
1063/**
1064 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1065 * @adapter: board private structure to initialize
1066 *
1067 * This function maps descriptor rings to the queue-specific vectors
1068 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1069 * one vector per ring/queue, but on a constrained vector budget, we
1070 * group the rings as "efficiently" as possible. You would add new
1071 * mapping configurations in here.
1072 **/
1073static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1074{
1075 int q_vectors;
1076 int v_start = 0;
1077 int rxr_idx = 0, txr_idx = 0;
1078 int rxr_remaining = adapter->num_rx_queues;
1079 int txr_remaining = adapter->num_tx_queues;
1080 int i, j;
1081 int rqpv, tqpv;
1082 int err = 0;
1083
1084 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1085
1086 /*
1087 * The ideal configuration...
1088 * We have enough vectors to map one per queue.
1089 */
1090 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1091 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1092 map_vector_to_rxq(adapter, v_start, rxr_idx);
1093
1094 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1095 map_vector_to_txq(adapter, v_start, txr_idx);
1096 goto out;
1097 }
1098
1099 /*
1100 * If we don't have enough vectors for a 1-to-1
1101 * mapping, we'll have to group them so there are
1102 * multiple queues per vector.
1103 */
1104 /* Re-adjusting *qpv takes care of the remainder. */
1105 for (i = v_start; i < q_vectors; i++) {
1106 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1107 for (j = 0; j < rqpv; j++) {
1108 map_vector_to_rxq(adapter, i, rxr_idx);
1109 rxr_idx++;
1110 rxr_remaining--;
1111 }
1112 }
1113 for (i = v_start; i < q_vectors; i++) {
1114 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1115 for (j = 0; j < tqpv; j++) {
1116 map_vector_to_txq(adapter, i, txr_idx);
1117 txr_idx++;
1118 txr_remaining--;
1119 }
1120 }
1121
1122out:
1123 return err;
1124}
1125
1126/**
1127 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1128 * @adapter: board private structure
1129 *
1130 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1131 * interrupts from the kernel.
1132 **/
1133static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1134{
1135 struct net_device *netdev = adapter->netdev;
1136 irqreturn_t (*handler)(int, void *);
1137 int i, vector, q_vectors, err;
1138 int ri = 0, ti = 0;
1139
1140 /* Decrement for Other and TCP Timer vectors */
1141 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1142
1143#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
1144 ? &ixgbevf_msix_clean_many : \
1145 (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
1146 (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
1147 NULL)
1148 for (vector = 0; vector < q_vectors; vector++) {
1149 handler = SET_HANDLER(adapter->q_vector[vector]);
1150
1151 if (handler == &ixgbevf_msix_clean_rx) {
1152 sprintf(adapter->name[vector], "%s-%s-%d",
1153 netdev->name, "rx", ri++);
1154 } else if (handler == &ixgbevf_msix_clean_tx) {
1155 sprintf(adapter->name[vector], "%s-%s-%d",
1156 netdev->name, "tx", ti++);
1157 } else if (handler == &ixgbevf_msix_clean_many) {
1158 sprintf(adapter->name[vector], "%s-%s-%d",
1159 netdev->name, "TxRx", vector);
1160 } else {
1161 /* skip this unused q_vector */
1162 continue;
1163 }
1164 err = request_irq(adapter->msix_entries[vector].vector,
1165 handler, 0, adapter->name[vector],
1166 adapter->q_vector[vector]);
1167 if (err) {
1168 hw_dbg(&adapter->hw,
1169 "request_irq failed for MSIX interrupt "
1170 "Error: %d\n", err);
1171 goto free_queue_irqs;
1172 }
1173 }
1174
1175 sprintf(adapter->name[vector], "%s:mbx", netdev->name);
1176 err = request_irq(adapter->msix_entries[vector].vector,
1177 &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
1178 if (err) {
1179 hw_dbg(&adapter->hw,
1180 "request_irq for msix_mbx failed: %d\n", err);
1181 goto free_queue_irqs;
1182 }
1183
1184 return 0;
1185
1186free_queue_irqs:
1187 for (i = vector - 1; i >= 0; i--)
1188 free_irq(adapter->msix_entries[--vector].vector,
1189 &(adapter->q_vector[i]));
1190 pci_disable_msix(adapter->pdev);
1191 kfree(adapter->msix_entries);
1192 adapter->msix_entries = NULL;
1193 return err;
1194}
1195
1196static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1197{
1198 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1199
1200 for (i = 0; i < q_vectors; i++) {
1201 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1202 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1203 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1204 q_vector->rxr_count = 0;
1205 q_vector->txr_count = 0;
1206 q_vector->eitr = adapter->eitr_param;
1207 }
1208}
1209
1210/**
1211 * ixgbevf_request_irq - initialize interrupts
1212 * @adapter: board private structure
1213 *
1214 * Attempts to configure interrupts using the best available
1215 * capabilities of the hardware and kernel.
1216 **/
1217static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1218{
1219 int err = 0;
1220
1221 err = ixgbevf_request_msix_irqs(adapter);
1222
1223 if (err)
1224 hw_dbg(&adapter->hw,
1225 "request_irq failed, Error %d\n", err);
1226
1227 return err;
1228}
1229
1230static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1231{
1232 struct net_device *netdev = adapter->netdev;
1233 int i, q_vectors;
1234
1235 q_vectors = adapter->num_msix_vectors;
1236
1237 i = q_vectors - 1;
1238
1239 free_irq(adapter->msix_entries[i].vector, netdev);
1240 i--;
1241
1242 for (; i >= 0; i--) {
1243 free_irq(adapter->msix_entries[i].vector,
1244 adapter->q_vector[i]);
1245 }
1246
1247 ixgbevf_reset_q_vectors(adapter);
1248}
1249
1250/**
1251 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1252 * @adapter: board private structure
1253 **/
1254static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1255{
1256 int i;
1257 struct ixgbe_hw *hw = &adapter->hw;
1258
1259 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1260
1261 IXGBE_WRITE_FLUSH(hw);
1262
1263 for (i = 0; i < adapter->num_msix_vectors; i++)
1264 synchronize_irq(adapter->msix_entries[i].vector);
1265}
1266
1267/**
1268 * ixgbevf_irq_enable - Enable default interrupt generation settings
1269 * @adapter: board private structure
1270 **/
1271static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
1272 bool queues, bool flush)
1273{
1274 struct ixgbe_hw *hw = &adapter->hw;
1275 u32 mask;
1276 u64 qmask;
1277
1278 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1279 qmask = ~0;
1280
1281 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1282
1283 if (queues)
1284 ixgbevf_irq_enable_queues(adapter, qmask);
1285
1286 if (flush)
1287 IXGBE_WRITE_FLUSH(hw);
1288}
1289
1290/**
1291 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1292 * @adapter: board private structure
1293 *
1294 * Configure the Tx unit of the MAC after a reset.
1295 **/
1296static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1297{
1298 u64 tdba;
1299 struct ixgbe_hw *hw = &adapter->hw;
1300 u32 i, j, tdlen, txctrl;
1301
1302 /* Setup the HW Tx Head and Tail descriptor pointers */
1303 for (i = 0; i < adapter->num_tx_queues; i++) {
1304 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1305 j = ring->reg_idx;
1306 tdba = ring->dma;
1307 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1308 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1309 (tdba & DMA_BIT_MASK(32)));
1310 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1311 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1312 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1313 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1314 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1315 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1316 /* Disable Tx Head Writeback RO bit, since this hoses
1317 * bookkeeping if things aren't delivered in order.
1318 */
1319 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1320 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1321 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1322 }
1323}
1324
1325#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1326
/* Program the split-receive control register (SRRCTL) for one Rx queue:
 * descriptor type, buffer sizes and the drop-enable policy. */
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	/* drop packets when no descriptors are available */
	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* packet split: separate header and data buffers */
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		/* single-buffer descriptors sized from the ring's buf_len */
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
1362
1363/**
1364 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1365 * @adapter: board private structure
1366 *
1367 * Configure the Rx unit of the MAC after a reset.
1368 **/
1369static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1370{
1371 u64 rdba;
1372 struct ixgbe_hw *hw = &adapter->hw;
1373 struct net_device *netdev = adapter->netdev;
1374 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1375 int i, j;
1376 u32 rdlen;
1377 int rx_buf_len;
1378
1379 /* Decide whether to use packet split mode or not */
1380 if (netdev->mtu > ETH_DATA_LEN) {
1381 if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
1382 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1383 else
1384 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1385 } else {
1386 if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
1387 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1388 else
1389 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1390 }
1391
1392 /* Set the RX buffer length according to the mode */
1393 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1394 /* PSRTYPE must be initialized in 82599 */
1395 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
1396 IXGBE_PSRTYPE_UDPHDR |
1397 IXGBE_PSRTYPE_IPV4HDR |
1398 IXGBE_PSRTYPE_IPV6HDR |
1399 IXGBE_PSRTYPE_L2HDR;
1400 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1401 rx_buf_len = IXGBEVF_RX_HDR_SIZE;
1402 } else {
1403 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1404 if (netdev->mtu <= ETH_DATA_LEN)
1405 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1406 else
1407 rx_buf_len = ALIGN(max_frame, 1024);
1408 }
1409
1410 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1411 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1412 * the Base and Length of the Rx Descriptor Ring */
1413 for (i = 0; i < adapter->num_rx_queues; i++) {
1414 rdba = adapter->rx_ring[i].dma;
1415 j = adapter->rx_ring[i].reg_idx;
1416 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1417 (rdba & DMA_BIT_MASK(32)));
1418 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1419 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1420 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1421 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1422 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1423 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1424 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1425
1426 ixgbevf_configure_srrctl(adapter, j);
1427 }
1428}
1429
/* Remember the VLAN group for later add/kill/restore operations and
 * enable hardware VLAN tag stripping (VME) on every Rx queue. */
static void ixgbevf_vlan_rx_register(struct net_device *netdev,
				     struct vlan_group *grp)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 ctrl;

	adapter->vlgrp = grp;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		/* read-modify-write: set only the VLAN-strip enable bit */
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
	}
}
1447
1448static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1449{
1450 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1451 struct ixgbe_hw *hw = &adapter->hw;
1452 struct net_device *v_netdev;
1453
1454 /* add VID to filter table */
1455 if (hw->mac.ops.set_vfta)
1456 hw->mac.ops.set_vfta(hw, vid, 0, true);
1457 /*
1458 * Copy feature flags from netdev to the vlan netdev for this vid.
1459 * This allows things like TSO to bubble down to our vlan device.
1460 */
1461 v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
1462 v_netdev->features |= adapter->netdev->features;
1463 vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
1464}
1465
/* Remove @vid: detach its vlan device from the group (with interrupts
 * quiesced unless the interface is already down) and clear the hardware
 * VLAN filter entry via the PF mailbox. */
static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* mask interrupts around the group update; skip if already down */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable(adapter, true, true);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
}
1483
1484static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1485{
1486 ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1487
1488 if (adapter->vlgrp) {
1489 u16 vid;
1490 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1491 if (!vlan_group_get_device(adapter->vlgrp, vid))
1492 continue;
1493 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1494 }
1495 }
1496}
1497
/* Iterator passed to update_mc_addr_list(): returns the current
 * multicast address and advances *mc_addr_ptr to the next entry's
 * address (NULL at end of list).  *vmdq is always 0 for the VF. */
static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
				 u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	/* recover the enclosing dev_mc_list from its dmi_addr pointer */
	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}
1513
1514/**
1515 * ixgbevf_set_rx_mode - Multicast set
1516 * @netdev: network interface device structure
1517 *
1518 * The set_rx_method entry point is called whenever the multicast address
1519 * list or the network interface flags are updated. This routine is
1520 * responsible for configuring the hardware for proper multicast mode.
1521 **/
1522static void ixgbevf_set_rx_mode(struct net_device *netdev)
1523{
1524 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1525 struct ixgbe_hw *hw = &adapter->hw;
1526 u8 *addr_list = NULL;
1527 int addr_count = 0;
1528
1529 /* reprogram multicast list */
1530 addr_count = netdev->mc_count;
1531 if (addr_count)
1532 addr_list = netdev->mc_list->dmi_addr;
1533 if (hw->mac.ops.update_mc_addr_list)
1534 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
1535 ixgbevf_addr_list_itr);
1536}
1537
1538static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1539{
1540 int q_idx;
1541 struct ixgbevf_q_vector *q_vector;
1542 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1543
1544 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1545 struct napi_struct *napi;
1546 q_vector = adapter->q_vector[q_idx];
1547 if (!q_vector->rxr_count)
1548 continue;
1549 napi = &q_vector->napi;
1550 if (q_vector->rxr_count > 1)
1551 napi->poll = &ixgbevf_clean_rxonly_many;
1552
1553 napi_enable(napi);
1554 }
1555}
1556
1557static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1558{
1559 int q_idx;
1560 struct ixgbevf_q_vector *q_vector;
1561 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1562
1563 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1564 q_vector = adapter->q_vector[q_idx];
1565 if (!q_vector->rxr_count)
1566 continue;
1567 napi_disable(&q_vector->napi);
1568 }
1569}
1570
/* Full (re)configuration of the device: filters and VLANs first, then
 * the Tx/Rx units, then each Rx ring is pre-filled with buffers and the
 * tail pointer is published to hardware. */
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		/* leave one unused descriptor: tail == count - 1 */
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}
1589
1590#define IXGBE_MAX_RX_DESC_POLL 10
/* Poll (up to IXGBE_MAX_RX_DESC_POLL times, 1ms apart) for the hardware
 * to acknowledge RXDCTL.ENABLE on Rx queue @rxr, then publish the
 * initial tail pointer so the queue can start receiving. */
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		/* non-fatal: log and carry on */
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}
1612
/* Final stage of bringing the interface up: enable the Tx and Rx
 * queues in hardware, program the MSI-X mappings, set the station MAC,
 * start NAPI/transmit, and kick the watchdog to establish link. */
static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	/* second pass: set the enable bit on every Tx queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	/* enable each Rx queue and wait for the hardware to ack it */
	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	/* program the station address; fall back to the permanent one */
	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
1666
/* Bring the interface up: configure the hardware, complete the bring-up
 * sequence, then acknowledge any stale interrupt causes before
 * unmasking interrupts.  Returns the ixgbevf_up_complete() result. */
int ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	int err;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	err = ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);

	return err;
}
1683
1684/**
1685 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1686 * @adapter: board private structure
1687 * @rx_ring: ring to free buffers from
1688 **/
1689static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1690 struct ixgbevf_ring *rx_ring)
1691{
1692 struct pci_dev *pdev = adapter->pdev;
1693 unsigned long size;
1694 unsigned int i;
1695
1696 if (!rx_ring->rx_buffer_info)
1697 return;
1698
1699 /* Free all the Rx ring sk_buffs */
1700 for (i = 0; i < rx_ring->count; i++) {
1701 struct ixgbevf_rx_buffer *rx_buffer_info;
1702
1703 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1704 if (rx_buffer_info->dma) {
1705 pci_unmap_single(pdev, rx_buffer_info->dma,
1706 rx_ring->rx_buf_len,
1707 PCI_DMA_FROMDEVICE);
1708 rx_buffer_info->dma = 0;
1709 }
1710 if (rx_buffer_info->skb) {
1711 struct sk_buff *skb = rx_buffer_info->skb;
1712 rx_buffer_info->skb = NULL;
1713 do {
1714 struct sk_buff *this = skb;
1715 skb = skb->prev;
1716 dev_kfree_skb(this);
1717 } while (skb);
1718 }
1719 if (!rx_buffer_info->page)
1720 continue;
1721 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
1722 PCI_DMA_FROMDEVICE);
1723 rx_buffer_info->page_dma = 0;
1724 put_page(rx_buffer_info->page);
1725 rx_buffer_info->page = NULL;
1726 rx_buffer_info->page_offset = 0;
1727 }
1728
1729 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1730 memset(rx_ring->rx_buffer_info, 0, size);
1731
1732 /* Zero out the descriptor ring */
1733 memset(rx_ring->desc, 0, rx_ring->size);
1734
1735 rx_ring->next_to_clean = 0;
1736 rx_ring->next_to_use = 0;
1737
1738 if (rx_ring->head)
1739 writel(0, adapter->hw.hw_addr + rx_ring->head);
1740 if (rx_ring->tail)
1741 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1742}
1743
1744/**
1745 * ixgbevf_clean_tx_ring - Free Tx Buffers
1746 * @adapter: board private structure
1747 * @tx_ring: ring to be cleaned
1748 **/
1749static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1750 struct ixgbevf_ring *tx_ring)
1751{
1752 struct ixgbevf_tx_buffer *tx_buffer_info;
1753 unsigned long size;
1754 unsigned int i;
1755
1756 if (!tx_ring->tx_buffer_info)
1757 return;
1758
1759 /* Free all the Tx ring sk_buffs */
1760
1761 for (i = 0; i < tx_ring->count; i++) {
1762 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1763 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1764 }
1765
1766 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1767 memset(tx_ring->tx_buffer_info, 0, size);
1768
1769 memset(tx_ring->desc, 0, tx_ring->size);
1770
1771 tx_ring->next_to_use = 0;
1772 tx_ring->next_to_clean = 0;
1773
1774 if (tx_ring->head)
1775 writel(0, adapter->hw.hw_addr + tx_ring->head);
1776 if (tx_ring->tail)
1777 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1778}
1779
1780/**
1781 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1782 * @adapter: board private structure
1783 **/
1784static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1785{
1786 int i;
1787
1788 for (i = 0; i < adapter->num_rx_queues; i++)
1789 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1790}
1791
1792/**
1793 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1794 * @adapter: board private structure
1795 **/
1796static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1797{
1798 int i;
1799
1800 for (i = 0; i < adapter->num_tx_queues; i++)
1801 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1802}
1803
/* Take the interface down: stop transmit, mask and synchronize
 * interrupts, quiesce NAPI and the watchdog, disable the Tx queues in
 * hardware, reset the device (unless the PCI channel is offline) and
 * release all ring buffers.  The ordering here is deliberate. */
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	/* give in-flight frames a chance to drain */
	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
1848
/* Serialized down/up cycle: takes the __IXGBEVF_RESETTING bit as a
 * lock, then restarts the interface only if the PF answers the mailbox
 * reset.  Must not be called from interrupt context. */
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	/* spin until we own the RESETTING bit */
	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
1872
1873void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1874{
1875 struct ixgbe_hw *hw = &adapter->hw;
1876 struct net_device *netdev = adapter->netdev;
1877
1878 if (hw->mac.ops.reset_hw(hw))
1879 hw_dbg(hw, "PF still resetting\n");
1880 else
1881 hw->mac.ops.init_hw(hw);
1882
1883 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1884 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1885 netdev->addr_len);
1886 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1887 netdev->addr_len);
1888 }
1889}
1890
/* Negotiate MSI-X vectors with the kernel.  pci_enable_msix() here uses
 * the legacy contract: 0 = success, positive = the number of vectors it
 * could grant (retry with that), negative = hard failure.  On success
 * adapter->num_msix_vectors is set; on failure msix_entries is freed. */
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts? Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
1937
1938/*
1939 * ixgbe_set_num_queues: Allocate queues for device, feature dependant
1940 * @adapter: board private structure to initialize
1941 *
1942 * This is the top level queue allocation routine. The order here is very
1943 * important, starting with the "most" number of features turned on at once,
1944 * and ending with the smallest set of features. This way large combinations
1945 * can be allocated if they're turned on, and smaller combinations are the
1946 * fallthrough conditions.
1947 *
1948 **/
1949static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1950{
1951 /* Start with base case */
1952 adapter->num_rx_queues = 1;
1953 adapter->num_tx_queues = 1;
1954 adapter->num_rx_pools = adapter->num_rx_queues;
1955 adapter->num_rx_queues_per_pool = 1;
1956}
1957
1958/**
1959 * ixgbevf_alloc_queues - Allocate memory for all rings
1960 * @adapter: board private structure to initialize
1961 *
1962 * We allocate one ring per queue at run-time since we don't know the
1963 * number of queues at compile-time. The polling_netdev array is
1964 * intended for Multiqueue, but should work fine with a single queue.
1965 **/
1966static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1967{
1968 int i;
1969
1970 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1971 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1972 if (!adapter->tx_ring)
1973 goto err_tx_ring_allocation;
1974
1975 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1976 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1977 if (!adapter->rx_ring)
1978 goto err_rx_ring_allocation;
1979
1980 for (i = 0; i < adapter->num_tx_queues; i++) {
1981 adapter->tx_ring[i].count = adapter->tx_ring_count;
1982 adapter->tx_ring[i].queue_index = i;
1983 adapter->tx_ring[i].reg_idx = i;
1984 }
1985
1986 for (i = 0; i < adapter->num_rx_queues; i++) {
1987 adapter->rx_ring[i].count = adapter->rx_ring_count;
1988 adapter->rx_ring[i].queue_index = i;
1989 adapter->rx_ring[i].reg_idx = i;
1990 }
1991
1992 return 0;
1993
1994err_rx_ring_allocation:
1995 kfree(adapter->tx_ring);
1996err_tx_ring_allocation:
1997 return -ENOMEM;
1998}
1999
2000/**
2001 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2002 * @adapter: board private structure to initialize
2003 *
2004 * Attempt to configure the interrupts using the best available
2005 * capabilities of the hardware and the kernel.
2006 **/
2007static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2008{
2009 int err = 0;
2010 int vector, v_budget;
2011
2012 /*
2013 * It's easy to be greedy for MSI-X vectors, but it really
2014 * doesn't do us much good if we have a lot more vectors
2015 * than CPU's. So let's be conservative and only ask for
2016 * (roughly) twice the number of vectors as there are CPU's.
2017 */
2018 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2019 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2020
2021 /* A failure in MSI-X entry allocation isn't fatal, but it does
2022 * mean we disable MSI-X capabilities of the adapter. */
2023 adapter->msix_entries = kcalloc(v_budget,
2024 sizeof(struct msix_entry), GFP_KERNEL);
2025 if (!adapter->msix_entries) {
2026 err = -ENOMEM;
2027 goto out;
2028 }
2029
2030 for (vector = 0; vector < v_budget; vector++)
2031 adapter->msix_entries[vector].entry = vector;
2032
2033 ixgbevf_acquire_msix_vectors(adapter, v_budget);
2034
2035out:
2036 return err;
2037}
2038
2039/**
2040 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2041 * @adapter: board private structure to initialize
2042 *
2043 * We allocate one q_vector per queue interrupt. If allocation fails we
2044 * return -ENOMEM.
2045 **/
2046static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2047{
2048 int q_idx, num_q_vectors;
2049 struct ixgbevf_q_vector *q_vector;
2050 int napi_vectors;
2051 int (*poll)(struct napi_struct *, int);
2052
2053 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2054 napi_vectors = adapter->num_rx_queues;
2055 poll = &ixgbevf_clean_rxonly;
2056
2057 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2058 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2059 if (!q_vector)
2060 goto err_out;
2061 q_vector->adapter = adapter;
2062 q_vector->v_idx = q_idx;
2063 q_vector->eitr = adapter->eitr_param;
2064 if (q_idx < napi_vectors)
2065 netif_napi_add(adapter->netdev, &q_vector->napi,
2066 (*poll), 64);
2067 adapter->q_vector[q_idx] = q_vector;
2068 }
2069
2070 return 0;
2071
2072err_out:
2073 while (q_idx) {
2074 q_idx--;
2075 q_vector = adapter->q_vector[q_idx];
2076 netif_napi_del(&q_vector->napi);
2077 kfree(q_vector);
2078 adapter->q_vector[q_idx] = NULL;
2079 }
2080 return -ENOMEM;
2081}
2082
2083/**
2084 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2085 * @adapter: board private structure to initialize
2086 *
2087 * This function frees the memory allocated to the q_vectors. In addition if
2088 * NAPI is enabled it will delete any references to the NAPI struct prior
2089 * to freeing the q_vector.
2090 **/
2091static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2092{
2093 int q_idx, num_q_vectors;
2094 int napi_vectors;
2095
2096 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2097 napi_vectors = adapter->num_rx_queues;
2098
2099 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2100 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2101
2102 adapter->q_vector[q_idx] = NULL;
2103 if (q_idx < napi_vectors)
2104 netif_napi_del(&q_vector->napi);
2105 kfree(q_vector);
2106 }
2107}
2108
2109/**
2110 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2111 * @adapter: board private structure
2112 *
2113 **/
2114static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2115{
2116 pci_disable_msix(adapter->pdev);
2117 kfree(adapter->msix_entries);
2118 adapter->msix_entries = NULL;
2119
2120 return;
2121}
2122
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 * Sets the queue counts, allocates the MSI-X vector table, the per-vector
 * q_vector structures and the queue structures; each step is unwound in
 * reverse order if a later step fails.  Returns 0 or a negative errno.
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		printk(KERN_ERR "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	/* start administratively down; ixgbevf_open() brings the port up */
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
2170
2171/**
2172 * ixgbevf_sw_init - Initialize general software structures
2173 * (struct ixgbevf_adapter)
2174 * @adapter: board private structure to initialize
2175 *
2176 * ixgbevf_sw_init initializes the Adapter private data structure.
2177 * Fields are initialized based on PCI device information and
2178 * OS network device settings (MTU size).
2179 **/
2180static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2181{
2182 struct ixgbe_hw *hw = &adapter->hw;
2183 struct pci_dev *pdev = adapter->pdev;
2184 int err;
2185
2186 /* PCI config space info */
2187
2188 hw->vendor_id = pdev->vendor;
2189 hw->device_id = pdev->device;
2190 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2191 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2192 hw->subsystem_device_id = pdev->subsystem_device;
2193
2194 hw->mbx.ops.init_params(hw);
2195 hw->mac.max_tx_queues = MAX_TX_QUEUES;
2196 hw->mac.max_rx_queues = MAX_RX_QUEUES;
2197 err = hw->mac.ops.reset_hw(hw);
2198 if (err) {
2199 dev_info(&pdev->dev,
2200 "PF still in reset state, assigning new address\n");
2201 random_ether_addr(hw->mac.addr);
2202 } else {
2203 err = hw->mac.ops.init_hw(hw);
2204 if (err) {
2205 printk(KERN_ERR "init_shared_code failed: %d\n", err);
2206 goto out;
2207 }
2208 }
2209
2210 /* Enable dynamic interrupt throttling rates */
2211 adapter->eitr_param = 20000;
2212 adapter->itr_setting = 1;
2213
2214 /* set defaults for eitr in MegaBytes */
2215 adapter->eitr_low = 10;
2216 adapter->eitr_high = 20;
2217
2218 /* set default ring sizes */
2219 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2220 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2221
2222 /* enable rx csum by default */
2223 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2224
2225 set_bit(__IXGBEVF_DOWN, &adapter->state);
2226
2227out:
2228 return err;
2229}
2230
/*
 * ixgbevf_init_last_counter_stats - snapshot hardware counter baselines
 * @adapter: board private structure
 *
 * Records the current hardware counter readings as both the "last"
 * values (used for rollover detection in ixgbevf_update_stats()) and
 * the "base" values (subtracted when reporting deltas to the stack).
 * The 36-bit byte counters are assembled from LSB/MSB register halves.
 */
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	/* the base values anchor the deltas reported to the OS */
	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
2251
/*
 * UPDATE_VF_COUNTER_32bit - fold a 32-bit HW counter into a 64-bit total
 * @reg: register offset to read (via IXGBE_READ_REG on local 'hw')
 * @last_counter: previous raw register reading (updated in place)
 * @counter: 64-bit software accumulator (updated in place)
 *
 * When the current reading is below the previous one the register has
 * wrapped, so 2^32 is added to the accumulator; the low 32 bits always
 * mirror the latest reading.  Wrapped in do { } while (0) so the macro
 * expands to a single statement and is safe in unbraced if/else bodies.
 */
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	do {							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	} while (0)
2261
/*
 * UPDATE_VF_COUNTER_36bit - fold a 36-bit HW counter into a 64-bit total
 * @reg_lsb: register offset of the low 32 bits
 * @reg_msb: register offset of the high 4 bits
 * @last_counter: previous raw 36-bit reading (updated in place)
 * @counter: 64-bit software accumulator (updated in place)
 *
 * The byte counters are 36 bits wide and split across two registers.
 * A reading below the previous one means the 36-bit value wrapped, so
 * 2^36 is added to the accumulator; the low 36 bits always mirror the
 * latest reading.  Wrapped in do { } while (0) so the macro expands to
 * a single statement and is safe in unbraced if/else bodies.
 */
#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	do {								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	} while (0)
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 *
 * Folds the current hardware counter readings into the 64-bit software
 * accumulators via the UPDATE_VF_COUNTER_* macros (which handle register
 * rollover), then refreshes the fields reported to the network stack.
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	/* Fill out the OS statistics structure */
	/* multicast is reported as a delta against the probe-time base */
	adapter->net_stats.multicast = adapter->stats.vfmprc -
		adapter->stats.base_vfmprc;
}
2299
2300/**
2301 * ixgbevf_watchdog - Timer Call-back
2302 * @data: pointer to adapter cast into an unsigned long
2303 **/
2304static void ixgbevf_watchdog(unsigned long data)
2305{
2306 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2307 struct ixgbe_hw *hw = &adapter->hw;
2308 u64 eics = 0;
2309 int i;
2310
2311 /*
2312 * Do the watchdog outside of interrupt context due to the lovely
2313 * delays that some of the newer hardware requires
2314 */
2315
2316 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2317 goto watchdog_short_circuit;
2318
2319 /* get one bit for every active tx/rx interrupt vector */
2320 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2321 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2322 if (qv->rxr_count || qv->txr_count)
2323 eics |= (1 << i);
2324 }
2325
2326 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
2327
2328watchdog_short_circuit:
2329 schedule_work(&adapter->watchdog_task);
2330}
2331
2332/**
2333 * ixgbevf_tx_timeout - Respond to a Tx Hang
2334 * @netdev: network interface device structure
2335 **/
2336static void ixgbevf_tx_timeout(struct net_device *netdev)
2337{
2338 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2339
2340 /* Do the reset outside of interrupt context */
2341 schedule_work(&adapter->reset_task);
2342}
2343
2344static void ixgbevf_reset_task(struct work_struct *work)
2345{
2346 struct ixgbevf_adapter *adapter;
2347 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2348
2349 /* If we're already down or resetting, just bail */
2350 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2351 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2352 return;
2353
2354 adapter->tx_timeout_count++;
2355
2356 ixgbevf_reinit_locked(adapter);
2357}
2358
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 *
 * Polls link state (the VF has no link-status-change interrupt), keeps
 * netif carrier/queue state in sync with it, schedules a reset if the
 * link query fails (PF reset in progress), updates statistics and
 * re-arms the watchdog timer.
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		if ((hw->mac.ops.check_link(hw, &link_speed,
					    &link_up, false)) != 0) {
			/* check_link failing is treated as the PF having
			 * reset us: stop traffic and schedule a reset */
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			/* link just came up: announce it and open queues */
			hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
			       ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
				"10 Gbps" : "1 Gbps"));
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

pf_has_reset:
	ixgbevf_update_stats(adapter);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
2432
2433/**
2434 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2435 * @adapter: board private structure
2436 * @tx_ring: Tx descriptor ring for a specific queue
2437 *
2438 * Free all transmit software resources
2439 **/
2440void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2441 struct ixgbevf_ring *tx_ring)
2442{
2443 struct pci_dev *pdev = adapter->pdev;
2444
2445 ixgbevf_clean_tx_ring(adapter, tx_ring);
2446
2447 vfree(tx_ring->tx_buffer_info);
2448 tx_ring->tx_buffer_info = NULL;
2449
2450 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2451
2452 tx_ring->desc = NULL;
2453}
2454
2455/**
2456 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2457 * @adapter: board private structure
2458 *
2459 * Free all transmit software resources
2460 **/
2461static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2462{
2463 int i;
2464
2465 for (i = 0; i < adapter->num_tx_queues; i++)
2466 if (adapter->tx_ring[i].desc)
2467 ixgbevf_free_tx_resources(adapter,
2468 &adapter->tx_ring[i]);
2469
2470}
2471
2472/**
2473 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2474 * @adapter: board private structure
2475 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2476 *
2477 * Return 0 on success, negative on failure
2478 **/
2479int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2480 struct ixgbevf_ring *tx_ring)
2481{
2482 struct pci_dev *pdev = adapter->pdev;
2483 int size;
2484
2485 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2486 tx_ring->tx_buffer_info = vmalloc(size);
2487 if (!tx_ring->tx_buffer_info)
2488 goto err;
2489 memset(tx_ring->tx_buffer_info, 0, size);
2490
2491 /* round up to nearest 4K */
2492 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2493 tx_ring->size = ALIGN(tx_ring->size, 4096);
2494
2495 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2496 &tx_ring->dma);
2497 if (!tx_ring->desc)
2498 goto err;
2499
2500 tx_ring->next_to_use = 0;
2501 tx_ring->next_to_clean = 0;
2502 tx_ring->work_limit = tx_ring->count;
2503 return 0;
2504
2505err:
2506 vfree(tx_ring->tx_buffer_info);
2507 tx_ring->tx_buffer_info = NULL;
2508 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2509 "descriptor ring\n");
2510 return -ENOMEM;
2511}
2512
2513/**
2514 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2515 * @adapter: board private structure
2516 *
2517 * If this function returns with an error, then it's possible one or
2518 * more of the rings is populated (while the rest are not). It is the
2519 * callers duty to clean those orphaned rings.
2520 *
2521 * Return 0 on success, negative on failure
2522 **/
2523static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2524{
2525 int i, err = 0;
2526
2527 for (i = 0; i < adapter->num_tx_queues; i++) {
2528 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2529 if (!err)
2530 continue;
2531 hw_dbg(&adapter->hw,
2532 "Allocation for Tx Queue %u failed\n", i);
2533 break;
2534 }
2535
2536 return err;
2537}
2538
2539/**
2540 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2541 * @adapter: board private structure
2542 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2543 *
2544 * Returns 0 on success, negative on failure
2545 **/
2546int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2547 struct ixgbevf_ring *rx_ring)
2548{
2549 struct pci_dev *pdev = adapter->pdev;
2550 int size;
2551
2552 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2553 rx_ring->rx_buffer_info = vmalloc(size);
2554 if (!rx_ring->rx_buffer_info) {
2555 hw_dbg(&adapter->hw,
2556 "Unable to vmalloc buffer memory for "
2557 "the receive descriptor ring\n");
2558 goto alloc_failed;
2559 }
2560 memset(rx_ring->rx_buffer_info, 0, size);
2561
2562 /* Round up to nearest 4K */
2563 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2564 rx_ring->size = ALIGN(rx_ring->size, 4096);
2565
2566 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2567 &rx_ring->dma);
2568
2569 if (!rx_ring->desc) {
2570 hw_dbg(&adapter->hw,
2571 "Unable to allocate memory for "
2572 "the receive descriptor ring\n");
2573 vfree(rx_ring->rx_buffer_info);
2574 rx_ring->rx_buffer_info = NULL;
2575 goto alloc_failed;
2576 }
2577
2578 rx_ring->next_to_clean = 0;
2579 rx_ring->next_to_use = 0;
2580
2581 return 0;
2582alloc_failed:
2583 return -ENOMEM;
2584}
2585
2586/**
2587 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2588 * @adapter: board private structure
2589 *
2590 * If this function returns with an error, then it's possible one or
2591 * more of the rings is populated (while the rest are not). It is the
2592 * callers duty to clean those orphaned rings.
2593 *
2594 * Return 0 on success, negative on failure
2595 **/
2596static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2597{
2598 int i, err = 0;
2599
2600 for (i = 0; i < adapter->num_rx_queues; i++) {
2601 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2602 if (!err)
2603 continue;
2604 hw_dbg(&adapter->hw,
2605 "Allocation for Rx Queue %u failed\n", i);
2606 break;
2607 }
2608 return err;
2609}
2610
2611/**
2612 * ixgbevf_free_rx_resources - Free Rx Resources
2613 * @adapter: board private structure
2614 * @rx_ring: ring to clean the resources from
2615 *
2616 * Free all receive software resources
2617 **/
2618void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2619 struct ixgbevf_ring *rx_ring)
2620{
2621 struct pci_dev *pdev = adapter->pdev;
2622
2623 ixgbevf_clean_rx_ring(adapter, rx_ring);
2624
2625 vfree(rx_ring->rx_buffer_info);
2626 rx_ring->rx_buffer_info = NULL;
2627
2628 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2629
2630 rx_ring->desc = NULL;
2631}
2632
2633/**
2634 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2635 * @adapter: board private structure
2636 *
2637 * Free all receive software resources
2638 **/
2639static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2640{
2641 int i;
2642
2643 for (i = 0; i < adapter->num_rx_queues; i++)
2644 if (adapter->rx_ring[i].desc)
2645 ixgbevf_free_rx_resources(adapter,
2646 &adapter->rx_ring[i]);
2647}
2648
2649/**
2650 * ixgbevf_open - Called when a network interface is made active
2651 * @netdev: network interface device structure
2652 *
2653 * Returns 0 on success, negative value on failure
2654 *
2655 * The open entry point is called when a network interface is made
2656 * active by the system (IFF_UP). At this point all resources needed
2657 * for transmit and receive operations are allocated, the interrupt
2658 * handler is registered with the OS, the watchdog timer is started,
2659 * and the stack is notified that the interface is ready.
2660 **/
2661static int ixgbevf_open(struct net_device *netdev)
2662{
2663 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2664 struct ixgbe_hw *hw = &adapter->hw;
2665 int err;
2666
2667 /* disallow open during test */
2668 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2669 return -EBUSY;
2670
2671 if (hw->adapter_stopped) {
2672 ixgbevf_reset(adapter);
2673 /* if adapter is still stopped then PF isn't up and
2674 * the vf can't start. */
2675 if (hw->adapter_stopped) {
2676 err = IXGBE_ERR_MBX;
2677 printk(KERN_ERR "Unable to start - perhaps the PF"
2678 "Driver isn't up yet\n");
2679 goto err_setup_reset;
2680 }
2681 }
2682
2683 /* allocate transmit descriptors */
2684 err = ixgbevf_setup_all_tx_resources(adapter);
2685 if (err)
2686 goto err_setup_tx;
2687
2688 /* allocate receive descriptors */
2689 err = ixgbevf_setup_all_rx_resources(adapter);
2690 if (err)
2691 goto err_setup_rx;
2692
2693 ixgbevf_configure(adapter);
2694
2695 /*
2696 * Map the Tx/Rx rings to the vectors we were allotted.
2697 * if request_irq will be called in this function map_rings
2698 * must be called *before* up_complete
2699 */
2700 ixgbevf_map_rings_to_vectors(adapter);
2701
2702 err = ixgbevf_up_complete(adapter);
2703 if (err)
2704 goto err_up;
2705
2706 /* clear any pending interrupts, may auto mask */
2707 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2708 err = ixgbevf_request_irq(adapter);
2709 if (err)
2710 goto err_req_irq;
2711
2712 ixgbevf_irq_enable(adapter, true, true);
2713
2714 return 0;
2715
2716err_req_irq:
2717 ixgbevf_down(adapter);
2718err_up:
2719 ixgbevf_free_irq(adapter);
2720err_setup_rx:
2721 ixgbevf_free_all_rx_resources(adapter);
2722err_setup_tx:
2723 ixgbevf_free_all_tx_resources(adapter);
2724 ixgbevf_reset(adapter);
2725
2726err_setup_reset:
2727
2728 return err;
2729}
2730
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* quiesce hardware and release the interrupt before freeing */
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	/* now it is safe to tear down the rings */
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
2754
/*
 * ixgbevf_tso - queue a TSO context descriptor if the skb is GSO
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is written to
 * @skb: packet being transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* accumulated by the caller
 * @hdr_len: out parameter; incremented by the total header length
 *
 * Returns true if a context descriptor was queued, false if the skb is
 * not GSO, or a negative errno from pskb_expand_head().  Callers must
 * test for < 0 before treating the result as a boolean.
 */
static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
		       struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		/* the headers are modified below, so unshare them first */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			/* zero fields the hardware recomputes per segment
			 * and seed the TCP pseudo-header checksum */
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		/* IP header length = transport offset - network offset */
		vlan_macip_lens |=
			(skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
			(skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		/* advance the ring index, wrapping at the end */
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
2842
/*
 * ixgbevf_tx_csum - queue a checksum-offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is written to
 * @skb: packet being transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* accumulated by the caller
 *
 * Returns true if a context descriptor was queued (partial checksum
 * requested or a VLAN tag must be inserted), false otherwise.
 */
static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	/* a context descriptor is needed for checksum offload or VLAN */
	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			/* IP header length for the hardware parser */
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				/* NOTE(review): skb->protocol is big-endian
				 * (__be16); %x prints the raw value here */
				if (unlikely(net_ratelimit())) {
					printk(KERN_WARNING
					       "partial checksum but "
					       "proto=%x!\n",
					       skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		/* advance the ring index, wrapping at the end */
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
2916
2917static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2918 struct ixgbevf_ring *tx_ring,
2919 struct sk_buff *skb, u32 tx_flags,
2920 unsigned int first)
2921{
2922 struct pci_dev *pdev = adapter->pdev;
2923 struct ixgbevf_tx_buffer *tx_buffer_info;
2924 unsigned int len;
2925 unsigned int total = skb->len;
2926 unsigned int offset = 0, size, count = 0, i;
2927 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2928 unsigned int f;
2929
2930 i = tx_ring->next_to_use;
2931
2932 len = min(skb_headlen(skb), total);
2933 while (len) {
2934 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2935 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2936
2937 tx_buffer_info->length = size;
2938 tx_buffer_info->mapped_as_page = false;
2939 tx_buffer_info->dma = pci_map_single(adapter->pdev,
2940 skb->data + offset,
2941 size, PCI_DMA_TODEVICE);
2942 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2943 goto dma_error;
2944 tx_buffer_info->time_stamp = jiffies;
2945 tx_buffer_info->next_to_watch = i;
2946
2947 len -= size;
2948 total -= size;
2949 offset += size;
2950 count++;
2951 i++;
2952 if (i == tx_ring->count)
2953 i = 0;
2954 }
2955
2956 for (f = 0; f < nr_frags; f++) {
2957 struct skb_frag_struct *frag;
2958
2959 frag = &skb_shinfo(skb)->frags[f];
2960 len = min((unsigned int)frag->size, total);
2961 offset = frag->page_offset;
2962
2963 while (len) {
2964 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2965 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2966
2967 tx_buffer_info->length = size;
2968 tx_buffer_info->dma = pci_map_page(adapter->pdev,
2969 frag->page,
2970 offset,
2971 size,
2972 PCI_DMA_TODEVICE);
2973 tx_buffer_info->mapped_as_page = true;
2974 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2975 goto dma_error;
2976 tx_buffer_info->time_stamp = jiffies;
2977 tx_buffer_info->next_to_watch = i;
2978
2979 len -= size;
2980 total -= size;
2981 offset += size;
2982 count++;
2983 i++;
2984 if (i == tx_ring->count)
2985 i = 0;
2986 }
2987 if (total == 0)
2988 break;
2989 }
2990
2991 if (i == 0)
2992 i = tx_ring->count - 1;
2993 else
2994 i = i - 1;
2995 tx_ring->tx_buffer_info[i].skb = skb;
2996 tx_ring->tx_buffer_info[first].next_to_watch = i;
2997
2998 return count;
2999
3000dma_error:
3001 dev_err(&pdev->dev, "TX DMA map failed\n");
3002
3003 /* clear timestamp and dma mappings for failed tx_buffer_info map */
3004 tx_buffer_info->dma = 0;
3005 tx_buffer_info->time_stamp = 0;
3006 tx_buffer_info->next_to_watch = 0;
3007 count--;
3008
3009 /* clear timestamp and dma mappings for remaining portion of packet */
3010 while (count >= 0) {
3011 count--;
3012 i--;
3013 if (i < 0)
3014 i += tx_ring->count;
3015 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3016 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
3017 }
3018
3019 return count;
3020}
3021
/*
 * ixgbevf_tx_queue - write the advanced data descriptors for a packet
 * @adapter: board private structure
 * @tx_ring: ring to place the descriptors on
 * @tx_flags: IXGBE_TX_FLAGS_* describing offloads for this packet
 * @count: number of tx_buffer_info entries mapped by ixgbevf_tx_map()
 * @paylen: total packet length (skb->len)
 * @hdr_len: header length excluded from the DMA payload for TSO
 *
 * Builds one descriptor per mapped buffer, marks the last one EOP/RS,
 * then bumps the ring tail register to hand the packet to hardware.
 *
 * NOTE(review): if count is 0 the loop never runs and tx_desc is still
 * NULL when dereferenced below - callers must not pass count == 0;
 * confirm against the ixgbevf_tx_map() failure path.
 */
static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	/* command bits applied only to the final (EOP) descriptor */
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	/* hardware wants the payload length without the TSO headers */
	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	/* set EOP/RS on the last descriptor of the packet */
	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
3084
3085static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
3086 struct ixgbevf_ring *tx_ring, int size)
3087{
3088 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3089
3090 netif_stop_subqueue(netdev, tx_ring->queue_index);
3091 /* Herbert's original patch had:
3092 * smp_mb__after_netif_stop_queue();
3093 * but since that doesn't exist yet, just open code it. */
3094 smp_mb();
3095
3096 /* We need to check again in a case another CPU has just
3097 * made room available. */
3098 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3099 return -EBUSY;
3100
3101 /* A reprieve! - use start_queue because it doesn't call schedule */
3102 netif_start_subqueue(netdev, tx_ring->queue_index);
3103 ++adapter->restart_queue;
3104 return 0;
3105}
3106
/*
 * Stop the transmit subqueue if fewer than @size descriptors are free.
 * Returns 0 when there is room (the common fast path) or the result of
 * the slow-path re-check.
 */
static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	/* fast path: plenty of descriptors free */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}
3114
/*
 * ixgbevf_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: packet handed down by the stack
 * @netdev: network interface device structure
 *
 * Accumulates offload flags, reserves ring space, queues TSO/checksum
 * context descriptors as needed, maps the buffers and hands the packet
 * to hardware.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY when the ring
 * is full (the stack will requeue the skb).
 */
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;	/* r_idx: only ring 0 is used here */
	int count = 0;

	unsigned int f;

	tx_ring = &adapter->tx_ring[r_idx];

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	/* worst-case descriptor count for the head and each fragment */
	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		/* pskb_expand_head() failed; drop the packet */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);

	netdev->trans_start = jiffies;

	/* stop early if the next packet could not possibly fit */
	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
3176
3177/**
3178 * ixgbevf_get_stats - Get System Network Statistics
3179 * @netdev: network interface device structure
3180 *
3181 * Returns the address of the device statistics structure.
3182 * The statistics are actually updated from the timer callback.
3183 **/
3184static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
3185{
3186 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3187
3188 /* only return the current stats */
3189 return &adapter->net_stats;
3190}
3191
3192/**
3193 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3194 * @netdev: network interface device structure
3195 * @p: pointer to an address structure
3196 *
3197 * Returns 0 on success, negative on failure
3198 **/
3199static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3200{
3201 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3202 struct ixgbe_hw *hw = &adapter->hw;
3203 struct sockaddr *addr = p;
3204
3205 if (!is_valid_ether_addr(addr->sa_data))
3206 return -EADDRNOTAVAIL;
3207
3208 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3209 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3210
3211 if (hw->mac.ops.set_rar)
3212 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3213
3214 return 0;
3215}
3216
3217/**
3218 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3219 * @netdev: network interface device structure
3220 * @new_mtu: new value for maximum frame size
3221 *
3222 * Returns 0 on success, negative on failure
3223 **/
3224static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3225{
3226 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3227 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3228
3229 /* MTU < 68 is an error and causes problems on some kernels */
3230 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
3231 return -EINVAL;
3232
3233 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3234 netdev->mtu, new_mtu);
3235 /* must set new MTU before calling down or up */
3236 netdev->mtu = new_mtu;
3237
3238 if (netif_running(netdev))
3239 ixgbevf_reinit_locked(adapter);
3240
3241 return 0;
3242}
3243
/* PCI shutdown hook: quiesce the device, release ring resources when the
 * interface is up, and disable the PCI function.
 */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *priv = netdev_priv(dev);

	/* detach first so the stack stops submitting work */
	netif_device_detach(dev);

	if (netif_running(dev)) {
		ixgbevf_down(priv);
		ixgbevf_free_irq(priv);
		ixgbevf_free_all_tx_resources(priv);
		ixgbevf_free_all_rx_resources(priv);
	}

#ifdef CONFIG_PM
	pci_save_state(pdev);
#endif

	pci_disable_device(pdev);
}
3264
3265static const struct net_device_ops ixgbe_netdev_ops = {
3266 .ndo_open = &ixgbevf_open,
3267 .ndo_stop = &ixgbevf_close,
3268 .ndo_start_xmit = &ixgbevf_xmit_frame,
3269 .ndo_get_stats = &ixgbevf_get_stats,
3270 .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
3271 .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
3272 .ndo_validate_addr = eth_validate_addr,
3273 .ndo_set_mac_address = &ixgbevf_set_mac,
3274 .ndo_change_mtu = &ixgbevf_change_mtu,
3275 .ndo_tx_timeout = &ixgbevf_tx_timeout,
3276 .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
3277 .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
3278 .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
3279};
3280
3281static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3282{
3283 struct ixgbevf_adapter *adapter;
3284 adapter = netdev_priv(dev);
3285 dev->netdev_ops = &ixgbe_netdev_ops;
3286 ixgbevf_set_ethtool_ops(dev);
3287 dev->watchdog_timeo = 5 * HZ;
3288}
3289
3290/**
3291 * ixgbevf_probe - Device Initialization Routine
3292 * @pdev: PCI device information struct
3293 * @ent: entry in ixgbevf_pci_tbl
3294 *
3295 * Returns 0 on success, negative on failure
3296 *
3297 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3298 * The OS initialization, configuring of the adapter private structure,
3299 * and a hardware reset occur.
3300 **/
3301static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3302 const struct pci_device_id *ent)
3303{
3304 struct net_device *netdev;
3305 struct ixgbevf_adapter *adapter = NULL;
3306 struct ixgbe_hw *hw = NULL;
3307 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3308 static int cards_found;
3309 int err, pci_using_dac;
3310
3311 err = pci_enable_device(pdev);
3312 if (err)
3313 return err;
3314
3315 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3316 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3317 pci_using_dac = 1;
3318 } else {
3319 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3320 if (err) {
3321 err = pci_set_consistent_dma_mask(pdev,
3322 DMA_BIT_MASK(32));
3323 if (err) {
3324 dev_err(&pdev->dev, "No usable DMA "
3325 "configuration, aborting\n");
3326 goto err_dma;
3327 }
3328 }
3329 pci_using_dac = 0;
3330 }
3331
3332 err = pci_request_regions(pdev, ixgbevf_driver_name);
3333 if (err) {
3334 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3335 goto err_pci_reg;
3336 }
3337
3338 pci_set_master(pdev);
3339
3340#ifdef HAVE_TX_MQ
3341 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3342 MAX_TX_QUEUES);
3343#else
3344 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
3345#endif
3346 if (!netdev) {
3347 err = -ENOMEM;
3348 goto err_alloc_etherdev;
3349 }
3350
3351 SET_NETDEV_DEV(netdev, &pdev->dev);
3352
3353 pci_set_drvdata(pdev, netdev);
3354 adapter = netdev_priv(netdev);
3355
3356 adapter->netdev = netdev;
3357 adapter->pdev = pdev;
3358 hw = &adapter->hw;
3359 hw->back = adapter;
3360 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3361
3362 /*
3363 * call save state here in standalone driver because it relies on
3364 * adapter struct to exist, and needs to call netdev_priv
3365 */
3366 pci_save_state(pdev);
3367
3368 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3369 pci_resource_len(pdev, 0));
3370 if (!hw->hw_addr) {
3371 err = -EIO;
3372 goto err_ioremap;
3373 }
3374
3375 ixgbevf_assign_netdev_ops(netdev);
3376
3377 adapter->bd_number = cards_found;
3378
3379 /* Setup hw api */
3380 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3381 hw->mac.type = ii->mac;
3382
3383 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3384 sizeof(struct ixgbe_mac_operations));
3385
3386 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
3387 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3388 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
3389
3390 /* setup the private structure */
3391 err = ixgbevf_sw_init(adapter);
3392
3393 ixgbevf_init_last_counter_stats(adapter);
3394
3395#ifdef MAX_SKB_FRAGS
3396 netdev->features = NETIF_F_SG |
3397 NETIF_F_IP_CSUM |
3398 NETIF_F_HW_VLAN_TX |
3399 NETIF_F_HW_VLAN_RX |
3400 NETIF_F_HW_VLAN_FILTER;
3401
3402 netdev->features |= NETIF_F_IPV6_CSUM;
3403 netdev->features |= NETIF_F_TSO;
3404 netdev->features |= NETIF_F_TSO6;
3405 netdev->vlan_features |= NETIF_F_TSO;
3406 netdev->vlan_features |= NETIF_F_TSO6;
3407 netdev->vlan_features |= NETIF_F_IP_CSUM;
3408 netdev->vlan_features |= NETIF_F_SG;
3409
3410 if (pci_using_dac)
3411 netdev->features |= NETIF_F_HIGHDMA;
3412
3413#endif /* MAX_SKB_FRAGS */
3414
3415 /* The HW MAC address was set and/or determined in sw_init */
3416 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
3417 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3418
3419 if (!is_valid_ether_addr(netdev->dev_addr)) {
3420 printk(KERN_ERR "invalid MAC address\n");
3421 err = -EIO;
3422 goto err_sw_init;
3423 }
3424
3425 init_timer(&adapter->watchdog_timer);
3426 adapter->watchdog_timer.function = &ixgbevf_watchdog;
3427 adapter->watchdog_timer.data = (unsigned long)adapter;
3428
3429 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3430 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3431
3432 err = ixgbevf_init_interrupt_scheme(adapter);
3433 if (err)
3434 goto err_sw_init;
3435
3436 /* pick up the PCI bus settings for reporting later */
3437 if (hw->mac.ops.get_bus_info)
3438 hw->mac.ops.get_bus_info(hw);
3439
3440
3441 netif_carrier_off(netdev);
3442 netif_tx_stop_all_queues(netdev);
3443
3444 strcpy(netdev->name, "eth%d");
3445
3446 err = register_netdev(netdev);
3447 if (err)
3448 goto err_register;
3449
3450 adapter->netdev_registered = true;
3451
3452 /* print the MAC address */
3453 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
3454 netdev->dev_addr[0],
3455 netdev->dev_addr[1],
3456 netdev->dev_addr[2],
3457 netdev->dev_addr[3],
3458 netdev->dev_addr[4],
3459 netdev->dev_addr[5]);
3460
3461 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3462
3463 hw_dbg(hw, "LRO is disabled \n");
3464
3465 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3466 cards_found++;
3467 return 0;
3468
3469err_register:
3470err_sw_init:
3471 ixgbevf_reset_interrupt_capability(adapter);
3472 iounmap(hw->hw_addr);
3473err_ioremap:
3474 free_netdev(netdev);
3475err_alloc_etherdev:
3476 pci_release_regions(pdev);
3477err_pci_reg:
3478err_dma:
3479 pci_disable_device(pdev);
3480 return err;
3481}
3482
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* mark the adapter down first so timers/workers see the flag */
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->watchdog_task);

	flush_scheduled_work();

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	ixgbevf_reset_interrupt_capability(adapter);

	/* release MMIO and PCI resources only after the netdev is gone */
	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* frees the adapter too (it lives in the netdev's private area) */
	free_netdev(netdev);

	pci_disable_device(pdev);
}
3524
/* PCI driver glue: binds the probe/remove/shutdown entry points to the
 * device IDs listed in ixgbevf_pci_tbl. */
static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
};
3532
3533/**
3534 * ixgbe_init_module - Driver Registration Routine
3535 *
3536 * ixgbe_init_module is the first routine called when the driver is
3537 * loaded. All it does is register with the PCI subsystem.
3538 **/
3539static int __init ixgbevf_init_module(void)
3540{
3541 int ret;
3542 printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
3543 ixgbevf_driver_version);
3544
3545 printk(KERN_INFO "%s\n", ixgbevf_copyright);
3546
3547 ret = pci_register_driver(&ixgbevf_driver);
3548 return ret;
3549}
3550
3551module_init(ixgbevf_init_module);
3552
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory; it unregisters the PCI driver.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}
3563
#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *priv = hw->back;

	return priv->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 00000000000..b8143501e6f
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "mbx.h"
29
30/**
31 * ixgbevf_poll_for_msg - Wait for message notification
32 * @hw: pointer to the HW structure
33 *
34 * returns 0 if it successfully received a message notification
35 **/
36static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
37{
38 struct ixgbe_mbx_info *mbx = &hw->mbx;
39 int countdown = mbx->timeout;
40
41 while (countdown && mbx->ops.check_for_msg(hw)) {
42 countdown--;
43 udelay(mbx->udelay);
44 }
45
46 /* if we failed, all future posted messages fail until reset */
47 if (!countdown)
48 mbx->timeout = 0;
49
50 return countdown ? 0 : IXGBE_ERR_MBX;
51}
52
53/**
54 * ixgbevf_poll_for_ack - Wait for message acknowledgement
55 * @hw: pointer to the HW structure
56 *
57 * returns 0 if it successfully received a message acknowledgement
58 **/
59static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
60{
61 struct ixgbe_mbx_info *mbx = &hw->mbx;
62 int countdown = mbx->timeout;
63
64 while (countdown && mbx->ops.check_for_ack(hw)) {
65 countdown--;
66 udelay(mbx->udelay);
67 }
68
69 /* if we failed, all future posted messages fail until reset */
70 if (!countdown)
71 mbx->timeout = 0;
72
73 return countdown ? 0 : IXGBE_ERR_MBX;
74}
75
76/**
77 * ixgbevf_read_posted_mbx - Wait for message notification and receive message
78 * @hw: pointer to the HW structure
79 * @msg: The message buffer
80 * @size: Length of buffer
81 *
82 * returns 0 if it successfully received a message notification and
83 * copied it into the receive buffer.
84 **/
85static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
86{
87 struct ixgbe_mbx_info *mbx = &hw->mbx;
88 s32 ret_val = IXGBE_ERR_MBX;
89
90 ret_val = ixgbevf_poll_for_msg(hw);
91
92 /* if ack received read message, otherwise we timed out */
93 if (!ret_val)
94 ret_val = mbx->ops.read(hw, msg, size);
95
96 return ret_val;
97}
98
99/**
100 * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
101 * @hw: pointer to the HW structure
102 * @msg: The message buffer
103 * @size: Length of buffer
104 *
105 * returns 0 if it successfully copied message into the buffer and
106 * received an ack to that message within delay * timeout period
107 **/
108static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
109{
110 struct ixgbe_mbx_info *mbx = &hw->mbx;
111 s32 ret_val;
112
113 /* send msg */
114 ret_val = mbx->ops.write(hw, msg, size);
115
116 /* if msg sent wait until we receive an ack */
117 if (!ret_val)
118 ret_val = ixgbevf_poll_for_ack(hw);
119
120 return ret_val;
121}
122
/**
 * ixgbevf_read_v2p_mailbox - read v2p mailbox
 * @hw: pointer to the HW structure
 *
 * This function is used to read the v2p mailbox without losing the read to
 * clear status bits.  The hardware clears the R2C bits on read, so this
 * helper caches them in hw->mbx.v2p_mailbox until a caller consumes them.
 **/
static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
{
	u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);

	/* merge in R2C bits remembered from earlier reads */
	v2p_mailbox |= hw->mbx.v2p_mailbox;
	/* remember the R2C bits seen now; hardware has already cleared them */
	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;

	return v2p_mailbox;
}
139
140/**
141 * ixgbevf_check_for_bit_vf - Determine if a status bit was set
142 * @hw: pointer to the HW structure
143 * @mask: bitmask for bits to be tested and cleared
144 *
145 * This function is used to check for the read to clear bits within
146 * the V2P mailbox.
147 **/
148static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
149{
150 u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
151 s32 ret_val = IXGBE_ERR_MBX;
152
153 if (v2p_mailbox & mask)
154 ret_val = 0;
155
156 hw->mbx.v2p_mailbox &= ~mask;
157
158 return ret_val;
159}
160
161/**
162 * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
163 * @hw: pointer to the HW structure
164 *
165 * returns 0 if the PF has set the Status bit or else ERR_MBX
166 **/
167static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
168{
169 s32 ret_val = IXGBE_ERR_MBX;
170
171 if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
172 ret_val = 0;
173 hw->mbx.stats.reqs++;
174 }
175
176 return ret_val;
177}
178
179/**
180 * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
181 * @hw: pointer to the HW structure
182 *
183 * returns 0 if the PF has set the ACK bit or else ERR_MBX
184 **/
185static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
186{
187 s32 ret_val = IXGBE_ERR_MBX;
188
189 if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
190 ret_val = 0;
191 hw->mbx.stats.acks++;
192 }
193
194 return ret_val;
195}
196
197/**
198 * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
199 * @hw: pointer to the HW structure
200 *
201 * returns true if the PF has set the reset done bit or else false
202 **/
203static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
204{
205 s32 ret_val = IXGBE_ERR_MBX;
206
207 if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
208 IXGBE_VFMAILBOX_RSTI))) {
209 ret_val = 0;
210 hw->mbx.stats.rsts++;
211 }
212
213 return ret_val;
214}
215
216/**
217 * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
218 * @hw: pointer to the HW structure
219 *
220 * return 0 if we obtained the mailbox lock
221 **/
222static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
223{
224 s32 ret_val = IXGBE_ERR_MBX;
225
226 /* Take ownership of the buffer */
227 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
228
229 /* reserve mailbox for vf use */
230 if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
231 ret_val = 0;
232
233 return ret_val;
234}
235
236/**
237 * ixgbevf_write_mbx_vf - Write a message to the mailbox
238 * @hw: pointer to the HW structure
239 * @msg: The message buffer
240 * @size: Length of buffer
241 *
242 * returns 0 if it successfully copied message into the buffer
243 **/
244static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
245{
246 s32 ret_val;
247 u16 i;
248
249
250 /* lock the mailbox to prevent pf/vf race condition */
251 ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
252 if (ret_val)
253 goto out_no_write;
254
255 /* flush msg and acks as we are overwriting the message buffer */
256 ixgbevf_check_for_msg_vf(hw);
257 ixgbevf_check_for_ack_vf(hw);
258
259 /* copy the caller specified message to the mailbox memory buffer */
260 for (i = 0; i < size; i++)
261 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
262
263 /* update stats */
264 hw->mbx.stats.msgs_tx++;
265
266 /* Drop VFU and interrupt the PF to tell it a message has been sent */
267 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
268
269out_no_write:
270 return ret_val;
271}
272
273/**
274 * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
275 * @hw: pointer to the HW structure
276 * @msg: The message buffer
277 * @size: Length of buffer
278 *
279 * returns 0 if it successfuly read message from buffer
280 **/
281static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
282{
283 s32 ret_val = 0;
284 u16 i;
285
286 /* lock the mailbox to prevent pf/vf race condition */
287 ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
288 if (ret_val)
289 goto out_no_read;
290
291 /* copy the message from the mailbox memory buffer */
292 for (i = 0; i < size; i++)
293 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
294
295 /* Acknowledge receipt and release mailbox, then we're done */
296 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
297
298 /* update stats */
299 hw->mbx.stats.msgs_rx++;
300
301out_no_read:
302 return ret_val;
303}
304
305/**
306 * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
307 * @hw: pointer to the HW structure
308 *
309 * Initializes the hw->mbx struct to correct values for vf mailbox
310 */
311s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
312{
313 struct ixgbe_mbx_info *mbx = &hw->mbx;
314
315 /* start mailbox as timed out and let the reset_hw call set the timeout
316 * value to begin communications */
317 mbx->timeout = 0;
318 mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
319
320 mbx->size = IXGBE_VFMAILBOX_SIZE;
321
322 mbx->stats.msgs_tx = 0;
323 mbx->stats.msgs_rx = 0;
324 mbx->stats.reqs = 0;
325 mbx->stats.acks = 0;
326 mbx->stats.rsts = 0;
327
328 return 0;
329}
330
/* VF-side mailbox function table; copied into hw->mbx.ops at probe time
 * so the rest of the driver can talk to the PF through these hooks. */
struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
	.init_params = ixgbevf_init_mbx_params_vf,
	.read = ixgbevf_read_mbx_vf,
	.write = ixgbevf_write_mbx_vf,
	.read_posted = ixgbevf_read_posted_mbx,
	.write_posted = ixgbevf_write_posted_mbx,
	.check_for_msg = ixgbevf_check_for_msg_vf,
	.check_for_ack = ixgbevf_check_for_ack_vf,
	.check_for_rst = ixgbevf_check_for_rst_vf,
};
341
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 00000000000..1b0e0bf4c0f
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_MBX_H_
29#define _IXGBE_MBX_H_
30
31#include "vf.h"
32
33#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
34#define IXGBE_ERR_MBX -100
35
36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200
38
39/* Define mailbox register bits */
40#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
41#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
42#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
43#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
44#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
45#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
46#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
47#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
48#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
49
50#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
51#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
52
53#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
54#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
55#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
56#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
57#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
58
59#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
60#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
61#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
62#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
63
64
65/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
66 * PF. The reverse is true if it is IXGBE_PF_*.
67 * Message ACK's are the value or'd with 0xF0000000
68 */
69#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
70 * this are the ACK */
71#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
72 * this are the NACK */
73#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
74 * clear to send requests */
75#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
77#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
78
79#define IXGBE_VF_RESET 0x01 /* VF requests reset */
80#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
81#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
82#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
83#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
84
85/* length of permanent address message returned from PF */
86#define IXGBE_VF_PERMADDR_MSG_LEN 4
87/* word in permanent address message with the current multicast type */
88#define IXGBE_VF_MC_TYPE_WORD 3
89
90#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
91
92#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
93#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
94
95/* forward declaration of the HW struct */
96struct ixgbe_hw;
97
98s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
99
100#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 00000000000..12f75960aec
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_REGS_H_
29#define _IXGBEVF_REGS_H_
30
31#define IXGBE_VFCTRL 0x00000
32#define IXGBE_VFSTATUS 0x00008
33#define IXGBE_VFLINKS 0x00010
34#define IXGBE_VFRTIMER 0x00048
35#define IXGBE_VFRXMEMWRAP 0x03190
36#define IXGBE_VTEICR 0x00100
37#define IXGBE_VTEICS 0x00104
38#define IXGBE_VTEIMS 0x00108
39#define IXGBE_VTEIMC 0x0010C
40#define IXGBE_VTEIAC 0x00110
41#define IXGBE_VTEIAM 0x00114
42#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
43#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
44#define IXGBE_VTIVAR_MISC 0x00140
45#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
46#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
47#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
48#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
49#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
50#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
51#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
52#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
53#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
54#define IXGBE_VFPSRTYPE 0x00300
55#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
56#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
57#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
58#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
59#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
60#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
61#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
62#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
63#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
64#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
65#define IXGBE_VFGPRC 0x0101C
66#define IXGBE_VFGPTC 0x0201C
67#define IXGBE_VFGORC_LSB 0x01020
68#define IXGBE_VFGORC_MSB 0x01024
69#define IXGBE_VFGOTC_LSB 0x02020
70#define IXGBE_VFGOTC_MSB 0x02024
71#define IXGBE_VFMPRC 0x01034
72
73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
74
75#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
76
77#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
78 writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
79
80#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
81 readl((a)->hw_addr + (reg) + ((offset) << 2)))
82
83#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
84
85#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 00000000000..4b5dec0ec14
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "vf.h"
29
30/**
31 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
32 * @hw: pointer to hardware structure
33 *
34 * Starts the hardware by filling the bus info structure and media type, clears
35 * all on chip counters, initializes receive address registers, multicast
36 * table, VLAN filter table, calls routine to set up link and flow control
37 * settings, and leaves transmit and receive units disabled and uninitialized
38 **/
39static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
40{
41 /* Clear adapter stopped flag */
42 hw->adapter_stopped = false;
43
44 return 0;
45}
46
47/**
48 * ixgbevf_init_hw_vf - virtual function hardware initialization
49 * @hw: pointer to hardware structure
50 *
51 * Initialize the hardware by resetting the hardware and then starting
52 * the hardware
53 **/
54static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
55{
56 s32 status = hw->mac.ops.start_hw(hw);
57
58 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
59
60 return status;
61}
62
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by reseting the transmit and receive units, masks and
 * clears all interrupts.
 *
 * Sequence: stop the adapter, assert VF reset, wait for the PF to finish
 * its side of the reset, then exchange an IXGBE_VF_RESET mailbox message
 * to retrieve the permanent MAC address and multicast filter type.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	/* reply words 1..2 carry the 6-byte permanent MAC address */
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* assert the VF software reset bit */
	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	/* ask the PF for our permanent address */
	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	/* give the PF time to respond before polling the mailbox */
	msleep(10);

	/* set our "perm_addr" based on info provided by PF */
	/* also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* the PF must ACK the reset message or the address is unusable */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
116
/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopped each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			/* only write back queues that were actually enabled */
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	/* make sure the RXDCTL disables have reached the hardware */
	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
168
169/**
170 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
171 * @hw: pointer to hardware structure
172 * @mc_addr: the multicast address
173 *
174 * Extracts the 12 bits, from a multicast address, to determine which
175 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
176 * incoming rx multicast addresses, to determine the bit-vector to check in
177 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
178 * by the MO field of the MCSTCTRL. The MO field is set during initialization
179 * to mc_filter_type.
180 **/
181static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
182{
183 u32 vector = 0;
184
185 switch (hw->mac.mc_filter_type) {
186 case 0: /* use bits [47:36] of the address */
187 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
188 break;
189 case 1: /* use bits [46:35] of the address */
190 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
191 break;
192 case 2: /* use bits [45:34] of the address */
193 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
194 break;
195 case 3: /* use bits [43:32] of the address */
196 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
197 break;
198 default: /* Invalid mc_filter_type */
199 break;
200 }
201
202 /* vector can only be 12-bits or boundary will be exceeded */
203 vector &= 0xFFF;
204 return vector;
205}
206
207/**
208 * ixgbevf_get_mac_addr_vf - Read device MAC address
209 * @hw: pointer to the HW structure
210 * @mac_addr: pointer to storage for retrieved MAC address
211 **/
212static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
213{
214 memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
215
216 return 0;
217}
218
219/**
220 * ixgbevf_set_rar_vf - set device MAC address
221 * @hw: pointer to hardware structure
222 * @index: Receive address register to write
223 * @addr: Address to put into receive address register
224 * @vmdq: Unused in this implementation
225 **/
226static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
227 u32 vmdq)
228{
229 struct ixgbe_mbx_info *mbx = &hw->mbx;
230 u32 msgbuf[3];
231 u8 *msg_addr = (u8 *)(&msgbuf[1]);
232 s32 ret_val;
233
234 memset(msgbuf, 0, sizeof(msgbuf));
235 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
236 memcpy(msg_addr, addr, 6);
237 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
238
239 if (!ret_val)
240 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
241
242 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
243
244 /* if nacked the address was rejected, use "perm_addr" */
245 if (!ret_val &&
246 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
247 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
248
249 return ret_val;
250}
251
252/**
253 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
254 * @hw: pointer to the HW structure
255 * @mc_addr_list: array of multicast addresses to program
256 * @mc_addr_count: number of multicast addresses to program
257 * @next: caller supplied function to return next address in list
258 *
259 * Updates the Multicast Table Array.
260 **/
261static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
262 u32 mc_addr_count,
263 ixgbe_mc_addr_itr next)
264{
265 struct ixgbe_mbx_info *mbx = &hw->mbx;
266 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
267 u16 *vector_list = (u16 *)&msgbuf[1];
268 u32 vector;
269 u32 cnt, i;
270 u32 vmdq;
271
272 /* Each entry in the list uses 1 16 bit word. We have 30
273 * 16 bit words available in our HW msg buffer (minus 1 for the
274 * msg type). That's 30 hash values if we pack 'em right. If
275 * there are more than 30 MC addresses to add then punt the
276 * extras for now and then add code to handle more than 30 later.
277 * It would be unusual for a server to request that many multi-cast
278 * addresses except for in large enterprise network environments.
279 */
280
281 cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
282 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
283 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
284
285 for (i = 0; i < cnt; i++) {
286 vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
287 vector_list[i] = vector;
288 }
289
290 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
291
292 return 0;
293}
294
295/**
296 * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
297 * @hw: pointer to the HW structure
298 * @vlan: 12 bit VLAN ID
299 * @vind: unused by VF drivers
300 * @vlan_on: if true then set bit, else clear bit
301 **/
302static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
303 bool vlan_on)
304{
305 struct ixgbe_mbx_info *mbx = &hw->mbx;
306 u32 msgbuf[2];
307
308 msgbuf[0] = IXGBE_VF_SET_VLAN;
309 msgbuf[1] = vlan;
310 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
311 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
312
313 return mbx->ops.write_posted(hw, msgbuf, 2);
314}
315
316/**
317 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
318 * @hw: pointer to hardware structure
319 * @speed: Unused in this implementation
320 * @autoneg: Unused in this implementation
321 * @autoneg_wait_to_complete: Unused in this implementation
322 *
323 * Do nothing and return success. VF drivers are not allowed to change
324 * global settings. Maintained for driver compatibility.
325 **/
326static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
327 ixgbe_link_speed speed, bool autoneg,
328 bool autoneg_wait_to_complete)
329{
330 return 0;
331}
332
333/**
334 * ixgbevf_check_mac_link_vf - Get link/speed status
335 * @hw: pointer to hardware structure
336 * @speed: pointer to link speed
337 * @link_up: true is link is up, false otherwise
338 * @autoneg_wait_to_complete: true when waiting for completion is needed
339 *
340 * Reads the links register to determine if link is up and the current speed
341 **/
342static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
343 ixgbe_link_speed *speed,
344 bool *link_up,
345 bool autoneg_wait_to_complete)
346{
347 u32 links_reg;
348
349 if (!(hw->mbx.ops.check_for_rst(hw))) {
350 *link_up = false;
351 *speed = 0;
352 return -1;
353 }
354
355 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
356
357 if (links_reg & IXGBE_LINKS_UP)
358 *link_up = true;
359 else
360 *link_up = false;
361
362 if (links_reg & IXGBE_LINKS_SPEED)
363 *speed = IXGBE_LINK_SPEED_10GB_FULL;
364 else
365 *speed = IXGBE_LINK_SPEED_1GB_FULL;
366
367 return 0;
368}
369
/* MAC operations implemented by the 82599 VF; unset hooks (clear_hw_cntrs,
 * get_media_type, init_rx_addrs, ...) stay NULL and must not be called. */
struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,	/* no-op for a VF */
	.check_link = ixgbevf_check_mac_link_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
};
382
/* Device description exported to the main ixgbevf driver probe path */
struct ixgbevf_info ixgbevf_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
387
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 00000000000..799600e9270
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef __IXGBE_VF_H__
29#define __IXGBE_VF_H__
30
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/if_ether.h>
35
36#include "defines.h"
37#include "regs.h"
38#include "mbx.h"
39
40struct ixgbe_hw;
41
/* iterator type for walking multicast address lists */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
                                  u32 *vmdq);
/* Function table for MAC-level operations; VF implementations live in vf.c.
 * Hooks a given MAC type does not support are left NULL. */
struct ixgbe_mac_operations {
	s32 (*init_hw)(struct ixgbe_hw *);
	s32 (*reset_hw)(struct ixgbe_hw *);
	s32 (*start_hw)(struct ixgbe_hw *);
	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
	s32 (*stop_adapter)(struct ixgbe_hw *);
	s32 (*get_bus_info)(struct ixgbe_hw *);

	/* Link */
	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
	                             bool *);

	/* RAR, Multicast, VLAN */
	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
	s32 (*init_rx_addrs)(struct ixgbe_hw *);
	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
	                           ixgbe_mc_addr_itr);
	s32 (*enable_mc)(struct ixgbe_hw *);
	s32 (*disable_mc)(struct ixgbe_hw *);
	s32 (*clear_vfta)(struct ixgbe_hw *);
	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
};
72
/* MAC silicon identifiers known to this VF driver */
enum ixgbe_mac_type {
	ixgbe_mac_unknown = 0,
	ixgbe_mac_82599_vf,	/* 82599 virtual function */
	ixgbe_num_macs
};
78
/* Per-device MAC state: addresses, filter configuration and queue limits */
struct ixgbe_mac_info {
	struct ixgbe_mac_operations ops;
	u8 addr[6];		/* currently active MAC address */
	u8 perm_addr[6];	/* permanent address reported by the PF */

	enum ixgbe_mac_type type;

	/* multicast hash slice selector, piggy-backed on the PF reset reply */
	s32 mc_filter_type;

	bool get_link_status;
	u32 max_tx_queues;
	u32 max_rx_queues;
	u32 max_msix_vectors;
};
93
/* Function table for the PF<->VF mailbox; the *_posted variants block
 * until the peer acknowledges (or the mailbox timeout expires). */
struct ixgbe_mbx_operations {
	s32 (*init_params)(struct ixgbe_hw *hw);
	s32 (*read)(struct ixgbe_hw *, u32 *, u16);
	s32 (*write)(struct ixgbe_hw *, u32 *, u16);
	s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
	s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
	s32 (*check_for_msg)(struct ixgbe_hw *);
	s32 (*check_for_ack)(struct ixgbe_hw *);
	s32 (*check_for_rst)(struct ixgbe_hw *);
};
104
/* Mailbox traffic counters, maintained by the mbx implementation */
struct ixgbe_mbx_stats {
	u32 msgs_tx;	/* messages sent to the PF */
	u32 msgs_rx;	/* messages received from the PF */

	u32 acks;	/* acknowledgements seen */
	u32 reqs;	/* requests seen */
	u32 rsts;	/* resets seen */
};
113
/* Mailbox state: ops table, counters and polling parameters */
struct ixgbe_mbx_info {
	struct ixgbe_mbx_operations ops;
	struct ixgbe_mbx_stats stats;
	u32 timeout;		/* poll iterations before giving up; 0 = disabled */
	u32 udelay;		/* microseconds between polls */
	u32 v2p_mailbox;	/* cached VF-to-PF mailbox register value */
	u16 size;		/* mailbox buffer size in 32-bit words */
};
122
/* Top-level hardware handle shared across the VF driver */
struct ixgbe_hw {
	void *back;		/* opaque pointer back to the adapter struct */

	u8 __iomem *hw_addr;	/* ioremap'd register base (see IXGBE_*_REG) */
	u8 *flash_address;
	unsigned long io_base;

	struct ixgbe_mac_info mac;
	struct ixgbe_mbx_info mbx;

	/* PCI identity, captured at probe time */
	u16 device_id;
	u16 subsystem_vendor_id;
	u16 subsystem_device_id;
	u16 vendor_id;

	u8 revision_id;
	bool adapter_stopped;	/* true = driver must not touch the hardware */
};
141
/* VF statistics. Hardware counters are not cleared on read, so the
 * driver keeps base_* (value at reset) and last_* (previous read) to
 * derive the accumulated vf* totals. */
struct ixgbevf_hw_stats {
	u64 base_vfgprc;
	u64 base_vfgptc;
	u64 base_vfgorc;
	u64 base_vfgotc;
	u64 base_vfmprc;

	u64 last_vfgprc;
	u64 last_vfgptc;
	u64 last_vfgorc;
	u64 last_vfgotc;
	u64 last_vfmprc;

	u64 vfgprc;	/* good packets received */
	u64 vfgptc;	/* good packets transmitted */
	u64 vfgorc;	/* good octets received */
	u64 vfgotc;	/* good octets transmitted */
	u64 vfmprc;	/* multicast packets received */
};
161
/* Static per-MAC-type description bound to a PCI device ID at probe */
struct ixgbevf_info {
	enum ixgbe_mac_type mac;
	struct ixgbe_mac_operations *mac_ops;
};
166
167#endif /* __IXGBE_VF_H__ */
168
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc357..26eed49d320 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2994,7 +2994,7 @@ jme_resume(struct pci_dev *pdev)
2994} 2994}
2995#endif 2995#endif
2996 2996
2997static struct pci_device_id jme_pci_tbl[] = { 2997static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
2998 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, 2998 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
2999 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, 2999 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3000 { } 3000 { }
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b117f7f8b19..b60efd4bd01 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1094,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
1094 return i; 1094 return i;
1095 }; 1095 };
1096 1096
1097 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", 1097 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1098 dev->name, dev->base_addr)); 1098 dev->name, dev->base_addr, dev->dev_addr,
1099 for (i = 0; i < 6; i++) 1099 dev->irq));
1100 DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
1101 DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
1102 DEB(DEB_INIT, printk(KERN_INFO 1100 DEB(DEB_INIT, printk(KERN_INFO
1103 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", 1101 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1104 dev->name, dma, (int)sizeof(struct i596_dma), 1102 dev->name, dma, (int)sizeof(struct i596_dma),
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f6..a8768672dc5 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
17/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */ 17/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
18/* 2003-12-26: Make sure Asante cards always work. */ 18/* 2003-12-26: Make sure Asante cards always work. */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/types.h> 24#include <linux/types.h>
@@ -34,31 +36,36 @@
34#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
35#include <linux/skbuff.h> 37#include <linux/skbuff.h>
36#include <linux/bitops.h> 38#include <linux/bitops.h>
39#include <linux/io.h>
37 40
38#include <asm/system.h> 41#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/dma.h> 42#include <asm/dma.h>
41#include <asm/hwtest.h> 43#include <asm/hwtest.h>
42#include <asm/macints.h> 44#include <asm/macints.h>
43 45
44static char version[] = 46static char version[] =
45 "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n"; 47 "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
46 48
47#define EI_SHIFT(x) (ei_local->reg_offset[x]) 49#define EI_SHIFT(x) (ei_local->reg_offset[x])
48#define ei_inb(port) in_8(port) 50#define ei_inb(port) in_8(port)
49#define ei_outb(val,port) out_8(port,val) 51#define ei_outb(val, port) out_8(port, val)
50#define ei_inb_p(port) in_8(port) 52#define ei_inb_p(port) in_8(port)
51#define ei_outb_p(val,port) out_8(port,val) 53#define ei_outb_p(val, port) out_8(port, val)
52 54
53#include "lib8390.c" 55#include "lib8390.c"
54 56
55#define WD_START_PG 0x00 /* First page of TX buffer */ 57#define WD_START_PG 0x00 /* First page of TX buffer */
56#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ 58#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
57#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ 59#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
58#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */ 60#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
61 /* First page of TX buffer */
59 62
60/* Unfortunately it seems we have to hardcode these for the moment */ 63/*
61/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */ 64 * Unfortunately it seems we have to hardcode these for the moment
65 * Shouldn't the card know about this?
66 * Does anyone know where to read it off the card?
67 * Do we trust the data provided by the card?
68 */
62 69
63#define DAYNA_8390_BASE 0x80000 70#define DAYNA_8390_BASE 0x80000
64#define DAYNA_8390_MEM 0x00000 71#define DAYNA_8390_MEM 0x00000
@@ -80,7 +87,7 @@ enum mac8390_type {
80 MAC8390_KINETICS, 87 MAC8390_KINETICS,
81}; 88};
82 89
83static const char * cardname[] = { 90static const char *cardname[] = {
84 "apple", 91 "apple",
85 "asante", 92 "asante",
86 "farallon", 93 "farallon",
@@ -90,7 +97,7 @@ static const char * cardname[] = {
90 "kinetics", 97 "kinetics",
91}; 98};
92 99
93static int word16[] = { 100static const int word16[] = {
94 1, /* apple */ 101 1, /* apple */
95 1, /* asante */ 102 1, /* asante */
96 1, /* farallon */ 103 1, /* farallon */
@@ -101,7 +108,7 @@ static int word16[] = {
101}; 108};
102 109
103/* on which cards do we use NuBus resources? */ 110/* on which cards do we use NuBus resources? */
104static int useresources[] = { 111static const int useresources[] = {
105 1, /* apple */ 112 1, /* apple */
106 1, /* asante */ 113 1, /* asante */
107 1, /* farallon */ 114 1, /* farallon */
@@ -117,22 +124,22 @@ enum mac8390_access {
117 ACCESS_16, 124 ACCESS_16,
118}; 125};
119 126
120extern int mac8390_memtest(struct net_device * dev); 127extern int mac8390_memtest(struct net_device *dev);
121static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 128static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
122 enum mac8390_type type); 129 enum mac8390_type type);
123 130
124static int mac8390_open(struct net_device * dev); 131static int mac8390_open(struct net_device *dev);
125static int mac8390_close(struct net_device * dev); 132static int mac8390_close(struct net_device *dev);
126static void mac8390_no_reset(struct net_device *dev); 133static void mac8390_no_reset(struct net_device *dev);
127static void interlan_reset(struct net_device *dev); 134static void interlan_reset(struct net_device *dev);
128 135
129/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/ 136/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
130static void sane_get_8390_hdr(struct net_device *dev, 137static void sane_get_8390_hdr(struct net_device *dev,
131 struct e8390_pkt_hdr *hdr, int ring_page); 138 struct e8390_pkt_hdr *hdr, int ring_page);
132static void sane_block_input(struct net_device * dev, int count, 139static void sane_block_input(struct net_device *dev, int count,
133 struct sk_buff * skb, int ring_offset); 140 struct sk_buff *skb, int ring_offset);
134static void sane_block_output(struct net_device * dev, int count, 141static void sane_block_output(struct net_device *dev, int count,
135 const unsigned char * buf, const int start_page); 142 const unsigned char *buf, const int start_page);
136 143
137/* dayna_memcpy to and from card */ 144/* dayna_memcpy to and from card */
138static void dayna_memcpy_fromcard(struct net_device *dev, void *to, 145static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +155,8 @@ static void dayna_block_input(struct net_device *dev, int count,
148static void dayna_block_output(struct net_device *dev, int count, 155static void dayna_block_output(struct net_device *dev, int count,
149 const unsigned char *buf, int start_page); 156 const unsigned char *buf, int start_page);
150 157
151#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) 158#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
152#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) 159#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
153 160
154/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 161/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
155static void slow_sane_get_8390_hdr(struct net_device *dev, 162static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +171,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
164static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) 171static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
165{ 172{
166 switch (dev->dr_sw) { 173 switch (dev->dr_sw) {
167 case NUBUS_DRSW_3COM: 174 case NUBUS_DRSW_3COM:
168 switch (dev->dr_hw) { 175 switch (dev->dr_hw) {
169 case NUBUS_DRHW_APPLE_SONIC_NB: 176 case NUBUS_DRHW_APPLE_SONIC_NB:
170 case NUBUS_DRHW_APPLE_SONIC_LC: 177 case NUBUS_DRHW_APPLE_SONIC_LC:
171 case NUBUS_DRHW_SONNET: 178 case NUBUS_DRHW_SONNET:
172 return MAC8390_NONE; 179 return MAC8390_NONE;
173 break;
174 default:
175 return MAC8390_APPLE;
176 break;
177 }
178 break; 180 break;
179 181 default:
180 case NUBUS_DRSW_APPLE: 182 return MAC8390_APPLE;
181 switch (dev->dr_hw) {
182 case NUBUS_DRHW_ASANTE_LC:
183 return MAC8390_NONE;
184 break;
185 case NUBUS_DRHW_CABLETRON:
186 return MAC8390_CABLETRON;
187 break;
188 default:
189 return MAC8390_APPLE;
190 break;
191 }
192 break; 183 break;
184 }
185 break;
193 186
194 case NUBUS_DRSW_ASANTE: 187 case NUBUS_DRSW_APPLE:
195 return MAC8390_ASANTE; 188 switch (dev->dr_hw) {
189 case NUBUS_DRHW_ASANTE_LC:
190 return MAC8390_NONE;
196 break; 191 break;
197 192 case NUBUS_DRHW_CABLETRON:
198 case NUBUS_DRSW_TECHWORKS: 193 return MAC8390_CABLETRON;
199 case NUBUS_DRSW_DAYNA2:
200 case NUBUS_DRSW_DAYNA_LC:
201 if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
202 return MAC8390_CABLETRON;
203 else
204 return MAC8390_APPLE;
205 break; 194 break;
206 195 default:
207 case NUBUS_DRSW_FARALLON: 196 return MAC8390_APPLE;
208 return MAC8390_FARALLON;
209 break; 197 break;
198 }
199 break;
210 200
211 case NUBUS_DRSW_KINETICS: 201 case NUBUS_DRSW_ASANTE:
212 switch (dev->dr_hw) { 202 return MAC8390_ASANTE;
213 case NUBUS_DRHW_INTERLAN: 203 break;
214 return MAC8390_INTERLAN;
215 break;
216 default:
217 return MAC8390_KINETICS;
218 break;
219 }
220 break;
221 204
222 case NUBUS_DRSW_DAYNA: 205 case NUBUS_DRSW_TECHWORKS:
223 // These correspond to Dayna Sonic cards 206 case NUBUS_DRSW_DAYNA2:
224 // which use the macsonic driver 207 case NUBUS_DRSW_DAYNA_LC:
225 if (dev->dr_hw == NUBUS_DRHW_SMC9194 || 208 if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
226 dev->dr_hw == NUBUS_DRHW_INTERLAN ) 209 return MAC8390_CABLETRON;
227 return MAC8390_NONE; 210 else
228 else 211 return MAC8390_APPLE;
229 return MAC8390_DAYNA; 212 break;
213
214 case NUBUS_DRSW_FARALLON:
215 return MAC8390_FARALLON;
216 break;
217
218 case NUBUS_DRSW_KINETICS:
219 switch (dev->dr_hw) {
220 case NUBUS_DRHW_INTERLAN:
221 return MAC8390_INTERLAN;
222 break;
223 default:
224 return MAC8390_KINETICS;
230 break; 225 break;
226 }
227 break;
228
229 case NUBUS_DRSW_DAYNA:
230 /*
231 * These correspond to Dayna Sonic cards
232 * which use the macsonic driver
233 */
234 if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
235 dev->dr_hw == NUBUS_DRHW_INTERLAN)
236 return MAC8390_NONE;
237 else
238 return MAC8390_DAYNA;
239 break;
231 } 240 }
232 return MAC8390_NONE; 241 return MAC8390_NONE;
233} 242}
@@ -237,14 +246,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
237 unsigned long outdata = 0xA5A0B5B0; 246 unsigned long outdata = 0xA5A0B5B0;
238 unsigned long indata = 0x00000000; 247 unsigned long indata = 0x00000000;
239 /* Try writing 32 bits */ 248 /* Try writing 32 bits */
240 memcpy((char *)membase, (char *)&outdata, 4); 249 memcpy(membase, &outdata, 4);
241 /* Now compare them */ 250 /* Now compare them */
242 if (memcmp((char *)&outdata, (char *)membase, 4) == 0) 251 if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
243 return ACCESS_32; 252 return ACCESS_32;
244 /* Write 16 bit output */ 253 /* Write 16 bit output */
245 word_memcpy_tocard((char *)membase, (char *)&outdata, 4); 254 word_memcpy_tocard(membase, &outdata, 4);
246 /* Now read it back */ 255 /* Now read it back */
247 word_memcpy_fromcard((char *)&indata, (char *)membase, 4); 256 word_memcpy_fromcard(&indata, membase, 4);
248 if (outdata == indata) 257 if (outdata == indata)
249 return ACCESS_16; 258 return ACCESS_16;
250 return ACCESS_UNKNOWN; 259 return ACCESS_UNKNOWN;
@@ -258,7 +267,7 @@ static int __init mac8390_memsize(unsigned long membase)
258 local_irq_save(flags); 267 local_irq_save(flags);
259 /* Check up to 32K in 4K increments */ 268 /* Check up to 32K in 4K increments */
260 for (i = 0; i < 8; i++) { 269 for (i = 0; i < 8; i++) {
261 volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000)); 270 volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
262 271
263 /* Unwriteable - we have a fully decoded card and the 272 /* Unwriteable - we have a fully decoded card and the
264 RAM end located */ 273 RAM end located */
@@ -273,28 +282,127 @@ static int __init mac8390_memsize(unsigned long membase)
273 282
274 /* check for partial decode and wrap */ 283 /* check for partial decode and wrap */
275 for (j = 0; j < i; j++) { 284 for (j = 0; j < i; j++) {
276 volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000)); 285 volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
277 if (*p != (0xA5A0 | j)) 286 if (*p != (0xA5A0 | j))
278 break; 287 break;
279 } 288 }
280 } 289 }
281 local_irq_restore(flags); 290 local_irq_restore(flags);
282 /* in any case, we stopped once we tried one block too many, 291 /*
283 or once we reached 32K */ 292 * in any case, we stopped once we tried one block too many,
284 return i * 0x1000; 293 * or once we reached 32K
294 */
295 return i * 0x1000;
296}
297
298static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
299 enum mac8390_type cardtype)
300{
301 struct nubus_dir dir;
302 struct nubus_dirent ent;
303 int offset;
304 volatile unsigned short *i;
305
306 printk_once(KERN_INFO pr_fmt("%s"), version);
307
308 dev->irq = SLOT2IRQ(ndev->board->slot);
309 /* This is getting to be a habit */
310 dev->base_addr = (ndev->board->slot_addr |
311 ((ndev->board->slot & 0xf) << 20));
312
313 /*
314 * Get some Nubus info - we will trust the card's idea
315 * of where its memory and registers are.
316 */
317
318 if (nubus_get_func_dir(ndev, &dir) == -1) {
319 pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
320 dev->name, ndev->board->slot);
321 return false;
322 }
323
324 /* Get the MAC address */
325 if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
326 pr_info("%s: Couldn't get MAC address!\n", dev->name);
327 return false;
328 }
329
330 nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
331
332 if (useresources[cardtype] == 1) {
333 nubus_rewinddir(&dir);
334 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
335 &ent) == -1) {
336 pr_err("%s: Memory offset resource for slot %X not found!\n",
337 dev->name, ndev->board->slot);
338 return false;
339 }
340 nubus_get_rsrc_mem(&offset, &ent, 4);
341 dev->mem_start = dev->base_addr + offset;
342 /* yes, this is how the Apple driver does it */
343 dev->base_addr = dev->mem_start + 0x10000;
344 nubus_rewinddir(&dir);
345 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
346 &ent) == -1) {
347 pr_info("%s: Memory length resource for slot %X not found, probing\n",
348 dev->name, ndev->board->slot);
349 offset = mac8390_memsize(dev->mem_start);
350 } else {
351 nubus_get_rsrc_mem(&offset, &ent, 4);
352 }
353 dev->mem_end = dev->mem_start + offset;
354 } else {
355 switch (cardtype) {
356 case MAC8390_KINETICS:
357 case MAC8390_DAYNA: /* it's the same */
358 dev->base_addr = (int)(ndev->board->slot_addr +
359 DAYNA_8390_BASE);
360 dev->mem_start = (int)(ndev->board->slot_addr +
361 DAYNA_8390_MEM);
362 dev->mem_end = dev->mem_start +
363 mac8390_memsize(dev->mem_start);
364 break;
365 case MAC8390_INTERLAN:
366 dev->base_addr = (int)(ndev->board->slot_addr +
367 INTERLAN_8390_BASE);
368 dev->mem_start = (int)(ndev->board->slot_addr +
369 INTERLAN_8390_MEM);
370 dev->mem_end = dev->mem_start +
371 mac8390_memsize(dev->mem_start);
372 break;
373 case MAC8390_CABLETRON:
374 dev->base_addr = (int)(ndev->board->slot_addr +
375 CABLETRON_8390_BASE);
376 dev->mem_start = (int)(ndev->board->slot_addr +
377 CABLETRON_8390_MEM);
378 /* The base address is unreadable if 0x00
379 * has been written to the command register
380 * Reset the chip by writing E8390_NODMA +
381 * E8390_PAGE0 + E8390_STOP just to be
382 * sure
383 */
384 i = (void *)dev->base_addr;
385 *i = 0x21;
386 dev->mem_end = dev->mem_start +
387 mac8390_memsize(dev->mem_start);
388 break;
389
390 default:
391 pr_err("Card type %s is unsupported, sorry\n",
392 ndev->board->name);
393 return false;
394 }
395 }
396
397 return true;
285} 398}
286 399
287struct net_device * __init mac8390_probe(int unit) 400struct net_device * __init mac8390_probe(int unit)
288{ 401{
289 struct net_device *dev; 402 struct net_device *dev;
290 volatile unsigned short *i; 403 struct nubus_dev *ndev = NULL;
291 int version_disp = 0;
292 struct nubus_dev * ndev = NULL;
293 int err = -ENODEV; 404 int err = -ENODEV;
294 405
295 struct nubus_dir dir;
296 struct nubus_dirent ent;
297 int offset;
298 static unsigned int slots; 406 static unsigned int slots;
299 407
300 enum mac8390_type cardtype; 408 enum mac8390_type cardtype;
@@ -311,118 +419,19 @@ struct net_device * __init mac8390_probe(int unit)
311 if (unit >= 0) 419 if (unit >= 0)
312 sprintf(dev->name, "eth%d", unit); 420 sprintf(dev->name, "eth%d", unit);
313 421
314 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) { 422 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
423 ndev))) {
315 /* Have we seen it already? */ 424 /* Have we seen it already? */
316 if (slots & (1<<ndev->board->slot)) 425 if (slots & (1 << ndev->board->slot))
317 continue; 426 continue;
318 slots |= 1<<ndev->board->slot; 427 slots |= 1 << ndev->board->slot;
319 428
320 if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE) 429 cardtype = mac8390_ident(ndev);
430 if (cardtype == MAC8390_NONE)
321 continue; 431 continue;
322 432
323 if (version_disp == 0) { 433 if (!mac8390_init(dev, ndev, cardtype))
324 version_disp = 1;
325 printk(version);
326 }
327
328 dev->irq = SLOT2IRQ(ndev->board->slot);
329 /* This is getting to be a habit */
330 dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
331
332 /* Get some Nubus info - we will trust the card's idea
333 of where its memory and registers are. */
334
335 if (nubus_get_func_dir(ndev, &dir) == -1) {
336 printk(KERN_ERR "%s: Unable to get Nubus functional"
337 " directory for slot %X!\n",
338 dev->name, ndev->board->slot);
339 continue; 434 continue;
340 }
341
342 /* Get the MAC address */
343 if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
344 printk(KERN_INFO "%s: Couldn't get MAC address!\n",
345 dev->name);
346 continue;
347 } else {
348 nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
349 }
350
351 if (useresources[cardtype] == 1) {
352 nubus_rewinddir(&dir);
353 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
354 printk(KERN_ERR "%s: Memory offset resource"
355 " for slot %X not found!\n",
356 dev->name, ndev->board->slot);
357 continue;
358 }
359 nubus_get_rsrc_mem(&offset, &ent, 4);
360 dev->mem_start = dev->base_addr + offset;
361 /* yes, this is how the Apple driver does it */
362 dev->base_addr = dev->mem_start + 0x10000;
363 nubus_rewinddir(&dir);
364 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
365 printk(KERN_INFO "%s: Memory length resource"
366 " for slot %X not found"
367 ", probing\n",
368 dev->name, ndev->board->slot);
369 offset = mac8390_memsize(dev->mem_start);
370 } else {
371 nubus_get_rsrc_mem(&offset, &ent, 4);
372 }
373 dev->mem_end = dev->mem_start + offset;
374 } else {
375 switch (cardtype) {
376 case MAC8390_KINETICS:
377 case MAC8390_DAYNA: /* it's the same */
378 dev->base_addr =
379 (int)(ndev->board->slot_addr +
380 DAYNA_8390_BASE);
381 dev->mem_start =
382 (int)(ndev->board->slot_addr +
383 DAYNA_8390_MEM);
384 dev->mem_end =
385 dev->mem_start +
386 mac8390_memsize(dev->mem_start);
387 break;
388 case MAC8390_INTERLAN:
389 dev->base_addr =
390 (int)(ndev->board->slot_addr +
391 INTERLAN_8390_BASE);
392 dev->mem_start =
393 (int)(ndev->board->slot_addr +
394 INTERLAN_8390_MEM);
395 dev->mem_end =
396 dev->mem_start +
397 mac8390_memsize(dev->mem_start);
398 break;
399 case MAC8390_CABLETRON:
400 dev->base_addr =
401 (int)(ndev->board->slot_addr +
402 CABLETRON_8390_BASE);
403 dev->mem_start =
404 (int)(ndev->board->slot_addr +
405 CABLETRON_8390_MEM);
406 /* The base address is unreadable if 0x00
407 * has been written to the command register
408 * Reset the chip by writing E8390_NODMA +
409 * E8390_PAGE0 + E8390_STOP just to be
410 * sure
411 */
412 i = (void *)dev->base_addr;
413 *i = 0x21;
414 dev->mem_end =
415 dev->mem_start +
416 mac8390_memsize(dev->mem_start);
417 break;
418
419 default:
420 printk(KERN_ERR "Card type %s is"
421 " unsupported, sorry\n",
422 ndev->board->name);
423 continue;
424 }
425 }
426 435
427 /* Do the nasty 8390 stuff */ 436 /* Do the nasty 8390 stuff */
428 if (!mac8390_initdev(dev, ndev, cardtype)) 437 if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +467,7 @@ int init_module(void)
458 dev_mac890[i] = dev; 467 dev_mac890[i] = dev;
459 } 468 }
460 if (!i) { 469 if (!i) {
461 printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n"); 470 pr_notice("No useable cards found, driver NOT installed.\n");
462 return -ENODEV; 471 return -ENODEV;
463 } 472 }
464 return 0; 473 return 0;
@@ -493,22 +502,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
493#endif 502#endif
494}; 503};
495 504
496static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 505static int __init mac8390_initdev(struct net_device *dev,
497 enum mac8390_type type) 506 struct nubus_dev *ndev,
507 enum mac8390_type type)
498{ 508{
499 static u32 fwrd4_offsets[16]={ 509 static u32 fwrd4_offsets[16] = {
500 0, 4, 8, 12, 510 0, 4, 8, 12,
501 16, 20, 24, 28, 511 16, 20, 24, 28,
502 32, 36, 40, 44, 512 32, 36, 40, 44,
503 48, 52, 56, 60 513 48, 52, 56, 60
504 }; 514 };
505 static u32 back4_offsets[16]={ 515 static u32 back4_offsets[16] = {
506 60, 56, 52, 48, 516 60, 56, 52, 48,
507 44, 40, 36, 32, 517 44, 40, 36, 32,
508 28, 24, 20, 16, 518 28, 24, 20, 16,
509 12, 8, 4, 0 519 12, 8, 4, 0
510 }; 520 };
511 static u32 fwrd2_offsets[16]={ 521 static u32 fwrd2_offsets[16] = {
512 0, 2, 4, 6, 522 0, 2, 4, 6,
513 8, 10, 12, 14, 523 8, 10, 12, 14,
514 16, 18, 20, 22, 524 16, 18, 20, 22,
@@ -526,47 +536,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
526 536
527 /* Cabletron's TX/RX buffers are backwards */ 537 /* Cabletron's TX/RX buffers are backwards */
528 if (type == MAC8390_CABLETRON) { 538 if (type == MAC8390_CABLETRON) {
529 ei_status.tx_start_page = CABLETRON_TX_START_PG; 539 ei_status.tx_start_page = CABLETRON_TX_START_PG;
530 ei_status.rx_start_page = CABLETRON_RX_START_PG; 540 ei_status.rx_start_page = CABLETRON_RX_START_PG;
531 ei_status.stop_page = CABLETRON_RX_STOP_PG; 541 ei_status.stop_page = CABLETRON_RX_STOP_PG;
532 ei_status.rmem_start = dev->mem_start; 542 ei_status.rmem_start = dev->mem_start;
533 ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256; 543 ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
534 } else { 544 } else {
535 ei_status.tx_start_page = WD_START_PG; 545 ei_status.tx_start_page = WD_START_PG;
536 ei_status.rx_start_page = WD_START_PG + TX_PAGES; 546 ei_status.rx_start_page = WD_START_PG + TX_PAGES;
537 ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; 547 ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
538 ei_status.rmem_start = dev->mem_start + TX_PAGES*256; 548 ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
539 ei_status.rmem_end = dev->mem_end; 549 ei_status.rmem_end = dev->mem_end;
540 } 550 }
541 551
542 /* Fill in model-specific information and functions */ 552 /* Fill in model-specific information and functions */
543 switch(type) { 553 switch (type) {
544 case MAC8390_FARALLON: 554 case MAC8390_FARALLON:
545 case MAC8390_APPLE: 555 case MAC8390_APPLE:
546 switch(mac8390_testio(dev->mem_start)) { 556 switch (mac8390_testio(dev->mem_start)) {
547 case ACCESS_UNKNOWN: 557 case ACCESS_UNKNOWN:
548 printk("Don't know how to access card memory!\n"); 558 pr_info("Don't know how to access card memory!\n");
549 return -ENODEV; 559 return -ENODEV;
550 break; 560 break;
551 561
552 case ACCESS_16: 562 case ACCESS_16:
553 /* 16 bit card, register map is reversed */ 563 /* 16 bit card, register map is reversed */
554 ei_status.reset_8390 = &mac8390_no_reset; 564 ei_status.reset_8390 = &mac8390_no_reset;
555 ei_status.block_input = &slow_sane_block_input; 565 ei_status.block_input = &slow_sane_block_input;
556 ei_status.block_output = &slow_sane_block_output; 566 ei_status.block_output = &slow_sane_block_output;
557 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 567 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
558 ei_status.reg_offset = back4_offsets; 568 ei_status.reg_offset = back4_offsets;
559 break; 569 break;
560 570
561 case ACCESS_32: 571 case ACCESS_32:
562 /* 32 bit card, register map is reversed */ 572 /* 32 bit card, register map is reversed */
563 ei_status.reset_8390 = &mac8390_no_reset; 573 ei_status.reset_8390 = &mac8390_no_reset;
564 ei_status.block_input = &sane_block_input; 574 ei_status.block_input = &sane_block_input;
565 ei_status.block_output = &sane_block_output; 575 ei_status.block_output = &sane_block_output;
566 ei_status.get_8390_hdr = &sane_get_8390_hdr; 576 ei_status.get_8390_hdr = &sane_get_8390_hdr;
567 ei_status.reg_offset = back4_offsets; 577 ei_status.reg_offset = back4_offsets;
568 access_bitmode = 1; 578 access_bitmode = 1;
569 break; 579 break;
570 } 580 }
571 break; 581 break;
572 582
@@ -608,24 +618,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
608 ei_status.block_input = &slow_sane_block_input; 618 ei_status.block_input = &slow_sane_block_input;
609 ei_status.block_output = &slow_sane_block_output; 619 ei_status.block_output = &slow_sane_block_output;
610 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 620 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
611 ei_status.reg_offset = fwrd4_offsets; 621 ei_status.reg_offset = fwrd4_offsets;
612 break; 622 break;
613 623
614 default: 624 default:
615 printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name); 625 pr_err("Card type %s is unsupported, sorry\n",
626 ndev->board->name);
616 return -ENODEV; 627 return -ENODEV;
617 } 628 }
618 629
619 __NS8390_init(dev, 0); 630 __NS8390_init(dev, 0);
620 631
621 /* Good, done, now spit out some messages */ 632 /* Good, done, now spit out some messages */
622 printk(KERN_INFO "%s: %s in slot %X (type %s)\n", 633 pr_info("%s: %s in slot %X (type %s)\n",
623 dev->name, ndev->board->name, ndev->board->slot, cardname[type]); 634 dev->name, ndev->board->name, ndev->board->slot,
624 printk(KERN_INFO 635 cardname[type]);
625 "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", 636 pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
626 dev->dev_addr, dev->irq, 637 dev->dev_addr, dev->irq,
627 (unsigned int)(dev->mem_end - dev->mem_start) >> 10, 638 (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
628 dev->mem_start, access_bitmode ? 32 : 16); 639 dev->mem_start, access_bitmode ? 32 : 16);
629 return 0; 640 return 0;
630} 641}
631 642
@@ -633,7 +644,7 @@ static int mac8390_open(struct net_device *dev)
633{ 644{
634 __ei_open(dev); 645 __ei_open(dev);
635 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { 646 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
636 printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq); 647 pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
637 return -EAGAIN; 648 return -EAGAIN;
638 } 649 }
639 return 0; 650 return 0;
@@ -650,72 +661,71 @@ static void mac8390_no_reset(struct net_device *dev)
650{ 661{
651 ei_status.txing = 0; 662 ei_status.txing = 0;
652 if (ei_debug > 1) 663 if (ei_debug > 1)
653 printk("reset not supported\n"); 664 pr_info("reset not supported\n");
654 return; 665 return;
655} 666}
656 667
657static void interlan_reset(struct net_device *dev) 668static void interlan_reset(struct net_device *dev)
658{ 669{
659 unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq)); 670 unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
660 if (ei_debug > 1) 671 if (ei_debug > 1)
661 printk("Need to reset the NS8390 t=%lu...", jiffies); 672 pr_info("Need to reset the NS8390 t=%lu...", jiffies);
662 ei_status.txing = 0; 673 ei_status.txing = 0;
663 target[0xC0000] = 0; 674 target[0xC0000] = 0;
664 if (ei_debug > 1) 675 if (ei_debug > 1)
665 printk("reset complete\n"); 676 pr_cont("reset complete\n");
666 return; 677 return;
667} 678}
668 679
669/* dayna_memcpy_fromio/dayna_memcpy_toio */ 680/* dayna_memcpy_fromio/dayna_memcpy_toio */
670/* directly from daynaport.c by Alan Cox */ 681/* directly from daynaport.c by Alan Cox */
671static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count) 682static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
683 int count)
672{ 684{
673 volatile unsigned char *ptr; 685 volatile unsigned char *ptr;
674 unsigned char *target=to; 686 unsigned char *target = to;
675 from<<=1; /* word, skip overhead */ 687 from <<= 1; /* word, skip overhead */
676 ptr=(unsigned char *)(dev->mem_start+from); 688 ptr = (unsigned char *)(dev->mem_start+from);
677 /* Leading byte? */ 689 /* Leading byte? */
678 if (from&2) { 690 if (from & 2) {
679 *target++ = ptr[-1]; 691 *target++ = ptr[-1];
680 ptr += 2; 692 ptr += 2;
681 count--; 693 count--;
682 } 694 }
683 while(count>=2) 695 while (count >= 2) {
684 {
685 *(unsigned short *)target = *(unsigned short volatile *)ptr; 696 *(unsigned short *)target = *(unsigned short volatile *)ptr;
686 ptr += 4; /* skip cruft */ 697 ptr += 4; /* skip cruft */
687 target += 2; 698 target += 2;
688 count-=2; 699 count -= 2;
689 } 700 }
690 /* Trailing byte? */ 701 /* Trailing byte? */
691 if(count) 702 if (count)
692 *target = *ptr; 703 *target = *ptr;
693} 704}
694 705
695static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count) 706static void dayna_memcpy_tocard(struct net_device *dev, int to,
707 const void *from, int count)
696{ 708{
697 volatile unsigned short *ptr; 709 volatile unsigned short *ptr;
698 const unsigned char *src=from; 710 const unsigned char *src = from;
699 to<<=1; /* word, skip overhead */ 711 to <<= 1; /* word, skip overhead */
700 ptr=(unsigned short *)(dev->mem_start+to); 712 ptr = (unsigned short *)(dev->mem_start+to);
701 /* Leading byte? */ 713 /* Leading byte? */
702 if (to&2) { /* avoid a byte write (stomps on other data) */ 714 if (to & 2) { /* avoid a byte write (stomps on other data) */
703 ptr[-1] = (ptr[-1]&0xFF00)|*src++; 715 ptr[-1] = (ptr[-1]&0xFF00)|*src++;
704 ptr++; 716 ptr++;
705 count--; 717 count--;
706 } 718 }
707 while(count>=2) 719 while (count >= 2) {
708 { 720 *ptr++ = *(unsigned short *)src; /* Copy and */
709 *ptr++=*(unsigned short *)src; /* Copy and */
710 ptr++; /* skip cruft */ 721 ptr++; /* skip cruft */
711 src += 2; 722 src += 2;
712 count-=2; 723 count -= 2;
713 } 724 }
714 /* Trailing byte? */ 725 /* Trailing byte? */
715 if(count) 726 if (count) {
716 {
717 /* card doesn't like byte writes */ 727 /* card doesn't like byte writes */
718 *ptr=(*ptr&0x00FF)|(*src << 8); 728 *ptr = (*ptr & 0x00FF) | (*src << 8);
719 } 729 }
720} 730}
721 731
@@ -738,11 +748,14 @@ static void sane_block_input(struct net_device *dev, int count,
738 if (xfer_start + count > ei_status.rmem_end) { 748 if (xfer_start + count > ei_status.rmem_end) {
739 /* We must wrap the input move. */ 749 /* We must wrap the input move. */
740 int semi_count = ei_status.rmem_end - xfer_start; 750 int semi_count = ei_status.rmem_end - xfer_start;
741 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count); 751 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
752 semi_count);
742 count -= semi_count; 753 count -= semi_count;
743 memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count); 754 memcpy_toio(skb->data + semi_count,
755 (char *)ei_status.rmem_start, count);
744 } else { 756 } else {
745 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count); 757 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
758 count);
746 } 759 }
747} 760}
748 761
@@ -755,16 +768,18 @@ static void sane_block_output(struct net_device *dev, int count,
755} 768}
756 769
757/* dayna block input/output */ 770/* dayna block input/output */
758static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) 771static void dayna_get_8390_hdr(struct net_device *dev,
772 struct e8390_pkt_hdr *hdr, int ring_page)
759{ 773{
760 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 774 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
761 775
762 dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4); 776 dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
763 /* Fix endianness */ 777 /* Fix endianness */
764 hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8); 778 hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
765} 779}
766 780
767static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) 781static void dayna_block_input(struct net_device *dev, int count,
782 struct sk_buff *skb, int ring_offset)
768{ 783{
769 unsigned long xfer_base = ring_offset - (WD_START_PG<<8); 784 unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
770 unsigned long xfer_start = xfer_base+dev->mem_start; 785 unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +787,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
772 /* Note the offset math is done in card memory space which is word 787 /* Note the offset math is done in card memory space which is word
773 per long onto our space. */ 788 per long onto our space. */
774 789
775 if (xfer_start + count > ei_status.rmem_end) 790 if (xfer_start + count > ei_status.rmem_end) {
776 {
777 /* We must wrap the input move. */ 791 /* We must wrap the input move. */
778 int semi_count = ei_status.rmem_end - xfer_start; 792 int semi_count = ei_status.rmem_end - xfer_start;
779 dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count); 793 dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +795,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
781 dayna_memcpy_fromcard(dev, skb->data + semi_count, 795 dayna_memcpy_fromcard(dev, skb->data + semi_count,
782 ei_status.rmem_start - dev->mem_start, 796 ei_status.rmem_start - dev->mem_start,
783 count); 797 count);
784 } 798 } else {
785 else
786 {
787 dayna_memcpy_fromcard(dev, skb->data, xfer_base, count); 799 dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
788 } 800 }
789} 801}
790 802
791static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf, 803static void dayna_block_output(struct net_device *dev, int count,
792 int start_page) 804 const unsigned char *buf,
805 int start_page)
793{ 806{
794 long shmem = (start_page - WD_START_PG)<<8; 807 long shmem = (start_page - WD_START_PG)<<8;
795 808
@@ -797,40 +810,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
797} 810}
798 811
799/* Cabletron block I/O */ 812/* Cabletron block I/O */
800static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 813static void slow_sane_get_8390_hdr(struct net_device *dev,
801 int ring_page) 814 struct e8390_pkt_hdr *hdr,
815 int ring_page)
802{ 816{
803 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 817 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
804 word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4); 818 word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
805 /* Register endianism - fix here rather than 8390.c */ 819 /* Register endianism - fix here rather than 8390.c */
806 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); 820 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
807} 821}
808 822
809static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb, 823static void slow_sane_block_input(struct net_device *dev, int count,
810 int ring_offset) 824 struct sk_buff *skb, int ring_offset)
811{ 825{
812 unsigned long xfer_base = ring_offset - (WD_START_PG<<8); 826 unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
813 unsigned long xfer_start = xfer_base+dev->mem_start; 827 unsigned long xfer_start = xfer_base+dev->mem_start;
814 828
815 if (xfer_start + count > ei_status.rmem_end) 829 if (xfer_start + count > ei_status.rmem_end) {
816 {
817 /* We must wrap the input move. */ 830 /* We must wrap the input move. */
818 int semi_count = ei_status.rmem_end - xfer_start; 831 int semi_count = ei_status.rmem_end - xfer_start;
819 word_memcpy_fromcard(skb->data, (char *)dev->mem_start + 832 word_memcpy_fromcard(skb->data,
820 xfer_base, semi_count); 833 (char *)dev->mem_start + xfer_base,
834 semi_count);
821 count -= semi_count; 835 count -= semi_count;
822 word_memcpy_fromcard(skb->data + semi_count, 836 word_memcpy_fromcard(skb->data + semi_count,
823 (char *)ei_status.rmem_start, count); 837 (char *)ei_status.rmem_start, count);
824 } 838 } else {
825 else 839 word_memcpy_fromcard(skb->data,
826 { 840 (char *)dev->mem_start + xfer_base, count);
827 word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
828 xfer_base, count);
829 } 841 }
830} 842}
831 843
832static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf, 844static void slow_sane_block_output(struct net_device *dev, int count,
833 int start_page) 845 const unsigned char *buf, int start_page)
834{ 846{
835 long shmem = (start_page - WD_START_PG)<<8; 847 long shmem = (start_page - WD_START_PG)<<8;
836 848
@@ -843,10 +855,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
843 const unsigned short *from = fp; 855 const unsigned short *from = fp;
844 856
845 count++; 857 count++;
846 count/=2; 858 count /= 2;
847 859
848 while(count--) 860 while (count--)
849 *to++=*from++; 861 *to++ = *from++;
850} 862}
851 863
852static void word_memcpy_fromcard(void *tp, const void *fp, int count) 864static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +867,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
855 const volatile unsigned short *from = fp; 867 const volatile unsigned short *from = fp;
856 868
857 count++; 869 count++;
858 count/=2; 870 count /= 2;
859 871
860 while(count--) 872 while (count--)
861 *to++=*from++; 873 *to++ = *from++;
862} 874}
863 875
864 876
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 21a9c9ab4b3..fa0dc514dba 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -418,7 +418,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
418#define MACVLAN_FEATURES \ 418#define MACVLAN_FEATURES \
419 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 419 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
420 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ 420 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
421 NETIF_F_TSO_ECN | NETIF_F_TSO6) 421 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
422 422
423#define MACVLAN_STATE_MASK \ 423#define MACVLAN_STATE_MASK \
424 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 424 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 2af81735386..9f72cb45f4a 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -51,14 +51,11 @@
51 51
52static const char *meth_str="SGI O2 Fast Ethernet"; 52static const char *meth_str="SGI O2 Fast Ethernet";
53 53
54#define HAVE_TX_TIMEOUT
55/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */ 54/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
56#define TX_TIMEOUT (400*HZ/1000) 55#define TX_TIMEOUT (400*HZ/1000)
57 56
58#ifdef HAVE_TX_TIMEOUT
59static int timeout = TX_TIMEOUT; 57static int timeout = TX_TIMEOUT;
60module_param(timeout, int, 0); 58module_param(timeout, int, 0);
61#endif
62 59
63/* 60/*
64 * This structure is private to each device. It is used to pass 61 * This structure is private to each device. It is used to pass
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3cf56d90d85..8f6e816a739 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1271,7 +1271,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
1271 return __mlx4_init_one(pdev, NULL); 1271 return __mlx4_init_one(pdev, NULL);
1272} 1272}
1273 1273
1274static struct pci_device_id mlx4_pci_table[] = { 1274static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1275 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ 1275 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
1276 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ 1276 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
1277 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1277 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index af67af55efe..e24072a9a97 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,6 @@
55#include <linux/types.h> 55#include <linux/types.h>
56#include <linux/inet_lro.h> 56#include <linux/inet_lro.h>
57#include <asm/system.h> 57#include <asm/system.h>
58#include <linux/list.h>
59 58
60static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 59static char mv643xx_eth_driver_name[] = "mv643xx_eth";
61static char mv643xx_eth_driver_version[] = "1.4"; 60static char mv643xx_eth_driver_version[] = "1.4";
@@ -1697,7 +1696,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
1697 return 0; 1696 return 0;
1698 1697
1699 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1698 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1700 list_for_each_entry(ha, &dev->uc.list, list) { 1699 netdev_for_each_uc_addr(ha, dev) {
1701 if (memcmp(dev->dev_addr, ha->addr, 5)) 1700 if (memcmp(dev->dev_addr, ha->addr, 5))
1702 return 0; 1701 return 0;
1703 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1702 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0..c0884a9cba3 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -4085,7 +4085,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4085#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 4085#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
4086#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 4086#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
4087 4087
4088static struct pci_device_id myri10ge_pci_tbl[] = { 4088static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
4089 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, 4089 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
4090 {PCI_DEVICE 4090 {PCI_DEVICE
4091 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, 4091 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b3513ad3b70..8b431308535 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -716,10 +716,10 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
716 pad[0] = MYRI_PAD_LEN; 716 pad[0] = MYRI_PAD_LEN;
717 pad[1] = 0xab; 717 pad[1] = 0xab;
718 718
719 /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length 719 /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
720 * in here instead. It is up to the 802.2 layer to carry protocol information. 720 * length in here instead.
721 */ 721 */
722 if (type != ETH_P_802_3) 722 if (type != ETH_P_802_3 && type != ETH_P_802_2)
723 eth->h_proto = htons(type); 723 eth->h_proto = htons(type);
724 else 724 else
725 eth->h_proto = htons(len); 725 eth->h_proto = htons(len);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce2..2d7b3bbfed0 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
247 { "NatSemi DP8381[56]", 0, 24 }, 247 { "NatSemi DP8381[56]", 0, 24 },
248}; 248};
249 249
250static struct pci_device_id natsemi_pci_tbl[] __devinitdata = { 250static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
251 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, 251 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
252 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 252 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
253 { } /* terminate list */ 253 { } /* terminate list */
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151..85aec4f1013 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
136}; 136};
137 137
138 138
139static struct pci_device_id ne2k_pci_tbl[] = { 139static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
140 { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 }, 140 { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
141 { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 }, 141 { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
142 { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 }, 142 { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index 11d94e2434e..861a0590b1f 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -18,7 +18,7 @@
18# MA 02111-1307, USA. 18# MA 02111-1307, USA.
19# 19#
20# The full GNU General Public License is included in this distribution 20# The full GNU General Public License is included in this distribution
21# in the file called LICENSE. 21# in the file called "COPYING".
22# 22#
23# 23#
24 24
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 9bc5bd1d538..144d2e88042 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -420,7 +420,7 @@ struct status_desc {
420} __attribute__ ((aligned(16))); 420} __attribute__ ((aligned(16)));
421 421
422/* UNIFIED ROMIMAGE *************************/ 422/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0x3eb000 423#define NX_UNI_FW_MIN_SIZE 0xc8000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6 425#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7 426#define NX_UNI_DIR_SECT_FW 0x7
@@ -1427,8 +1427,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
1427 1427
1428} 1428}
1429 1429
1430int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1430int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1431int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1431int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1432extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1432extern void netxen_change_ringparam(struct netxen_adapter *adapter);
1433extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, 1433extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
1434 int *valp); 1434 int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9cb8f687804..2a8ef5fc966 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 542f408333f..f8499e56cbe 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d138fc22927..622e4c8be93 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -969,7 +969,8 @@ enum {
969#define NX_DEV_READY 3 969#define NX_DEV_READY 3
970#define NX_DEV_NEED_RESET 4 970#define NX_DEV_NEED_RESET 4
971#define NX_DEV_NEED_QUISCENT 5 971#define NX_DEV_NEED_QUISCENT 5
972#define NX_DEV_FAILED 6 972#define NX_DEV_NEED_AER 6
973#define NX_DEV_FAILED 7
973 974
974#define NX_RCODE_DRIVER_INFO 0x20000000 975#define NX_RCODE_DRIVER_INFO 0x20000000
975#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000 976#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 85e28e60ecf..dd45c7a9122 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -777,17 +777,20 @@ int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
777int netxen_config_intr_coalesce(struct netxen_adapter *adapter) 777int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
778{ 778{
779 nx_nic_req_t req; 779 nx_nic_req_t req;
780 u64 word; 780 u64 word[6];
781 int rv; 781 int rv, i;
782 782
783 memset(&req, 0, sizeof(nx_nic_req_t)); 783 memset(&req, 0, sizeof(nx_nic_req_t));
784 memset(word, 0, sizeof(word));
784 785
785 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); 786 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
786 787
787 word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); 788 word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
788 req.req_hdr = cpu_to_le64(word); 789 req.req_hdr = cpu_to_le64(word[0]);
789 790
790 memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal)); 791 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
792 for (i = 0; i < 6; i++)
793 req.words[i] = cpu_to_le64(word[i]);
791 794
792 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 795 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
793 if (rv != 0) { 796 if (rv != 0) {
@@ -1033,7 +1036,7 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
1033 return 0; 1036 return 0;
1034} 1037}
1035 1038
1036int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 1039int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
1037{ 1040{
1038 __le32 *pmac = (__le32 *) mac; 1041 __le32 *pmac = (__le32 *) mac;
1039 u32 offset; 1042 u32 offset;
@@ -1058,7 +1061,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
1058 return 0; 1061 return 0;
1059} 1062}
1060 1063
1061int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 1064int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
1062{ 1065{
1063 uint32_t crbaddr, mac_hi, mac_lo; 1066 uint32_t crbaddr, mac_hi, mac_lo;
1064 int pci_func = adapter->ahw.pci_func; 1067 int pci_func = adapter->ahw.pci_func;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 3fd1dcb3583..e2c5b6f2df0 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64cff68d372..1c63610ead4 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -780,6 +780,9 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
780 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 780 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
781 return 1; 781 return 1;
782 782
783 if (adapter->need_fw_reset)
784 return 1;
785
783 /* last attempt had failed */ 786 /* last attempt had failed */
784 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) 787 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
785 return 1; 788 return 1;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f9d6081959..076f826d5a5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -35,6 +35,7 @@
35#include <linux/ipv6.h> 35#include <linux/ipv6.h>
36#include <linux/inetdevice.h> 36#include <linux/inetdevice.h>
37#include <linux/sysfs.h> 37#include <linux/sysfs.h>
38#include <linux/aer.h>
38 39
39MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); 40MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
40MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
@@ -84,6 +85,7 @@ static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
84static void netxen_create_diag_entries(struct netxen_adapter *adapter); 85static void netxen_create_diag_entries(struct netxen_adapter *adapter);
85static void netxen_remove_diag_entries(struct netxen_adapter *adapter); 86static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
86 87
88static int nx_dev_request_aer(struct netxen_adapter *adapter);
87static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); 89static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
88static int netxen_can_start_firmware(struct netxen_adapter *adapter); 90static int netxen_can_start_firmware(struct netxen_adapter *adapter);
89 91
@@ -98,7 +100,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
98 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ 100 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
99 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 101 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
100 102
101static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 103static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
102 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), 104 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
103 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), 105 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
104 ENTRY(PCI_DEVICE_ID_NX2031_4GCU), 106 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -430,7 +432,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
430{ 432{
431 int i; 433 int i;
432 unsigned char *p; 434 unsigned char *p;
433 __le64 mac_addr; 435 u64 mac_addr;
434 struct net_device *netdev = adapter->netdev; 436 struct net_device *netdev = adapter->netdev;
435 struct pci_dev *pdev = adapter->pdev; 437 struct pci_dev *pdev = adapter->pdev;
436 438
@@ -1262,6 +1264,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1262 if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) 1264 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
1263 goto err_out_disable_pdev; 1265 goto err_out_disable_pdev;
1264 1266
1267 if (NX_IS_REVISION_P3(pdev->revision))
1268 pci_enable_pcie_error_reporting(pdev);
1269
1265 pci_set_master(pdev); 1270 pci_set_master(pdev);
1266 1271
1267 netdev = alloc_etherdev(sizeof(struct netxen_adapter)); 1272 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1409,17 +1414,19 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1409 1414
1410 netxen_release_firmware(adapter); 1415 netxen_release_firmware(adapter);
1411 1416
1417 if (NX_IS_REVISION_P3(pdev->revision))
1418 pci_disable_pcie_error_reporting(pdev);
1419
1412 pci_release_regions(pdev); 1420 pci_release_regions(pdev);
1413 pci_disable_device(pdev); 1421 pci_disable_device(pdev);
1414 pci_set_drvdata(pdev, NULL); 1422 pci_set_drvdata(pdev, NULL);
1415 1423
1416 free_netdev(netdev); 1424 free_netdev(netdev);
1417} 1425}
1418static int __netxen_nic_shutdown(struct pci_dev *pdev) 1426
1427static void netxen_nic_detach_func(struct netxen_adapter *adapter)
1419{ 1428{
1420 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1421 struct net_device *netdev = adapter->netdev; 1429 struct net_device *netdev = adapter->netdev;
1422 int retval;
1423 1430
1424 netif_device_detach(netdev); 1431 netif_device_detach(netdev);
1425 1432
@@ -1438,53 +1445,22 @@ static int __netxen_nic_shutdown(struct pci_dev *pdev)
1438 nx_decr_dev_ref_cnt(adapter); 1445 nx_decr_dev_ref_cnt(adapter);
1439 1446
1440 clear_bit(__NX_RESETTING, &adapter->state); 1447 clear_bit(__NX_RESETTING, &adapter->state);
1441
1442 retval = pci_save_state(pdev);
1443 if (retval)
1444 return retval;
1445
1446 if (netxen_nic_wol_supported(adapter)) {
1447 pci_enable_wake(pdev, PCI_D3cold, 1);
1448 pci_enable_wake(pdev, PCI_D3hot, 1);
1449 }
1450
1451 pci_disable_device(pdev);
1452
1453 return 0;
1454} 1448}
1455static void netxen_nic_shutdown(struct pci_dev *pdev)
1456{
1457 if (__netxen_nic_shutdown(pdev))
1458 return;
1459}
1460#ifdef CONFIG_PM
1461static int
1462netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1463{
1464 int retval;
1465
1466 retval = __netxen_nic_shutdown(pdev);
1467 if (retval)
1468 return retval;
1469 1449
1470 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1450static int netxen_nic_attach_func(struct pci_dev *pdev)
1471 return 0;
1472}
1473
1474static int
1475netxen_nic_resume(struct pci_dev *pdev)
1476{ 1451{
1477 struct netxen_adapter *adapter = pci_get_drvdata(pdev); 1452 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1478 struct net_device *netdev = adapter->netdev; 1453 struct net_device *netdev = adapter->netdev;
1479 int err; 1454 int err;
1480 1455
1481 pci_set_power_state(pdev, PCI_D0);
1482 pci_restore_state(pdev);
1483
1484 err = pci_enable_device(pdev); 1456 err = pci_enable_device(pdev);
1485 if (err) 1457 if (err)
1486 return err; 1458 return err;
1487 1459
1460 pci_set_power_state(pdev, PCI_D0);
1461 pci_set_master(pdev);
1462 pci_restore_state(pdev);
1463
1488 adapter->ahw.crb_win = -1; 1464 adapter->ahw.crb_win = -1;
1489 adapter->ahw.ocm_win = -1; 1465 adapter->ahw.ocm_win = -1;
1490 1466
@@ -1503,11 +1479,10 @@ netxen_nic_resume(struct pci_dev *pdev)
1503 if (err) 1479 if (err)
1504 goto err_out_detach; 1480 goto err_out_detach;
1505 1481
1506 netif_device_attach(netdev);
1507
1508 netxen_config_indev_addr(netdev, NETDEV_UP); 1482 netxen_config_indev_addr(netdev, NETDEV_UP);
1509 } 1483 }
1510 1484
1485 netif_device_attach(netdev);
1511 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); 1486 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
1512 return 0; 1487 return 0;
1513 1488
@@ -1517,6 +1492,85 @@ err_out:
1517 nx_decr_dev_ref_cnt(adapter); 1492 nx_decr_dev_ref_cnt(adapter);
1518 return err; 1493 return err;
1519} 1494}
1495
1496static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
1497 pci_channel_state_t state)
1498{
1499 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1500
1501 if (state == pci_channel_io_perm_failure)
1502 return PCI_ERS_RESULT_DISCONNECT;
1503
1504 if (nx_dev_request_aer(adapter))
1505 return PCI_ERS_RESULT_RECOVERED;
1506
1507 netxen_nic_detach_func(adapter);
1508
1509 pci_disable_device(pdev);
1510
1511 return PCI_ERS_RESULT_NEED_RESET;
1512}
1513
1514static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
1515{
1516 int err = 0;
1517
1518 err = netxen_nic_attach_func(pdev);
1519
1520 return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
1521}
1522
1523static void netxen_io_resume(struct pci_dev *pdev)
1524{
1525 pci_cleanup_aer_uncorrect_error_status(pdev);
1526}
1527
1528static void netxen_nic_shutdown(struct pci_dev *pdev)
1529{
1530 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1531
1532 netxen_nic_detach_func(adapter);
1533
1534 if (pci_save_state(pdev))
1535 return;
1536
1537 if (netxen_nic_wol_supported(adapter)) {
1538 pci_enable_wake(pdev, PCI_D3cold, 1);
1539 pci_enable_wake(pdev, PCI_D3hot, 1);
1540 }
1541
1542 pci_disable_device(pdev);
1543}
1544
1545#ifdef CONFIG_PM
1546static int
1547netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1548{
1549 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1550 int retval;
1551
1552 netxen_nic_detach_func(adapter);
1553
1554 retval = pci_save_state(pdev);
1555 if (retval)
1556 return retval;
1557
1558 if (netxen_nic_wol_supported(adapter)) {
1559 pci_enable_wake(pdev, PCI_D3cold, 1);
1560 pci_enable_wake(pdev, PCI_D3hot, 1);
1561 }
1562
1563 pci_disable_device(pdev);
1564 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1565
1566 return 0;
1567}
1568
1569static int
1570netxen_nic_resume(struct pci_dev *pdev)
1571{
1572 return netxen_nic_attach_func(pdev);
1573}
1520#endif 1574#endif
1521 1575
1522static int netxen_nic_open(struct net_device *netdev) 1576static int netxen_nic_open(struct net_device *netdev)
@@ -2104,20 +2158,49 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
2104 return count; 2158 return count;
2105} 2159}
2106 2160
2107static void 2161static int
2162nx_dev_request_aer(struct netxen_adapter *adapter)
2163{
2164 u32 state;
2165 int ret = -EINVAL;
2166
2167 if (netxen_api_lock(adapter))
2168 return ret;
2169
2170 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2171
2172 if (state == NX_DEV_NEED_AER)
2173 ret = 0;
2174 else if (state == NX_DEV_READY) {
2175 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
2176 ret = 0;
2177 }
2178
2179 netxen_api_unlock(adapter);
2180 return ret;
2181}
2182
2183static int
2108nx_dev_request_reset(struct netxen_adapter *adapter) 2184nx_dev_request_reset(struct netxen_adapter *adapter)
2109{ 2185{
2110 u32 state; 2186 u32 state;
2187 int ret = -EINVAL;
2111 2188
2112 if (netxen_api_lock(adapter)) 2189 if (netxen_api_lock(adapter))
2113 return; 2190 return ret;
2114 2191
2115 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2192 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2116 2193
2117 if (state != NX_DEV_INITALIZING) 2194 if (state == NX_DEV_NEED_RESET)
2195 ret = 0;
2196 else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
2118 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); 2197 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
2198 ret = 0;
2199 }
2119 2200
2120 netxen_api_unlock(adapter); 2201 netxen_api_unlock(adapter);
2202
2203 return ret;
2121} 2204}
2122 2205
2123static int 2206static int
@@ -2269,17 +2352,29 @@ netxen_check_health(struct netxen_adapter *adapter)
2269 u32 state, heartbit; 2352 u32 state, heartbit;
2270 struct net_device *netdev = adapter->netdev; 2353 struct net_device *netdev = adapter->netdev;
2271 2354
2355 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2356 if (state == NX_DEV_NEED_AER)
2357 return 0;
2358
2272 if (netxen_nic_check_temp(adapter)) 2359 if (netxen_nic_check_temp(adapter))
2273 goto detach; 2360 goto detach;
2274 2361
2275 if (adapter->need_fw_reset) { 2362 if (adapter->need_fw_reset) {
2276 nx_dev_request_reset(adapter); 2363 if (nx_dev_request_reset(adapter))
2364 return 0;
2277 goto detach; 2365 goto detach;
2278 } 2366 }
2279 2367
2280 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2368 /* NX_DEV_NEED_RESET, this state can be marked in two cases
2281 if (state == NX_DEV_NEED_RESET) 2369 * 1. Tx timeout 2. Fw hang
2282 goto detach; 2370 * Send request to destroy context in case of tx timeout only
2371 * and doesn't required in case of Fw hang
2372 */
2373 if (state == NX_DEV_NEED_RESET) {
2374 adapter->need_fw_reset = 1;
2375 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2376 goto detach;
2377 }
2283 2378
2284 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 2379 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2285 return 0; 2380 return 0;
@@ -2288,12 +2383,17 @@ netxen_check_health(struct netxen_adapter *adapter)
2288 if (heartbit != adapter->heartbit) { 2383 if (heartbit != adapter->heartbit) {
2289 adapter->heartbit = heartbit; 2384 adapter->heartbit = heartbit;
2290 adapter->fw_fail_cnt = 0; 2385 adapter->fw_fail_cnt = 0;
2386 if (adapter->need_fw_reset)
2387 goto detach;
2291 return 0; 2388 return 0;
2292 } 2389 }
2293 2390
2294 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) 2391 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2295 return 0; 2392 return 0;
2296 2393
2394 if (nx_dev_request_reset(adapter))
2395 return 0;
2396
2297 clear_bit(__NX_FW_ATTACHED, &adapter->state); 2397 clear_bit(__NX_FW_ATTACHED, &adapter->state);
2298 2398
2299 dev_info(&netdev->dev, "firmware hang detected\n"); 2399 dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2496,7 +2596,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2496 return size; 2596 return size;
2497} 2597}
2498 2598
2499ssize_t netxen_sysfs_write_mem(struct kobject *kobj, 2599static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
2500 struct bin_attribute *attr, char *buf, 2600 struct bin_attribute *attr, char *buf,
2501 loff_t offset, size_t size) 2601 loff_t offset, size_t size)
2502{ 2602{
@@ -2723,6 +2823,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
2723{ } 2823{ }
2724#endif 2824#endif
2725 2825
2826static struct pci_error_handlers netxen_err_handler = {
2827 .error_detected = netxen_io_error_detected,
2828 .slot_reset = netxen_io_slot_reset,
2829 .resume = netxen_io_resume,
2830};
2831
2726static struct pci_driver netxen_driver = { 2832static struct pci_driver netxen_driver = {
2727 .name = netxen_nic_driver_name, 2833 .name = netxen_nic_driver_name,
2728 .id_table = netxen_pci_tbl, 2834 .id_table = netxen_pci_tbl,
@@ -2732,7 +2838,8 @@ static struct pci_driver netxen_driver = {
2732 .suspend = netxen_nic_suspend, 2838 .suspend = netxen_nic_suspend,
2733 .resume = netxen_nic_resume, 2839 .resume = netxen_nic_resume,
2734#endif 2840#endif
2735 .shutdown = netxen_nic_shutdown 2841 .shutdown = netxen_nic_shutdown,
2842 .err_handler = &netxen_err_handler
2736}; 2843};
2737 2844
2738static int __init netxen_init_module(void) 2845static int __init netxen_init_module(void)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2aed2b382c4..af9a8647c7e 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -58,7 +58,7 @@ static void writeq(u64 val, void __iomem *reg)
58} 58}
59#endif 59#endif
60 60
61static struct pci_device_id niu_pci_tbl[] = { 61static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
62 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, 62 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
63 {} 63 {}
64}; 64};
@@ -6372,7 +6372,7 @@ static void niu_set_rx_mode(struct net_device *dev)
6372 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) 6372 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
6373 np->flags |= NIU_FLAGS_MCAST; 6373 np->flags |= NIU_FLAGS_MCAST;
6374 6374
6375 alt_cnt = dev->uc.count; 6375 alt_cnt = netdev_uc_count(dev);
6376 if (alt_cnt > niu_num_alt_addr(np)) { 6376 if (alt_cnt > niu_num_alt_addr(np)) {
6377 alt_cnt = 0; 6377 alt_cnt = 0;
6378 np->flags |= NIU_FLAGS_PROMISC; 6378 np->flags |= NIU_FLAGS_PROMISC;
@@ -6381,7 +6381,7 @@ static void niu_set_rx_mode(struct net_device *dev)
6381 if (alt_cnt) { 6381 if (alt_cnt) {
6382 int index = 0; 6382 int index = 0;
6383 6383
6384 list_for_each_entry(ha, &dev->uc.list, list) { 6384 netdev_for_each_uc_addr(ha, dev) {
6385 err = niu_set_alt_mac(np, index, ha->addr); 6385 err = niu_set_alt_mac(np, index, ha->addr);
6386 if (err) 6386 if (err)
6387 printk(KERN_WARNING PFX "%s: Error %d " 6387 printk(KERN_WARNING PFX "%s: Error %d "
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d4153..a3b6aa0f375 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -2292,7 +2292,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
2292 pci_set_drvdata(pci_dev, NULL); 2292 pci_set_drvdata(pci_dev, NULL);
2293} 2293}
2294 2294
2295static struct pci_device_id ns83820_pci_tbl[] = { 2295static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
2296 { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, }, 2296 { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
2297 { 0, }, 2297 { 0, },
2298}; 2298};
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 050538bf155..6fd8789ef48 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -1119,11 +1119,8 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
1119 1119
1120 if (p->port >= octeon_bootinfo->mac_addr_count) 1120 if (p->port >= octeon_bootinfo->mac_addr_count)
1121 dev_err(&pdev->dev, 1121 dev_err(&pdev->dev,
1122 "Error %s: Using MAC outside of the assigned range: " 1122 "Error %s: Using MAC outside of the assigned range: %pM\n",
1123 "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name, 1123 netdev->name, netdev->dev_addr);
1124 netdev->dev_addr[0], netdev->dev_addr[1],
1125 netdev->dev_addr[2], netdev->dev_addr[3],
1126 netdev->dev_addr[4], netdev->dev_addr[5]);
1127 1124
1128 if (register_netdev(netdev)) 1125 if (register_netdev(netdev))
1129 goto err; 1126 goto err;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1..d44d4a208bb 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1875,7 +1875,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
1875 free_netdev(netdev); 1875 free_netdev(netdev);
1876} 1876}
1877 1877
1878static struct pci_device_id pasemi_mac_pci_tbl[] = { 1878static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
1879 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, 1879 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
1880 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, 1880 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
1881 { }, 1881 { },
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402aff..20273832bfc 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -211,7 +211,7 @@ static struct {
211}; 211};
212 212
213 213
214static struct pci_device_id netdrv_pci_tbl[] = { 214static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
215 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 215 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
216 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB }, 216 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
217 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX }, 217 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d431b59e7d1..2ee57bd52a0 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1065,14 +1065,11 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
1065 1065
1066 spin_lock_irqsave(&ei_local->page_lock, flags); 1066 spin_lock_irqsave(&ei_local->page_lock, flags);
1067 outb_p(0x00, e8390_base + EN0_IMR); 1067 outb_p(0x00, e8390_base + EN0_IMR);
1068 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1069 1068
1070 /* 1069 /*
1071 * Slow phase with lock held. 1070 * Slow phase with lock held.
1072 */ 1071 */
1073 1072
1074 spin_lock_irqsave(&ei_local->page_lock, flags);
1075
1076 ei_local->irqlock = 1; 1073 ei_local->irqlock = 1;
1077 1074
1078 send_length = max(length, ETH_ZLEN); 1075 send_length = max(length, ETH_ZLEN);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index e154677ff70..0dc7ff896ee 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -59,7 +59,7 @@ static const char *const version =
59/* 59/*
60 * PCI device identifiers for "new style" Linux PCI Device Drivers 60 * PCI device identifiers for "new style" Linux PCI Device Drivers
61 */ 61 */
62static struct pci_device_id pcnet32_pci_tbl[] = { 62static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
63 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, 63 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
64 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, 64 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
65 65
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6f69b9ba0df..65ed385c2ce 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -63,6 +63,7 @@
63#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb 63#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
64#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3 64#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
65#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4 65#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
66#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
66#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000 67#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
67#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000 68#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
68 69
@@ -269,6 +270,43 @@ static int m88e1111_config_init(struct phy_device *phydev)
269 return err; 270 return err;
270 } 271 }
271 272
273 if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
274 temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
275 if (temp < 0)
276 return temp;
277 temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
278 err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
279 if (err < 0)
280 return err;
281
282 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
283 if (temp < 0)
284 return temp;
285 temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
286 temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
287 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
288 if (err < 0)
289 return err;
290
291 /* soft reset */
292 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
293 if (err < 0)
294 return err;
295 do
296 temp = phy_read(phydev, MII_BMCR);
297 while (temp & BMCR_RESET);
298
299 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
300 if (temp < 0)
301 return temp;
302 temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
303 temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
304 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
305 if (err < 0)
306 return err;
307 }
308
309
272 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 310 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
273 if (err < 0) 311 if (err < 0)
274 return err; 312 return err;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd..ed2644a5750 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
25 25
26#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ 26#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
27#define MII_LAN83C185_IM 30 /* Interrupt Mask */ 27#define MII_LAN83C185_IM 30 /* Interrupt Mask */
28#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
28 29
29#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ 30#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
30#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ 31#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
37#define MII_LAN83C185_ISF_INT_ALL (0x0e) 38#define MII_LAN83C185_ISF_INT_ALL (0x0e)
38 39
39#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ 40#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
40 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) 41 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
42 MII_LAN83C185_ISF_INT7)
41 43
44#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
42 45
43static int smsc_phy_config_intr(struct phy_device *phydev) 46static int smsc_phy_config_intr(struct phy_device *phydev)
44{ 47{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
59 62
60static int smsc_phy_config_init(struct phy_device *phydev) 63static int smsc_phy_config_init(struct phy_device *phydev)
61{ 64{
65 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
66 if (rc < 0)
67 return rc;
68
69 /* Enable energy detect mode for this SMSC Transceivers */
70 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
71 rc | MII_LAN83C185_EDPWRDOWN);
72 if (rc < 0)
73 return rc;
74
62 return smsc_phy_ack_interrupt (phydev); 75 return smsc_phy_ack_interrupt (phydev);
63} 76}
64 77
78static int lan911x_config_init(struct phy_device *phydev)
79{
80 return smsc_phy_ack_interrupt(phydev);
81}
65 82
66static struct phy_driver lan83c185_driver = { 83static struct phy_driver lan83c185_driver = {
67 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ 84 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
147 /* basic functions */ 164 /* basic functions */
148 .config_aneg = genphy_config_aneg, 165 .config_aneg = genphy_config_aneg,
149 .read_status = genphy_read_status, 166 .read_status = genphy_read_status,
150 .config_init = smsc_phy_config_init, 167 .config_init = lan911x_config_init,
151 168
152 /* IRQ related */ 169 /* IRQ related */
153 .ack_interrupt = smsc_phy_ack_interrupt, 170 .ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 2282e729edb..6d61602208c 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -167,7 +167,7 @@ struct channel {
167 u8 avail; /* flag used in multilink stuff */ 167 u8 avail; /* flag used in multilink stuff */
168 u8 had_frag; /* >= 1 fragments have been sent */ 168 u8 had_frag; /* >= 1 fragments have been sent */
169 u32 lastseq; /* MP: last sequence # received */ 169 u32 lastseq; /* MP: last sequence # received */
170 int speed; /* speed of the corresponding ppp channel*/ 170 int speed; /* speed of the corresponding ppp channel*/
171#endif /* CONFIG_PPP_MULTILINK */ 171#endif /* CONFIG_PPP_MULTILINK */
172}; 172};
173 173
@@ -1293,13 +1293,13 @@ ppp_push(struct ppp *ppp)
1293 */ 1293 */
1294static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1294static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1295{ 1295{
1296 int len, totlen; 1296 int len, totlen;
1297 int i, bits, hdrlen, mtu; 1297 int i, bits, hdrlen, mtu;
1298 int flen; 1298 int flen;
1299 int navail, nfree, nzero; 1299 int navail, nfree, nzero;
1300 int nbigger; 1300 int nbigger;
1301 int totspeed; 1301 int totspeed;
1302 int totfree; 1302 int totfree;
1303 unsigned char *p, *q; 1303 unsigned char *p, *q;
1304 struct list_head *list; 1304 struct list_head *list;
1305 struct channel *pch; 1305 struct channel *pch;
@@ -1307,21 +1307,21 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1307 struct ppp_channel *chan; 1307 struct ppp_channel *chan;
1308 1308
1309 totspeed = 0; /*total bitrate of the bundle*/ 1309 totspeed = 0; /*total bitrate of the bundle*/
1310 nfree = 0; /* # channels which have no packet already queued */ 1310 nfree = 0; /* # channels which have no packet already queued */
1311 navail = 0; /* total # of usable channels (not deregistered) */ 1311 navail = 0; /* total # of usable channels (not deregistered) */
1312 nzero = 0; /* number of channels with zero speed associated*/ 1312 nzero = 0; /* number of channels with zero speed associated*/
1313 totfree = 0; /*total # of channels available and 1313 totfree = 0; /*total # of channels available and
1314 *having no queued packets before 1314 *having no queued packets before
1315 *starting the fragmentation*/ 1315 *starting the fragmentation*/
1316 1316
1317 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1317 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1318 i = 0; 1318 i = 0;
1319 list_for_each_entry(pch, &ppp->channels, clist) { 1319 list_for_each_entry(pch, &ppp->channels, clist) {
1320 navail += pch->avail = (pch->chan != NULL); 1320 navail += pch->avail = (pch->chan != NULL);
1321 pch->speed = pch->chan->speed; 1321 pch->speed = pch->chan->speed;
1322 if (pch->avail) { 1322 if (pch->avail) {
1323 if (skb_queue_empty(&pch->file.xq) || 1323 if (skb_queue_empty(&pch->file.xq) ||
1324 !pch->had_frag) { 1324 !pch->had_frag) {
1325 if (pch->speed == 0) 1325 if (pch->speed == 0)
1326 nzero++; 1326 nzero++;
1327 else 1327 else
@@ -1331,60 +1331,60 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1331 ++nfree; 1331 ++nfree;
1332 ++totfree; 1332 ++totfree;
1333 } 1333 }
1334 if (!pch->had_frag && i < ppp->nxchan) 1334 if (!pch->had_frag && i < ppp->nxchan)
1335 ppp->nxchan = i; 1335 ppp->nxchan = i;
1336 } 1336 }
1337 ++i; 1337 ++i;
1338 } 1338 }
1339 /* 1339 /*
1340 * Don't start sending this packet unless at least half of 1340 * Don't start sending this packet unless at least half of
1341 * the channels are free. This gives much better TCP 1341 * the channels are free. This gives much better TCP
1342 * performance if we have a lot of channels. 1342 * performance if we have a lot of channels.
1343 */ 1343 */
1344 if (nfree == 0 || nfree < navail / 2) 1344 if (nfree == 0 || nfree < navail / 2)
1345 return 0; /* can't take now, leave it in xmit_pending */ 1345 return 0; /* can't take now, leave it in xmit_pending */
1346 1346
1347 /* Do protocol field compression (XXX this should be optional) */ 1347 /* Do protocol field compression (XXX this should be optional) */
1348 p = skb->data; 1348 p = skb->data;
1349 len = skb->len; 1349 len = skb->len;
1350 if (*p == 0) { 1350 if (*p == 0) {
1351 ++p; 1351 ++p;
1352 --len; 1352 --len;
1353 } 1353 }
1354 1354
1355 totlen = len; 1355 totlen = len;
1356 nbigger = len % nfree; 1356 nbigger = len % nfree;
1357 1357
1358 /* skip to the channel after the one we last used 1358 /* skip to the channel after the one we last used
1359 and start at that one */ 1359 and start at that one */
1360 list = &ppp->channels; 1360 list = &ppp->channels;
1361 for (i = 0; i < ppp->nxchan; ++i) { 1361 for (i = 0; i < ppp->nxchan; ++i) {
1362 list = list->next; 1362 list = list->next;
1363 if (list == &ppp->channels) { 1363 if (list == &ppp->channels) {
1364 i = 0; 1364 i = 0;
1365 break; 1365 break;
1366 } 1366 }
1367 } 1367 }
1368 1368
1369 /* create a fragment for each channel */ 1369 /* create a fragment for each channel */
1370 bits = B; 1370 bits = B;
1371 while (len > 0) { 1371 while (len > 0) {
1372 list = list->next; 1372 list = list->next;
1373 if (list == &ppp->channels) { 1373 if (list == &ppp->channels) {
1374 i = 0; 1374 i = 0;
1375 continue; 1375 continue;
1376 } 1376 }
1377 pch = list_entry(list, struct channel, clist); 1377 pch = list_entry(list, struct channel, clist);
1378 ++i; 1378 ++i;
1379 if (!pch->avail) 1379 if (!pch->avail)
1380 continue; 1380 continue;
1381 1381
1382 /* 1382 /*
1383 * Skip this channel if it has a fragment pending already and 1383 * Skip this channel if it has a fragment pending already and
1384 * we haven't given a fragment to all of the free channels. 1384 * we haven't given a fragment to all of the free channels.
1385 */ 1385 */
1386 if (pch->avail == 1) { 1386 if (pch->avail == 1) {
1387 if (nfree > 0) 1387 if (nfree > 0)
1388 continue; 1388 continue;
1389 } else { 1389 } else {
1390 pch->avail = 1; 1390 pch->avail = 1;
@@ -1393,32 +1393,32 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1393 /* check the channel's mtu and whether it is still attached. */ 1393 /* check the channel's mtu and whether it is still attached. */
1394 spin_lock_bh(&pch->downl); 1394 spin_lock_bh(&pch->downl);
1395 if (pch->chan == NULL) { 1395 if (pch->chan == NULL) {
1396 /* can't use this channel, it's being deregistered */ 1396 /* can't use this channel, it's being deregistered */
1397 if (pch->speed == 0) 1397 if (pch->speed == 0)
1398 nzero--; 1398 nzero--;
1399 else 1399 else
1400 totspeed -= pch->speed; 1400 totspeed -= pch->speed;
1401 1401
1402 spin_unlock_bh(&pch->downl); 1402 spin_unlock_bh(&pch->downl);
1403 pch->avail = 0; 1403 pch->avail = 0;
1404 totlen = len; 1404 totlen = len;
1405 totfree--; 1405 totfree--;
1406 nfree--; 1406 nfree--;
1407 if (--navail == 0) 1407 if (--navail == 0)
1408 break; 1408 break;
1409 continue; 1409 continue;
1410 } 1410 }
1411 1411
1412 /* 1412 /*
1413 *if the channel speed is not set divide 1413 *if the channel speed is not set divide
1414 *the packet evenly among the free channels; 1414 *the packet evenly among the free channels;
1415 *otherwise divide it according to the speed 1415 *otherwise divide it according to the speed
1416 *of the channel we are going to transmit on 1416 *of the channel we are going to transmit on
1417 */ 1417 */
1418 flen = len; 1418 flen = len;
1419 if (nfree > 0) { 1419 if (nfree > 0) {
1420 if (pch->speed == 0) { 1420 if (pch->speed == 0) {
1421 flen = totlen/nfree ; 1421 flen = totlen/nfree;
1422 if (nbigger > 0) { 1422 if (nbigger > 0) {
1423 flen++; 1423 flen++;
1424 nbigger--; 1424 nbigger--;
@@ -1436,8 +1436,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1436 } 1436 }
1437 1437
1438 /* 1438 /*
1439 *check if we are on the last channel or 1439 *check if we are on the last channel or
1440 *we exceded the lenght of the data to 1440 *we exceded the lenght of the data to
1441 *fragment 1441 *fragment
1442 */ 1442 */
1443 if ((nfree <= 0) || (flen > len)) 1443 if ((nfree <= 0) || (flen > len))
@@ -1448,29 +1448,29 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1448 *above formula will be equal or less than zero. 1448 *above formula will be equal or less than zero.
1449 *Skip the channel in this case 1449 *Skip the channel in this case
1450 */ 1450 */
1451 if (flen <= 0) { 1451 if (flen <= 0) {
1452 pch->avail = 2; 1452 pch->avail = 2;
1453 spin_unlock_bh(&pch->downl); 1453 spin_unlock_bh(&pch->downl);
1454 continue; 1454 continue;
1455 } 1455 }
1456 1456
1457 mtu = pch->chan->mtu - hdrlen; 1457 mtu = pch->chan->mtu - hdrlen;
1458 if (mtu < 4) 1458 if (mtu < 4)
1459 mtu = 4; 1459 mtu = 4;
1460 if (flen > mtu) 1460 if (flen > mtu)
1461 flen = mtu; 1461 flen = mtu;
1462 if (flen == len) 1462 if (flen == len)
1463 bits |= E; 1463 bits |= E;
1464 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC); 1464 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1465 if (!frag) 1465 if (!frag)
1466 goto noskb; 1466 goto noskb;
1467 q = skb_put(frag, flen + hdrlen); 1467 q = skb_put(frag, flen + hdrlen);
1468 1468
1469 /* make the MP header */ 1469 /* make the MP header */
1470 q[0] = PPP_MP >> 8; 1470 q[0] = PPP_MP >> 8;
1471 q[1] = PPP_MP; 1471 q[1] = PPP_MP;
1472 if (ppp->flags & SC_MP_XSHORTSEQ) { 1472 if (ppp->flags & SC_MP_XSHORTSEQ) {
1473 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1473 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1474 q[3] = ppp->nxseq; 1474 q[3] = ppp->nxseq;
1475 } else { 1475 } else {
1476 q[2] = bits; 1476 q[2] = bits;
@@ -1483,24 +1483,24 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1483 1483
1484 /* try to send it down the channel */ 1484 /* try to send it down the channel */
1485 chan = pch->chan; 1485 chan = pch->chan;
1486 if (!skb_queue_empty(&pch->file.xq) || 1486 if (!skb_queue_empty(&pch->file.xq) ||
1487 !chan->ops->start_xmit(chan, frag)) 1487 !chan->ops->start_xmit(chan, frag))
1488 skb_queue_tail(&pch->file.xq, frag); 1488 skb_queue_tail(&pch->file.xq, frag);
1489 pch->had_frag = 1; 1489 pch->had_frag = 1;
1490 p += flen; 1490 p += flen;
1491 len -= flen; 1491 len -= flen;
1492 ++ppp->nxseq; 1492 ++ppp->nxseq;
1493 bits = 0; 1493 bits = 0;
1494 spin_unlock_bh(&pch->downl); 1494 spin_unlock_bh(&pch->downl);
1495 } 1495 }
1496 ppp->nxchan = i; 1496 ppp->nxchan = i;
1497 1497
1498 return 1; 1498 return 1;
1499 1499
1500 noskb: 1500 noskb:
1501 spin_unlock_bh(&pch->downl); 1501 spin_unlock_bh(&pch->downl);
1502 if (ppp->debug & 1) 1502 if (ppp->debug & 1)
1503 printk(KERN_ERR "PPP: no memory (fragment)\n"); 1503 printk(KERN_ERR "PPP: no memory (fragment)\n");
1504 ++ppp->dev->stats.tx_errors; 1504 ++ppp->dev->stats.tx_errors;
1505 ++ppp->nxseq; 1505 ++ppp->nxseq;
1506 return 1; /* abandon the frame */ 1506 return 1; /* abandon the frame */
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8..4ef0afbcbe1 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
61module_param(msi, int, 0); 61module_param(msi, int, 0);
62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); 62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63 63
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { 64static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, 66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
67 /* required last entry */ 67 /* required last entry */
@@ -4087,7 +4087,6 @@ static void __devexit ql3xxx_remove(struct pci_dev *pdev)
4087 struct ql3_adapter *qdev = netdev_priv(ndev); 4087 struct ql3_adapter *qdev = netdev_priv(ndev);
4088 4088
4089 unregister_netdev(ndev); 4089 unregister_netdev(ndev);
4090 qdev = netdev_priv(ndev);
4091 4090
4092 ql_disable_interrupts(qdev); 4091 ql_disable_interrupts(qdev);
4093 4092
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 00000000000..ddba83ef3f4
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
3#
4
5obj-$(CONFIG_QLCNIC) := qlcnic.o
6
7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 00000000000..abec4684653
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1106 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef _QLCNIC_H_
26#define _QLCNIC_H_
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/ioport.h>
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ip.h>
36#include <linux/in.h>
37#include <linux/tcp.h>
38#include <linux/skbuff.h>
39#include <linux/firmware.h>
40
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include <linux/timer.h>
44
45#include <linux/vmalloc.h>
46
47#include <linux/io.h>
48#include <asm/byteorder.h>
49
50#include "qlcnic_hdr.h"
51
52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 0
55#define QLCNIC_LINUX_VERSIONID "5.0.0"
56
57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
58#define _major(v) (((v) >> 24) & 0xff)
59#define _minor(v) (((v) >> 16) & 0xff)
60#define _build(v) ((v) & 0xffff)
61
62/* version in image has weird encoding:
63 * 7:0 - major
64 * 15:8 - minor
65 * 31:16 - build (little endian)
66 */
67#define QLCNIC_DECODE_VERSION(v) \
68 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
69
70#define QLCNIC_NUM_FLASH_SECTORS (64)
71#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
72#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
73 * QLCNIC_FLASH_SECTOR_SIZE)
74
75#define RCV_DESC_RINGSIZE(rds_ring) \
76 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
77#define RCV_BUFF_RINGSIZE(rds_ring) \
78 (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
79#define STATUS_DESC_RINGSIZE(sds_ring) \
80 (sizeof(struct status_desc) * (sds_ring)->num_desc)
81#define TX_BUFF_RINGSIZE(tx_ring) \
82 (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
83#define TX_DESC_RINGSIZE(tx_ring) \
84 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
85
86#define QLCNIC_P3P_A0 0x50
87
88#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
89
90#define FIRST_PAGE_GROUP_START 0
91#define FIRST_PAGE_GROUP_END 0x100000
92
93#define P3_MAX_MTU (9600)
94#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
95
96#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
97#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
98#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
99#define QLCNIC_LRO_BUFFER_EXTRA 2048
100
101#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
102
103/* Opcodes to be used with the commands */
104#define TX_ETHER_PKT 0x01
105#define TX_TCP_PKT 0x02
106#define TX_UDP_PKT 0x03
107#define TX_IP_PKT 0x04
108#define TX_TCP_LSO 0x05
109#define TX_TCP_LSO6 0x06
110#define TX_IPSEC 0x07
111#define TX_IPSEC_CMD 0x0a
112#define TX_TCPV6_PKT 0x0b
113#define TX_UDPV6_PKT 0x0c
114
115/* Tx defines */
116#define MAX_BUFFERS_PER_CMD 32
117#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
118#define QLCNIC_MAX_TX_TIMEOUTS 2
119
120/*
121 * Following are the states of the Phantom. Phantom will set them and
122 * Host will read to check if the fields are correct.
123 */
124#define PHAN_INITIALIZE_FAILED 0xffff
125#define PHAN_INITIALIZE_COMPLETE 0xff01
126
127/* Host writes the following to notify that it has done the init-handshake */
128#define PHAN_INITIALIZE_ACK 0xf00f
129#define PHAN_PEG_RCV_INITIALIZED 0xff01
130
131#define NUM_RCV_DESC_RINGS 3
132#define NUM_STS_DESC_RINGS 4
133
134#define RCV_RING_NORMAL 0
135#define RCV_RING_JUMBO 1
136#define RCV_RING_LRO 2
137
138#define MIN_CMD_DESCRIPTORS 64
139#define MIN_RCV_DESCRIPTORS 64
140#define MIN_JUMBO_DESCRIPTORS 32
141
142#define MAX_CMD_DESCRIPTORS 1024
143#define MAX_RCV_DESCRIPTORS_1G 4096
144#define MAX_RCV_DESCRIPTORS_10G 8192
145#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
146#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
147#define MAX_LRO_RCV_DESCRIPTORS 8
148
149#define DEFAULT_RCV_DESCRIPTORS_1G 2048
150#define DEFAULT_RCV_DESCRIPTORS_10G 4096
151
152#define get_next_index(index, length) \
153 (((index) + 1) & ((length) - 1))
154
155#define MPORT_MULTI_FUNCTION_MODE 0x2222
156
157/*
158 * Following data structures describe the descriptors that will be used.
159 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
160 * we are doing LSO (above the 1500 size packet) only.
161 */
162
163#define FLAGS_VLAN_TAGGED 0x10
164#define FLAGS_VLAN_OOB 0x40
165
166#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
167 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
168#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
169 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
170#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
171 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
172
173#define qlcnic_set_tx_port(_desc, _port) \
174 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
175
176#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
177 ((_desc)->flags_opcode = \
178 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
179
180#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
181 ((_desc)->nfrags__length = \
182 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
183
184struct cmd_desc_type0 {
185 u8 tcp_hdr_offset; /* For LSO only */
186 u8 ip_hdr_offset; /* For LSO only */
187 __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
188 __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
189
190 __le64 addr_buffer2;
191
192 __le16 reference_handle;
193 __le16 mss;
194 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
195 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
196 __le16 conn_id; /* IPSec offoad only */
197
198 __le64 addr_buffer3;
199 __le64 addr_buffer1;
200
201 __le16 buffer_length[4];
202
203 __le64 addr_buffer4;
204
205 __le32 reserved2;
206 __le16 reserved;
207 __le16 vlan_TCI;
208
209} __attribute__ ((aligned(64)));
210
211/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
212struct rcv_desc {
213 __le16 reference_handle;
214 __le16 reserved;
215 __le32 buffer_length; /* allocated buffer length (usually 2K) */
216 __le64 addr_buffer;
217};
218
219/* opcode field in status_desc */
220#define QLCNIC_SYN_OFFLOAD 0x03
221#define QLCNIC_RXPKT_DESC 0x04
222#define QLCNIC_OLD_RXPKT_DESC 0x3f
223#define QLCNIC_RESPONSE_DESC 0x05
224#define QLCNIC_LRO_DESC 0x12
225
226/* for status field in status_desc */
227#define STATUS_CKSUM_OK (2)
228
229/* owner bits of status_desc */
230#define STATUS_OWNER_HOST (0x1ULL << 56)
231#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
232
233/* Status descriptor:
234 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
235 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
236 53-55 desc_cnt, 56-57 owner, 58-63 opcode
237 */
238#define qlcnic_get_sts_port(sts_data) \
239 ((sts_data) & 0x0F)
240#define qlcnic_get_sts_status(sts_data) \
241 (((sts_data) >> 4) & 0x0F)
242#define qlcnic_get_sts_type(sts_data) \
243 (((sts_data) >> 8) & 0x0F)
244#define qlcnic_get_sts_totallength(sts_data) \
245 (((sts_data) >> 12) & 0xFFFF)
246#define qlcnic_get_sts_refhandle(sts_data) \
247 (((sts_data) >> 28) & 0xFFFF)
248#define qlcnic_get_sts_prot(sts_data) \
249 (((sts_data) >> 44) & 0x0F)
250#define qlcnic_get_sts_pkt_offset(sts_data) \
251 (((sts_data) >> 48) & 0x1F)
252#define qlcnic_get_sts_desc_cnt(sts_data) \
253 (((sts_data) >> 53) & 0x7)
254#define qlcnic_get_sts_opcode(sts_data) \
255 (((sts_data) >> 58) & 0x03F)
256
257#define qlcnic_get_lro_sts_refhandle(sts_data) \
258 ((sts_data) & 0x0FFFF)
259#define qlcnic_get_lro_sts_length(sts_data) \
260 (((sts_data) >> 16) & 0x0FFFF)
261#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
262 (((sts_data) >> 32) & 0x0FF)
263#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
264 (((sts_data) >> 40) & 0x0FF)
265#define qlcnic_get_lro_sts_timestamp(sts_data) \
266 (((sts_data) >> 48) & 0x1)
267#define qlcnic_get_lro_sts_type(sts_data) \
268 (((sts_data) >> 49) & 0x7)
269#define qlcnic_get_lro_sts_push_flag(sts_data) \
270 (((sts_data) >> 52) & 0x1)
271#define qlcnic_get_lro_sts_seq_number(sts_data) \
272 ((sts_data) & 0x0FFFFFFFF)
273
274
275struct status_desc {
276 __le64 status_desc_data[2];
277} __attribute__ ((aligned(16)));
278
279/* UNIFIED ROMIMAGE */
280#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
281#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
282#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
283#define QLCNIC_UNI_DIR_SECT_FW 0x7
284
285/*Offsets */
286#define QLCNIC_UNI_CHIP_REV_OFF 10
287#define QLCNIC_UNI_FLAGS_OFF 11
288#define QLCNIC_UNI_BIOS_VERSION_OFF 12
289#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
290#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
291
292struct uni_table_desc{
293 u32 findex;
294 u32 num_entries;
295 u32 entry_size;
296 u32 reserved[5];
297};
298
299struct uni_data_desc{
300 u32 findex;
301 u32 size;
302 u32 reserved[5];
303};
304
305/* Magic number to let user know flash is programmed */
306#define QLCNIC_BDINFO_MAGIC 0x12345678
307
308#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
309#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
310#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
311#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
312#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
313#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
314#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
315#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
316#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
317#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
318#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
319#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
320#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
321#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
322
323/* Flash memory map */
324#define QLCNIC_BRDCFG_START 0x4000 /* board config */
325#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
326#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
327#define QLCNIC_USER_START 0x3E8000 /* Firmare info */
328
329#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
330#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
331#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
332#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
333
334#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
335#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
336
337#define QLCNIC_FW_MIN_SIZE (0x3fffff)
338#define QLCNIC_UNIFIED_ROMIMAGE 0
339#define QLCNIC_FLASH_ROMIMAGE 1
340#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
341
342#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
343#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
344
345extern char qlcnic_driver_name[];
346
347/* Number of status descriptors to handle per interrupt */
348#define MAX_STATUS_HANDLE (64)
349
350/*
351 * qlcnic_skb_frag{} is to contain mapping info for each SG list. This
352 * has to be freed when DMA is complete. This is part of qlcnic_tx_buffer{}.
353 */
354struct qlcnic_skb_frag {
355 u64 dma;
356 u64 length;
357};
358
359struct qlcnic_recv_crb {
360 u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
361 u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
362 u32 sw_int_mask[NUM_STS_DESC_RINGS];
363};
364
365/* Following defines are for the state of the buffers */
366#define QLCNIC_BUFFER_FREE 0
367#define QLCNIC_BUFFER_BUSY 1
368
369/*
370 * There will be one qlcnic_buffer per skb packet. These will be
371 * used to save the dma info for pci_unmap_page()
372 */
373struct qlcnic_cmd_buffer {
374 struct sk_buff *skb;
375 struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
376 u32 frag_count;
377};
378
379/* In rx_buffer, we do not need multiple fragments as is a single buffer */
380struct qlcnic_rx_buffer {
381 struct list_head list;
382 struct sk_buff *skb;
383 u64 dma;
384 u16 ref_handle;
385 u16 state;
386};
387
388/* Board types */
389#define QLCNIC_GBE 0x01
390#define QLCNIC_XGBE 0x02
391
392/*
393 * One hardware_context{} per adapter
394 * contains interrupt info as well shared hardware info.
395 */
396struct qlcnic_hardware_context {
397 void __iomem *pci_base0;
398 void __iomem *ocm_win_crb;
399
400 unsigned long pci_len0;
401
402 u32 ocm_win;
403 u32 crb_win;
404
405 rwlock_t crb_lock;
406 struct mutex mem_lock;
407
408 u8 cut_through;
409 u8 revision_id;
410 u8 pci_func;
411 u8 linkup;
412 u16 port_type;
413 u16 board_type;
414};
415
416struct qlcnic_adapter_stats {
417 u64 xmitcalled;
418 u64 xmitfinished;
419 u64 rxdropped;
420 u64 txdropped;
421 u64 csummed;
422 u64 rx_pkts;
423 u64 lro_pkts;
424 u64 rxbytes;
425 u64 txbytes;
426};
427
428/*
429 * Rcv Descriptor Context. One such per Rcv Descriptor. There may
430 * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
431 */
432struct qlcnic_host_rds_ring {
433 u32 producer;
434 u32 num_desc;
435 u32 dma_size;
436 u32 skb_size;
437 u32 flags;
438 void __iomem *crb_rcv_producer;
439 struct rcv_desc *desc_head;
440 struct qlcnic_rx_buffer *rx_buf_arr;
441 struct list_head free_list;
442 spinlock_t lock;
443 dma_addr_t phys_addr;
444};
445
446struct qlcnic_host_sds_ring {
447 u32 consumer;
448 u32 num_desc;
449 void __iomem *crb_sts_consumer;
450 void __iomem *crb_intr_mask;
451
452 struct status_desc *desc_head;
453 struct qlcnic_adapter *adapter;
454 struct napi_struct napi;
455 struct list_head free_list[NUM_RCV_DESC_RINGS];
456
457 int irq;
458
459 dma_addr_t phys_addr;
460 char name[IFNAMSIZ+4];
461};
462
463struct qlcnic_host_tx_ring {
464 u32 producer;
465 __le32 *hw_consumer;
466 u32 sw_consumer;
467 void __iomem *crb_cmd_producer;
468 u32 num_desc;
469
470 struct netdev_queue *txq;
471
472 struct qlcnic_cmd_buffer *cmd_buf_arr;
473 struct cmd_desc_type0 *desc_head;
474 dma_addr_t phys_addr;
475 dma_addr_t hw_cons_phys_addr;
476};
477
478/*
479 * Receive context. There is one such structure per instance of the
480 * receive processing. Any state information that is relevant to
481 * the receive, and is must be in this structure. The global data may be
482 * present elsewhere.
483 */
484struct qlcnic_recv_context {
485 u32 state;
486 u16 context_id;
487 u16 virt_port;
488
489 struct qlcnic_host_rds_ring *rds_rings;
490 struct qlcnic_host_sds_ring *sds_rings;
491};
492
493/* HW context creation */
494
495#define QLCNIC_OS_CRB_RETRY_COUNT 4000
496#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
497 (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
498
499#define QLCNIC_CDRP_CMD_BIT 0x80000000
500
501/*
502 * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
503 * in the crb QLCNIC_CDRP_CRB_OFFSET.
504 */
505#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
506#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
507
508#define QLCNIC_CDRP_RSP_OK 0x00000001
509#define QLCNIC_CDRP_RSP_FAIL 0x00000002
510#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
511
512/*
513 * All commands must have the QLCNIC_CDRP_CMD_BIT set in
514 * the crb QLCNIC_CDRP_CRB_OFFSET.
515 */
516#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
517#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
518
519#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
520#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
521#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
522#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
523#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
524#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
525#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
526#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
527#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
528#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
529#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
530#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
531#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
532#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
533#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
534#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
535#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
536#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
537#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
538#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
539#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
540#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
541#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
542#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
543#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
544#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
545#define QLCNIC_CDRP_CMD_MAX 0x0000001f
546
547#define QLCNIC_RCODE_SUCCESS 0
548#define QLCNIC_RCODE_TIMEOUT 17
549#define QLCNIC_DESTROY_CTX_RESET 0
550
551/*
552 * Capabilities Announced
553 */
554#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
555#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
556#define QLCNIC_CAP0_LSO (1 << 6)
557#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
558#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
559
560/*
561 * Context state
562 */
563#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
564
565/*
566 * Rx context
567 */
568
569struct qlcnic_hostrq_sds_ring {
570 __le64 host_phys_addr; /* Ring base addr */
571 __le32 ring_size; /* Ring entries */
572 __le16 msi_index;
573 __le16 rsvd; /* Padding */
574};
575
576struct qlcnic_hostrq_rds_ring {
577 __le64 host_phys_addr; /* Ring base addr */
578 __le64 buff_size; /* Packet buffer size */
579 __le32 ring_size; /* Ring entries */
580 __le32 ring_kind; /* Class of ring */
581};
582
583struct qlcnic_hostrq_rx_ctx {
584 __le64 host_rsp_dma_addr; /* Response dma'd here */
585 __le32 capabilities[4]; /* Flag bit vector */
586 __le32 host_int_crb_mode; /* Interrupt crb usage */
587 __le32 host_rds_crb_mode; /* RDS crb usage */
588 /* These ring offsets are relative to data[0] below */
589 __le32 rds_ring_offset; /* Offset to RDS config */
590 __le32 sds_ring_offset; /* Offset to SDS config */
591 __le16 num_rds_rings; /* Count of RDS rings */
592 __le16 num_sds_rings; /* Count of SDS rings */
593 __le16 rsvd1; /* Padding */
594 __le16 rsvd2; /* Padding */
595 u8 reserved[128]; /* reserve space for future expansion*/
596 /* MUST BE 64-bit aligned.
597 The following is packed:
598 - N hostrq_rds_rings
599 - N hostrq_sds_rings */
600 char data[0];
601};
602
603struct qlcnic_cardrsp_rds_ring{
604 __le32 host_producer_crb; /* Crb to use */
605 __le32 rsvd1; /* Padding */
606};
607
608struct qlcnic_cardrsp_sds_ring {
609 __le32 host_consumer_crb; /* Crb to use */
610 __le32 interrupt_crb; /* Crb to use */
611};
612
613struct qlcnic_cardrsp_rx_ctx {
614 /* These ring offsets are relative to data[0] below */
615 __le32 rds_ring_offset; /* Offset to RDS config */
616 __le32 sds_ring_offset; /* Offset to SDS config */
617 __le32 host_ctx_state; /* Starting State */
618 __le32 num_fn_per_port; /* How many PCI fn share the port */
619 __le16 num_rds_rings; /* Count of RDS rings */
620 __le16 num_sds_rings; /* Count of SDS rings */
621 __le16 context_id; /* Handle for context */
622 u8 phys_port; /* Physical id of port */
623 u8 virt_port; /* Virtual/Logical id of port */
624 u8 reserved[128]; /* save space for future expansion */
625 /* MUST BE 64-bit aligned.
626 The following is packed:
627 - N cardrsp_rds_rings
628 - N cardrs_sds_rings */
629 char data[0];
630};
631
632#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
633 (sizeof(HOSTRQ_RX) + \
634 (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
635 (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
636
637#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
638 (sizeof(CARDRSP_RX) + \
639 (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
640 (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
641
642/*
643 * Tx context
644 */
645
646struct qlcnic_hostrq_cds_ring {
647 __le64 host_phys_addr; /* Ring base addr */
648 __le32 ring_size; /* Ring entries */
649 __le32 rsvd; /* Padding */
650};
651
652struct qlcnic_hostrq_tx_ctx {
653 __le64 host_rsp_dma_addr; /* Response dma'd here */
654 __le64 cmd_cons_dma_addr; /* */
655 __le64 dummy_dma_addr; /* */
656 __le32 capabilities[4]; /* Flag bit vector */
657 __le32 host_int_crb_mode; /* Interrupt crb usage */
658 __le32 rsvd1; /* Padding */
659 __le16 rsvd2; /* Padding */
660 __le16 interrupt_ctl;
661 __le16 msi_index;
662 __le16 rsvd3; /* Padding */
663 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
664 u8 reserved[128]; /* future expansion */
665};
666
667struct qlcnic_cardrsp_cds_ring {
668 __le32 host_producer_crb; /* Crb to use */
669 __le32 interrupt_crb; /* Crb to use */
670};
671
672struct qlcnic_cardrsp_tx_ctx {
673 __le32 host_ctx_state; /* Starting state */
674 __le16 context_id; /* Handle for context */
675 u8 phys_port; /* Physical id of port */
676 u8 virt_port; /* Virtual/Logical id of port */
677 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
678 u8 reserved[128]; /* future expansion */
679};
680
681#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
682#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
683
684/* CRB */
685
686#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
687#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
688#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
689#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
690
691#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
692#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
693#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
694#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
695#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
696
697
698/* MAC */
699
700#define MC_COUNT_P3 38
701
702#define QLCNIC_MAC_NOOP 0
703#define QLCNIC_MAC_ADD 1
704#define QLCNIC_MAC_DEL 2
705
706struct qlcnic_mac_list_s {
707 struct list_head list;
708 uint8_t mac_addr[ETH_ALEN+2];
709};
710
711/*
712 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
713 * adjusted based on configured MTU.
714 */
715#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
716#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
717#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
718#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
719
720#define QLCNIC_INTR_DEFAULT 0x04
721
722union qlcnic_nic_intr_coalesce_data {
723 struct {
724 u16 rx_packets;
725 u16 rx_time_us;
726 u16 tx_packets;
727 u16 tx_time_us;
728 } data;
729 u64 word;
730};
731
732struct qlcnic_nic_intr_coalesce {
733 u16 stats_time_us;
734 u16 rate_sample_time;
735 u16 flags;
736 u16 rsvd_1;
737 u32 low_threshold;
738 u32 high_threshold;
739 union qlcnic_nic_intr_coalesce_data normal;
740 union qlcnic_nic_intr_coalesce_data low;
741 union qlcnic_nic_intr_coalesce_data high;
742 union qlcnic_nic_intr_coalesce_data irq;
743};
744
745#define QLCNIC_HOST_REQUEST 0x13
746#define QLCNIC_REQUEST 0x14
747
748#define QLCNIC_MAC_EVENT 0x1
749
750#define QLCNIC_IP_UP 2
751#define QLCNIC_IP_DOWN 3
752
753/*
754 * Driver --> Firmware
755 */
756#define QLCNIC_H2C_OPCODE_START 0
757#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
758#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
759#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
760#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
761#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
762#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
763#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
764#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
765#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
766#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
767#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
768#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
769#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
770#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
771#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
772#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
773#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
774#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
775#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
776#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
777#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
778#define QLCNIC_C2C_OPCODE 22
779#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
780#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
781#define QLCNIC_H2C_OPCODE_LAST 25
782/*
783 * Firmware --> Driver
784 */
785
786#define QLCNIC_C2H_OPCODE_START 128
787#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
788#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
789#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
790#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
791#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
792#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
793#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
794#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
795#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
796#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
797#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
798#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
799#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
800#define QLCNIC_C2H_OPCODE_LAST 142
801
802#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
803#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
804#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
805
806#define QLCNIC_LRO_REQUEST_CLEANUP 4
807
/* Capabilities received */
809#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
810#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
811#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
812
813/* module types */
814#define LINKEVENT_MODULE_NOT_PRESENT 1
815#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
816#define LINKEVENT_MODULE_OPTICAL_SRLR 3
817#define LINKEVENT_MODULE_OPTICAL_LRM 4
818#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
819#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
820#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
821#define LINKEVENT_MODULE_TWINAX 8
822
823#define LINKSPEED_10GBPS 10000
824#define LINKSPEED_1GBPS 1000
825#define LINKSPEED_100MBPS 100
826#define LINKSPEED_10MBPS 10
827
828#define LINKSPEED_ENCODED_10MBPS 0
829#define LINKSPEED_ENCODED_100MBPS 1
830#define LINKSPEED_ENCODED_1GBPS 2
831
832#define LINKEVENT_AUTONEG_DISABLED 0
833#define LINKEVENT_AUTONEG_ENABLED 1
834
835#define LINKEVENT_HALF_DUPLEX 0
836#define LINKEVENT_FULL_DUPLEX 1
837
838#define LINKEVENT_LINKSPEED_MBPS 0
839#define LINKEVENT_LINKSPEED_ENCODED 1
840
841#define AUTO_FW_RESET_ENABLED 0x01
842/* firmware response header:
843 * 63:58 - message type
844 * 57:56 - owner
845 * 55:53 - desc count
846 * 52:48 - reserved
847 * 47:40 - completion id
848 * 39:32 - opcode
849 * 31:16 - error code
850 * 15:00 - reserved
851 */
/* Extract the opcode field (bits 39:32, per the header layout above)
 * from a 64-bit firmware response header.  The argument is fully
 * parenthesized so expression arguments (e.g. "hdr | flag") expand
 * correctly despite shift/or precedence. */
#define qlcnic_get_nic_msg_opcode(msg_hdr)	\
	(((msg_hdr) >> 32) & 0xFF)
854
855struct qlcnic_fw_msg {
856 union {
857 struct {
858 u64 hdr;
859 u64 body[7];
860 };
861 u64 words[8];
862 };
863};
864
865struct qlcnic_nic_req {
866 __le64 qhdr;
867 __le64 req_hdr;
868 __le64 words[6];
869};
870
871struct qlcnic_mac_req {
872 u8 op;
873 u8 tag;
874 u8 mac_addr[6];
875};
876
877#define QLCNIC_MSI_ENABLED 0x02
878#define QLCNIC_MSIX_ENABLED 0x04
879#define QLCNIC_LRO_ENABLED 0x08
880#define QLCNIC_BRIDGE_ENABLED 0X10
881#define QLCNIC_DIAG_ENABLED 0x20
882#define QLCNIC_IS_MSI_FAMILY(adapter) \
883 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
884
885#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
886#define QLCNIC_MSIX_TBL_SPACE 8192
887#define QLCNIC_PCI_REG_MSIX_TBL 0x44
888
889#define QLCNIC_NETDEV_WEIGHT 128
890#define QLCNIC_ADAPTER_UP_MAGIC 777
891
892#define __QLCNIC_FW_ATTACHED 0
893#define __QLCNIC_DEV_UP 1
894#define __QLCNIC_RESETTING 2
895#define __QLCNIC_START_FW 4
896
/*
 * Per-NIC driver state; one instance per probed PCI function.
 */
struct qlcnic_adapter {
	struct qlcnic_hardware_context ahw;

	struct net_device *netdev;
	struct pci_dev *pdev;
	struct list_head mac_list;	/* list of struct qlcnic_mac_list_s */

	spinlock_t tx_clean_lock;

	/* configured descriptor-ring depths */
	u16 num_txd;
	u16 num_rxd;
	u16 num_jumbo_rxd;
	u16 num_lro_rxd;

	u8 max_rds_rings;
	u8 max_sds_rings;
	u8 driver_mismatch;
	u8 msix_supported;
	u8 rx_csum;
	u8 pci_using_dac;
	u8 portnum;
	u8 physical_port;

	u8 mc_enabled;
	u8 max_mc_count;
	u8 rss_supported;
	u8 rsrvd1;
	/* firmware health-monitoring counters */
	u8 fw_wait_cnt;
	u8 fw_fail_cnt;
	u8 tx_timeo_cnt;
	u8 need_fw_reset;

	u8 has_link_events;
	u8 fw_type;
	u16 tx_context_id;	/* handle returned by CREATE_TX_CTX */
	u16 mtu;
	u16 is_up;

	/* last reported link state */
	u16 link_speed;
	u16 link_duplex;
	u16 link_autoneg;
	u16 module_type;

	u32 capabilities;	/* presumably QLCNIC_FW_CAPABILITY_* bits -- verify */
	u32 flags;		/* QLCNIC_*_ENABLED bits, see QLCNIC_IS_MSI_FAMILY */
	u32 irq;
	u32 temp;

	u32 int_vec_bit;
	u32 heartbit;

	u8 dev_state;
	u8 rsrd1;
	u32 rsrd2;


	u8 mac_addr[ETH_ALEN];

	struct qlcnic_adapter_stats stats;

	struct qlcnic_recv_context recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	/* mapped legacy interrupt control registers */
	void __iomem *tgt_mask_reg;
	void __iomem *tgt_status_reg;
	void __iomem *crb_int_state_reg;
	void __iomem *isr_int_vec;

	struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];

	struct delayed_work fw_work;

	struct work_struct tx_timeout_task;

	struct qlcnic_nic_intr_coalesce coal;

	unsigned long state;	/* __QLCNIC_* bit flags (set_bit/test_bit) */
	__le32 file_prd_off;	/*File fw product offset*/
	u32 fw_version;
	const struct firmware *fw;
};
978
979int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
980int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
981
982u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
983int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
984int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
985int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
986
987#define QLCRD32(adapter, off) \
988 (qlcnic_hw_read_wx_2M(adapter, off))
989#define QLCWR32(adapter, off, val) \
990 (qlcnic_hw_write_wx_2M(adapter, off, val))
991
992int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
993void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
994
995#define qlcnic_rom_lock(a) \
996 qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
997#define qlcnic_rom_unlock(a) \
998 qlcnic_pcie_sem_unlock((a), 2)
999#define qlcnic_phy_lock(a) \
1000 qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
1001#define qlcnic_phy_unlock(a) \
1002 qlcnic_pcie_sem_unlock((a), 3)
1003#define qlcnic_api_lock(a) \
1004 qlcnic_pcie_sem_lock((a), 5, 0)
1005#define qlcnic_api_unlock(a) \
1006 qlcnic_pcie_sem_unlock((a), 5)
1007#define qlcnic_sw_lock(a) \
1008 qlcnic_pcie_sem_lock((a), 6, 0)
1009#define qlcnic_sw_unlock(a) \
1010 qlcnic_pcie_sem_unlock((a), 6)
1011#define crb_win_lock(a) \
1012 qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
1013#define crb_win_unlock(a) \
1014 qlcnic_pcie_sem_unlock((a), 7)
1015
1016int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1017int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1018
1019/* Functions from qlcnic_init.c */
1020int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
1021int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
1022int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1023void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1024void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1025int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1026
1027int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1028int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1029 u8 *bytes, size_t size);
1030int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
1031void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
1032
1033void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
1034
1035int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1036void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
1037
1038void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1039void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1040
1041int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
1042void qlcnic_watchdog_task(struct work_struct *work);
1043void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1044 struct qlcnic_host_rds_ring *rds_ring);
1045int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1046void qlcnic_set_multi(struct net_device *netdev);
1047void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
1048int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
1049int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
1050int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
1051int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
1052int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
1053void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1054
1055int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1056int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1057int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1058int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
1059int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1060void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1061 struct qlcnic_host_tx_ring *tx_ring);
1062int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
1063
1064/* Functions from qlcnic_main.c */
1065int qlcnic_reset_context(struct qlcnic_adapter *);
1066
1067/*
1068 * QLOGIC Board information
1069 */
1070
1071#define QLCNIC_MAX_BOARD_NAME_LEN 64
1072struct qlcnic_brdinfo {
1073 unsigned short vendor;
1074 unsigned short device;
1075 unsigned short sub_vendor;
1076 unsigned short sub_device;
1077 char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
1078};
1079
1080static const struct qlcnic_brdinfo qlcnic_boards[] = {
1081 {0x1077, 0x8020, 0x1077, 0x203, "8200 Series Single Port 10GbE CNA"},
1082 {0x1077, 0x8020, 0x1077, 0x207, "8200 Series Dual Port 10GbE CNA"},
1083 {0x1077, 0x8020, 0x1077, 0x20b,
1084 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
1085 {0x1077, 0x8020, 0x1077, 0x20c,
1086 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1087 {0x1077, 0x8020, 0x1077, 0x20f,
1088 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1089 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1090};
1091
1092#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
1093
1094static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1095{
1096 smp_mb();
1097 if (tx_ring->producer < tx_ring->sw_consumer)
1098 return tx_ring->sw_consumer - tx_ring->producer;
1099 else
1100 return tx_ring->sw_consumer + tx_ring->num_desc -
1101 tx_ring->producer;
1102}
1103
1104extern const struct ethtool_ops qlcnic_ethtool_ops;
1105
1106#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 00000000000..71c16a18345
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,536 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27#define QLCHAL_VERSION 1
28
29static u32
30qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
31{
32 u32 rsp;
33 int timeout = 0;
34
35 do {
36 /* give atleast 1ms for firmware to respond */
37 msleep(1);
38
39 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
40 return QLCNIC_CDRP_RSP_TIMEOUT;
41
42 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
43 } while (!QLCNIC_CDRP_IS_RSP(rsp));
44
45 return rsp;
46}
47
48static u32
49qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
50 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
51{
52 u32 rsp;
53 u32 signature;
54 u32 rcode = QLCNIC_RCODE_SUCCESS;
55 struct pci_dev *pdev = adapter->pdev;
56
57 signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
58
59 /* Acquire semaphore before accessing CRB */
60 if (qlcnic_api_lock(adapter))
61 return QLCNIC_RCODE_TIMEOUT;
62
63 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
64 QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
65 QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
66 QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
67 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
68
69 rsp = qlcnic_poll_rsp(adapter);
70
71 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
72 dev_err(&pdev->dev, "card response timeout.\n");
73 rcode = QLCNIC_RCODE_TIMEOUT;
74 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
75 rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
76 dev_err(&pdev->dev, "failed card response code:0x%x\n",
77 rcode);
78 }
79
80 /* Release semaphore */
81 qlcnic_api_unlock(adapter);
82
83 return rcode;
84}
85
86int
87qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
88{
89 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
90
91 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
92 if (qlcnic_issue_cmd(adapter,
93 adapter->ahw.pci_func,
94 QLCHAL_VERSION,
95 recv_ctx->context_id,
96 mtu,
97 0,
98 QLCNIC_CDRP_CMD_SET_MTU)) {
99
100 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
101 return -EIO;
102 }
103 }
104
105 return 0;
106}
107
108static int
109qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
110{
111 void *addr;
112 struct qlcnic_hostrq_rx_ctx *prq;
113 struct qlcnic_cardrsp_rx_ctx *prsp;
114 struct qlcnic_hostrq_rds_ring *prq_rds;
115 struct qlcnic_hostrq_sds_ring *prq_sds;
116 struct qlcnic_cardrsp_rds_ring *prsp_rds;
117 struct qlcnic_cardrsp_sds_ring *prsp_sds;
118 struct qlcnic_host_rds_ring *rds_ring;
119 struct qlcnic_host_sds_ring *sds_ring;
120
121 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
122 u64 phys_addr;
123
124 int i, nrds_rings, nsds_rings;
125 size_t rq_size, rsp_size;
126 u32 cap, reg, val;
127 int err;
128
129 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
130
131 nrds_rings = adapter->max_rds_rings;
132 nsds_rings = adapter->max_sds_rings;
133
134 rq_size =
135 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
136 nsds_rings);
137 rsp_size =
138 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
139 nsds_rings);
140
141 addr = pci_alloc_consistent(adapter->pdev,
142 rq_size, &hostrq_phys_addr);
143 if (addr == NULL)
144 return -ENOMEM;
145 prq = (struct qlcnic_hostrq_rx_ctx *)addr;
146
147 addr = pci_alloc_consistent(adapter->pdev,
148 rsp_size, &cardrsp_phys_addr);
149 if (addr == NULL) {
150 err = -ENOMEM;
151 goto out_free_rq;
152 }
153 prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
154
155 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
156
157 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
158 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
159
160 prq->capabilities[0] = cpu_to_le32(cap);
161 prq->host_int_crb_mode =
162 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
163 prq->host_rds_crb_mode =
164 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
165
166 prq->num_rds_rings = cpu_to_le16(nrds_rings);
167 prq->num_sds_rings = cpu_to_le16(nsds_rings);
168 prq->rds_ring_offset = cpu_to_le32(0);
169
170 val = le32_to_cpu(prq->rds_ring_offset) +
171 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
172 prq->sds_ring_offset = cpu_to_le32(val);
173
174 prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
175 le32_to_cpu(prq->rds_ring_offset));
176
177 for (i = 0; i < nrds_rings; i++) {
178
179 rds_ring = &recv_ctx->rds_rings[i];
180
181 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
182 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
183 prq_rds[i].ring_kind = cpu_to_le32(i);
184 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
185 }
186
187 prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
188 le32_to_cpu(prq->sds_ring_offset));
189
190 for (i = 0; i < nsds_rings; i++) {
191
192 sds_ring = &recv_ctx->sds_rings[i];
193
194 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
195 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
196 prq_sds[i].msi_index = cpu_to_le16(i);
197 }
198
199 phys_addr = hostrq_phys_addr;
200 err = qlcnic_issue_cmd(adapter,
201 adapter->ahw.pci_func,
202 QLCHAL_VERSION,
203 (u32)(phys_addr >> 32),
204 (u32)(phys_addr & 0xffffffff),
205 rq_size,
206 QLCNIC_CDRP_CMD_CREATE_RX_CTX);
207 if (err) {
208 dev_err(&adapter->pdev->dev,
209 "Failed to create rx ctx in firmware%d\n", err);
210 goto out_free_rsp;
211 }
212
213
214 prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
215 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
216
217 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
218 rds_ring = &recv_ctx->rds_rings[i];
219
220 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
221 rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
222 QLCNIC_REG(reg - 0x200));
223 }
224
225 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
226 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
227
228 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
229 sds_ring = &recv_ctx->sds_rings[i];
230
231 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
232 sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
233 QLCNIC_REG(reg - 0x200));
234
235 reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
236 sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
237 QLCNIC_REG(reg - 0x200));
238 }
239
240 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
241 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
242 recv_ctx->virt_port = prsp->virt_port;
243
244out_free_rsp:
245 pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
246out_free_rq:
247 pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
248 return err;
249}
250
251static void
252qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
253{
254 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
255
256 if (qlcnic_issue_cmd(adapter,
257 adapter->ahw.pci_func,
258 QLCHAL_VERSION,
259 recv_ctx->context_id,
260 QLCNIC_DESTROY_CTX_RESET,
261 0,
262 QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
263
264 dev_err(&adapter->pdev->dev,
265 "Failed to destroy rx ctx in firmware\n");
266 }
267}
268
269static int
270qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
271{
272 struct qlcnic_hostrq_tx_ctx *prq;
273 struct qlcnic_hostrq_cds_ring *prq_cds;
274 struct qlcnic_cardrsp_tx_ctx *prsp;
275 void *rq_addr, *rsp_addr;
276 size_t rq_size, rsp_size;
277 u32 temp;
278 int err;
279 u64 phys_addr;
280 dma_addr_t rq_phys_addr, rsp_phys_addr;
281 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
282
283 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
284 rq_addr = pci_alloc_consistent(adapter->pdev,
285 rq_size, &rq_phys_addr);
286 if (!rq_addr)
287 return -ENOMEM;
288
289 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
290 rsp_addr = pci_alloc_consistent(adapter->pdev,
291 rsp_size, &rsp_phys_addr);
292 if (!rsp_addr) {
293 err = -ENOMEM;
294 goto out_free_rq;
295 }
296
297 memset(rq_addr, 0, rq_size);
298 prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
299
300 memset(rsp_addr, 0, rsp_size);
301 prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
302
303 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
304
305 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
306 QLCNIC_CAP0_LSO);
307 prq->capabilities[0] = cpu_to_le32(temp);
308
309 prq->host_int_crb_mode =
310 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
311
312 prq->interrupt_ctl = 0;
313 prq->msi_index = 0;
314 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
315
316 prq_cds = &prq->cds_ring;
317
318 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
319 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
320
321 phys_addr = rq_phys_addr;
322 err = qlcnic_issue_cmd(adapter,
323 adapter->ahw.pci_func,
324 QLCHAL_VERSION,
325 (u32)(phys_addr >> 32),
326 ((u32)phys_addr & 0xffffffff),
327 rq_size,
328 QLCNIC_CDRP_CMD_CREATE_TX_CTX);
329
330 if (err == QLCNIC_RCODE_SUCCESS) {
331 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
332 tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
333 QLCNIC_REG(temp - 0x200));
334
335 adapter->tx_context_id =
336 le16_to_cpu(prsp->context_id);
337 } else {
338 dev_err(&adapter->pdev->dev,
339 "Failed to create tx ctx in firmware%d\n", err);
340 err = -EIO;
341 }
342
343 pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
344
345out_free_rq:
346 pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
347
348 return err;
349}
350
351static void
352qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
353{
354 if (qlcnic_issue_cmd(adapter,
355 adapter->ahw.pci_func,
356 QLCHAL_VERSION,
357 adapter->tx_context_id,
358 QLCNIC_DESTROY_CTX_RESET,
359 0,
360 QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
361
362 dev_err(&adapter->pdev->dev,
363 "Failed to destroy tx ctx in firmware\n");
364 }
365}
366
367int
368qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
369{
370
371 if (qlcnic_issue_cmd(adapter,
372 adapter->ahw.pci_func,
373 QLCHAL_VERSION,
374 reg,
375 0,
376 0,
377 QLCNIC_CDRP_CMD_READ_PHY)) {
378
379 return -EIO;
380 }
381
382 return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
383}
384
385int
386qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
387{
388 return qlcnic_issue_cmd(adapter,
389 adapter->ahw.pci_func,
390 QLCHAL_VERSION,
391 reg,
392 val,
393 0,
394 QLCNIC_CDRP_CMD_WRITE_PHY);
395}
396
397int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
398{
399 void *addr;
400 int err;
401 int ring;
402 struct qlcnic_recv_context *recv_ctx;
403 struct qlcnic_host_rds_ring *rds_ring;
404 struct qlcnic_host_sds_ring *sds_ring;
405 struct qlcnic_host_tx_ring *tx_ring;
406
407 struct pci_dev *pdev = adapter->pdev;
408
409 recv_ctx = &adapter->recv_ctx;
410 tx_ring = adapter->tx_ring;
411
412 tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
413 &tx_ring->hw_cons_phys_addr);
414 if (tx_ring->hw_consumer == NULL) {
415 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
416 return -ENOMEM;
417 }
418 *(tx_ring->hw_consumer) = 0;
419
420 /* cmd desc ring */
421 addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
422 &tx_ring->phys_addr);
423
424 if (addr == NULL) {
425 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
426 return -ENOMEM;
427 }
428
429 tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
430
431 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
432 rds_ring = &recv_ctx->rds_rings[ring];
433 addr = pci_alloc_consistent(adapter->pdev,
434 RCV_DESC_RINGSIZE(rds_ring),
435 &rds_ring->phys_addr);
436 if (addr == NULL) {
437 dev_err(&pdev->dev,
438 "failed to allocate rds ring [%d]\n", ring);
439 err = -ENOMEM;
440 goto err_out_free;
441 }
442 rds_ring->desc_head = (struct rcv_desc *)addr;
443
444 }
445
446 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
447 sds_ring = &recv_ctx->sds_rings[ring];
448
449 addr = pci_alloc_consistent(adapter->pdev,
450 STATUS_DESC_RINGSIZE(sds_ring),
451 &sds_ring->phys_addr);
452 if (addr == NULL) {
453 dev_err(&pdev->dev,
454 "failed to allocate sds ring [%d]\n", ring);
455 err = -ENOMEM;
456 goto err_out_free;
457 }
458 sds_ring->desc_head = (struct status_desc *)addr;
459 }
460
461
462 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
463 if (err)
464 goto err_out_free;
465 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
466 if (err)
467 goto err_out_free;
468
469 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
470 return 0;
471
472err_out_free:
473 qlcnic_free_hw_resources(adapter);
474 return err;
475}
476
477void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
478{
479 struct qlcnic_recv_context *recv_ctx;
480 struct qlcnic_host_rds_ring *rds_ring;
481 struct qlcnic_host_sds_ring *sds_ring;
482 struct qlcnic_host_tx_ring *tx_ring;
483 int ring;
484
485
486 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
487 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
488 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
489
490 /* Allow dma queues to drain after context reset */
491 msleep(20);
492 }
493
494 recv_ctx = &adapter->recv_ctx;
495
496 tx_ring = adapter->tx_ring;
497 if (tx_ring->hw_consumer != NULL) {
498 pci_free_consistent(adapter->pdev,
499 sizeof(u32),
500 tx_ring->hw_consumer,
501 tx_ring->hw_cons_phys_addr);
502 tx_ring->hw_consumer = NULL;
503 }
504
505 if (tx_ring->desc_head != NULL) {
506 pci_free_consistent(adapter->pdev,
507 TX_DESC_RINGSIZE(tx_ring),
508 tx_ring->desc_head, tx_ring->phys_addr);
509 tx_ring->desc_head = NULL;
510 }
511
512 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
513 rds_ring = &recv_ctx->rds_rings[ring];
514
515 if (rds_ring->desc_head != NULL) {
516 pci_free_consistent(adapter->pdev,
517 RCV_DESC_RINGSIZE(rds_ring),
518 rds_ring->desc_head,
519 rds_ring->phys_addr);
520 rds_ring->desc_head = NULL;
521 }
522 }
523
524 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
525 sds_ring = &recv_ctx->sds_rings[ring];
526
527 if (sds_ring->desc_head != NULL) {
528 pci_free_consistent(adapter->pdev,
529 STATUS_DESC_RINGSIZE(sds_ring),
530 sds_ring->desc_head,
531 sds_ring->phys_addr);
532 sds_ring->desc_head = NULL;
533 }
534 }
535}
536
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 00000000000..65e9620e28f
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,870 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/types.h>
26#include <linux/delay.h>
27#include <linux/pci.h>
28#include <linux/io.h>
29#include <linux/netdevice.h>
30#include <linux/ethtool.h>
31
32#include "qlcnic.h"
33
/* Maps one ethtool statistic name to the size and byte offset of the
 * corresponding counter field inside struct qlcnic_adapter. */
struct qlcnic_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)

/* Statistics table consumed by qlcnic_get_strings() and
 * qlcnic_get_ethtool_stats(); each entry is read straight out of the
 * adapter structure via its offset. */
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
	{"xmit_called",
		QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
	{"xmit_finished",
		QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
	{"rx_dropped",
		QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
	{"tx_dropped",
		QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
	{"csummed",
		QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
	{"rx_pkts",
		QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
	{"lro_pkts",
		QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
	{"rx_bytes",
		QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
	{"tx_bytes",
		QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
};

#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)

/* Self-test names reported for ETH_SS_TEST; order matches the data[]
 * slots filled in qlcnic_diag_test(). */
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register_Test_on_offline",
	"Link_Test_on_offline"
};

#define QLCNIC_TEST_LEN	ARRAY_SIZE(qlcnic_gstrings_test)

#define QLCNIC_RING_REGS_COUNT	20
#define QLCNIC_RING_REGS_LEN	(QLCNIC_RING_REGS_COUNT * sizeof(u32))
#define QLCNIC_MAX_EEPROM_LEN	1024

/* CRB registers included in an ethtool register dump
 * (qlcnic_get_regs()); the list is terminated by -1. */
static const u32 diag_registers[] = {
	CRB_CMDPEG_STATE,
	CRB_RCVPEG_STATE,
	CRB_XG_STATE_P3,
	CRB_FW_CAPABILITIES_1,
	ISR_INT_STATE_REG,
	QLCNIC_CRB_DEV_REF_COUNT,
	QLCNIC_CRB_DEV_STATE,
	QLCNIC_CRB_DRV_STATE,
	QLCNIC_CRB_DRV_SCRATCH,
	QLCNIC_CRB_DEV_PARTITION_INFO,
	QLCNIC_CRB_DRV_IDC_VER,
	QLCNIC_PEG_ALIVE_COUNTER,
	QLCNIC_PEG_HALT_STATUS1,
	QLCNIC_PEG_HALT_STATUS2,
	QLCNIC_CRB_PEG_NET_0+0x3c,
	QLCNIC_CRB_PEG_NET_1+0x3c,
	QLCNIC_CRB_PEG_NET_2+0x3c,
	QLCNIC_CRB_PEG_NET_4+0x3c,
	-1
};
98
99static int qlcnic_get_regs_len(struct net_device *dev)
100{
101 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
102}
103
/* Report the full flash size as the "EEPROM" length for ethtool. */
static int qlcnic_get_eeprom_len(struct net_device *dev)
{
	return QLCNIC_FLASH_TOTAL_SIZE;
}
108
109static void
110qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
111{
112 struct qlcnic_adapter *adapter = netdev_priv(dev);
113 u32 fw_major, fw_minor, fw_build;
114
115 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
116 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
117 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
118 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
119
120 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
121 strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
122 strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
123}
124
/* ethtool get_settings: report link speed/duplex/autoneg and port
 * type.  1G ports report cached link state; 10G ports prefer
 * firmware link events when available, otherwise read the speed
 * register directly.  The board-type switch below relies on
 * deliberate fall-throughs. */
static int
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	int check_sfp_module = 0;
	u16 pcifn = adapter->ahw.pci_func;

	/* read which mode */
	if (adapter->ahw.port_type == QLCNIC_GBE) {
		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

		ecmd->advertising = (ADVERTISED_100baseT_Half |
				     ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Half |
				     ADVERTISED_1000baseT_Full);

		/* Cached values maintained by the link-event handler. */
		ecmd->speed = adapter->link_speed;
		ecmd->duplex = adapter->link_duplex;
		ecmd->autoneg = adapter->link_autoneg;

	} else if (adapter->ahw.port_type == QLCNIC_XGBE) {
		u32 val;

		val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
		if (val == QLCNIC_PORT_MODE_802_3_AP) {
			ecmd->supported = SUPPORTED_1000baseT_Full;
			ecmd->advertising = ADVERTISED_1000baseT_Full;
		} else {
			ecmd->supported = SUPPORTED_10000baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full;
		}

		/* Prefer firmware-reported link state when the device is
		 * running and link events are enabled. */
		if (netif_running(dev) && adapter->has_link_events) {
			ecmd->speed = adapter->link_speed;
			ecmd->autoneg = adapter->link_autoneg;
			ecmd->duplex = adapter->link_duplex;
			goto skip;
		}

		/* Fallback: read the per-function speed register. */
		val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
		ecmd->speed = P3_LINK_SPEED_MHZ *
			P3_LINK_SPEED_VAL(pcifn, val);
		ecmd->duplex = DUPLEX_FULL;
		ecmd->autoneg = AUTONEG_DISABLE;
	} else
		return -EIO;

skip:
	ecmd->phy_address = adapter->physical_port;
	ecmd->transceiver = XCVR_EXTERNAL;

	switch (adapter->ahw.board_type) {
	case QLCNIC_BRDTYPE_P3_REF_QG:
	case QLCNIC_BRDTYPE_P3_4_GB:
	case QLCNIC_BRDTYPE_P3_4_GB_MM:

		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
		/* fall through: all of the above are also TP ports */
	case QLCNIC_BRDTYPE_P3_10G_CX4:
	case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3_10000_BASE_T:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		ecmd->autoneg = adapter->link_autoneg;
		break;
	case QLCNIC_BRDTYPE_P3_IMEZ:
	case QLCNIC_BRDTYPE_P3_XG_LOM:
	case QLCNIC_BRDTYPE_P3_HMEZ:
		ecmd->supported |= SUPPORTED_MII;
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->port = PORT_MII;
		ecmd->autoneg = AUTONEG_DISABLE;
		break;
	case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->supported |= SUPPORTED_TP;
		check_sfp_module = netif_running(dev) &&
			adapter->has_link_events;
		/* fall through: SFP+ boards also get the FIBRE settings */
	case QLCNIC_BRDTYPE_P3_10G_XFP:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
		break;
	case QLCNIC_BRDTYPE_P3_10G_TP:
		if (adapter->ahw.port_type == QLCNIC_XGBE) {
			ecmd->autoneg = AUTONEG_DISABLE;
			ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
			ecmd->advertising |=
				(ADVERTISED_FIBRE | ADVERTISED_TP);
			ecmd->port = PORT_FIBRE;
			check_sfp_module = netif_running(dev) &&
				adapter->has_link_events;
		} else {
			ecmd->autoneg = AUTONEG_ENABLE;
			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
			ecmd->advertising |=
				(ADVERTISED_TP | ADVERTISED_Autoneg);
			ecmd->port = PORT_TP;
		}
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
			adapter->ahw.board_type);
		return -EIO;
	}

	/* Refine the reported port type from the detected SFP module. */
	if (check_sfp_module) {
		switch (adapter->module_type) {
		case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
		case LINKEVENT_MODULE_OPTICAL_SRLR:
		case LINKEVENT_MODULE_OPTICAL_LRM:
		case LINKEVENT_MODULE_OPTICAL_SFP_1G:
			ecmd->port = PORT_FIBRE;
			break;
		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
		case LINKEVENT_MODULE_TWINAX:
			ecmd->port = PORT_TP;
			break;
		default:
			ecmd->port = PORT_OTHER;
		}
	}

	return 0;
}
260
/* ethtool set_settings: program autoneg, speed and duplex on 1G
 * ports via firmware PHY commands (10G ports are read-only here),
 * then bounce the interface so the new settings take effect. */
static int
qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	__u32 status;

	/* read which mode */
	if (adapter->ahw.port_type == QLCNIC_GBE) {
		/* autonegotiation */
		if (qlcnic_fw_cmd_set_phy(adapter,
			       QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
			       ecmd->autoneg) != 0)
			return -EIO;
		else
			adapter->link_autoneg = ecmd->autoneg;

		/* Read-modify-write the PHY status word: fetch it,
		 * patch speed/duplex bits, write it back. */
		if (qlcnic_fw_cmd_query_phy(adapter,
			      QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
			      &status) != 0)
			return -EIO;

		switch (ecmd->speed) {
		case SPEED_10:
			qlcnic_set_phy_speed(status, 0);
			break;
		case SPEED_100:
			qlcnic_set_phy_speed(status, 1);
			break;
		case SPEED_1000:
			qlcnic_set_phy_speed(status, 2);
			break;
		}

		if (ecmd->duplex == DUPLEX_HALF)
			qlcnic_clear_phy_duplex(status);
		if (ecmd->duplex == DUPLEX_FULL)
			qlcnic_set_phy_duplex(status);
		/* NOTE(review): the (int *) cast just reinterprets the
		 * __u32 status word for the set_phy argument — presumably
		 * the parameter is signed; verify against the prototype. */
		if (qlcnic_fw_cmd_set_phy(adapter,
			       QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
			       *((int *)&status)) != 0)
			return -EIO;
		else {
			adapter->link_speed = ecmd->speed;
			adapter->link_duplex = ecmd->duplex;
		}
	} else
		return -EOPNOTSUPP;

	/* Restart the interface so the new PHY settings are applied. */
	if (!netif_running(dev))
		return 0;

	dev->netdev_ops->ndo_stop(dev);
	return dev->netdev_ops->ndo_open(dev);
}
315
/* ethtool register dump.  Layout: the diag_registers[] values, a
 * 0xFFEFCDAB marker, then per-ring producer/consumer snapshots
 * (tx count + 2 values, rx count + 2 producers, sds count + one
 * consumer per status ring). */
static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
	struct qlcnic_host_sds_ring *sds_ring;
	u32 *regs_buff = p;
	int ring, i = 0;

	memset(p, 0, qlcnic_get_regs_len(dev));
	/* Encode format version, chip revision and PCI device id. */
	regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
	    (adapter->pdev)->device;

	/* Ring state is only valid on a fully initialized adapter;
	 * otherwise leave the zeroed buffer as-is. */
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (i = 0; diag_registers[i] != -1; i++)
		regs_buff[i] = QLCRD32(adapter, diag_registers[i]);

	regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/

	regs_buff[i++] = 1; /* No. of tx ring */
	regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
	regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);

	regs_buff[i++] = 2; /* No. of rx ring */
	regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
	regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);

	regs_buff[i++] = adapter->max_sds_rings;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &(recv_ctx->sds_rings[ring]);
		regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
	}
}
352
353static u32 qlcnic_test_link(struct net_device *dev)
354{
355 struct qlcnic_adapter *adapter = netdev_priv(dev);
356 u32 val;
357
358 val = QLCRD32(adapter, CRB_XG_STATE_P3);
359 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
360 return (val == XG_LINK_UP_P3) ? 0 : 1;
361}
362
363static int
364qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
365 u8 *bytes)
366{
367 struct qlcnic_adapter *adapter = netdev_priv(dev);
368 int offset;
369 int ret;
370
371 if (eeprom->len == 0)
372 return -EINVAL;
373
374 eeprom->magic = (adapter->pdev)->vendor |
375 ((adapter->pdev)->device << 16);
376 offset = eeprom->offset;
377
378 ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
379 eeprom->len);
380 if (ret < 0)
381 return ret;
382
383 return 0;
384}
385
386static void
387qlcnic_get_ringparam(struct net_device *dev,
388 struct ethtool_ringparam *ring)
389{
390 struct qlcnic_adapter *adapter = netdev_priv(dev);
391
392 ring->rx_pending = adapter->num_rxd;
393 ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
394 ring->rx_jumbo_pending += adapter->num_lro_rxd;
395 ring->tx_pending = adapter->num_txd;
396
397 if (adapter->ahw.port_type == QLCNIC_GBE) {
398 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
399 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
400 } else {
401 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
402 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
403 }
404
405 ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
406
407 ring->rx_mini_max_pending = 0;
408 ring->rx_mini_pending = 0;
409}
410
411static u32
412qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
413{
414 u32 num_desc;
415 num_desc = max(val, min);
416 num_desc = min(num_desc, max);
417 num_desc = roundup_pow_of_two(num_desc);
418
419 if (val != num_desc) {
420 printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
421 qlcnic_driver_name, r_name, num_desc, val);
422 }
423
424 return num_desc;
425}
426
427static int
428qlcnic_set_ringparam(struct net_device *dev,
429 struct ethtool_ringparam *ring)
430{
431 struct qlcnic_adapter *adapter = netdev_priv(dev);
432 u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
433 u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
434 u16 num_rxd, num_jumbo_rxd, num_txd;
435
436
437 if (ring->rx_mini_pending)
438 return -EOPNOTSUPP;
439
440 if (adapter->ahw.port_type == QLCNIC_GBE) {
441 max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
442 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
443 }
444
445 num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
446 MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
447
448 num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
449 MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
450
451 num_txd = qlcnic_validate_ringparam(ring->tx_pending,
452 MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
453
454 if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
455 num_jumbo_rxd == adapter->num_jumbo_rxd)
456 return 0;
457
458 adapter->num_rxd = num_rxd;
459 adapter->num_jumbo_rxd = num_jumbo_rxd;
460 adapter->num_txd = num_txd;
461
462 return qlcnic_reset_context(adapter);
463}
464
465static void
466qlcnic_get_pauseparam(struct net_device *netdev,
467 struct ethtool_pauseparam *pause)
468{
469 struct qlcnic_adapter *adapter = netdev_priv(netdev);
470 int port = adapter->physical_port;
471 __u32 val;
472
473 if (adapter->ahw.port_type == QLCNIC_GBE) {
474 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
475 return;
476 /* get flow control settings */
477 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
478 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
479 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
480 switch (port) {
481 case 0:
482 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
483 break;
484 case 1:
485 pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
486 break;
487 case 2:
488 pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
489 break;
490 case 3:
491 default:
492 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
493 break;
494 }
495 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
496 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
497 return;
498 pause->rx_pause = 1;
499 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
500 if (port == 0)
501 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
502 else
503 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
504 } else {
505 dev_err(&netdev->dev, "Unknown board type: %x\n",
506 adapter->ahw.port_type);
507 }
508}
509
510static int
511qlcnic_set_pauseparam(struct net_device *netdev,
512 struct ethtool_pauseparam *pause)
513{
514 struct qlcnic_adapter *adapter = netdev_priv(netdev);
515 int port = adapter->physical_port;
516 __u32 val;
517
518 /* read mode */
519 if (adapter->ahw.port_type == QLCNIC_GBE) {
520 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
521 return -EIO;
522 /* set flow control */
523 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
524
525 if (pause->rx_pause)
526 qlcnic_gb_rx_flowctl(val);
527 else
528 qlcnic_gb_unset_rx_flowctl(val);
529
530 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
531 val);
532 /* set autoneg */
533 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
534 switch (port) {
535 case 0:
536 if (pause->tx_pause)
537 qlcnic_gb_unset_gb0_mask(val);
538 else
539 qlcnic_gb_set_gb0_mask(val);
540 break;
541 case 1:
542 if (pause->tx_pause)
543 qlcnic_gb_unset_gb1_mask(val);
544 else
545 qlcnic_gb_set_gb1_mask(val);
546 break;
547 case 2:
548 if (pause->tx_pause)
549 qlcnic_gb_unset_gb2_mask(val);
550 else
551 qlcnic_gb_set_gb2_mask(val);
552 break;
553 case 3:
554 default:
555 if (pause->tx_pause)
556 qlcnic_gb_unset_gb3_mask(val);
557 else
558 qlcnic_gb_set_gb3_mask(val);
559 break;
560 }
561 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
562 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
563 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
564 return -EIO;
565 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
566 if (port == 0) {
567 if (pause->tx_pause)
568 qlcnic_xg_unset_xg0_mask(val);
569 else
570 qlcnic_xg_set_xg0_mask(val);
571 } else {
572 if (pause->tx_pause)
573 qlcnic_xg_unset_xg1_mask(val);
574 else
575 qlcnic_xg_set_xg1_mask(val);
576 }
577 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
578 } else {
579 dev_err(&netdev->dev, "Unknown board type: %x\n",
580 adapter->ahw.port_type);
581 }
582 return 0;
583}
584
585static int qlcnic_reg_test(struct net_device *dev)
586{
587 struct qlcnic_adapter *adapter = netdev_priv(dev);
588 u32 data_read, data_written;
589
590 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
591 if ((data_read & 0xffff) != adapter->pdev->vendor)
592 return 1;
593
594 data_written = (u32)0xa5a5a5a5;
595
596 QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
597 data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
598 if (data_written != data_read)
599 return 1;
600
601 return 0;
602}
603
604static int qlcnic_get_sset_count(struct net_device *dev, int sset)
605{
606 switch (sset) {
607 case ETH_SS_TEST:
608 return QLCNIC_TEST_LEN;
609 case ETH_SS_STATS:
610 return QLCNIC_STATS_LEN;
611 default:
612 return -EOPNOTSUPP;
613 }
614}
615
616static void
617qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
618 u64 *data)
619{
620 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
621 data[0] = qlcnic_reg_test(dev);
622 if (data[0])
623 eth_test->flags |= ETH_TEST_FL_FAILED;
624
625 /* link test */
626 data[1] = (u64) qlcnic_test_link(dev);
627 if (data[1])
628 eth_test->flags |= ETH_TEST_FL_FAILED;
629}
630
631static void
632qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
633{
634 int index;
635
636 switch (stringset) {
637 case ETH_SS_TEST:
638 memcpy(data, *qlcnic_gstrings_test,
639 QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
640 break;
641 case ETH_SS_STATS:
642 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
643 memcpy(data + index * ETH_GSTRING_LEN,
644 qlcnic_gstrings_stats[index].stat_string,
645 ETH_GSTRING_LEN);
646 }
647 break;
648 }
649}
650
651static void
652qlcnic_get_ethtool_stats(struct net_device *dev,
653 struct ethtool_stats *stats, u64 * data)
654{
655 struct qlcnic_adapter *adapter = netdev_priv(dev);
656 int index;
657
658 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
659 char *p =
660 (char *)adapter +
661 qlcnic_gstrings_stats[index].stat_offset;
662 data[index] =
663 (qlcnic_gstrings_stats[index].sizeof_stat ==
664 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
665 }
666}
667
/* Report whether RX checksum offload is currently enabled. */
static u32 qlcnic_get_rx_csum(struct net_device *dev)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	return adapter->rx_csum;
}
673
674static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
675{
676 struct qlcnic_adapter *adapter = netdev_priv(dev);
677 adapter->rx_csum = !!data;
678 return 0;
679}
680
681static u32 qlcnic_get_tso(struct net_device *dev)
682{
683 return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
684}
685
686static int qlcnic_set_tso(struct net_device *dev, u32 data)
687{
688 if (data)
689 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
690 else
691 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
692
693 return 0;
694}
695
696static void
697qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
698{
699 struct qlcnic_adapter *adapter = netdev_priv(dev);
700 u32 wol_cfg;
701
702 wol->supported = 0;
703 wol->wolopts = 0;
704
705 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
706 if (wol_cfg & (1UL << adapter->portnum))
707 wol->supported |= WAKE_MAGIC;
708
709 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
710 if (wol_cfg & (1UL << adapter->portnum))
711 wol->wolopts |= WAKE_MAGIC;
712}
713
714static int
715qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
716{
717 struct qlcnic_adapter *adapter = netdev_priv(dev);
718 u32 wol_cfg;
719
720 if (wol->wolopts & ~WAKE_MAGIC)
721 return -EOPNOTSUPP;
722
723 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
724 if (!(wol_cfg & (1 << adapter->portnum)))
725 return -EOPNOTSUPP;
726
727 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
728 if (wol->wolopts & WAKE_MAGIC)
729 wol_cfg |= 1UL << adapter->portnum;
730 else
731 wol_cfg &= ~(1UL << adapter->portnum);
732
733 QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
734
735 return 0;
736}
737
738/*
739 * Set the coalescing parameters. Currently only normal is supported.
740 * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
741 * firmware coalescing to default.
742 */
743static int qlcnic_set_intr_coalesce(struct net_device *netdev,
744 struct ethtool_coalesce *ethcoal)
745{
746 struct qlcnic_adapter *adapter = netdev_priv(netdev);
747
748 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
749 return -EINVAL;
750
751 /*
752 * Return Error if unsupported values or
753 * unsupported parameters are set.
754 */
755 if (ethcoal->rx_coalesce_usecs > 0xffff ||
756 ethcoal->rx_max_coalesced_frames > 0xffff ||
757 ethcoal->tx_coalesce_usecs > 0xffff ||
758 ethcoal->tx_max_coalesced_frames > 0xffff ||
759 ethcoal->rx_coalesce_usecs_irq ||
760 ethcoal->rx_max_coalesced_frames_irq ||
761 ethcoal->tx_coalesce_usecs_irq ||
762 ethcoal->tx_max_coalesced_frames_irq ||
763 ethcoal->stats_block_coalesce_usecs ||
764 ethcoal->use_adaptive_rx_coalesce ||
765 ethcoal->use_adaptive_tx_coalesce ||
766 ethcoal->pkt_rate_low ||
767 ethcoal->rx_coalesce_usecs_low ||
768 ethcoal->rx_max_coalesced_frames_low ||
769 ethcoal->tx_coalesce_usecs_low ||
770 ethcoal->tx_max_coalesced_frames_low ||
771 ethcoal->pkt_rate_high ||
772 ethcoal->rx_coalesce_usecs_high ||
773 ethcoal->rx_max_coalesced_frames_high ||
774 ethcoal->tx_coalesce_usecs_high ||
775 ethcoal->tx_max_coalesced_frames_high)
776 return -EINVAL;
777
778 if (!ethcoal->rx_coalesce_usecs ||
779 !ethcoal->rx_max_coalesced_frames) {
780 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
781 adapter->coal.normal.data.rx_time_us =
782 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
783 adapter->coal.normal.data.rx_packets =
784 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
785 } else {
786 adapter->coal.flags = 0;
787 adapter->coal.normal.data.rx_time_us =
788 ethcoal->rx_coalesce_usecs;
789 adapter->coal.normal.data.rx_packets =
790 ethcoal->rx_max_coalesced_frames;
791 }
792 adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
793 adapter->coal.normal.data.tx_packets =
794 ethcoal->tx_max_coalesced_frames;
795
796 qlcnic_config_intr_coalesce(adapter);
797
798 return 0;
799}
800
801static int qlcnic_get_intr_coalesce(struct net_device *netdev,
802 struct ethtool_coalesce *ethcoal)
803{
804 struct qlcnic_adapter *adapter = netdev_priv(netdev);
805
806 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
807 return -EINVAL;
808
809 ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
810 ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
811 ethcoal->rx_max_coalesced_frames =
812 adapter->coal.normal.data.rx_packets;
813 ethcoal->tx_max_coalesced_frames =
814 adapter->coal.normal.data.tx_packets;
815
816 return 0;
817}
818
819static int qlcnic_set_flags(struct net_device *netdev, u32 data)
820{
821 struct qlcnic_adapter *adapter = netdev_priv(netdev);
822 int hw_lro;
823
824 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
825 return -EINVAL;
826
827 ethtool_op_set_flags(netdev, data);
828
829 hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
830
831 if (qlcnic_config_hw_lro(adapter, hw_lro))
832 return -EIO;
833
834 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
835 return -EIO;
836
837
838 return 0;
839}
840
/* ethtool operations table for qlcnic netdevs; registered from the
 * netdev setup code.  tx_csum/sg are the generic helpers, everything
 * else is implemented above. */
const struct ethtool_ops qlcnic_ethtool_ops = {
	.get_settings = qlcnic_get_settings,
	.set_settings = qlcnic_set_settings,
	.get_drvinfo = qlcnic_get_drvinfo,
	.get_regs_len = qlcnic_get_regs_len,
	.get_regs = qlcnic_get_regs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = qlcnic_get_eeprom_len,
	.get_eeprom = qlcnic_get_eeprom,
	.get_ringparam = qlcnic_get_ringparam,
	.set_ringparam = qlcnic_set_ringparam,
	.get_pauseparam = qlcnic_get_pauseparam,
	.set_pauseparam = qlcnic_set_pauseparam,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = qlcnic_get_tso,
	.set_tso = qlcnic_set_tso,
	.get_wol = qlcnic_get_wol,
	.set_wol = qlcnic_set_wol,
	.self_test = qlcnic_diag_test,
	.get_strings = qlcnic_get_strings,
	.get_ethtool_stats = qlcnic_get_ethtool_stats,
	.get_sset_count = qlcnic_get_sset_count,
	.get_rx_csum = qlcnic_get_rx_csum,
	.set_rx_csum = qlcnic_set_rx_csum,
	.get_coalesce = qlcnic_get_intr_coalesce,
	.set_coalesce = qlcnic_set_intr_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = qlcnic_set_flags,
};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 00000000000..0469f84360a
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef __QLCNIC_HDR_H_
26#define __QLCNIC_HDR_H_
27
28#include <linux/kernel.h>
29#include <linux/types.h>
30
31/*
32 * The basic unit of access when reading/writing control registers.
33 */
34
/* Hub selectors: combined with a per-hub agent address as
 * (hub << 7) | agent to form a CRB agent address — see the
 * QLCNIC_HW_CRB_HUB_AGT_ADR_* macros below. */
enum {
	QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
	QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
	QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
	QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
	QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
	QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
	QLCNIC_HW_H6_CH_HUB_ADR = 0x08
};

/* Hub 0 */
enum {
	QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
	QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
};

/* Hub 1 */
enum {
	QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
	QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
	QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
	QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
	QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
	QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
	QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
	QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
	QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
	QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
	QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
	QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
	QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
	QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
	QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
	QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
};

/* Hub 2 */
enum {
	QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
	QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
	QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,

	QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
	QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
	QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
	QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
	QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
	QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
	QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
	QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
	QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
	QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
	QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
	QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
	QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
};

/* Hub 3 */
enum {
	QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
	QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
	QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
	QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
};

/* Hub 4 — values auto-increment from 0x40 (PEGN0) onward. */
enum {
	QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
	QLCNIC_HW_PEGN1_CRB_AGT_ADR,
	QLCNIC_HW_PEGN2_CRB_AGT_ADR,
	QLCNIC_HW_PEGN3_CRB_AGT_ADR,
	QLCNIC_HW_PEGNI_CRB_AGT_ADR,
	QLCNIC_HW_PEGND_CRB_AGT_ADR,
	QLCNIC_HW_PEGNC_CRB_AGT_ADR,
	QLCNIC_HW_PEGR0_CRB_AGT_ADR,
	QLCNIC_HW_PEGR1_CRB_AGT_ADR,
	QLCNIC_HW_PEGR2_CRB_AGT_ADR,
	QLCNIC_HW_PEGR3_CRB_AGT_ADR,
	QLCNIC_HW_PEGN4_CRB_AGT_ADR
};

/* Hub 5 — values auto-increment from 0x40 (PEGS0) onward. */
enum {
	QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
	QLCNIC_HW_PEGS1_CRB_AGT_ADR,
	QLCNIC_HW_PEGS2_CRB_AGT_ADR,
	QLCNIC_HW_PEGS3_CRB_AGT_ADR,
	QLCNIC_HW_PEGSI_CRB_AGT_ADR,
	QLCNIC_HW_PEGSD_CRB_AGT_ADR,
	QLCNIC_HW_PEGSC_CRB_AGT_ADR
};

/* Hub 6 */
enum {
	QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
	QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
	QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
	QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
	QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
	QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
	QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
	QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
	QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
};
139
140/* Floaters - non existent modules */
#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR	0x67

/* This field defines PCI/X adr [25:20] of agents on the CRB */
/* Sequential indices (0, 1, 2, ...) — order is significant. */
enum {
	QLCNIC_HW_PX_MAP_CRB_PH = 0,
	QLCNIC_HW_PX_MAP_CRB_PS,
	QLCNIC_HW_PX_MAP_CRB_MN,
	QLCNIC_HW_PX_MAP_CRB_MS,
	QLCNIC_HW_PX_MAP_CRB_PGR1,
	QLCNIC_HW_PX_MAP_CRB_SRE,
	QLCNIC_HW_PX_MAP_CRB_NIU,
	QLCNIC_HW_PX_MAP_CRB_QMN,
	QLCNIC_HW_PX_MAP_CRB_SQN0,
	QLCNIC_HW_PX_MAP_CRB_SQN1,
	QLCNIC_HW_PX_MAP_CRB_SQN2,
	QLCNIC_HW_PX_MAP_CRB_SQN3,
	QLCNIC_HW_PX_MAP_CRB_QMS,
	QLCNIC_HW_PX_MAP_CRB_SQS0,
	QLCNIC_HW_PX_MAP_CRB_SQS1,
	QLCNIC_HW_PX_MAP_CRB_SQS2,
	QLCNIC_HW_PX_MAP_CRB_SQS3,
	QLCNIC_HW_PX_MAP_CRB_PGN0,
	QLCNIC_HW_PX_MAP_CRB_PGN1,
	QLCNIC_HW_PX_MAP_CRB_PGN2,
	QLCNIC_HW_PX_MAP_CRB_PGN3,
	QLCNIC_HW_PX_MAP_CRB_PGND,
	QLCNIC_HW_PX_MAP_CRB_PGNI,
	QLCNIC_HW_PX_MAP_CRB_PGS0,
	QLCNIC_HW_PX_MAP_CRB_PGS1,
	QLCNIC_HW_PX_MAP_CRB_PGS2,
	QLCNIC_HW_PX_MAP_CRB_PGS3,
	QLCNIC_HW_PX_MAP_CRB_PGSD,
	QLCNIC_HW_PX_MAP_CRB_PGSI,
	QLCNIC_HW_PX_MAP_CRB_SN,
	QLCNIC_HW_PX_MAP_CRB_PGR2,
	QLCNIC_HW_PX_MAP_CRB_EG,
	QLCNIC_HW_PX_MAP_CRB_PH2,
	QLCNIC_HW_PX_MAP_CRB_PS2,
	QLCNIC_HW_PX_MAP_CRB_CAM,
	QLCNIC_HW_PX_MAP_CRB_CAS0,
	QLCNIC_HW_PX_MAP_CRB_CAS1,
	QLCNIC_HW_PX_MAP_CRB_CAS2,
	QLCNIC_HW_PX_MAP_CRB_C2C0,
	QLCNIC_HW_PX_MAP_CRB_C2C1,
	QLCNIC_HW_PX_MAP_CRB_TIMR,
	QLCNIC_HW_PX_MAP_CRB_PGR3,
	QLCNIC_HW_PX_MAP_CRB_RPMX1,
	QLCNIC_HW_PX_MAP_CRB_RPMX2,
	QLCNIC_HW_PX_MAP_CRB_RPMX3,
	QLCNIC_HW_PX_MAP_CRB_RPMX4,
	QLCNIC_HW_PX_MAP_CRB_RPMX5,
	QLCNIC_HW_PX_MAP_CRB_RPMX6,
	QLCNIC_HW_PX_MAP_CRB_RPMX7,
	QLCNIC_HW_PX_MAP_CRB_XDMA,
	QLCNIC_HW_PX_MAP_CRB_I2Q,
	QLCNIC_HW_PX_MAP_CRB_ROMUSB,
	QLCNIC_HW_PX_MAP_CRB_CAS3,
	QLCNIC_HW_PX_MAP_CRB_RPMX0,
	QLCNIC_HW_PX_MAP_CRB_RPMX8,
	QLCNIC_HW_PX_MAP_CRB_RPMX9,
	QLCNIC_HW_PX_MAP_CRB_OCM0,
	QLCNIC_HW_PX_MAP_CRB_OCM1,
	QLCNIC_HW_PX_MAP_CRB_SMB,
	QLCNIC_HW_PX_MAP_CRB_I2C0,
	QLCNIC_HW_PX_MAP_CRB_I2C1,
	QLCNIC_HW_PX_MAP_CRB_LPC,
	QLCNIC_HW_PX_MAP_CRB_PGNC,
	QLCNIC_HW_PX_MAP_CRB_PGR0
};
210
211/* This field defines CRB adr [31:20] of the agents */
212
213#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
214 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
215#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
216 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
217#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
218 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
219
220#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
221 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
222#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
223 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
224#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
225 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
226#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
227 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
228#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
229 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
230#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
231 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
232#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
233 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
234#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
235 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
236#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
237 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
238#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
239 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
240#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
241 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
242#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
243 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
244#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
245 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
246#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
247 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
248#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
249 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
250
251#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
252 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
253#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
254 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
255#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
256 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
257
258#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
259 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
260#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
261 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
262#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
263 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
264#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
265 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
266#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
267 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
268#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
269 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
270#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
271 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
272#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
273 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
274#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
275 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
276#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
277 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
278#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
279 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
280#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
281 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
282#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
283 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
284#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
285 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
286#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
287 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
288#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
289 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
290
291#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
292 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
293#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
294 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
295#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
296 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
297#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
298 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
299#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
300 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
301#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
302 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
303#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
304 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
305#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
306 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
307#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
308 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
309#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
310 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
311#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
312 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
313#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
314 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
315
316#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
317 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
318#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
319 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
320#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
321 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
322#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
323 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
324#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
325 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
326#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
327 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
328#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
329 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
330
331#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
332 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
333#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
334 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
335#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
336 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
337#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
338 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
339#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
340 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
341#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
342 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
343#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
344 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
345#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
346 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
347#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
348 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
349
350#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
351
352#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
353
354#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
355#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
356
357#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
358#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
359#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
360#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
361#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
362#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
363#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
364
365#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
366
367#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
368#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
369#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
370#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
371#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
372#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
373
374/* Lock IDs for ROM lock */
375#define ROM_LOCK_DRIVER 0x0d417340
376
377/******************************************************************************
378*
379* Definitions specific to M25P flash
380*
381*******************************************************************************
382*/
383
384/* all are 1MB windows */
385
386#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
387#define QLCNIC_PCI_CRB_WINDOW(A) \
388 (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
389
390#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
391#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
392#define QLCNIC_CRB_ROMUSB \
393 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
394#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
395#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
396#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
397#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
398
399#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
400#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
401#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
402#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
403#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
404#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
405#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
406#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
407#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
408#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
409#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
410
411#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
412#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
413
414#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
415#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
416#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
417#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
418#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
419#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
420#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
421#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
422#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
423#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
424#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
425#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
426#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
427#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
428#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
429#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
430#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
431#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
432#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
433
434#define QLCNIC_PCI_MN_2M (0)
435#define QLCNIC_PCI_MS_2M (0x80000)
436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
437#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
438#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
439#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
440#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
441
442#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
443
444#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
445#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
446#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
447#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
448#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
449#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
450#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
451#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
452
453/*
454 * Register offsets for MN
455 */
456#define QLCNIC_MIU_CONTROL (0x000)
457#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
458
459/* 200ms delay in each loop */
460#define QLCNIC_NIU_PHY_WAITLEN 200000
461/* 10 seconds before we give up */
462#define QLCNIC_NIU_PHY_WAITMAX 50
463#define QLCNIC_NIU_MAX_GBE_PORTS 4
464#define QLCNIC_NIU_MAX_XG_PORTS 2
465
466#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
467#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
468#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
469
470#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
471 (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
472#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
473 (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
474
475
476#define TEST_AGT_CTRL (0x00)
477
478#define TA_CTL_START 1
479#define TA_CTL_ENABLE 2
480#define TA_CTL_WRITE 4
481#define TA_CTL_BUSY 8
482
483/*
484 * Register offsets for MN
485 */
486#define MIU_TEST_AGT_BASE (0x90)
487
488#define MIU_TEST_AGT_ADDR_LO (0x04)
489#define MIU_TEST_AGT_ADDR_HI (0x08)
490#define MIU_TEST_AGT_WRDATA_LO (0x10)
491#define MIU_TEST_AGT_WRDATA_HI (0x14)
492#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
493#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
494#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
495#define MIU_TEST_AGT_RDDATA_LO (0x18)
496#define MIU_TEST_AGT_RDDATA_HI (0x1c)
497#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
498#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
499#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
500
501#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
502#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
503
504/*
505 * Register offsets for MS
506 */
507#define SIU_TEST_AGT_BASE (0x60)
508
509#define SIU_TEST_AGT_ADDR_LO (0x04)
510#define SIU_TEST_AGT_ADDR_HI (0x18)
511#define SIU_TEST_AGT_WRDATA_LO (0x08)
512#define SIU_TEST_AGT_WRDATA_HI (0x0c)
513#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
514#define SIU_TEST_AGT_RDDATA_LO (0x10)
515#define SIU_TEST_AGT_RDDATA_HI (0x14)
516#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
517
518#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
519#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
520
521/* XG Link status */
522#define XG_LINK_UP 0x10
523#define XG_LINK_DOWN 0x20
524
525#define XG_LINK_UP_P3 0x01
526#define XG_LINK_DOWN_P3 0x02
527#define XG_LINK_STATE_P3_MASK 0xf
528#define XG_LINK_STATE_P3(pcifn, val) \
529 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
530
531#define P3_LINK_SPEED_MHZ 100
532#define P3_LINK_SPEED_MASK 0xff
533#define P3_LINK_SPEED_REG(pcifn) \
534 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
535#define P3_LINK_SPEED_VAL(pcifn, reg) \
536 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
537
538#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
539#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
540#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
541#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
542#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
543#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
544#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
545#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
546
547#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
548#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
549#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
550#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
551
552#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
553#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
554#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
555#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
556#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
557
558#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
559#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
560
561#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
562#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
563#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
564
565#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
566#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
567
568#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
569
570#define CRB_V2P_0 (QLCNIC_REG(0x290))
571#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
572#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
573
574#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
575#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
576#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
577#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
578
579#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
580#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
581
582/*
583 * capabilities register, can be used to selectively enable/disable features
584 * for backward compatibility
585 */
586#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
587#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
588#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
589#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
590
591#define INTR_SCHEME_PERPORT 0x1
592#define MSI_MODE_MULTIFUNC 0x1
593
594/* used for ethtool tests */
595#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
596
597/*
598 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
599 * which can be read by the Phantom host to get producer/consumer indexes from
600 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
601 * registers will be used for the addresses of the ring's shared memory
602 * on the Phantom.
603 */
604
605#define qlcnic_get_temp_val(x) ((x) >> 16)
606#define qlcnic_get_temp_state(x) ((x) & 0xffff)
607#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
608
609/*
610 * Temperature control.
611 */
612enum {
613 QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
614 QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
615 QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
616};
617
618/* Lock IDs for PHY lock */
619#define PHY_LOCK_DRIVER 0x44524956
620
621/* Used for PS PCI Memory access */
622#define PCIX_PS_OP_ADDR_LO (0x10000)
623/* via CRB (PS side only) */
624#define PCIX_PS_OP_ADDR_HI (0x10004)
625
626#define PCIX_INT_VECTOR (0x10100)
627#define PCIX_INT_MASK (0x10104)
628
629#define PCIX_OCM_WINDOW (0x10800)
630#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
631
632#define PCIX_TARGET_STATUS (0x10118)
633#define PCIX_TARGET_STATUS_F1 (0x10160)
634#define PCIX_TARGET_STATUS_F2 (0x10164)
635#define PCIX_TARGET_STATUS_F3 (0x10168)
636#define PCIX_TARGET_STATUS_F4 (0x10360)
637#define PCIX_TARGET_STATUS_F5 (0x10364)
638#define PCIX_TARGET_STATUS_F6 (0x10368)
639#define PCIX_TARGET_STATUS_F7 (0x1036c)
640
641#define PCIX_TARGET_MASK (0x10128)
642#define PCIX_TARGET_MASK_F1 (0x10170)
643#define PCIX_TARGET_MASK_F2 (0x10174)
644#define PCIX_TARGET_MASK_F3 (0x10178)
645#define PCIX_TARGET_MASK_F4 (0x10370)
646#define PCIX_TARGET_MASK_F5 (0x10374)
647#define PCIX_TARGET_MASK_F6 (0x10378)
648#define PCIX_TARGET_MASK_F7 (0x1037c)
649
650#define PCIX_MSI_F(i) (0x13000+((i)*4))
651
652#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
653#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
654#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
655
656#define PCIE_SEM0_LOCK (0x1c000)
657#define PCIE_SEM0_UNLOCK (0x1c004)
658#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
659#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
660
661#define PCIE_SETUP_FUNCTION (0x12040)
662#define PCIE_SETUP_FUNCTION2 (0x12048)
663#define PCIE_MISCCFG_RC (0x1206c)
664#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
665#define PCIE_CHICKEN3 (0x120c8)
666
667#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
668#define PCIE_MAX_MASTER_SPLIT (0x14048)
669
670#define QLCNIC_PORT_MODE_NONE 0
671#define QLCNIC_PORT_MODE_XG 1
672#define QLCNIC_PORT_MODE_GB 2
673#define QLCNIC_PORT_MODE_802_3_AP 3
674#define QLCNIC_PORT_MODE_AUTO_NEG 4
675#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
676#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
677#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
678#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
679
680#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
681#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
682
683#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
684#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
685
686#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
687#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
688#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
689#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
690#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
691#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
692
693#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
694#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
695#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
696#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
697
698 /* Device State */
699#define QLCNIC_DEV_COLD 1
700#define QLCNIC_DEV_INITALIZING 2
701#define QLCNIC_DEV_READY 3
702#define QLCNIC_DEV_NEED_RESET 4
703#define QLCNIC_DEV_NEED_QUISCENT 5
704#define QLCNIC_DEV_FAILED 6
705
706#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
707#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
708#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
709#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
710#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
711
712#define FW_POLL_DELAY (2 * HZ)
713#define FW_FAIL_THRESH 3
714#define FW_POLL_THRESH 10
715
716#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
717#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
718
719/*
720 * PCI Interrupt Vector Values.
721 */
722#define PCIX_INT_VECTOR_BIT_F0 0x0080
723#define PCIX_INT_VECTOR_BIT_F1 0x0100
724#define PCIX_INT_VECTOR_BIT_F2 0x0200
725#define PCIX_INT_VECTOR_BIT_F3 0x0400
726#define PCIX_INT_VECTOR_BIT_F4 0x0800
727#define PCIX_INT_VECTOR_BIT_F5 0x1000
728#define PCIX_INT_VECTOR_BIT_F6 0x2000
729#define PCIX_INT_VECTOR_BIT_F7 0x4000
730
731struct qlcnic_legacy_intr_set {
732 u32 int_vec_bit;
733 u32 tgt_status_reg;
734 u32 tgt_mask_reg;
735 u32 pci_int_reg;
736};
737
738#define QLCNIC_LEGACY_INTR_CONFIG \
739{ \
740 { \
741 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
742 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
743 .tgt_mask_reg = ISR_INT_TARGET_MASK, \
744 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
745 \
746 { \
747 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
748 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
749 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
750 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
751 \
752 { \
753 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
754 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
755 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
756 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
757 \
758 { \
759 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
760 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
761 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
762 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
763 \
764 { \
765 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
766 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
767 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
768 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
769 \
770 { \
771 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
772 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
773 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
774 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
775 \
776 { \
777 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
778 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
779 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
780 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
781 \
782 { \
783 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
784 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
785 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
786 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
787}
788
789/* NIU REGS */
790
791#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
792
793/*
794 * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
795 *
796 * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
797 * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
798 * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
799 * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
800 * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
801 * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
802 * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
803 * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
804 * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
805 * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
806 * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
807 * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
808 */
809#define qlcnic_gb_rx_flowctl(config_word) \
810 ((config_word) |= 1 << 5)
811#define qlcnic_gb_get_rx_flowctl(config_word) \
812 _qlcnic_crb_get_bit((config_word), 5)
813#define qlcnic_gb_unset_rx_flowctl(config_word) \
814 ((config_word) &= ~(1 << 5))
815
816/*
817 * NIU GB Pause Ctl Register
818 */
819
820#define qlcnic_gb_set_gb0_mask(config_word) \
821 ((config_word) |= 1 << 0)
822#define qlcnic_gb_set_gb1_mask(config_word) \
823 ((config_word) |= 1 << 2)
824#define qlcnic_gb_set_gb2_mask(config_word) \
825 ((config_word) |= 1 << 4)
826#define qlcnic_gb_set_gb3_mask(config_word) \
827 ((config_word) |= 1 << 6)
828
829#define qlcnic_gb_get_gb0_mask(config_word) \
830 _qlcnic_crb_get_bit((config_word), 0)
831#define qlcnic_gb_get_gb1_mask(config_word) \
832 _qlcnic_crb_get_bit((config_word), 2)
833#define qlcnic_gb_get_gb2_mask(config_word) \
834 _qlcnic_crb_get_bit((config_word), 4)
835#define qlcnic_gb_get_gb3_mask(config_word) \
836 _qlcnic_crb_get_bit((config_word), 6)
837
838#define qlcnic_gb_unset_gb0_mask(config_word) \
839 ((config_word) &= ~(1 << 0))
840#define qlcnic_gb_unset_gb1_mask(config_word) \
841 ((config_word) &= ~(1 << 2))
842#define qlcnic_gb_unset_gb2_mask(config_word) \
843 ((config_word) &= ~(1 << 4))
844#define qlcnic_gb_unset_gb3_mask(config_word) \
845 ((config_word) &= ~(1 << 6))
846
847/*
848 * NIU XG Pause Ctl Register
849 *
850 * Bit 0 : xg0_mask => 1:disable tx pause frames
851 * Bit 1 : xg0_request => 1:request single pause frame
852 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
853 * Bit 3 : xg1_mask => 1:disable tx pause frames
854 * Bit 4 : xg1_request => 1:request single pause frame
855 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
856 */
857
858#define qlcnic_xg_set_xg0_mask(config_word) \
859 ((config_word) |= 1 << 0)
860#define qlcnic_xg_set_xg1_mask(config_word) \
861 ((config_word) |= 1 << 3)
862
863#define qlcnic_xg_get_xg0_mask(config_word) \
864 _qlcnic_crb_get_bit((config_word), 0)
865#define qlcnic_xg_get_xg1_mask(config_word) \
866 _qlcnic_crb_get_bit((config_word), 3)
867
868#define qlcnic_xg_unset_xg0_mask(config_word) \
869 ((config_word) &= ~(1 << 0))
870#define qlcnic_xg_unset_xg1_mask(config_word) \
871 ((config_word) &= ~(1 << 3))
872
873/*
874 * NIU XG Pause Ctl Register
875 *
876 * Bit 0 : xg0_mask => 1:disable tx pause frames
877 * Bit 1 : xg0_request => 1:request single pause frame
878 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
879 * Bit 3 : xg1_mask => 1:disable tx pause frames
880 * Bit 4 : xg1_request => 1:request single pause frame
881 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
882 */
883
884/*
885 * PHY-Specific MII control/status registers.
886 */
887#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
888#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
889
890/*
891 * PHY-Specific Status Register (reg 17).
892 *
893 * Bit 0 : jabber => 1:jabber detected, 0:not
894 * Bit 1 : polarity => 1:polarity reversed, 0:normal
895 * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
896 * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
897 * Bit 4 : energydetect => 1:sleep, 0:active
898 * Bit 5 : downshift => 1:downshift, 0:no downshift
899 * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
900 * Bits 7-9 : cablelen => not valid in 10Mb/s mode
901 * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
902 * Bit 10 : link => 1:link up, 0:link down
903 * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
904 * Bit 12 : pagercvd => 1:page received, 0:page not received
905 * Bit 13 : duplex => 1:full duplex, 0:half duplex
906 * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
907 */
908
909#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
910
911#define qlcnic_set_phy_speed(config_word, val) \
912 ((config_word) |= ((val & 0x03) << 14))
913#define qlcnic_set_phy_duplex(config_word) \
914 ((config_word) |= 1 << 13)
915#define qlcnic_clear_phy_duplex(config_word) \
916 ((config_word) &= ~(1 << 13))
917
918#define qlcnic_get_phy_link(config_word) \
919 _qlcnic_crb_get_bit(config_word, 10)
920#define qlcnic_get_phy_duplex(config_word) \
921 _qlcnic_crb_get_bit(config_word, 13)
922
923#define QLCNIC_NIU_NON_PROMISC_MODE 0
924#define QLCNIC_NIU_PROMISC_MODE 1
925#define QLCNIC_NIU_ALLMULTI_MODE 2
926
927struct crb_128M_2M_sub_block_map {
928 unsigned valid;
929 unsigned start_128M;
930 unsigned end_128M;
931 unsigned start_2M;
932};
933
934struct crb_128M_2M_block_map{
935 struct crb_128M_2M_sub_block_map sub_block[16];
936};
937#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 00000000000..91234e7b39e
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1201 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27#include <net/ip.h>
28
29#define MASK(n) ((1ULL<<(n))-1)
30#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
31
32#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
33
34#define CRB_BLK(off) ((off >> 20) & 0x3f)
35#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
36#define CRB_WINDOW_2M (0x130060)
37#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
38#define CRB_INDIRECT_2M (0x1e0000UL)
39
40
#ifndef readq
/*
 * Fallback 64-bit MMIO read for architectures without a native readq:
 * two 32-bit reads, low word first.  NOTE(review): the two halves are
 * not read atomically — assumes the device tolerates split access to
 * this register pair.
 */
static inline u64 readq(void __iomem *addr)
{
	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
}
#endif
47
#ifndef writeq
/*
 * Fallback 64-bit MMIO write for architectures without a native writeq:
 * two 32-bit writes, low word first (not atomic — see readq above).
 */
static inline void writeq(u64 val, void __iomem *addr)
{
	writel(((u32) (val)), (addr));
	writel(((u32) (val >> 32)), (addr + 4));
}
#endif
55
56#define ADDR_IN_RANGE(addr, low, high) \
57 (((addr) < (high)) && ((addr) >= (low)))
58
59#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
60 ((adapter)->ahw.pci_base0 + (off))
61
62static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
63 unsigned long off)
64{
65 if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
66 return PCI_OFFSET_FIRST_RANGE(adapter, off);
67
68 return NULL;
69}
70
/*
 * Direct-map translation table: for each of the 64 1M blocks of the
 * legacy 128M CRB map (indexed by CRB_BLK(off)), and each 64K sub-block
 * (indexed by CRB_SUBBLK(off)), gives the matching offset in the 2M
 * BAR.  Sub-blocks with valid == 0 must be reached through the
 * indirect CRB window instead.
 */
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
	{{{0, 0, 0, 0} } },		/* 0: PCI */
	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
		{1, 0x0110000, 0x0120000, 0x130000},
		{1, 0x0120000, 0x0122000, 0x124000},
		{1, 0x0130000, 0x0132000, 0x126000},
		{1, 0x0140000, 0x0142000, 0x128000},
		{1, 0x0150000, 0x0152000, 0x12a000},
		{1, 0x0160000, 0x0170000, 0x110000},
		{1, 0x0170000, 0x0172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x01e0000, 0x01e0800, 0x122000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
	{{{0, 0, 0, 0} } },	    /* 3: */
	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
	{{{1, 0x0800000, 0x0802000, 0x170000},	/* 8: SQM0 */
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
	{{{0, 0, 0, 0} } },	/* 23: */
	{{{0, 0, 0, 0} } },	/* 24: */
	{{{0, 0, 0, 0} } },	/* 25: */
	{{{0, 0, 0, 0} } },	/* 26: */
	{{{0, 0, 0, 0} } },	/* 27: */
	{{{0, 0, 0, 0} } },	/* 28: */
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
	{{{0} } },		/* 32: PCI */
	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
		{1, 0x2110000, 0x2120000, 0x130000},
		{1, 0x2120000, 0x2122000, 0x124000},
		{1, 0x2130000, 0x2132000, 0x126000},
		{1, 0x2140000, 0x2142000, 0x128000},
		{1, 0x2150000, 0x2152000, 0x12a000},
		{1, 0x2160000, 0x2170000, 0x110000},
		{1, 0x2170000, 0x2172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
	{{{0} } },		/* 35: */
	{{{0} } },		/* 36: */
	{{{0} } },		/* 37: */
	{{{0} } },		/* 38: */
	{{{0} } },		/* 39: */
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
	{{{0} } },		/* 52: */
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
	{{{0} } },		/* 59: I2C0 */
	{{{0} } },		/* 60: I2C1 */
	{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};
228
229/*
230 * top 12 bits of crb internal address (hub, agent)
231 */
/*
 * Hub/agent address for each 1M CRB block, indexed by CRB_BLK(off).
 * CRB_HI() shifts the entry into the top bits of the indirect window
 * value; zero entries have no agent behind them.
 */
static const unsigned crb_hub_agt[64] = {
	0,					/* 0 */
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,		/* 8 */
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,		/* 16 */
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,		/* 24 */
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
	0,					/* 32 */
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,		/* 40 */
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,	/* 48 */
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,		/* 56 */
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
298
299/* PCI Windowing for DDR regions. */
300
301#define QLCNIC_PCIE_SEM_TIMEOUT 10000
302
/*
 * Acquire PCIe hardware semaphore 'sem', polling up to
 * QLCNIC_PCIE_SEM_TIMEOUT ms.  If 'id_reg' is nonzero, the port number
 * is written there to record the owner.  Returns 0 on success, -EIO on
 * timeout.  NOTE(review): the while (!done) condition means any nonzero
 * read — not only 1 — ends the loop and is treated as acquired;
 * presumably only 0/1 are ever returned by the register, but confirm.
 */
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
	int done = 0, timeout = 0;

	while (!done) {
		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
		if (done == 1)
			break;
		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
			return -EIO;
		msleep(1);
	}

	if (id_reg)
		QLCWR32(adapter, id_reg, adapter->portnum);

	return 0;
}
322
/* Release PCIe hardware semaphore 'sem' (unlock is a read side effect). */
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
328
329static int
330qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
331 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
332{
333 u32 i, producer, consumer;
334 struct qlcnic_cmd_buffer *pbuf;
335 struct cmd_desc_type0 *cmd_desc;
336 struct qlcnic_host_tx_ring *tx_ring;
337
338 i = 0;
339
340 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
341 return -EIO;
342
343 tx_ring = adapter->tx_ring;
344 __netif_tx_lock_bh(tx_ring->txq);
345
346 producer = tx_ring->producer;
347 consumer = tx_ring->sw_consumer;
348
349 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
350 netif_tx_stop_queue(tx_ring->txq);
351 __netif_tx_unlock_bh(tx_ring->txq);
352 return -EBUSY;
353 }
354
355 do {
356 cmd_desc = &cmd_desc_arr[i];
357
358 pbuf = &tx_ring->cmd_buf_arr[producer];
359 pbuf->skb = NULL;
360 pbuf->frag_count = 0;
361
362 memcpy(&tx_ring->desc_head[producer],
363 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
364
365 producer = get_next_index(producer, tx_ring->num_desc);
366 i++;
367
368 } while (i != nr_desc);
369
370 tx_ring->producer = producer;
371
372 qlcnic_update_cmd_producer(adapter, tx_ring);
373
374 __netif_tx_unlock_bh(tx_ring->txq);
375
376 return 0;
377}
378
379static int
380qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
381 unsigned op)
382{
383 struct qlcnic_nic_req req;
384 struct qlcnic_mac_req *mac_req;
385 u64 word;
386
387 memset(&req, 0, sizeof(struct qlcnic_nic_req));
388 req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
389
390 word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
391 req.req_hdr = cpu_to_le64(word);
392
393 mac_req = (struct qlcnic_mac_req *)&req.words[0];
394 mac_req->op = op;
395 memcpy(mac_req->mac_addr, addr, 6);
396
397 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
398}
399
400static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
401 u8 *addr, struct list_head *del_list)
402{
403 struct list_head *head;
404 struct qlcnic_mac_list_s *cur;
405
406 /* look up if already exists */
407 list_for_each(head, del_list) {
408 cur = list_entry(head, struct qlcnic_mac_list_s, list);
409
410 if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
411 list_move_tail(head, &adapter->mac_list);
412 return 0;
413 }
414 }
415
416 cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
417 if (cur == NULL) {
418 dev_err(&adapter->netdev->dev,
419 "failed to add mac address filter\n");
420 return -ENOMEM;
421 }
422 memcpy(cur->mac_addr, addr, ETH_ALEN);
423 list_add_tail(&cur->list, &adapter->mac_list);
424
425 return qlcnic_sre_macaddr_change(adapter,
426 cur->mac_addr, QLCNIC_MAC_ADD);
427}
428
/*
 * Rebuild the adapter's MAC filter set from the netdev state: keep the
 * unicast and broadcast addresses, re-add the current multicast list,
 * program the VPORT miss mode (drop / accept-all / accept-multi), and
 * delete any previously-programmed filter that is no longer wanted.
 */
void qlcnic_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u32 mode = VPORT_MISS_MODE_DROP;
	LIST_HEAD(del_list);
	struct list_head *head;
	struct qlcnic_mac_list_s *cur;

	/* Move every programmed filter onto del_list; entries that are
	 * still wanted are moved back by qlcnic_nic_add_mac(). */
	list_splice_tail_init(&adapter->mac_list, &del_list);

	qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
	qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);

	if (netdev->flags & IFF_PROMISC) {
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	/* Too many groups for exact filtering: fall back to allmulti. */
	if ((netdev->flags & IFF_ALLMULTI) ||
			(netdev->mc_count > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	if (netdev->mc_count > 0) {
		for (mc_ptr = netdev->mc_list; mc_ptr;
		     mc_ptr = mc_ptr->next) {
			qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
							&del_list);
		}
	}

send_fw_cmd:
	qlcnic_nic_set_promisc(adapter, mode);
	/* Anything left on del_list is stale: remove from fw and free. */
	head = &del_list;
	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);

		qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}
475
476int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
477{
478 struct qlcnic_nic_req req;
479 u64 word;
480
481 memset(&req, 0, sizeof(struct qlcnic_nic_req));
482
483 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
484
485 word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
486 ((u64)adapter->portnum << 16);
487 req.req_hdr = cpu_to_le64(word);
488
489 req.words[0] = cpu_to_le64(mode);
490
491 return qlcnic_send_cmd_descs(adapter,
492 (struct cmd_desc_type0 *)&req, 1);
493}
494
495void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
496{
497 struct qlcnic_mac_list_s *cur;
498 struct list_head *head = &adapter->mac_list;
499
500 while (!list_empty(head)) {
501 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
502 qlcnic_sre_macaddr_change(adapter,
503 cur->mac_addr, QLCNIC_MAC_DEL);
504 list_del(&cur->list);
505 kfree(cur);
506 }
507}
508
509#define QLCNIC_CONFIG_INTR_COALESCE 3
510
511/*
512 * Send the interrupt coalescing parameter set by ethtool to the card.
513 */
514int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
515{
516 struct qlcnic_nic_req req;
517 u64 word[6];
518 int rv, i;
519
520 memset(&req, 0, sizeof(struct qlcnic_nic_req));
521
522 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
523
524 word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
525 req.req_hdr = cpu_to_le64(word[0]);
526
527 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
528 for (i = 0; i < 6; i++)
529 req.words[i] = cpu_to_le64(word[i]);
530
531 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
532 if (rv != 0)
533 dev_err(&adapter->netdev->dev,
534 "Could not send interrupt coalescing parameters\n");
535
536 return rv;
537}
538
539int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
540{
541 struct qlcnic_nic_req req;
542 u64 word;
543 int rv;
544
545 if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
546 return 0;
547
548 memset(&req, 0, sizeof(struct qlcnic_nic_req));
549
550 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
551
552 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
553 req.req_hdr = cpu_to_le64(word);
554
555 req.words[0] = cpu_to_le64(enable);
556
557 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
558 if (rv != 0)
559 dev_err(&adapter->netdev->dev,
560 "Could not send configure hw lro request\n");
561
562 adapter->flags ^= QLCNIC_LRO_ENABLED;
563
564 return rv;
565}
566
567int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
568{
569 struct qlcnic_nic_req req;
570 u64 word;
571 int rv;
572
573 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
574 return 0;
575
576 memset(&req, 0, sizeof(struct qlcnic_nic_req));
577
578 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
579
580 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
581 ((u64)adapter->portnum << 16);
582 req.req_hdr = cpu_to_le64(word);
583
584 req.words[0] = cpu_to_le64(enable);
585
586 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
587 if (rv != 0)
588 dev_err(&adapter->netdev->dev,
589 "Could not send configure bridge mode request\n");
590
591 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
592
593 return rv;
594}
595
596
597#define RSS_HASHTYPE_IP_TCP 0x3
598
/*
 * Enable/disable receive-side scaling in the firmware, hashing TCP over
 * IPv4 and IPv6 with a fixed 40-byte key and a 3-bit indirection table
 * mask.
 */
int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int i, rv;

	/* Fixed RSS hash key (five 64-bit words). */
	const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };


	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *      5-4: hash_type_ipv4
	 *	7-6: hash_type_ipv6
	 *	  8: enable
	 *        9: use indirection table
	 *    47-10: reserved
	 *    63-48: indirection table mask
	 */
	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < 5; i++)
		req.words[i+1] = cpu_to_le64(key[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev, "could not configure RSS\n");

	return rv;
}
640
641int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
642{
643 struct qlcnic_nic_req req;
644 u64 word;
645 int rv;
646
647 memset(&req, 0, sizeof(struct qlcnic_nic_req));
648 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
649
650 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
651 req.req_hdr = cpu_to_le64(word);
652
653 req.words[0] = cpu_to_le64(cmd);
654 req.words[1] = cpu_to_le64(ip);
655
656 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
657 if (rv != 0)
658 dev_err(&adapter->netdev->dev,
659 "could not notify %s IP 0x%x reuqest\n",
660 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
661
662 return rv;
663}
664
665int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
666{
667 struct qlcnic_nic_req req;
668 u64 word;
669 int rv;
670
671 memset(&req, 0, sizeof(struct qlcnic_nic_req));
672 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
673
674 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
675 req.req_hdr = cpu_to_le64(word);
676 req.words[0] = cpu_to_le64(enable | (enable << 8));
677
678 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
679 if (rv != 0)
680 dev_err(&adapter->netdev->dev,
681 "could not configure link notification\n");
682
683 return rv;
684}
685
686int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
687{
688 struct qlcnic_nic_req req;
689 u64 word;
690 int rv;
691
692 memset(&req, 0, sizeof(struct qlcnic_nic_req));
693 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
694
695 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
696 ((u64)adapter->portnum << 16) |
697 ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
698
699 req.req_hdr = cpu_to_le64(word);
700
701 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
702 if (rv != 0)
703 dev_err(&adapter->netdev->dev,
704 "could not cleanup lro flows\n");
705
706 return rv;
707}
708
709/*
710 * qlcnic_change_mtu - Change the Maximum Transfer Unit
711 * @returns 0 on success, negative on failure
712 */
713
714int qlcnic_change_mtu(struct net_device *netdev, int mtu)
715{
716 struct qlcnic_adapter *adapter = netdev_priv(netdev);
717 int rc = 0;
718
719 if (mtu > P3_MAX_MTU) {
720 dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
721 P3_MAX_MTU);
722 return -EINVAL;
723 }
724
725 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
726
727 if (!rc)
728 netdev->mtu = mtu;
729
730 return rc;
731}
732
/*
 * Read this function's factory MAC address from the CAM CRB block.
 * Each register pair holds two functions' addresses; the odd function's
 * address starts 16 bits into the pair.  Always returns 0.
 * NOTE(review): le64_to_cpu() on a host-assembled value looks like it
 * encodes the on-wire byte order expected by the caller — confirm
 * against qlcnic_main's use of *mac.
 */
int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
{
	u32 crbaddr, mac_hi, mac_lo;
	int pci_func = adapter->ahw.pci_func;

	/* 3 registers per function pair, plus one for the odd function. */
	crbaddr = CRB_MAC_BLOCK_START +
		(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));

	mac_lo = QLCRD32(adapter, crbaddr);
	mac_hi = QLCRD32(adapter, crbaddr+4);

	if (pci_func & 1)
		*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
	else
		*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));

	return 0;
}
751
/*
 * Changes the CRB window to the specified window.
 */
 /* Returns < 0 if off is not valid,
 * 1 if window access is needed. 'off' is set to offset from
 * CRB space in 128M pci map
 * 0 if no window access is needed. 'off' is set to 2M addr
 * In: 'off' is offset from base in 128M pci map
 */
static int
qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
		ulong off, void __iomem **addr)
{
	const struct crb_128M_2M_sub_block_map *m;

	/* Reject offsets outside the CRB region of the 128M map. */
	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
		return -EINVAL;

	off -= QLCNIC_PCI_CRBSPACE;

	/*
	 * Try direct map: look up the 1M block / 64K sub-block in the
	 * static translation table.
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw.pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window: the caller must program the
	 * window register before dereferencing the returned alias.
	 */
	*addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
	return 1;
}
789
/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 *
 * Programs the 2M-map CRB window register so indirect accesses hit the
 * hub/agent block containing 'off'.  Callers hold crb_lock and the
 * hardware window lock (see qlcnic_hw_{read,write}_wx_2M).
 */
static void
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;

	off -= QLCNIC_PCI_CRBSPACE;

	window = CRB_HI(off);

	/* Skip the slow register write if the window is already set. */
	if (adapter->ahw.crb_win == window)
		return;

	writel(window, addr);
	/* Read back to verify the window actually switched. */
	if (readl(addr) != window) {
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
				window, off);
	}
	/* NOTE(review): the cached window is updated even when the
	 * readback failed — presumably to avoid retry storms; confirm. */
	adapter->ahw.crb_win = window;
}
817
/*
 * Write 'data' to CRB register 'off' (128M-map offset).  Direct-mapped
 * registers are written straight through; others are written through
 * the shared CRB window under crb_lock plus the hardware window lock.
 * Returns 0 on success, -EIO for an invalid offset.
 */
int
qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	int rv;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0) {
		/* direct map: no locking needed */
		writel(data, addr);
		return 0;
	}

	if (rv > 0) {
		/* indirect access: serialize against other window users */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		qlcnic_pci_set_crbwindow_2M(adapter, off);
		writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return 0;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -EIO;
}
848
/*
 * Read CRB register 'off' (128M-map offset).  Direct-mapped registers
 * are read straight through; others go through the shared CRB window
 * under crb_lock plus the hardware window lock.  Returns the register
 * value, or -1 (all ones) for an invalid offset.
 */
u32
qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
	unsigned long flags;
	int rv;
	u32 data;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0)
		return readl(addr);

	if (rv > 0) {
		/* indirect access: serialize against other window users */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		qlcnic_pci_set_crbwindow_2M(adapter, off);
		data = readl(addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return data;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -1;
}
878
879
/*
 * Return the 2M-BAR virtual address for CRB offset 'offset'.  WARNs
 * when the offset is not direct-mapped (translation returned nonzero),
 * in which case the address is the indirect-window alias and needs the
 * window set up before use.
 */
void __iomem *
qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
{
	void __iomem *addr = NULL;

	WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));

	return addr;
}
889
890
/*
 * Program the OCM window so agent address 'addr' becomes reachable
 * through the 2M BAR; '*start' receives the BAR offset of the mapped
 * location.  Returns 0 on success, -EIO for QM addresses (unhandled).
 */
static int
qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
		u64 addr, u32 *start)
{
	u32 window;
	struct pci_dev *pdev = adapter->pdev;

	/* 0xff800-0xfffff within a window is QM space — not supported. */
	if ((addr & 0x00ff800) == 0xff800) {
		if (printk_ratelimit())
			dev_warn(&pdev->dev, "QM access not handled\n");
		return -EIO;
	}

	window = OCM_WIN_P3P(addr);

	writel(window, adapter->ahw.ocm_win_crb);
	/* read back to flush */
	readl(adapter->ahw.ocm_win_crb);

	adapter->ahw.ocm_win = window;
	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
	return 0;
}
914
/*
 * 64-bit read (op == 0) or write (op != 0) of on-chip memory at agent
 * address 'off' through the OCM window, under mem_lock.  If the window
 * falls outside the pre-mapped BAR0 range, the containing page is
 * temporarily ioremap()ed.  Returns 0 on success, -EIO on failure.
 */
static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
		u64 *data, int op)
{
	void __iomem *addr, *mem_ptr = NULL;
	resource_size_t mem_base;
	int ret;
	u32 start;

	mutex_lock(&adapter->ahw.mem_lock);

	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
	if (ret != 0)
		goto unlock;

	addr = pci_base_offset(adapter, start);
	if (addr)
		goto noremap;

	/* Window is outside the pre-mapped range: map one page. */
	mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);

	mem_ptr = ioremap(mem_base, PAGE_SIZE);
	if (mem_ptr == NULL) {
		ret = -EIO;
		goto unlock;
	}

	addr = mem_ptr + (start & (PAGE_SIZE - 1));

noremap:
	if (op == 0)	/* read */
		*data = readq(addr);
	else		/* write */
		writeq(*data, addr);

unlock:
	mutex_unlock(&adapter->ahw.mem_lock);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}
957
958#define MAX_CTL_CHECK 1000
959
/*
 * Write the 64-bit value 'data' to adapter memory at address 'off'
 * (QDR, DDR, or OCM0) through the MIU/SIU test agent.  Only 64-bit
 * aligned offsets are accepted.  Returns 0 on success, -EIO for an
 * unsupported address or a test-agent timeout.
 */
int
qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 data)
{
	int i, j, ret;
	u32 temp, off8;
	u64 stride;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	/* On-chip memory goes through the direct OCM window instead. */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);

	return -EIO;

correct:
	/* P3P parts move 16 bytes per agent cycle, older parts 8. */
	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;

	off8 = off & ~(stride-1);

	mutex_lock(&adapter->ahw.mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	i = 0;
	if (stride == 16) {
		/* 16-byte stride is a read-modify-write: first latch the
		 * current contents of the aligned 16-byte word. */
		writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
		writel((TA_CTL_START | TA_CTL_ENABLE),
				(mem_crb + TEST_AGT_CTRL));

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = readl(mem_crb + TEST_AGT_CTRL);
			if ((temp & TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			ret = -EIO;
			goto done;
		}

		/* Copy back the 8 bytes we are NOT overwriting, then
		 * point 'i' at the half we are. */
		i = (off & 0xf) ? 0 : 2;
		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
				mem_crb + MIU_TEST_AGT_WRDATA(i));
		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
				mem_crb + MIU_TEST_AGT_WRDATA(i+1));
		i = (off & 0xf) ? 2 : 0;
	}

	writel(data & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel((data >> 32) & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));

	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	/* Wait for the agent to finish the write cycle. */
	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

done:
	mutex_unlock(&adapter->ahw.mem_lock);

	return ret;
}
1055
/*
 * Read a 64-bit value from adapter memory at address 'off' (QDR, DDR,
 * or OCM0) into '*data' through the MIU/SIU test agent.  Only 64-bit
 * aligned offsets are accepted.  Returns 0 on success, -EIO for an
 * unsupported address or a test-agent timeout.
 */
int
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val, stride;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	/* On-chip memory goes through the direct OCM window instead. */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
		return qlcnic_pci_mem_access_direct(adapter,
				off, data, 0);
	}

	return -EIO;

correct:
	/* P3P parts move 16 bytes per agent cycle, older parts 8. */
	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;

	off8 = off & ~(stride-1);

	mutex_lock(&adapter->ahw.mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	/* Wait for the agent to finish the read cycle. */
	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
		ret = -EIO;
	} else {
		/* For 16-byte stride, pick the half containing 'off'. */
		off8 = MIU_TEST_AGT_RDDATA_LO;
		if ((stride == 16) && (off & 0xf))
			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;

		temp = readl(mem_crb + off8 + 4);
		val = (u64)temp << 32;
		val |= readl(mem_crb + off8);
		*data = val;
		ret = 0;
	}

	mutex_unlock(&adapter->ahw.mem_lock);

	return ret;
}
1129
/*
 * Read the board type from flash, validate the config magic, and set
 * adapter->ahw.board_type / port_type (10G vs 1G).  Returns 0 on
 * success, -EIO on ROM read failure or bad magic.
 */
int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
{
	int offset, board_type, magic;
	struct pci_dev *pdev = adapter->pdev;

	offset = QLCNIC_FW_MAGIC_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &magic))
		return -EIO;

	if (magic != QLCNIC_BDINFO_MAGIC) {
		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
			magic);
		return -EIO;
	}

	offset = QLCNIC_BRDTYPE_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
		return -EIO;

	adapter->ahw.board_type = board_type;

	/* 4_GB_MM boards report as 10G_TP when GPIO bit 15 is clear.
	 * NOTE(review): only the local 'board_type' is overridden here;
	 * ahw.board_type keeps the flash value — confirm intended. */
	if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
		if ((gpio & 0x8000) == 0)
			board_type = QLCNIC_BRDTYPE_P3_10G_TP;
	}

	switch (board_type) {
	case QLCNIC_BRDTYPE_P3_HMEZ:
	case QLCNIC_BRDTYPE_P3_XG_LOM:
	case QLCNIC_BRDTYPE_P3_10G_CX4:
	case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3_IMEZ:
	case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
	case QLCNIC_BRDTYPE_P3_10G_XFP:
	case QLCNIC_BRDTYPE_P3_10000_BASE_T:
		adapter->ahw.port_type = QLCNIC_XGBE;
		break;
	case QLCNIC_BRDTYPE_P3_REF_QG:
	case QLCNIC_BRDTYPE_P3_4_GB:
	case QLCNIC_BRDTYPE_P3_4_GB_MM:
		adapter->ahw.port_type = QLCNIC_GBE;
		break;
	case QLCNIC_BRDTYPE_P3_10G_TP:
		/* Combo board: first two ports are 10G, the rest 1G. */
		adapter->ahw.port_type = (adapter->portnum < 2) ?
			QLCNIC_XGBE : QLCNIC_GBE;
		break;
	default:
		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
		adapter->ahw.port_type = QLCNIC_XGBE;
		break;
	}

	return 0;
}
1187
1188int
1189qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1190{
1191 u32 wol_cfg;
1192
1193 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1194 if (wol_cfg & (1UL << adapter->portnum)) {
1195 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1196 if (wol_cfg & (1 << adapter->portnum))
1197 return 1;
1198 }
1199
1200 return 0;
1201}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 00000000000..7ae8bcc1e43
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1466 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/netdevice.h>
26#include <linux/delay.h>
27#include "qlcnic.h"
28
/* One (register address, init value) entry from the flash CRB init table. */
struct crb_addr_pair {
	u32 addr;
	u32 data;
};

#define QLCNIC_MAX_CRB_XFORM 60
/* Maps internal CRB window index -> hub agent address (<< 20); filled in
 * lazily by crb_addr_transform_setup(). */
static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];

/* Record the hub agent base for one named CRB region in crb_addr_xform[]. */
#define crb_addr_transform(name) \
	(crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
	QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)

/* Sentinel returned by qlcnic_decode_crb_addr() for unknown regions. */
#define QLCNIC_ADDR_ERROR (0xffffffff)
42
43static void
44qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
45 struct qlcnic_host_rds_ring *rds_ring);
46
/*
 * Populate crb_addr_xform[] with the hub agent address for every CRB
 * region the init table may reference.  Idempotent; called from
 * qlcnic_decode_crb_addr() before each lookup.
 */
static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}
101
102void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
103{
104 struct qlcnic_recv_context *recv_ctx;
105 struct qlcnic_host_rds_ring *rds_ring;
106 struct qlcnic_rx_buffer *rx_buf;
107 int i, ring;
108
109 recv_ctx = &adapter->recv_ctx;
110 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
111 rds_ring = &recv_ctx->rds_rings[ring];
112 for (i = 0; i < rds_ring->num_desc; ++i) {
113 rx_buf = &(rds_ring->rx_buf_arr[i]);
114 if (rx_buf->state == QLCNIC_BUFFER_FREE)
115 continue;
116 pci_unmap_single(adapter->pdev,
117 rx_buf->dma,
118 rds_ring->dma_size,
119 PCI_DMA_FROMDEVICE);
120 if (rx_buf->skb != NULL)
121 dev_kfree_skb_any(rx_buf->skb);
122 }
123 }
124}
125
126void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
127{
128 struct qlcnic_cmd_buffer *cmd_buf;
129 struct qlcnic_skb_frag *buffrag;
130 int i, j;
131 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
132
133 cmd_buf = tx_ring->cmd_buf_arr;
134 for (i = 0; i < tx_ring->num_desc; i++) {
135 buffrag = cmd_buf->frag_array;
136 if (buffrag->dma) {
137 pci_unmap_single(adapter->pdev, buffrag->dma,
138 buffrag->length, PCI_DMA_TODEVICE);
139 buffrag->dma = 0ULL;
140 }
141 for (j = 0; j < cmd_buf->frag_count; j++) {
142 buffrag++;
143 if (buffrag->dma) {
144 pci_unmap_page(adapter->pdev, buffrag->dma,
145 buffrag->length,
146 PCI_DMA_TODEVICE);
147 buffrag->dma = 0ULL;
148 }
149 }
150 if (cmd_buf->skb) {
151 dev_kfree_skb_any(cmd_buf->skb);
152 cmd_buf->skb = NULL;
153 }
154 cmd_buf++;
155 }
156}
157
158void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
159{
160 struct qlcnic_recv_context *recv_ctx;
161 struct qlcnic_host_rds_ring *rds_ring;
162 struct qlcnic_host_tx_ring *tx_ring;
163 int ring;
164
165 recv_ctx = &adapter->recv_ctx;
166
167 if (recv_ctx->rds_rings == NULL)
168 goto skip_rds;
169
170 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
171 rds_ring = &recv_ctx->rds_rings[ring];
172 vfree(rds_ring->rx_buf_arr);
173 rds_ring->rx_buf_arr = NULL;
174 }
175 kfree(recv_ctx->rds_rings);
176
177skip_rds:
178 if (adapter->tx_ring == NULL)
179 return;
180
181 tx_ring = adapter->tx_ring;
182 vfree(tx_ring->cmd_buf_arr);
183 kfree(adapter->tx_ring);
184}
185
186int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
187{
188 struct qlcnic_recv_context *recv_ctx;
189 struct qlcnic_host_rds_ring *rds_ring;
190 struct qlcnic_host_sds_ring *sds_ring;
191 struct qlcnic_host_tx_ring *tx_ring;
192 struct qlcnic_rx_buffer *rx_buf;
193 int ring, i, size;
194
195 struct qlcnic_cmd_buffer *cmd_buf_arr;
196 struct net_device *netdev = adapter->netdev;
197
198 size = sizeof(struct qlcnic_host_tx_ring);
199 tx_ring = kzalloc(size, GFP_KERNEL);
200 if (tx_ring == NULL) {
201 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
202 return -ENOMEM;
203 }
204 adapter->tx_ring = tx_ring;
205
206 tx_ring->num_desc = adapter->num_txd;
207 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
208
209 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
210 if (cmd_buf_arr == NULL) {
211 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
212 return -ENOMEM;
213 }
214 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
215 tx_ring->cmd_buf_arr = cmd_buf_arr;
216
217 recv_ctx = &adapter->recv_ctx;
218
219 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
220 rds_ring = kzalloc(size, GFP_KERNEL);
221 if (rds_ring == NULL) {
222 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
223 return -ENOMEM;
224 }
225 recv_ctx->rds_rings = rds_ring;
226
227 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
228 rds_ring = &recv_ctx->rds_rings[ring];
229 switch (ring) {
230 case RCV_RING_NORMAL:
231 rds_ring->num_desc = adapter->num_rxd;
232 if (adapter->ahw.cut_through) {
233 rds_ring->dma_size =
234 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
235 rds_ring->skb_size =
236 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
237 } else {
238 rds_ring->dma_size =
239 QLCNIC_P3_RX_BUF_MAX_LEN;
240 rds_ring->skb_size =
241 rds_ring->dma_size + NET_IP_ALIGN;
242 }
243 break;
244
245 case RCV_RING_JUMBO:
246 rds_ring->num_desc = adapter->num_jumbo_rxd;
247 rds_ring->dma_size =
248 QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
249
250 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
251 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
252
253 rds_ring->skb_size =
254 rds_ring->dma_size + NET_IP_ALIGN;
255 break;
256
257 case RCV_RING_LRO:
258 rds_ring->num_desc = adapter->num_lro_rxd;
259 rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
260 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
261 break;
262
263 }
264 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
265 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
266 if (rds_ring->rx_buf_arr == NULL) {
267 dev_err(&netdev->dev, "Failed to allocate "
268 "rx buffer ring %d\n", ring);
269 goto err_out;
270 }
271 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
272 INIT_LIST_HEAD(&rds_ring->free_list);
273 /*
274 * Now go through all of them, set reference handles
275 * and put them in the queues.
276 */
277 rx_buf = rds_ring->rx_buf_arr;
278 for (i = 0; i < rds_ring->num_desc; i++) {
279 list_add_tail(&rx_buf->list,
280 &rds_ring->free_list);
281 rx_buf->ref_handle = i;
282 rx_buf->state = QLCNIC_BUFFER_FREE;
283 rx_buf++;
284 }
285 spin_lock_init(&rds_ring->lock);
286 }
287
288 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
289 sds_ring = &recv_ctx->sds_rings[ring];
290 sds_ring->irq = adapter->msix_entries[ring].vector;
291 sds_ring->adapter = adapter;
292 sds_ring->num_desc = adapter->num_rxd;
293
294 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
295 INIT_LIST_HEAD(&sds_ring->free_list[i]);
296 }
297
298 return 0;
299
300err_out:
301 qlcnic_free_sw_resources(adapter);
302 return -ENOMEM;
303}
304
305/*
306 * Utility to translate from internal Phantom CRB address
307 * to external PCI CRB address.
308 */
309static u32 qlcnic_decode_crb_addr(u32 addr)
310{
311 int i;
312 u32 base_addr, offset, pci_base;
313
314 crb_addr_transform_setup();
315
316 pci_base = QLCNIC_ADDR_ERROR;
317 base_addr = addr & 0xfff00000;
318 offset = addr & 0x000fffff;
319
320 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
321 if (crb_addr_xform[i] == base_addr) {
322 pci_base = i << 20;
323 break;
324 }
325 }
326 if (pci_base == QLCNIC_ADDR_ERROR)
327 return pci_base;
328 else
329 return pci_base + offset;
330}
331
/* Maximum number of 1us polls of the ROM glue-logic status register. */
#define QLCNIC_MAX_ROM_WAIT_USEC 100

/*
 * Poll the ROMUSB global status register until the "done" bit (bit 1)
 * is set, waiting 1us between reads.
 *
 * Returns 0 on success, -EIO if the bit does not set within
 * QLCNIC_MAX_ROM_WAIT_USEC polls.
 */
static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
{
	long timeout = 0;
	long done = 0;

	cond_resched();

	while (done == 0) {
		done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
		done &= 2;
		if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
			dev_err(&adapter->pdev->dev,
					"Timeout reached  waiting for rom done");
			return -EIO;
		}
		udelay(1);
	}
	return 0;
}
353
/*
 * Read one 32-bit word from the flash ROM at @addr into *@valp.
 * The caller must hold the ROM hardware lock (qlcnic_rom_lock()).
 *
 * The register sequence (address, byte counts, opcode 0xb, wait for
 * completion, reset byte counts, read data) is hardware-mandated;
 * do not reorder.
 *
 * Returns 0 on success, -EIO if the ROM does not signal completion.
 */
static int do_rom_fast_read(struct qlcnic_adapter *adapter,
			    int addr, int *valp)
{
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (qlcnic_wait_rom_done(adapter)) {
		dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(10);
	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
	return 0;
}
373
374static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
375 u8 *bytes, size_t size)
376{
377 int addridx;
378 int ret = 0;
379
380 for (addridx = addr; addridx < (addr + size); addridx += 4) {
381 int v;
382 ret = do_rom_fast_read(adapter, addridx, &v);
383 if (ret != 0)
384 break;
385 *(__le32 *)bytes = cpu_to_le32(v);
386 bytes += 4;
387 }
388
389 return ret;
390}
391
392int
393qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
394 u8 *bytes, size_t size)
395{
396 int ret;
397
398 ret = qlcnic_rom_lock(adapter);
399 if (ret < 0)
400 return ret;
401
402 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
403
404 qlcnic_rom_unlock(adapter);
405 return ret;
406}
407
408int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
409{
410 int ret;
411
412 if (qlcnic_rom_lock(adapter) != 0)
413 return -EIO;
414
415 ret = do_rom_fast_read(adapter, addr, valp);
416 qlcnic_rom_unlock(adapter);
417 return ret;
418}
419
/*
 * qlcnic_pinit_from_rom - replay the CRB init sequence stored in flash.
 *
 * Issues a global software reset, reads the table of (value, address)
 * pairs from the ROM crb_init area, and writes each pair to the chip,
 * skipping registers that must not be touched during a warm init
 * (clocks, PCI setup, cold-reboot magic, DDR/SMB windows, function
 * enables).  Finishes with fixed peg-cache/reply-count writes.
 *
 * Returns 0 on success, -EIO on ROM read failure or a bad table,
 * -ENOMEM if the temporary table cannot be allocated.
 */
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
{
	int addr, val;
	int i, n, init_delay;
	struct crb_addr_pair *buf;
	unsigned offset;
	u32 off;
	struct pci_dev *pdev = adapter->pdev;

	/* resetall */
	/* NOTE(review): qlcnic_rom_lock() return value is ignored here —
	 * confirm the reset is safe if the lock was not acquired. */
	qlcnic_rom_lock(adapter);
	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
	qlcnic_rom_unlock(adapter);

	/* Word 0 must carry the 0xcafecafe signature; word 1 packs the
	 * entry count (high 16 bits) and table offset (low 16 bits). */
	if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
			qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
		dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
		return -EIO;
	}
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	if (n >= 1024) {
		dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
		return -ENOMEM;
	}

	/* Each table entry is 8 bytes: the value word, then the address. */
	for (i = 0; i < n; i++) {
		if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {

		off = qlcnic_decode_crb_addr(buf[i].addr);
		if (off == QLCNIC_ADDR_ERROR) {
			dev_err(&pdev->dev, "CRB init value out of range %x\n",
					buf[i].addr);
			continue;
		}
		off += QLCNIC_PCI_CRBSPACE;

		if (off & 1)
			continue;

		/* skipping cold reboot MAGIC */
		if (off == QLCNIC_CAM_RAM(0x1fc))
			continue;
		if (off == (QLCNIC_CRB_I2C0 + 0x1c))
			continue;
		if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
			continue;
		if (off == (ROMUSB_GLB + 0xa8))
			continue;
		if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
			continue;
		if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
			continue;
		if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
			continue;
		if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
			continue;
		/* skip the function enable register */
		if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;
		if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;
		if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
			continue;

		init_delay = 1;
		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
			init_delay = 1000;

		QLCWR32(adapter, off, buf[i].data);

		msleep(init_delay);
	}
	kfree(buf);

	/* p2dn replyCount */
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 & 1*/
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr_all */
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
	return 0;
}
531
532static int
533qlcnic_has_mn(struct qlcnic_adapter *adapter)
534{
535 u32 capability, flashed_ver;
536 capability = 0;
537
538 qlcnic_rom_fast_read(adapter,
539 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
540 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
541
542 if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
543
544 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
545 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
546 return 1;
547 }
548 return 0;
549}
550
551static
552struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
553{
554 u32 i;
555 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
556 __le32 entries = cpu_to_le32(directory->num_entries);
557
558 for (i = 0; i < entries; i++) {
559
560 __le32 offs = cpu_to_le32(directory->findex) +
561 (i * cpu_to_le32(directory->entry_size));
562 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
563
564 if (tab_type == section)
565 return (struct uni_table_desc *) &unirom[offs];
566 }
567
568 return NULL;
569}
570
571static int
572qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
573{
574 struct uni_table_desc *ptab_descr;
575 const u8 *unirom = adapter->fw->data;
576 u32 i;
577 __le32 entries;
578 int mn_present = qlcnic_has_mn(adapter);
579
580 ptab_descr = qlcnic_get_table_desc(unirom,
581 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
582 if (ptab_descr == NULL)
583 return -1;
584
585 entries = cpu_to_le32(ptab_descr->num_entries);
586nomn:
587 for (i = 0; i < entries; i++) {
588
589 __le32 flags, file_chiprev, offs;
590 u8 chiprev = adapter->ahw.revision_id;
591 u32 flagbit;
592
593 offs = cpu_to_le32(ptab_descr->findex) +
594 (i * cpu_to_le32(ptab_descr->entry_size));
595 flags = cpu_to_le32(*((int *)&unirom[offs] +
596 QLCNIC_UNI_FLAGS_OFF));
597 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
598 QLCNIC_UNI_CHIP_REV_OFF));
599
600 flagbit = mn_present ? 1 : 2;
601
602 if ((chiprev == file_chiprev) &&
603 ((1ULL << flagbit) & flags)) {
604 adapter->file_prd_off = offs;
605 return 0;
606 }
607 }
608 if (mn_present) {
609 mn_present = 0;
610 goto nomn;
611 }
612 return -1;
613}
614
615static
616struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
617 u32 section, u32 idx_offset)
618{
619 const u8 *unirom = adapter->fw->data;
620 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
621 idx_offset));
622 struct uni_table_desc *tab_desc;
623 __le32 offs;
624
625 tab_desc = qlcnic_get_table_desc(unirom, section);
626
627 if (tab_desc == NULL)
628 return NULL;
629
630 offs = cpu_to_le32(tab_desc->findex) +
631 (cpu_to_le32(tab_desc->entry_size) * idx);
632
633 return (struct uni_data_desc *)&unirom[offs];
634}
635
636static u8 *
637qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
638{
639 u32 offs = QLCNIC_BOOTLD_START;
640
641 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
642 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
643 QLCNIC_UNI_DIR_SECT_BOOTLD,
644 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
645
646 return (u8 *)&adapter->fw->data[offs];
647}
648
649static u8 *
650qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
651{
652 u32 offs = QLCNIC_IMAGE_START;
653
654 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
655 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
656 QLCNIC_UNI_DIR_SECT_FW,
657 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
658
659 return (u8 *)&adapter->fw->data[offs];
660}
661
/*
 * qlcnic_get_fw_size - byte size of the firmware blob in the image.
 *
 * NOTE(review): the __le32 return plus cpu_to_le32() on values read
 * straight from the little-endian image only works on little-endian
 * hosts (cpu_to_le32 is a no-op there); callers strip the annotation
 * with a __force cast.  The whole path assumes LE — confirm before
 * running on big-endian.
 */
static __le32
qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
{
	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
		return cpu_to_le32((qlcnic_get_data_desc(adapter,
					QLCNIC_UNI_DIR_SECT_FW,
					QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
	else
		return cpu_to_le32(
			*(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
}
673
674static __le32
675qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
676{
677 struct uni_data_desc *fw_data_desc;
678 const struct firmware *fw = adapter->fw;
679 __le32 major, minor, sub;
680 const u8 *ver_str;
681 int i, ret;
682
683 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
684 return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
685
686 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
687 QLCNIC_UNI_FIRMWARE_IDX_OFF);
688 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
689 cpu_to_le32(fw_data_desc->size) - 17;
690
691 for (i = 0; i < 12; i++) {
692 if (!strncmp(&ver_str[i], "REV=", 4)) {
693 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
694 &major, &minor, &sub);
695 if (ret != 3)
696 return 0;
697 else
698 return major + (minor << 8) + (sub << 16);
699 }
700 }
701
702 return 0;
703}
704
/*
 * qlcnic_get_bios_version - extract the BIOS version from the image.
 *
 * Flash images store it at a fixed offset; unified images keep it in
 * the cached product entry, with bytes rearranged by the final return
 * expression.
 *
 * NOTE(review): like its siblings, this mixes __le32 values with CPU
 * arithmetic and cpu_to_le32() reads of on-disk little-endian data, so
 * it is only correct on little-endian hosts — confirm before use on BE.
 */
static __le32
qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
{
	const struct firmware *fw = adapter->fw;
	__le32 bios_ver, prd_off = adapter->file_prd_off;

	if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
		return cpu_to_le32(
			*(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);

	bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
				+ QLCNIC_UNI_BIOS_VERSION_OFF));

	return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
}
720
/*
 * qlcnic_need_fw_reset - decide whether the firmware must be reset.
 *
 * Reset is needed when a reset was explicitly requested, the previous
 * init attempt failed, the firmware heartbeat counter has stopped
 * advancing (checked over ~2 seconds), or a newer firmware file than
 * the one currently running is available.
 *
 * Returns 1 if a reset is required, 0 if not, -EINTR if the sleep was
 * interrupted by a signal (init state is then marked failed).
 */
int
qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
{
	u32 count, old_count;
	u32 val, version, major, minor, build;
	int i, timeout;

	if (adapter->need_fw_reset)
		return 1;

	/* last attempt had failed */
	if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
		return 1;

	old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);

	/* sample the heartbeat counter for up to 10 * 200ms */
	for (i = 0; i < 10; i++) {

		timeout = msleep_interruptible(200);
		if (timeout) {
			QLCWR32(adapter, CRB_CMDPEG_STATE,
					PHAN_INITIALIZE_FAILED);
			return -EINTR;
		}

		count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
		if (count != old_count)
			break;
	}

	/* firmware is dead */
	if (count == old_count)
		return 1;

	/* check if we have got newer or different file firmware */
	if (adapter->fw) {

		val = qlcnic_get_fw_version(adapter);

		version = QLCNIC_DECODE_VERSION(val);

		major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
		minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
		build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

		if (version > QLCNIC_VERSION_CODE(major, minor, build))
			return 1;
	}

	return 0;
}
772
/* Firmware file names, indexed by adapter->fw_type (see
 * qlcnic_get_next_fwtype()). */
static const char *fw_name[] = {
	QLCNIC_UNIFIED_ROMIMAGE_NAME,
	QLCNIC_FLASH_ROMIMAGE_NAME,
};
777
/*
 * qlcnic_load_firmware - download bootloader + firmware to the card.
 *
 * If a firmware file was obtained (adapter->fw != NULL), its bootloader
 * and firmware sections are copied to the card 8 bytes at a time via
 * qlcnic_pci_mem_write_2M().  Otherwise the same regions are streamed
 * out of the on-board flash ROM.  Finishes by kicking peg 0 and
 * releasing the chip from reset.
 *
 * Returns 0 on success, -EIO on any memory-write or ROM-read failure.
 */
int
qlcnic_load_firmware(struct qlcnic_adapter *adapter)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	const struct firmware *fw = adapter->fw;
	struct pci_dev *pdev = adapter->pdev;

	dev_info(&pdev->dev, "loading firmware from %s\n",
			fw_name[adapter->fw_type]);

	if (fw) {
		__le64 data;

		/* bootloader occupies [BOOTLD_START, IMAGE_START) */
		size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;

		ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
		flashaddr = QLCNIC_BOOTLD_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}

		size = (__force u32)qlcnic_get_fw_size(adapter) / 8;

		ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
		flashaddr = QLCNIC_IMAGE_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (qlcnic_pci_mem_write_2M(adapter,
						flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}
	} else {
		u64 data;
		u32 hi, lo;

		/* no file firmware: copy bootloader out of flash ROM */
		size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
		flashaddr = QLCNIC_BOOTLD_START;

		for (i = 0; i < size; i++) {
			if (qlcnic_rom_fast_read(adapter,
					flashaddr, (int *)&lo) != 0)
				return -EIO;
			if (qlcnic_rom_fast_read(adapter,
					flashaddr + 4, (int *)&hi) != 0)
				return -EIO;

			data = (((u64)hi << 32) | lo);

			if (qlcnic_pci_mem_write_2M(adapter,
						flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}
	}
	msleep(1);

	/* kick peg 0 and release the chip from software reset */
	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
	return 0;
}
850
/*
 * qlcnic_validate_firmware - sanity-check a loaded firmware file.
 *
 * Verifies (in order): image magic / product-table entry, minimum file
 * size, supported version range, BIOS compatibility with the flashed
 * BIOS, and that the file is not older than the flashed firmware.
 * On success the BDINFO magic is written to CAM RAM as a marker.
 *
 * NOTE(review): the __le32/__force mixing here assumes a little-endian
 * host, like the helpers it calls — confirm before use on big-endian.
 *
 * Returns 0 if the file is usable, -EINVAL if it is rejected, -EIO on
 * a ROM read failure.
 */
static int
qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
{
	__le32 val;
	u32 ver, min_ver, bios, min_size;
	struct pci_dev *pdev = adapter->pdev;
	const struct firmware *fw = adapter->fw;
	u8 fw_type = adapter->fw_type;

	if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
		if (qlcnic_set_product_offs(adapter))
			return -EINVAL;

		min_size = QLCNIC_UNI_FW_MIN_SIZE;
	} else {
		val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
		if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
			return -EINVAL;

		min_size = QLCNIC_FW_MIN_SIZE;
	}

	if (fw->size < min_size)
		return -EINVAL;

	val = qlcnic_get_fw_version(adapter);

	min_ver = QLCNIC_VERSION_CODE(4, 0, 216);

	ver = QLCNIC_DECODE_VERSION(val);

	if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
		dev_err(&pdev->dev,
				"%s: firmware version %d.%d.%d unsupported\n",
		fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
		return -EINVAL;
	}

	val = qlcnic_get_bios_version(adapter);
	qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
	if ((__force u32)val != bios) {
		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
				fw_name[fw_type]);
		return -EINVAL;
	}

	/* check if flashed firmware is newer */
	if (qlcnic_rom_fast_read(adapter,
			QLCNIC_FW_VERSION_OFFSET, (int *)&val))
		return -EIO;

	val = QLCNIC_DECODE_VERSION(val);
	if (val > ver) {
		dev_info(&pdev->dev, "%s: firmware is older than flash\n",
				fw_name[fw_type]);
		return -EINVAL;
	}

	QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
	return 0;
}
912
913static void
914qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
915{
916 u8 fw_type;
917
918 switch (adapter->fw_type) {
919 case QLCNIC_UNKNOWN_ROMIMAGE:
920 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
921 break;
922
923 case QLCNIC_UNIFIED_ROMIMAGE:
924 default:
925 fw_type = QLCNIC_FLASH_ROMIMAGE;
926 break;
927 }
928
929 adapter->fw_type = fw_type;
930}
931
932
933
934void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
935{
936 struct pci_dev *pdev = adapter->pdev;
937 int rc;
938
939 adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
940
941next:
942 qlcnic_get_next_fwtype(adapter);
943
944 if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
945 adapter->fw = NULL;
946 } else {
947 rc = request_firmware(&adapter->fw,
948 fw_name[adapter->fw_type], &pdev->dev);
949 if (rc != 0)
950 goto next;
951
952 rc = qlcnic_validate_firmware(adapter);
953 if (rc != 0) {
954 release_firmware(adapter->fw);
955 msleep(1);
956 goto next;
957 }
958 }
959}
960
961
962void
963qlcnic_release_firmware(struct qlcnic_adapter *adapter)
964{
965 if (adapter->fw)
966 release_firmware(adapter->fw);
967 adapter->fw = NULL;
968}
969
/*
 * qlcnic_phantom_init - wait for the command peg to finish initializing.
 *
 * Polls CRB_CMDPEG_STATE every 500ms for up to 30 seconds.  Success is
 * COMPLETE or ACK; FAILED aborts immediately.  On timeout the state is
 * set to FAILED so later checks (qlcnic_need_fw_reset) see it.
 *
 * Returns 0 on success, -EIO on failure or timeout.
 */
int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
{
	u32 val;
	int retries = 60;

	do {
		val = QLCRD32(adapter, CRB_CMDPEG_STATE);

		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return 0;
		case PHAN_INITIALIZE_FAILED:
			goto out_err;
		default:
			break;
		}

		msleep(500);

	} while (--retries);

	QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);

out_err:
	dev_err(&adapter->pdev->dev, "firmware init failed\n");
	return -EIO;
}
998
999static int
1000qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
1001{
1002 u32 val;
1003 int retries = 2000;
1004
1005 do {
1006 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
1007
1008 if (val == PHAN_PEG_RCV_INITIALIZED)
1009 return 0;
1010
1011 msleep(10);
1012
1013 } while (--retries);
1014
1015 if (!retries) {
1016 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
1017 "complete, state: 0x%x.\n", val);
1018 return -EIO;
1019 }
1020
1021 return 0;
1022}
1023
/*
 * qlcnic_init_firmware - final firmware handshake after boot.
 *
 * Waits for the receive peg, then advertises host capabilities
 * (per-port interrupts, multifunction MSI, multiport mode) and ACKs
 * the command peg's initialization.
 *
 * Returns 0 on success, or the qlcnic_receive_peg_ready() error.
 */
int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_receive_peg_ready(adapter);
	if (err)
		return err;

	QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
	QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	return err;
}
1039
/*
 * qlcnic_handle_linkevent - decode a firmware link-event message.
 *
 * Unpacks cable/link fields from the message body (body[1]: cable OUI,
 * cable length, link speed; body[2]: link status, module type, duplex,
 * autoneg), logs unsupported cables, propagates the link state to the
 * stack, and caches duplex/module/autoneg/speed on the adapter.
 */
static void
qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len;
	u16 link_speed;
	u8  link_status, module, duplex, autoneg;
	struct net_device *netdev = adapter->netdev;

	adapter->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
				"length %d\n", cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
				cable_len);

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->link_duplex = DUPLEX_FULL;
	else
		adapter->link_duplex = DUPLEX_HALF;

	adapter->module_type = module;
	adapter->link_autoneg = autoneg;
	adapter->link_speed = link_speed;
}
1079
/*
 * qlcnic_handle_fw_message - collect and dispatch a firmware message.
 *
 * Gathers up to 8 64-bit words from @desc_cnt consecutive status
 * descriptors (2 words each) starting at @index, then dispatches on
 * the opcode; currently only link events are handled.
 */
static void
qlcnic_handle_fw_message(int desc_cnt, int index,
		struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	int i = 0, opcode;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(sds_ring->adapter, &msg);
		break;
	default:
		break;
	}
}
1106
1107static int
1108qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1109 struct qlcnic_host_rds_ring *rds_ring,
1110 struct qlcnic_rx_buffer *buffer)
1111{
1112 struct sk_buff *skb;
1113 dma_addr_t dma;
1114 struct pci_dev *pdev = adapter->pdev;
1115
1116 buffer->skb = dev_alloc_skb(rds_ring->skb_size);
1117 if (!buffer->skb)
1118 return -ENOMEM;
1119
1120 skb = buffer->skb;
1121
1122 if (!adapter->ahw.cut_through)
1123 skb_reserve(skb, 2);
1124
1125 dma = pci_map_single(pdev, skb->data,
1126 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1127
1128 if (pci_dma_mapping_error(pdev, dma)) {
1129 dev_kfree_skb_any(skb);
1130 buffer->skb = NULL;
1131 return -ENOMEM;
1132 }
1133
1134 buffer->skb = skb;
1135 buffer->dma = dma;
1136 buffer->state = QLCNIC_BUFFER_BUSY;
1137
1138 return 0;
1139}
1140
/*
 * qlcnic_process_rxbuf - detach a received skb from its ring slot.
 *
 * Unmaps the buffer's DMA mapping, sets the checksum status (offload
 * result honored only when rx_csum is enabled), clears the slot's skb
 * pointer and marks the slot FREE.
 *
 * Returns the skb, or NULL if the slot held none (slot is still
 * marked FREE in that case).
 */
static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	skb->dev = adapter->netdev;

	buffer->skb = NULL;
no_skb:
	buffer->state = QLCNIC_BUFFER_FREE;
	return skb;
}
1170
/*
 * qlcnic_process_rcv - hand one received packet to the stack.
 *
 * Validates the ring and buffer indices from the status word, detaches
 * the skb from its rds slot, trims it to the reported length (clamped
 * to skb_size), strips any hardware-prepended header via pkt_offset,
 * and passes it up through GRO.  Statistics are updated on success.
 *
 * Returns the rx_buffer so the caller can recycle it, or NULL when the
 * status word references an out-of-range ring/descriptor.
 */
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		struct qlcnic_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
1220
1221#define QLC_TCP_HDR_SIZE 20
1222#define QLC_TCP_TS_OPTION_SIZE 12
1223#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1224
1225static struct qlcnic_rx_buffer *
1226qlcnic_process_lro(struct qlcnic_adapter *adapter,
1227 struct qlcnic_host_sds_ring *sds_ring,
1228 int ring, u64 sts_data0, u64 sts_data1)
1229{
1230 struct net_device *netdev = adapter->netdev;
1231 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1232 struct qlcnic_rx_buffer *buffer;
1233 struct sk_buff *skb;
1234 struct qlcnic_host_rds_ring *rds_ring;
1235 struct iphdr *iph;
1236 struct tcphdr *th;
1237 bool push, timestamp;
1238 int l2_hdr_offset, l4_hdr_offset;
1239 int index;
1240 u16 lro_length, length, data_offset;
1241 u32 seq_number;
1242
1243 if (unlikely(ring > adapter->max_rds_rings))
1244 return NULL;
1245
1246 rds_ring = &recv_ctx->rds_rings[ring];
1247
1248 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1249 if (unlikely(index > rds_ring->num_desc))
1250 return NULL;
1251
1252 buffer = &rds_ring->rx_buf_arr[index];
1253
1254 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1255 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1256 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1257 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1258 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1259 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1260
1261 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1262 if (!skb)
1263 return buffer;
1264
1265 if (timestamp)
1266 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1267 else
1268 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1269
1270 skb_put(skb, lro_length + data_offset);
1271
1272 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1273
1274 skb_pull(skb, l2_hdr_offset);
1275 skb->protocol = eth_type_trans(skb, netdev);
1276
1277 iph = (struct iphdr *)skb->data;
1278 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1279
1280 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1281 iph->tot_len = htons(length);
1282 iph->check = 0;
1283 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1284 th->psh = push;
1285 th->seq = htonl(seq_number);
1286
1287 length = skb->len;
1288
1289 netif_receive_skb(skb);
1290
1291 adapter->stats.lro_pkts++;
1292 adapter->stats.rxbytes += length;
1293
1294 return buffer;
1295}
1296
/*
 * NAPI poll worker for one status ring: consume up to 'max' status
 * descriptors, dispatch them by opcode, recycle the freed rx buffers
 * back into their RDS rings, and finally publish the new consumer
 * index to the hardware.
 *
 * Returns the number of descriptors processed (NAPI budget accounting).
 */
int
qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0, sts_data1;

	int count = 0;
	int opcode, ring, desc_cnt;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		/* descriptor not yet handed to the host — ring drained */
		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring,
					ring, sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, sds_ring,
					ring, sts_data0, sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fallthrough: fw messages produce no rx buffer */
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (rxbuf)
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);

skip:
		/* return ownership of all consumed descriptors to firmware */
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] =
				cpu_to_le64(STATUS_OWNER_PHANTOM);
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	/* refill: re-arm freed buffers and splice them back per RDS ring */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		struct qlcnic_host_rds_ring *rds_ring =
			&adapter->recv_ctx.rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur,
						struct qlcnic_rx_buffer, list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
						&rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		/* tell the chip how far we have consumed */
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}
1381
/*
 * Post every buffer on the RDS free list to the hardware: ensure each
 * has an skb/DMA mapping, build its receive descriptor, and ring the
 * producer doorbell once at the end.  Stops early (leaving the rest on
 * the free list) if an skb allocation fails.
 */
void
qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int producer, count = 0;
	struct list_head *head;

	producer = rds_ring->producer;

	spin_lock(&rds_ring->lock);
	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		/* buffer may already carry an skb from the recycle path */
		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->num_desc);
	}
	spin_unlock(&rds_ring->lock);

	if (count) {
		rds_ring->producer = producer;
		/* doorbell wants the last written slot, hence producer-1 */
		writel((producer-1) & (rds_ring->num_desc-1),
				rds_ring->crb_rcv_producer);
	}
}
1423
/*
 * Lock-avoiding variant of qlcnic_post_rx_buffers() for use from the
 * NAPI poll path: if the ring lock is contended it simply returns and
 * lets the other context do the posting ("nodb" = best-effort, no
 * blocking on the doorbell path).
 */
static void
qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int producer, count = 0;
	struct list_head *head;

	producer = rds_ring->producer;
	/* don't spin in softirq context; another path will refill */
	if (!spin_trylock(&rds_ring->lock))
		return;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		/* buffer may already carry an skb from the recycle path */
		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		/* doorbell wants the last written slot, hence producer-1 */
		writel((producer - 1) & (rds_ring->num_desc - 1),
				rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}
1466
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 00000000000..1698b6a68ed
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2604 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/vmalloc.h>
26#include <linux/interrupt.h>
27
28#include "qlcnic.h"
29
30#include <linux/dma-mapping.h>
31#include <linux/if_vlan.h>
32#include <net/ip.h>
33#include <linux/ipv6.h>
34#include <linux/inetdevice.h>
35#include <linux/sysfs.h>
36
37MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
38MODULE_LICENSE("GPL");
39MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
40MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
41
42char qlcnic_driver_name[] = "qlcnic";
43static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
44 QLCNIC_LINUX_VERSIONID;
45
46static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
47
48/* Default to restricted 1G auto-neg mode */
49static int wol_port_mode = 5;
50
51static int use_msi = 1;
52module_param(use_msi, int, 0644);
53MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
54
55static int use_msi_x = 1;
56module_param(use_msi_x, int, 0644);
57MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
58
59static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60module_param(auto_fw_reset, int, 0644);
61MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
62
/* --- forward declarations: PCI probe/remove and netdev entry points --- */
static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static netdev_tx_t qlcnic_xmit_frame(struct sk_buff *,
		struct net_device *);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_tx_timeout_task(struct work_struct *work);
/* deferred-work handlers driving firmware init/recovery state machine */
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

/* sysfs / diagnostics attribute management */
static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

/* interrupt handlers for legacy INTx, MSI and MSI-X modes */
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X  0x8020

static const struct pci_device_id qlcnic_pci_tbl[] __devinitdata = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
111
112
/*
 * Publish the new TX producer index to the hardware doorbell, and stop
 * the queue when the ring drops to TX_STOP_THRESH free descriptors.
 */
void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);

	if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
		netif_stop_queue(adapter->netdev);
		/* NOTE(review): barrier presumably pairs with a re-check in
		 * the TX completion path before waking the queue — confirm */
		smp_mb();
	}
}
124
/* per-PCI-function MSI target-status register offsets, indexed by
 * adapter->ahw.pci_func (functions 0-7) */
static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

/* legacy INTx register layout, one entry per PCI function */
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
/* Mask interrupts for one status ring by clearing its CRB mask register. */
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}
139
/*
 * Unmask interrupts for one status ring.  In legacy INTx mode the
 * global target mask register must also be re-armed (0xfbff).
 */
static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}
149
150static int
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
157 return (recv_ctx->sds_rings == NULL);
158}
159
160static void
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167}
168
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
174 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
181 netif_napi_add(netdev, &sds_ring->napi,
182 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
183 }
184
185 return 0;
186}
187
188static void
189qlcnic_napi_del(struct qlcnic_adapter *adapter)
190{
191 int ring;
192 struct qlcnic_host_sds_ring *sds_ring;
193 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
194
195 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
196 sds_ring = &recv_ctx->sds_rings[ring];
197 netif_napi_del(&sds_ring->napi);
198 }
199
200 qlcnic_free_sds_rings(&adapter->recv_ctx);
201}
202
203static void
204qlcnic_napi_enable(struct qlcnic_adapter *adapter)
205{
206 int ring;
207 struct qlcnic_host_sds_ring *sds_ring;
208 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
209
210 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
211 sds_ring = &recv_ctx->sds_rings[ring];
212 napi_enable(&sds_ring->napi);
213 qlcnic_enable_int(sds_ring);
214 }
215}
216
217static void
218qlcnic_napi_disable(struct qlcnic_adapter *adapter)
219{
220 int ring;
221 struct qlcnic_host_sds_ring *sds_ring;
222 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
223
224 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
225 sds_ring = &recv_ctx->sds_rings[ring];
226 qlcnic_disable_int(sds_ring);
227 napi_synchronize(&sds_ring->napi);
228 napi_disable(&sds_ring->napi);
229 }
230}
231
232static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
233{
234 memset(&adapter->stats, 0, sizeof(adapter->stats));
235 return;
236}
237
238static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
239{
240 struct pci_dev *pdev = adapter->pdev;
241 u64 mask, cmask;
242
243 adapter->pci_using_dac = 0;
244
245 mask = DMA_BIT_MASK(39);
246 cmask = mask;
247
248 if (pci_set_dma_mask(pdev, mask) == 0 &&
249 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
250 adapter->pci_using_dac = 1;
251 return 0;
252 }
253
254 return -EIO;
255}
256
/* Update addressable range if firmware supports it */
/*
 * The firmware advertises how many extra address bits (beyond 32) the
 * DMA engine can use via CRB_DMA_SHIFT.  If it is more than the 9 bits
 * already implied by the initial 39-bit mask, widen both DMA masks to
 * 32+shift bits; on failure, restore the previous masks.
 */
static int
qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
{
	int change, shift, err;
	u64 mask, old_mask, old_cmask;
	struct pci_dev *pdev = adapter->pdev;

	change = 0;

	shift = QLCRD32(adapter, CRB_DMA_SHIFT);
	if (shift > 32)
		/* implausible value from firmware — keep current mask */
		return 0;

	if (shift > 9)
		change = 1;

	if (change) {
		/* remember current masks so a failed update can be undone */
		old_mask = pdev->dma_mask;
		old_cmask = pdev->dev.coherent_dma_mask;

		mask = DMA_BIT_MASK(32+shift);

		err = pci_set_dma_mask(pdev, mask);
		if (err)
			goto err_out;

		err = pci_set_consistent_dma_mask(pdev, mask);
		if (err)
			goto err_out;
		dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
	}

	return 0;

err_out:
	pci_set_dma_mask(pdev, old_mask);
	pci_set_consistent_dma_mask(pdev, old_cmask);
	return err;
}
297
/*
 * Program the PHY port mode (and WoL port mode) requested via the
 * 'port_mode'/'wol_port_mode' module parameters.  Only applies to
 * board types with a configurable port (P3 HMEZ / P3 XG LOM);
 * unrecognized values fall back to full auto-negotiation.
 */
static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
{
	u32 val, data;

	val = adapter->ahw.board_type;
	if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
		(val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
		if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
			data = QLCNIC_PORT_MODE_802_3_AP;
			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
		} else if (port_mode == QLCNIC_PORT_MODE_XG) {
			data = QLCNIC_PORT_MODE_XG;
			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
		} else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
			data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
		} else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
			data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
		} else {
			/* unknown request — default to auto-negotiation */
			data = QLCNIC_PORT_MODE_AUTO_NEG;
			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
		}

		/* sanitize the WoL port mode the same way */
		if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
			(wol_port_mode != QLCNIC_PORT_MODE_XG) &&
			(wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
			(wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
			wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
		}
		QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
	}
}
331
/*
 * Set or clear the MSI-X enable bit in the device's MSI-X capability.
 * No-op if the device has no MSI-X capability.
 *
 * NOTE(review): on disable the whole config dword at the capability
 * offset is written to 0, not just the enable flag — presumably safe
 * for this device, but worth confirming against the PCI spec.
 */
static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}
347
348static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
349{
350 int i;
351
352 for (i = 0; i < count; i++)
353 adapter->msix_entries[i].entry = i;
354}
355
/*
 * Read the factory MAC address from the adapter and install it as the
 * netdev's current and permanent address.  The 6 address bytes are
 * stored in reverse order within the 64-bit firmware value.
 *
 * Returns 0 on success, -EIO if the firmware query fails.
 */
static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *p;
	u64 mac_addr;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
		return -EIO;

	/* byte-reverse the 64-bit value into the netdev address */
	p = (unsigned char *)&mac_addr;
	for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = *(p + 5 - i);

	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	/* warn but continue — the user can still set a MAC manually */
	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
					netdev->dev_addr);

	return 0;
}
383
/*
 * .ndo_set_mac_address handler: validate the new address, quiesce the
 * interface if it is running, program the address (via the multicast /
 * filter refresh), then resume.
 *
 * Returns 0 on success, -EINVAL for an invalid Ethernet address.
 */
static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (netif_running(netdev)) {
		/* pause traffic while the HW filter is reprogrammed */
		netif_device_detach(netdev);
		qlcnic_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	qlcnic_set_multi(adapter->netdev);

	if (netif_running(netdev)) {
		netif_device_attach(netdev);
		qlcnic_napi_enable(adapter);
	}
	return 0;
}
407
/* net_device_ops vtable wiring the stack's entry points to this driver */
static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open	   = qlcnic_open,
	.ndo_stop	   = qlcnic_close,
	.ndo_start_xmit    = qlcnic_xmit_frame,
	.ndo_get_stats	   = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qlcnic_set_multi,
	.ndo_set_mac_address    = qlcnic_set_mac,
	.ndo_change_mtu	   = qlcnic_change_mtu,
	.ndo_tx_timeout	   = qlcnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = qlcnic_poll_controller,
#endif
};
422
/*
 * Choose and configure the interrupt mode, best first: MSI-X (with one
 * status ring per vector when RSS is supported), then MSI, then legacy
 * INTx.  Also resolves the per-function status/mask register addresses
 * used by the legacy and MSI paths.
 */
static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	const struct qlcnic_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;
	int err, num_msix;

	if (adapter->rss_supported) {
		/* one vector per ring, capped by online CPUs */
		num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
			MSIX_ENTRIES_PER_ADAPTER : 2;
	} else
		num_msix = 1;

	adapter->max_sds_rings = 1;

	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);

	/* resolve per-PCI-function legacy interrupt registers */
	legacy_intrp = &legacy_intr[adapter->ahw.pci_func];

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_status_reg);
	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_mask_reg);
	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);

	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
			ISR_INT_STATE_REG);

	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {

		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			if (adapter->rss_supported)
				adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return;
		}

		/* err > 0 means fewer vectors available than requested;
		 * no retry with fewer here — fall back to MSI */
		if (err > 0)
			pci_disable_msix(pdev);

		/* fall through for msi */
	}

	if (use_msi && !pci_enable_msi(pdev)) {
		adapter->flags |= QLCNIC_MSI_ENABLED;
		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
				msi_tgt_status[adapter->ahw.pci_func]);
		dev_info(&pdev->dev, "using msi interrupts\n");
		adapter->msix_entries[0].vector = pdev->irq;
		return;
	}

	dev_info(&pdev->dev, "using legacy interrupts\n");
	adapter->msix_entries[0].vector = pdev->irq;
}
487
488static void
489qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
490{
491 if (adapter->flags & QLCNIC_MSIX_ENABLED)
492 pci_disable_msix(adapter->pdev);
493 if (adapter->flags & QLCNIC_MSI_ENABLED)
494 pci_disable_msi(adapter->pdev);
495}
496
497static void
498qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
499{
500 if (adapter->ahw.pci_base0 != NULL)
501 iounmap(adapter->ahw.pci_base0);
502}
503
/*
 * Map PCI BAR 0 (expected to be exactly 2 MB) into the kernel address
 * space and record the mapping plus the per-function OCM window
 * register address in adapter->ahw.
 *
 * Returns 0 on success, -EIO on an unexpected BAR size or ioremap
 * failure.
 */
static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	resource_size_t mem_base;
	unsigned long mem_len, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;
	int pci_func = adapter->ahw.pci_func;

	/*
	 * Set the CRB window to invalid. If any register in window 0 is
	 * accessed it should set the window to 0 and then reset it to 1.
	 */
	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	if (mem_len == QLCNIC_PCI_2MB_SIZE) {

		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;
	} else {
		/* only the 2 MB BAR layout is supported */
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw.pci_base0 = mem_ptr0;
	adapter->ahw.pci_len0 = pci_len0;

	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));

	return 0;
}
547
548static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
549{
550 struct pci_dev *pdev = adapter->pdev;
551 int i, found = 0;
552
553 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
554 if (qlcnic_boards[i].vendor == pdev->vendor &&
555 qlcnic_boards[i].device == pdev->device &&
556 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
557 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
558 strcpy(name, qlcnic_boards[i].short_name);
559 found = 1;
560 break;
561 }
562
563 }
564
565 if (!found)
566 name = "Unknown";
567}
568
/*
 * Read board/firmware identification and derive driver options:
 * verifies the firmware version is supported (sets driver_mismatch
 * otherwise), reads capabilities, and sizes the RX/TX rings and
 * MSI-X/RSS support accordingly.
 */
static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build;
	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
	char serial_num[32];
	int i, offset, val;
	int *ptr32;
	struct pci_dev *pdev = adapter->pdev;

	adapter->driver_mismatch = 0;

	/* read the 32-byte serial number from flash, word by word */
	ptr32 = (int *)&serial_num;
	offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
	for (i = 0; i < 8; i++) {
		if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
			dev_err(&pdev->dev, "error reading board info\n");
			adapter->driver_mismatch = 1;
			return;
		}
		/* NOTE(review): cpu_to_le32 stored into a plain int —
		 * endianness intent worth confirming with sparse */
		ptr32[i] = cpu_to_le32(val);
		offset += sizeof(u32);
	}

	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);

	/* only function 0 prints the board banner */
	if (adapter->portnum == 0) {
		get_brd_name(adapter, brd_name);

		pr_info("%s: %s Board Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, adapter->ahw.revision_id);
	}

	/* minimum supported firmware is 3.4.216 */
	if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
		adapter->driver_mismatch = 1;
		dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
				fw_major, fw_minor, fw_build);
		return;
	}

	i = QLCRD32(adapter, QLCNIC_SRE_MISC);
	adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;

	dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
			fw_major, fw_minor, fw_build,
			adapter->ahw.cut_through ? "cut-through" : "legacy");

	/* capability register only exists from fw 4.0.222 onward */
	if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
		adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);

	adapter->flags &= ~QLCNIC_LRO_ENABLED;

	/* ring sizes depend on the port speed (10G vs 1G) */
	if (adapter->ahw.port_type == QLCNIC_XGBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	} else if (adapter->ahw.port_type == QLCNIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = !!use_msi_x;
	adapter->rss_supported = !!use_msi_x;

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	adapter->num_lro_rxd = 0;
	adapter->max_rds_rings = 2;
}
642
/*
 * Bring the firmware up: set the DMA mask, and — if this instance is
 * elected to do so — reset/initialize the chip from ROM, load the
 * firmware image and record the driver version.  All instances then
 * handshake with the (now running) firmware and derive driver options.
 *
 * Returns 0 on success or a negative errno.
 */
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
	int val, err, first_boot;

	err = qlcnic_set_dma_mask(adapter);
	if (err)
		return err;

	/* another function may already be initializing the firmware */
	if (!qlcnic_can_start_firmware(adapter))
		goto wait_init;

	first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
	if (first_boot == 0x55555555)
		/* This is the first boot after power up */
		QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);

	qlcnic_request_firmware(adapter);

	err = qlcnic_need_fw_reset(adapter);
	if (err < 0)
		goto err_out;
	if (err == 0)
		goto wait_init;

	if (first_boot != 0x55555555) {
		QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
		qlcnic_pinit_from_rom(adapter);
		msleep(1);
	}

	QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);

	qlcnic_set_port_mode(adapter);

	err = qlcnic_load_firmware(adapter);
	if (err)
		goto err_out;

	qlcnic_release_firmware(adapter);

	/* advertise the driver version to the firmware */
	val = (_QLCNIC_LINUX_MAJOR << 16)
		| ((_QLCNIC_LINUX_MINOR << 8))
		| (_QLCNIC_LINUX_SUBVERSION);
	QLCWR32(adapter, CRB_DRIVER_VERSION, val);

wait_init:
	/* Handshake with the card before we register the devices. */
	err = qlcnic_phantom_init(adapter);
	if (err)
		goto err_out;

	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);

	qlcnic_update_dma_mask(adapter);

	qlcnic_check_options(adapter);

	adapter->need_fw_reset = 0;

	/* fall through and release firmware */

err_out:
	qlcnic_release_firmware(adapter);
	return err;
}
711
712static int
713qlcnic_request_irq(struct qlcnic_adapter *adapter)
714{
715 irq_handler_t handler;
716 struct qlcnic_host_sds_ring *sds_ring;
717 int err, ring;
718
719 unsigned long flags = 0;
720 struct net_device *netdev = adapter->netdev;
721 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
722
723 if (adapter->flags & QLCNIC_MSIX_ENABLED)
724 handler = qlcnic_msix_intr;
725 else if (adapter->flags & QLCNIC_MSI_ENABLED)
726 handler = qlcnic_msi_intr;
727 else {
728 flags |= IRQF_SHARED;
729 handler = qlcnic_intr;
730 }
731 adapter->irq = netdev->irq;
732
733 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
734 sds_ring = &recv_ctx->sds_rings[ring];
735 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
736 err = request_irq(sds_ring->irq, handler,
737 flags, sds_ring->name, sds_ring);
738 if (err)
739 return err;
740 }
741
742 return 0;
743}
744
745static void
746qlcnic_free_irq(struct qlcnic_adapter *adapter)
747{
748 int ring;
749 struct qlcnic_host_sds_ring *sds_ring;
750
751 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
752
753 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
754 sds_ring = &recv_ctx->sds_rings[ring];
755 free_irq(sds_ring->irq, sds_ring);
756 }
757}
758
759static void
760qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
761{
762 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
763 adapter->coal.normal.data.rx_time_us =
764 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
765 adapter->coal.normal.data.rx_packets =
766 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
767 adapter->coal.normal.data.tx_time_us =
768 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
769 adapter->coal.normal.data.tx_packets =
770 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
771}
772
/*
 * Bring the data path up (caller holds rtnl or is in .ndo_open):
 * program filters and MTU, configure RSS/coalescing/LRO, enable NAPI
 * and request a link event.  Requires a successful qlcnic_attach()
 * (is_up == QLCNIC_ADAPTER_UP_MAGIC).
 */
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	qlcnic_set_multi(netdev);
	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);

	adapter->ahw.linkup = 0;

	/* RSS only makes sense with more than one status ring */
	if (adapter->max_sds_rings > 1)
		qlcnic_config_rss(adapter, 1);

	qlcnic_config_intr_coalesce(adapter);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);

	qlcnic_napi_enable(adapter);

	qlcnic_linkevent_request(adapter, 1);

	set_bit(__QLCNIC_DEV_UP, &adapter->state);
	return 0;
}
799
/* Usage: During resume and firmware recovery module.*/

/* rtnl-locked wrapper: only brings the interface up if it is running. */
static int
qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int rc = 0;

	rtnl_lock();
	if (netif_running(netdev))
		rc = __qlcnic_up(adapter, netdev);
	rtnl_unlock();

	return rc;
}
814
/*
 * Tear the data path down: stop TX, disable NAPI and drop pending TX
 * buffers.  Idempotent — returns immediately if the adapter is not
 * attached or __QLCNIC_DEV_UP is already cleared.
 */
static void
__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	/* test_and_clear makes concurrent down calls race-free */
	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
		return;

	smp_mb();
	/* exclude the TX path while buffers are reclaimed */
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	qlcnic_free_mac_list(adapter);

	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);

	qlcnic_napi_disable(adapter);

	qlcnic_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}
838
/* Usage: During suspend and firmware recovery module */

/* rtnl-locked wrapper: only takes the interface down if it is running. */
static void
qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);
	rtnl_unlock();
}
850
static int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
	/*
	 * Create the HW context: init firmware, add NAPI instances,
	 * allocate SW/HW ring resources, prime the RX rings and hook up
	 * the IRQ.  On success is_up is set to QLCNIC_ADAPTER_UP_MAGIC,
	 * which gates __qlcnic_up()/__qlcnic_down().  Idempotent when
	 * already attached.  Returns 0 or a negative errno.
	 */
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err, ring;
	struct qlcnic_host_rds_ring *rds_ring;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
		return 0;

	err = qlcnic_init_firmware(adapter);
	if (err)
		return err;

	err = qlcnic_napi_add(adapter, netdev);
	if (err)
		return err;

	/* NOTE(review): failures below do not undo qlcnic_napi_add();
	 * qlcnic_detach() is the only place qlcnic_napi_del() is called —
	 * confirm NAPI add is safe to repeat on a later attach. */
	err = qlcnic_alloc_sw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting sw resources\n");
		return err;
	}

	err = qlcnic_alloc_hw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting hw resources\n");
		goto err_out_free_sw;
	}


	/* hand initial RX buffers to the firmware for every RDS ring */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
	}

	err = qlcnic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to setup interrupt\n");
		goto err_out_free_rxbuf;
	}

	qlcnic_init_coalesce_defaults(adapter);

	qlcnic_create_sysfs_entries(adapter);

	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_rxbuf:
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_hw_resources(adapter);
err_out_free_sw:
	qlcnic_free_sw_resources(adapter);
	return err;
}
908
static void
qlcnic_detach(struct qlcnic_adapter *adapter)
{
	/*
	 * Tear down everything qlcnic_attach() created, in reverse order.
	 * No-op unless the adapter is currently attached; clears is_up so
	 * a subsequent attach starts from scratch.
	 */
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_remove_sysfs_entries(adapter);

	qlcnic_free_hw_resources(adapter);
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_irq(adapter);
	qlcnic_napi_del(adapter);
	qlcnic_free_sw_resources(adapter);

	adapter->is_up = 0;
}
925
int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
	/*
	 * Recreate the HW context without a full firmware reset:
	 * down + detach, then attach + up if the interface was running.
	 * Guarded by the __QLCNIC_RESETTING bit; returns -EBUSY if another
	 * reset is already in flight, otherwise 0 or the attach/up error.
	 * On failure the netdev is left detached (netif_device_detach).
	 */
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__qlcnic_down(adapter, netdev);

		qlcnic_detach(adapter);

		if (netif_running(netdev)) {
			err = qlcnic_attach(adapter);
			if (!err)
				err = __qlcnic_up(adapter, netdev);

			if (err)
				goto done;
		}

		netif_device_attach(netdev);
	}

done:
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return err;
}
960
static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
		struct net_device *netdev)
{
	/*
	 * Finish net_device initialization and register it: install ops,
	 * advertise offload features gated on firmware capabilities, read
	 * the MAC address and register with the network stack.
	 * Returns 0 or the register_netdev() error.
	 */
	int err;
	struct pci_dev *pdev = adapter->pdev;

	adapter->rx_csum = 1;
	adapter->mc_enabled = 0;
	/* HW multicast filter slots; beyond this the driver falls back
	 * to promiscuous/all-multi behavior elsewhere */
	adapter->max_mc_count = 38;

	netdev->netdev_ops = &qlcnic_netdev_ops;
	netdev->watchdog_timeo = 2*HZ;

	/* validates/propagates the current MTU to the firmware */
	qlcnic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
	netdev->features |= (NETIF_F_GRO);
	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

	netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
	netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);

	if (adapter->pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
		netdev->features |= (NETIF_F_HW_VLAN_TX);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		netdev->features |= NETIF_F_LRO;

	netdev->irq = adapter->msix_entries[0].vector;

	INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);

	/* non-fatal: a random/zero MAC still allows registration */
	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}
1015
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/*
	 * PCI probe: enable the device, map BARs, start firmware, set up
	 * interrupts, register the netdev and kick off the firmware poll
	 * worker.  Error paths unwind strictly in reverse order of setup.
	 */
	struct net_device *netdev = NULL;
	struct qlcnic_adapter *adapter = NULL;
	int err;
	int pci_func_id = PCI_FUNC(pdev->devfn);
	uint8_t revision_id;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory BAR; the register window lives there */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, qlcnic_driver_name);
	if (err)
		goto err_out_disable_pdev;

	pci_set_master(pdev);

	/* adapter private data is carved out of the net_device allocation */
	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ahw.pci_func = pci_func_id;

	revision_id = pdev->revision;
	adapter->ahw.revision_id = revision_id;

	rwlock_init(&adapter->ahw.crb_lock);
	mutex_init(&adapter->ahw.mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);

	err = qlcnic_setup_pci_map(adapter);
	if (err)
		goto err_out_free_netdev;

	/* This will be reset for mezz cards */
	adapter->portnum = pci_func_id;

	err = qlcnic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}


	/* also registers this function in the shared driver-state CRB;
	 * hence the err_out_decr_ref unwind below */
	err = qlcnic_start_firmware(adapter);
	if (err)
		goto err_out_decr_ref;

	/*
	 * See if the firmware gave us a virtual-physical port mapping.
	 */
	adapter->physical_port = adapter->portnum;

	qlcnic_clear_stats(adapter);

	qlcnic_setup_intr(adapter);

	err = qlcnic_setup_netdev(adapter, netdev);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	/* periodic health/heartbeat monitor */
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw.port_type) {
	case QLCNIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case QLCNIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	qlcnic_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);

err_out_decr_ref:
	qlcnic_clr_all_drv_state(adapter);

err_out_iounmap:
	qlcnic_cleanup_pci_map(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}
1133
static void __devexit qlcnic_remove(struct pci_dev *pdev)
{
	/*
	 * PCI remove: stop background workers first (so nothing re-arms),
	 * unregister the netdev, then tear down the HW context and release
	 * PCI resources.  Mirrors qlcnic_probe() in reverse.
	 */
	struct qlcnic_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	/* also takes the __QLCNIC_RESETTING bit; cleared further below */
	qlcnic_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	qlcnic_detach(adapter);

	qlcnic_clr_all_drv_state(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	qlcnic_teardown_intr(adapter);

	qlcnic_remove_diag_entries(adapter);

	qlcnic_cleanup_pci_map(adapter);

	qlcnic_release_firmware(adapter);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	free_netdev(netdev);
}
static int __qlcnic_shutdown(struct pci_dev *pdev)
{
	/*
	 * Common quiesce path for shutdown and suspend: stop workers,
	 * bring the interface down, detach the HW context, save PCI state
	 * and arm wake-on-LAN if the hardware supports it.
	 * Returns 0, or the pci_save_state() error.
	 */
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);

	/* also takes the __QLCNIC_RESETTING bit; cleared further below */
	qlcnic_cancel_fw_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	qlcnic_detach(adapter);

	qlcnic_clr_all_drv_state(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (qlcnic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	return 0;
}
1203
static void qlcnic_shutdown(struct pci_dev *pdev)
{
	/* Power the slot down only after a clean quiesce. */
	if (__qlcnic_shutdown(pdev) == 0)
		pci_disable_device(pdev);
}
1211
1212#ifdef CONFIG_PM
1213static int
1214qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1215{
1216 int retval;
1217
1218 retval = __qlcnic_shutdown(pdev);
1219 if (retval)
1220 return retval;
1221
1222 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1223 return 0;
1224}
1225
static int
qlcnic_resume(struct pci_dev *pdev)
{
	/*
	 * Undo __qlcnic_shutdown(): restore PCI state, restart firmware,
	 * and (if the interface was running) re-attach the HW context and
	 * bring the data path back up.  Re-arms the firmware poll worker.
	 */
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* invalidate cached register windows; re-established on next access */
	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;

	err = qlcnic_start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = qlcnic_attach(adapter);
		if (err)
			goto err_out;

		err = qlcnic_up(adapter, netdev);
		if (err)
			goto err_out_detach;


		/* re-announce IP addresses to the firmware */
		qlcnic_config_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
	return 0;

err_out_detach:
	qlcnic_detach(adapter);
err_out:
	qlcnic_clr_all_drv_state(adapter);
	return err;
}
1273#endif
1274
1275static int qlcnic_open(struct net_device *netdev)
1276{
1277 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1278 int err;
1279
1280 if (adapter->driver_mismatch)
1281 return -EIO;
1282
1283 err = qlcnic_attach(adapter);
1284 if (err)
1285 return err;
1286
1287 err = __qlcnic_up(adapter, netdev);
1288 if (err)
1289 goto err_out;
1290
1291 netif_start_queue(netdev);
1292
1293 return 0;
1294
1295err_out:
1296 qlcnic_detach(adapter);
1297 return err;
1298}
1299
1300/*
1301 * qlcnic_close - Disables a network interface entry point
1302 */
static int qlcnic_close(struct net_device *netdev)
{
	/* Quiesce the data path; the HW context stays attached. */
	__qlcnic_down(netdev_priv(netdev), netdev);

	return 0;
}
1310
static void
qlcnic_tso_check(struct net_device *netdev,
		struct qlcnic_host_tx_ring *tx_ring,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	/*
	 * Fill offload fields (opcode, flags, mss, header offsets) in the
	 * first TX descriptor for checksum offload, TSO and VLAN tagging.
	 * For TSO the packet headers are additionally copied into trailing
	 * descriptors as a template for the firmware; this advances
	 * tx_ring->producer past the extra descriptors consumed.
	 */
	u8 opcode = TX_ETHER_PKT;
	__be16 protocol = skb->protocol;
	u16 flags = 0, vid = 0;
	u32 producer;
	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;

	if (protocol == cpu_to_be16(ETH_P_8021Q)) {

		/* in-band VLAN header: look at the encapsulated protocol */
		vh = (struct vlan_ethhdr *)skb->data;
		protocol = vh->h_vlan_encapsulated_proto;
		flags = FLAGS_VLAN_TAGGED;

	} else if (vlan_tx_tag_present(skb)) {

		/* out-of-band tag (HW accel): pass the TCI in the descriptor */
		flags = FLAGS_VLAN_OOB;
		vid = vlan_tx_tag_get(skb);
		qlcnic_set_tx_vlan_tci(first_desc, vid);
		vlan_oob = 1;
	}

	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
			skb_shinfo(skb)->gso_size > 0) {

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		if (vlan_oob) {
			/* header template below will carry an inserted VLAN
			 * header, so all offsets shift by VLAN_HLEN */
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;
			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;
		}

		opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
				TX_TCP_LSO6 : TX_TCP_LSO;
		tso = 1;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4proto;

		/* plain checksum offload: pick opcode from L3/L4 protocol */
		if (protocol == cpu_to_be16(ETH_P_IP)) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}

	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (!tso)
		return;

	/* For LSO, we need to copy the MAC/IP/TCP headers into
	 * the descriptor ring
	 */
	producer = tx_ring->producer;
	copied = 0;
	/* first 2 bytes of each header descriptor are skipped —
	 * presumably reserved for firmware; verify against the
	 * cmd_desc_type0 layout */
	offset = 2;

	if (vlan_oob) {
		/* Create a TSO vlan header template for firmware */

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				hdr_len + VLAN_HLEN);

		/* copy dst+src MAC (12 bytes), insert the 802.1Q tag, then
		 * the rest of the headers after the 4-byte tag */
		vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
		skb_copy_from_linear_data(skb, vh, 12);
		vh->h_vlan_proto = htons(ETH_P_8021Q);
		vh->h_vlan_TCI = htons(vid);
		skb_copy_from_linear_data_offset(skb, 12,
				(char *)vh + 16, copy_len - 16);

		copied = copy_len - VLAN_HLEN;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	/* spill remaining header bytes across further descriptors */
	while (copied < hdr_len) {

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				(hdr_len - copied));

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		skb_copy_from_linear_data_offset(skb, copied,
				(char *)hwdesc + offset, copy_len);

		copied += copy_len;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	tx_ring->producer = producer;
	/* compiler barrier: descriptor writes complete before the caller
	 * publishes the new producer index to hardware */
	barrier();
}
1434
static int
qlcnic_map_tx_skb(struct pci_dev *pdev,
		struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
{
	/*
	 * DMA-map the skb head into frag_array[0] and each page fragment
	 * into frag_array[1..nr_frags].  On any mapping failure, unmaps
	 * everything mapped so far and returns -ENOMEM; on success the
	 * caller owns the mappings (released in qlcnic_process_cmd_ring).
	 */
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data,
			skb_headlen(skb), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];

		map = pci_map_page(pdev, frag->page, frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, map))
			goto unwind;

		nf->dma = map;
		nf->length = frag->size;
	}

	return 0;

unwind:
	/* release fragments 0..i-1 (array slots 1..i), then the head */
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}
1482
1483static inline void
1484qlcnic_clear_cmddesc(u64 *desc)
1485{
1486 desc[0] = 0ULL;
1487 desc[2] = 0ULL;
1488}
1489
static netdev_tx_t
qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	/*
	 * .ndo_start_xmit: DMA-map the skb, pack up to 4 buffer addresses
	 * per command descriptor, fill offload fields and kick the HW
	 * producer.  Returns NETDEV_TX_BUSY (queue stopped) when the ring
	 * lacks room; drops the packet (still NETDEV_TX_OK) if DMA mapping
	 * fails.
	 */
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	int i, k;

	u32 producer;
	int frag_count, no_of_desc;
	u32 num_txd = tx_ring->num_desc;

	/* head + page fragments */
	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 4 fragments per cmd des */
	no_of_desc = (frag_count + 3) >> 2;

	/* +2 reserves headroom for the TSO header-template descriptors */
	if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];

	pdev = adapter->pdev;

	if (qlcnic_map_tx_skb(pdev, skb, pbuf))
		goto drop_packet;

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	first_desc = hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {

		/* each descriptor holds 4 buffer address/length slots */
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];

		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);

	/* may consume further descriptors for the TSO header template */
	qlcnic_tso_check(netdev, tx_ring, first_desc, skb);

	/* publish the new producer index to the hardware */
	qlcnic_update_cmd_producer(adapter, tx_ring);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	return NETDEV_TX_OK;

drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
1579
1580static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1581{
1582 struct net_device *netdev = adapter->netdev;
1583 u32 temp, temp_state, temp_val;
1584 int rv = 0;
1585
1586 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1587
1588 temp_state = qlcnic_get_temp_state(temp);
1589 temp_val = qlcnic_get_temp_val(temp);
1590
1591 if (temp_state == QLCNIC_TEMP_PANIC) {
1592 dev_err(&netdev->dev,
1593 "Device temperature %d degrees C exceeds"
1594 " maximum allowed. Hardware has been shut down.\n",
1595 temp_val);
1596 rv = 1;
1597 } else if (temp_state == QLCNIC_TEMP_WARN) {
1598 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1599 dev_err(&netdev->dev,
1600 "Device temperature %d degrees C "
1601 "exceeds operating range."
1602 " Immediate action needed.\n",
1603 temp_val);
1604 }
1605 } else {
1606 if (adapter->temp == QLCNIC_TEMP_WARN) {
1607 dev_info(&netdev->dev,
1608 "Device temperature is now %d degrees C"
1609 " in normal range.\n", temp_val);
1610 }
1611 }
1612 adapter->temp = temp_state;
1613 return rv;
1614}
1615
1616void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1617{
1618 struct net_device *netdev = adapter->netdev;
1619
1620 if (adapter->ahw.linkup && !linkup) {
1621 dev_info(&netdev->dev, "NIC Link is down\n");
1622 adapter->ahw.linkup = 0;
1623 if (netif_running(netdev)) {
1624 netif_carrier_off(netdev);
1625 netif_stop_queue(netdev);
1626 }
1627 } else if (!adapter->ahw.linkup && linkup) {
1628 dev_info(&netdev->dev, "NIC Link is up\n");
1629 adapter->ahw.linkup = 1;
1630 if (netif_running(netdev)) {
1631 netif_carrier_on(netdev);
1632 netif_wake_queue(netdev);
1633 }
1634 }
1635}
1636
1637static void qlcnic_tx_timeout(struct net_device *netdev)
1638{
1639 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1640
1641 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1642 return;
1643
1644 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1645 schedule_work(&adapter->tx_timeout_task);
1646}
1647
static void qlcnic_tx_timeout_task(struct work_struct *work)
{
	/*
	 * Deferred TX-timeout handler: first try a lightweight context
	 * reset; after QLCNIC_MAX_TX_TIMEOUTS consecutive timeouts (or if
	 * the context reset fails) escalate to a full firmware reset via
	 * need_fw_reset, which the health poller acts on.
	 */
	struct qlcnic_adapter *adapter =
		container_of(work, struct qlcnic_adapter, tx_timeout_task);

	if (!netif_running(adapter->netdev))
		return;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return;

	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
		goto request_reset;

	/* qlcnic_reset_context() takes the RESETTING bit itself */
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	if (!qlcnic_reset_context(adapter)) {
		adapter->netdev->trans_start = jiffies;
		return;

	}
	/* context reset failed, fall through for fw reset */

request_reset:
	adapter->need_fw_reset = 1;
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
1674
1675static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1676{
1677 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1678 struct net_device_stats *stats = &netdev->stats;
1679
1680 memset(stats, 0, sizeof(*stats));
1681
1682 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1683 stats->tx_packets = adapter->stats.xmitfinished;
1684 stats->rx_bytes = adapter->stats.rxbytes;
1685 stats->tx_bytes = adapter->stats.txbytes;
1686 stats->rx_dropped = adapter->stats.rxdropped;
1687 stats->tx_dropped = adapter->stats.txdropped;
1688
1689 return stats;
1690}
1691
static irqreturn_t qlcnic_intr(int irq, void *data)
{
	/*
	 * Legacy (INTx) interrupt handler; data is the sds ring registered
	 * with request_irq.  Verifies the interrupt belongs to this
	 * function (shared line), acks it, and hands work to NAPI.
	 */
	struct qlcnic_host_sds_ring *sds_ring = data;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	u32 status;

	status = readl(adapter->isr_int_vec);

	if (!(status & adapter->int_vec_bit))
		return IRQ_NONE;

	/* check interrupt state machine, to be sure */
	status = readl(adapter->crb_int_state_reg);
	if (!ISR_LEGACY_INT_TRIGGERED(status))
		return IRQ_NONE;

	writel(0xffffffff, adapter->tgt_status_reg);
	/* read twice to ensure write is flushed */
	readl(adapter->isr_int_vec);
	readl(adapter->isr_int_vec);

	napi_schedule(&sds_ring->napi);

	return IRQ_HANDLED;
}
1717
1718static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1719{
1720 struct qlcnic_host_sds_ring *sds_ring = data;
1721 struct qlcnic_adapter *adapter = sds_ring->adapter;
1722
1723 /* clear interrupt */
1724 writel(0xffffffff, adapter->tgt_status_reg);
1725
1726 napi_schedule(&sds_ring->napi);
1727 return IRQ_HANDLED;
1728}
1729
1730static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1731{
1732 struct qlcnic_host_sds_ring *sds_ring = data;
1733
1734 napi_schedule(&sds_ring->napi);
1735 return IRQ_HANDLED;
1736}
1737
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
{
	/*
	 * Reclaim completed TX descriptors: unmap DMA, free skbs and
	 * advance sw_consumer toward the firmware's hw_consumer, bounded
	 * by MAX_STATUS_HANDLE per call.  Returns nonzero ("done") when
	 * the ring is fully drained — qlcnic_poll() uses this to decide
	 * whether NAPI may complete.
	 */
	u32 sw_consumer, hw_consumer;
	int count = 0, i;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;
	int done;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	/* trylock: if __qlcnic_down() holds the lock we just claim "done"
	 * and let a later poll retry */
	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			/* slot 0 is the mapped skb head; 1..frag_count-1
			 * are page fragments */
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;

		/* order sw_consumer publish vs. the queue-stopped test */
		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			__netif_tx_lock(tx_ring->txq, smp_processor_id());
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->tx_timeo_cnt = 0;
			}
			__netif_tx_unlock(tx_ring->txq);
		}
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}
1812
1813static int qlcnic_poll(struct napi_struct *napi, int budget)
1814{
1815 struct qlcnic_host_sds_ring *sds_ring =
1816 container_of(napi, struct qlcnic_host_sds_ring, napi);
1817
1818 struct qlcnic_adapter *adapter = sds_ring->adapter;
1819
1820 int tx_complete;
1821 int work_done;
1822
1823 tx_complete = qlcnic_process_cmd_ring(adapter);
1824
1825 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1826
1827 if ((work_done < budget) && tx_complete) {
1828 napi_complete(&sds_ring->napi);
1829 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1830 qlcnic_enable_int(sds_ring);
1831 }
1832
1833 return work_done;
1834}
1835
1836#ifdef CONFIG_NET_POLL_CONTROLLER
1837static void qlcnic_poll_controller(struct net_device *netdev)
1838{
1839 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1840 disable_irq(adapter->irq);
1841 qlcnic_intr(adapter->irq, adapter);
1842 enable_irq(adapter->irq);
1843}
1844#endif
1845
1846static void
1847qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1848{
1849 u32 val;
1850
1851 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1852 state != QLCNIC_DEV_NEED_QUISCENT);
1853
1854 if (qlcnic_api_lock(adapter))
1855 return ;
1856
1857 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1858
1859 if (state == QLCNIC_DEV_NEED_RESET)
1860 val |= ((u32)0x1 << (adapter->portnum * 4));
1861 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1862 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
1863
1864 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1865
1866 qlcnic_api_unlock(adapter);
1867}
1868
static void
qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
{
	/*
	 * Withdraw this function from the shared driver-state registers:
	 * clear our presence bit in DEV_REF_COUNT (each function owns one
	 * bit per nibble, hence the 0x11111111 masks) and both of our
	 * state bits in DRV_STATE.  If we were the last active function,
	 * move the device state back to COLD.  Also resets the local
	 * failure counter and the START_FW/RESETTING flags.
	 */
	u32 val;

	if (qlcnic_api_lock(adapter))
		goto err;

	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
	val &= ~((u32)0x1 << (adapter->portnum * 4));
	QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);

	/* no function holds a reference any more: park the device */
	if (!(val & 0x11111111))
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);

	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
	val &= ~((u32)0x3 << (adapter->portnum * 4));
	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);

	qlcnic_api_unlock(adapter);
err:
	adapter->fw_fail_cnt = 0;
	clear_bit(__QLCNIC_START_FW, &adapter->state);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
1894
1895static int
1896qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
1897{
1898 int act, state;
1899
1900 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1901 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1902
1903 if (((state & 0x11111111) == (act & 0x11111111)) ||
1904 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
1905 return 0;
1906 else
1907 return 1;
1908}
1909
static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
	/*
	 * Multi-function arbitration for firmware init, driven by the
	 * shared DEV_STATE register under the hardware API lock.
	 * Returns 1 if this function must load/start the firmware,
	 * 0 if another function brought it up (just proceed),
	 * -1 on failure/timeout.
	 */
	u32 val, prev_state;
	int cnt = 0;
	int portnum = adapter->portnum;

	if (qlcnic_api_lock(adapter))
		return -1;

	/* register our presence bit; if already set and START_FW was
	 * flagged (e.g. by the reset path), we own the firmware start */
	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
	if (!(val & ((int)0x1 << (portnum * 4)))) {
		val |= ((u32)0x1 << (portnum * 4));
		QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
	} else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
		goto start_fw;
	}

	prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);

	switch (prev_state) {
	case QLCNIC_DEV_COLD:
start_fw:
		/* claim the init: other functions will see INITALIZING
		 * and wait in the polling loop below */
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
		qlcnic_api_unlock(adapter);
		return 1;

	case QLCNIC_DEV_READY:
		qlcnic_api_unlock(adapter);
		return 0;

	case QLCNIC_DEV_NEED_RESET:
		/* ack the pending reset for this function */
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		val |= ((u32)0x1 << (portnum * 4));
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_NEED_QUISCENT:
		/* ack the pending quiescence for this function */
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		val |= ((u32)0x1 << ((portnum * 4) + 1));
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_FAILED:
		qlcnic_api_unlock(adapter);
		return -1;
	}

	/* another function is initializing: wait up to ~20s for READY */
	qlcnic_api_unlock(adapter);
	msleep(1000);
	while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
			++cnt < 20)
		msleep(1000);

	if (cnt >= 20)
		return -1;

	if (qlcnic_api_lock(adapter))
		return -1;

	/* clear both of our ack bits now that the device is READY */
	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
	val &= ~((u32)0x3 << (portnum * 4));
	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);

	qlcnic_api_unlock(adapter);

	return 0;
}
1978
static void
qlcnic_fwinit_work(struct work_struct *work)
{
	/*
	 * Recovery worker: once all active functions have acked the reset
	 * (qlcnic_check_drv_state), the function owning __QLCNIC_START_FW
	 * restarts the firmware; the others poll DEV_STATE until it goes
	 * READY.  Re-schedules itself while waiting, gives up after
	 * FW_POLL_THRESH attempts and clears all driver state.
	 */
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	int dev_state;

	if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
		goto err_ret;

	if (test_bit(__QLCNIC_START_FW, &adapter->state)) {

		/* not all peers have acked yet: poll again later */
		if (qlcnic_check_drv_state(adapter)) {
			qlcnic_schedule_work(adapter,
					qlcnic_fwinit_work, FW_POLL_DELAY);
			return;
		}

		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}

		goto err_ret;
	}

	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	switch (dev_state) {
	case QLCNIC_DEV_READY:
		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}
		/* fall through: firmware start failed, treat as FAILED */
	case QLCNIC_DEV_FAILED:
		break;

	default:
		/* still INITALIZING (or transitioning): check back later */
		qlcnic_schedule_work(adapter,
			qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
		return;
	}

err_ret:
	qlcnic_clr_all_drv_state(adapter);
}
2024
static void
qlcnic_detach_work(struct work_struct *work)
{
	/*
	 * Recovery worker, phase 1: quiesce and detach this function, then
	 * either give up (fatal firmware error or thermal panic) or ack
	 * the requested transition and hand over to qlcnic_fwinit_work.
	 */
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	u32 status;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	qlcnic_detach(adapter);

	status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);

	if (status & QLCNIC_RCODE_FATAL_ERROR)
		goto err_ret;

	if (adapter->temp == QLCNIC_TEMP_PANIC)
		goto err_ret;

	/* ack the reset/quiescent request recorded by the health poller */
	qlcnic_set_drv_state(adapter, adapter->dev_state);

	adapter->fw_wait_cnt = 0;

	qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);

	return;

err_ret:
	qlcnic_clr_all_drv_state(adapter);

}
2059
/*
 * Request a firmware reset by moving the shared device state register
 * to NEED_RESET.  Runs under the inter-function hardware lock so that
 * only one PCI function transitions the state at a time; silently
 * bails out if the lock cannot be taken.
 */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
	u32 state;

	if (qlcnic_api_lock(adapter))
		return;

	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);

	/* Only request a reset if no init/reset is already in flight. */
	if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
		set_bit(__QLCNIC_START_FW, &adapter->state);
	}

	qlcnic_api_unlock(adapter);
}
2077
/*
 * (Re)arm the adapter's single shared fw_work delayed-work item with a
 * new handler function.
 * NOTE(review): re-initializing a possibly pending work item assumes
 * the previous handler has already completed or been cancelled —
 * confirm callers serialize via __QLCNIC_RESETTING.
 */
static void
qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay)
{
	INIT_DELAYED_WORK(&adapter->fw_work, func);
	schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
}
2085
/*
 * Claim the __QLCNIC_RESETTING bit (busy-waiting until any current
 * owner releases it), then synchronously cancel the delayed firmware
 * work.  The caller is left holding the RESETTING bit.
 */
static void
qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(10);

	cancel_delayed_work_sync(&adapter->fw_work);
}
2094
/*
 * Delayed-work handler: re-create resources and bring the interface
 * back up after firmware has been (re)started, then resume the
 * periodic firmware health poll.
 */
static void
qlcnic_attach_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
				struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev)) {
		err = qlcnic_attach(adapter);
		if (err)
			goto done;

		err = qlcnic_up(adapter, netdev);
		if (err) {
			qlcnic_detach(adapter);
			goto done;
		}

		/* Re-program destination IP addresses lost across reset. */
		qlcnic_config_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);

done:
	adapter->fw_fail_cnt = 0;
	/* Release the bit taken before the detach/recovery sequence. */
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
}
2124
/*
 * Periodic firmware health check: looks at device temperature, reset
 * requests from other functions, and the firmware alive counter.
 *
 * Returns 0 when the firmware is healthy, 1 after a detach/recovery
 * work item has been scheduled (or was already running).
 */
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
	u32 state = 0, heartbit;
	struct net_device *netdev = adapter->netdev;

	if (qlcnic_check_temp(adapter))
		goto detach;

	if (adapter->need_fw_reset) {
		qlcnic_dev_request_reset(adapter);
		goto detach;
	}

	/* Another PCI function may have requested a reset or quiesce. */
	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
		adapter->need_fw_reset = 1;

	/* A changing alive counter means firmware is still running. */
	heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
	if (heartbit != adapter->heartbit) {
		adapter->heartbit = heartbit;
		adapter->fw_fail_cnt = 0;
		if (adapter->need_fw_reset)
			goto detach;
		return 0;
	}

	/* Heartbeat stalled: tolerate a few polls before declaring a hang. */
	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
		return 0;

	qlcnic_dev_request_reset(adapter);

	clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);

	dev_info(&netdev->dev, "firmware hang detected\n");

detach:
	/* Preserve a quiesce request; anything else becomes NEED_RESET. */
	adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
		QLCNIC_DEV_NEED_RESET;

	/* test_and_set ensures only one context runs the detach work. */
	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
		!test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);

	return 1;
}
2171
2172static void
2173qlcnic_fw_poll_work(struct work_struct *work)
2174{
2175 struct qlcnic_adapter *adapter = container_of(work,
2176 struct qlcnic_adapter, fw_work.work);
2177
2178 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2179 goto reschedule;
2180
2181
2182 if (qlcnic_check_health(adapter))
2183 return;
2184
2185reschedule:
2186 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2187}
2188
2189static ssize_t
2190qlcnic_store_bridged_mode(struct device *dev,
2191 struct device_attribute *attr, const char *buf, size_t len)
2192{
2193 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2194 unsigned long new;
2195 int ret = -EINVAL;
2196
2197 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2198 goto err_out;
2199
2200 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2201 goto err_out;
2202
2203 if (strict_strtoul(buf, 2, &new))
2204 goto err_out;
2205
2206 if (!qlcnic_config_bridged_mode(adapter, !!new))
2207 ret = len;
2208
2209err_out:
2210 return ret;
2211}
2212
2213static ssize_t
2214qlcnic_show_bridged_mode(struct device *dev,
2215 struct device_attribute *attr, char *buf)
2216{
2217 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2218 int bridged_mode = 0;
2219
2220 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2221 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2222
2223 return sprintf(buf, "%d\n", bridged_mode);
2224}
2225
/* sysfs attribute "bridged_mode" (root read/write, world readable). */
static struct device_attribute dev_attr_bridged_mode = {
       .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
       .show = qlcnic_show_bridged_mode,
       .store = qlcnic_store_bridged_mode,
};
2231
2232static ssize_t
2233qlcnic_store_diag_mode(struct device *dev,
2234 struct device_attribute *attr, const char *buf, size_t len)
2235{
2236 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2237 unsigned long new;
2238
2239 if (strict_strtoul(buf, 2, &new))
2240 return -EINVAL;
2241
2242 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2243 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2244
2245 return len;
2246}
2247
2248static ssize_t
2249qlcnic_show_diag_mode(struct device *dev,
2250 struct device_attribute *attr, char *buf)
2251{
2252 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2253
2254 return sprintf(buf, "%d\n",
2255 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2256}
2257
/* sysfs attribute "diag_mode" (root read/write, world readable). */
static struct device_attribute dev_attr_diag_mode = {
	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_diag_mode,
	.store = qlcnic_store_diag_mode,
};
2263
2264static int
2265qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2266 loff_t offset, size_t size)
2267{
2268 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2269 return -EIO;
2270
2271 if ((size != 4) || (offset & 0x3))
2272 return -EINVAL;
2273
2274 if (offset < QLCNIC_PCI_CRBSPACE)
2275 return -EINVAL;
2276
2277 return 0;
2278}
2279
2280static ssize_t
2281qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2282 char *buf, loff_t offset, size_t size)
2283{
2284 struct device *dev = container_of(kobj, struct device, kobj);
2285 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2286 u32 data;
2287 int ret;
2288
2289 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2290 if (ret != 0)
2291 return ret;
2292
2293 data = QLCRD32(adapter, offset);
2294 memcpy(buf, &data, size);
2295 return size;
2296}
2297
2298static ssize_t
2299qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2300 char *buf, loff_t offset, size_t size)
2301{
2302 struct device *dev = container_of(kobj, struct device, kobj);
2303 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2304 u32 data;
2305 int ret;
2306
2307 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2308 if (ret != 0)
2309 return ret;
2310
2311 memcpy(&data, buf, size);
2312 QLCWR32(adapter, offset, data);
2313 return size;
2314}
2315
2316static int
2317qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2318 loff_t offset, size_t size)
2319{
2320 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2321 return -EIO;
2322
2323 if ((size != 8) || (offset & 0x7))
2324 return -EIO;
2325
2326 return 0;
2327}
2328
2329static ssize_t
2330qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2331 char *buf, loff_t offset, size_t size)
2332{
2333 struct device *dev = container_of(kobj, struct device, kobj);
2334 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2335 u64 data;
2336 int ret;
2337
2338 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2339 if (ret != 0)
2340 return ret;
2341
2342 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2343 return -EIO;
2344
2345 memcpy(buf, &data, size);
2346
2347 return size;
2348}
2349
2350static ssize_t
2351qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
2352 char *buf, loff_t offset, size_t size)
2353{
2354 struct device *dev = container_of(kobj, struct device, kobj);
2355 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2356 u64 data;
2357 int ret;
2358
2359 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2360 if (ret != 0)
2361 return ret;
2362
2363 memcpy(&data, buf, size);
2364
2365 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2366 return -EIO;
2367
2368 return size;
2369}
2370
2371
/* sysfs binary attribute "crb": raw CRB register access (diag mode). */
static struct bin_attribute bin_attr_crb = {
	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_crb,
	.write = qlcnic_sysfs_write_crb,
};
2378
/* sysfs binary attribute "mem": raw adapter memory access (diag mode). */
static struct bin_attribute bin_attr_mem = {
	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_mem,
	.write = qlcnic_sysfs_write_mem,
};
2385
2386static void
2387qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2388{
2389 struct device *dev = &adapter->pdev->dev;
2390
2391 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2392 if (device_create_file(dev, &dev_attr_bridged_mode))
2393 dev_warn(dev,
2394 "failed to create bridged_mode sysfs entry\n");
2395}
2396
2397static void
2398qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2399{
2400 struct device *dev = &adapter->pdev->dev;
2401
2402 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2403 device_remove_file(dev, &dev_attr_bridged_mode);
2404}
2405
/*
 * Create the diagnostic sysfs entries: the "diag_mode" toggle plus the
 * raw "crb" and "mem" binary access files.  Creation failures are
 * logged but non-fatal.
 */
static void
qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (device_create_file(dev, &dev_attr_diag_mode))
		dev_info(dev, "failed to create diag_mode sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_crb))
		dev_info(dev, "failed to create crb sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_mem))
		dev_info(dev, "failed to create mem sysfs entry\n");
}
2418
2419
/* Remove the diagnostic sysfs entries created above. */
static void
qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	device_remove_file(dev, &dev_attr_diag_mode);
	device_remove_bin_file(dev, &bin_attr_crb);
	device_remove_bin_file(dev, &bin_attr_mem);
}
2429
2430#ifdef CONFIG_INET
2431
2432#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2433
2434static int
2435qlcnic_destip_supported(struct qlcnic_adapter *adapter)
2436{
2437 if (adapter->ahw.cut_through)
2438 return 0;
2439
2440 return 1;
2441}
2442
2443static void
2444qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2445{
2446 struct in_device *indev;
2447 struct qlcnic_adapter *adapter = netdev_priv(dev);
2448
2449 if (!qlcnic_destip_supported(adapter))
2450 return;
2451
2452 indev = in_dev_get(dev);
2453 if (!indev)
2454 return;
2455
2456 for_ifa(indev) {
2457 switch (event) {
2458 case NETDEV_UP:
2459 qlcnic_config_ipaddr(adapter,
2460 ifa->ifa_address, QLCNIC_IP_UP);
2461 break;
2462 case NETDEV_DOWN:
2463 qlcnic_config_ipaddr(adapter,
2464 ifa->ifa_address, QLCNIC_IP_DOWN);
2465 break;
2466 default:
2467 break;
2468 }
2469 } endfor_ifa(indev);
2470
2471 in_dev_put(indev);
2472 return;
2473}
2474
/*
 * netdevice notifier callback: forward UP/DOWN events on qlcnic
 * interfaces (including VLAN devices stacked on top of them) to the
 * firmware destination-IP configuration.
 */
static int qlcnic_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct qlcnic_adapter *adapter;
	struct net_device *dev = (struct net_device *)ptr;

recheck:
	if (dev == NULL)
		goto done;

	/* For a VLAN device, operate on the underlying real device. */
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}

	if (!is_qlcnic_netdev(dev))
		goto done;

	adapter = netdev_priv(dev);

	if (!adapter)
		goto done;

	/* Ignore events until the adapter has finished initialization. */
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto done;

	qlcnic_config_indev_addr(dev, event);
done:
	return NOTIFY_DONE;
}
2505
/*
 * inetaddr notifier callback: when a single IPv4 address is added to
 * or removed from a running qlcnic interface (or a VLAN on top of
 * one), program or remove that address in the firmware's
 * destination-IP table.
 */
static int
qlcnic_inetaddr_event(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	struct qlcnic_adapter *adapter;
	struct net_device *dev;

	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;

	dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;

recheck:
	if (dev == NULL || !netif_running(dev))
		goto done;

	/* For a VLAN device, operate on the underlying real device. */
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}

	if (!is_qlcnic_netdev(dev))
		goto done;

	adapter = netdev_priv(dev);

	/* Skip when unsupported (cut-through mode) or no adapter. */
	if (!adapter || !qlcnic_destip_supported(adapter))
		goto done;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto done;

	switch (event) {
	case NETDEV_UP:
		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
		break;
	case NETDEV_DOWN:
		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
		break;
	default:
		break;
	}

done:
	return NOTIFY_DONE;
}
2551
/* Notifier hooks for netdevice and IPv4 address events (CONFIG_INET). */
static struct notifier_block	qlcnic_netdev_cb = {
	.notifier_call = qlcnic_netdev_event,
};

static struct notifier_block qlcnic_inetaddr_cb = {
	.notifier_call = qlcnic_inetaddr_event,
};
2559#else
/* Without CONFIG_INET there is no destination-IP table to program. */
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
2563#endif
2564
/* PCI driver descriptor tying the probe/remove/PM hooks together. */
static struct pci_driver qlcnic_driver = {
	.name = qlcnic_driver_name,
	.id_table = qlcnic_pci_tbl,
	.probe = qlcnic_probe,
	.remove = __devexit_p(qlcnic_remove),
#ifdef CONFIG_PM
	.suspend = qlcnic_suspend,
	.resume = qlcnic_resume,
#endif
	.shutdown = qlcnic_shutdown
};
2576
2577static int __init qlcnic_init_module(void)
2578{
2579
2580 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2581
2582#ifdef CONFIG_INET
2583 register_netdevice_notifier(&qlcnic_netdev_cb);
2584 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2585#endif
2586
2587
2588 return pci_register_driver(&qlcnic_driver);
2589}
2590
2591module_init(qlcnic_init_module);
2592
/*
 * Module exit: unregister the PCI driver first so no new devices
 * appear, then the notifiers, in reverse order of qlcnic_init_module().
 */
static void __exit qlcnic_exit_module(void)
{

	pci_unregister_driver(&qlcnic_driver);

#ifdef CONFIG_INET
	unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
	unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
}
2603
2604module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf386..9169c4cf413 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -54,12 +54,8 @@
54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ 54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ 55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) 56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
57#define SMALL_BUFFER_SIZE 512
58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
59#define LARGE_BUFFER_MAX_SIZE 8192 57#define LARGE_BUFFER_MAX_SIZE 8192
60#define LARGE_BUFFER_MIN_SIZE 2048 58#define LARGE_BUFFER_MIN_SIZE 2048
61#define MAX_SPLIT_SIZE 1023
62#define QLGE_SB_PAD 32
63 59
64#define MAX_CQ 128 60#define MAX_CQ 128
65#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ 61#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +75,43 @@
79#define TX_DESC_PER_OAL 0 75#define TX_DESC_PER_OAL 0
80#endif 76#endif
81 77
78/* Word shifting for converting 64-bit
79 * address to a series of 16-bit words.
80 * This is used for some MPI firmware
81 * mailbox commands.
82 */
83#define LSW(x) ((u16)(x))
84#define MSW(x) ((u16)((u32)(x) >> 16))
85#define LSD(x) ((u32)((u64)(x)))
86#define MSD(x) ((u32)((((u64)(x)) >> 32)))
87
82/* MPI test register definitions. This register 88/* MPI test register definitions. This register
83 * is used for determining alternate NIC function's 89 * is used for determining alternate NIC function's
84 * PCI->func number. 90 * PCI->func number.
85 */ 91 */
86enum { 92enum {
87 MPI_TEST_FUNC_PORT_CFG = 0x1002, 93 MPI_TEST_FUNC_PORT_CFG = 0x1002,
94 MPI_TEST_FUNC_PRB_CTL = 0x100e,
95 MPI_TEST_FUNC_PRB_EN = 0x18a20000,
96 MPI_TEST_FUNC_RST_STS = 0x100a,
97 MPI_TEST_FUNC_RST_FRC = 0x00000003,
98 MPI_TEST_NIC_FUNC_MASK = 0x00000007,
99 MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
100 MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
88 MPI_TEST_NIC1_FUNC_SHIFT = 1, 101 MPI_TEST_NIC1_FUNC_SHIFT = 1,
102 MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
103 MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
89 MPI_TEST_NIC2_FUNC_SHIFT = 5, 104 MPI_TEST_NIC2_FUNC_SHIFT = 5,
90 MPI_TEST_NIC_FUNC_MASK = 0x00000007, 105 MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
106 MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
107 MPI_TEST_FC1_FUNCTION_SHIFT = 9,
108 MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
109 MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
110 MPI_TEST_FC2_FUNCTION_SHIFT = 13,
111
112 MPI_NIC_READ = 0x00000000,
113 MPI_NIC_REG_BLOCK = 0x00020000,
114 MPI_NIC_FUNCTION_SHIFT = 6,
91}; 115};
92 116
93/* 117/*
@@ -468,7 +492,7 @@ enum {
468 MDIO_PORT = 0x00000440, 492 MDIO_PORT = 0x00000440,
469 MDIO_STATUS = 0x00000450, 493 MDIO_STATUS = 0x00000450,
470 494
471 /* XGMAC AUX statistics registers */ 495 XGMAC_REGISTER_END = 0x00000740,
472}; 496};
473 497
474/* 498/*
@@ -509,6 +533,7 @@ enum {
509enum { 533enum {
510 MAC_ADDR_IDX_SHIFT = 4, 534 MAC_ADDR_IDX_SHIFT = 4,
511 MAC_ADDR_TYPE_SHIFT = 16, 535 MAC_ADDR_TYPE_SHIFT = 16,
536 MAC_ADDR_TYPE_COUNT = 10,
512 MAC_ADDR_TYPE_MASK = 0x000f0000, 537 MAC_ADDR_TYPE_MASK = 0x000f0000,
513 MAC_ADDR_TYPE_CAM_MAC = 0x00000000, 538 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
514 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, 539 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +551,30 @@ enum {
526 MAC_ADDR_MR = (1 << 30), 551 MAC_ADDR_MR = (1 << 30),
527 MAC_ADDR_MW = (1 << 31), 552 MAC_ADDR_MW = (1 << 31),
528 MAX_MULTICAST_ENTRIES = 32, 553 MAX_MULTICAST_ENTRIES = 32,
554
555 /* Entry count and words per entry
556 * for each address type in the filter.
557 */
558 MAC_ADDR_MAX_CAM_ENTRIES = 512,
559 MAC_ADDR_MAX_CAM_WCOUNT = 3,
560 MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
561 MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
562 MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
563 MAC_ADDR_MAX_VLAN_WCOUNT = 1,
564 MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
565 MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
566 MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
567 MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
568 MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
569 MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
570 MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
571 MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
572 MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
573 MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
574 MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
575 MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
576 MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
577 MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
529}; 578};
530 579
531/* 580/*
@@ -596,6 +645,7 @@ enum {
596enum { 645enum {
597 RT_IDX_IDX_SHIFT = 8, 646 RT_IDX_IDX_SHIFT = 8,
598 RT_IDX_TYPE_MASK = 0x000f0000, 647 RT_IDX_TYPE_MASK = 0x000f0000,
648 RT_IDX_TYPE_SHIFT = 16,
599 RT_IDX_TYPE_RT = 0x00000000, 649 RT_IDX_TYPE_RT = 0x00000000,
600 RT_IDX_TYPE_RT_INV = 0x00010000, 650 RT_IDX_TYPE_RT_INV = 0x00010000,
601 RT_IDX_TYPE_NICQ = 0x00020000, 651 RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +714,89 @@ enum {
664 RT_IDX_UNUSED013 = 13, 714 RT_IDX_UNUSED013 = 13,
665 RT_IDX_UNUSED014 = 14, 715 RT_IDX_UNUSED014 = 14,
666 RT_IDX_PROMISCUOUS_SLOT = 15, 716 RT_IDX_PROMISCUOUS_SLOT = 15,
667 RT_IDX_MAX_SLOTS = 16, 717 RT_IDX_MAX_RT_SLOTS = 8,
718 RT_IDX_MAX_NIC_SLOTS = 16,
719};
720
721/*
722 * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
723 */
724enum {
725 XG_SERDES_ADDR_RDY = (1 << 31),
726 XG_SERDES_ADDR_R = (1 << 30),
727
728 XG_SERDES_ADDR_STS = 0x00001E06,
729 XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
730 XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
731 XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
732
733 /* Serdes coredump definitions. */
734 XG_SERDES_XAUI_AN_START = 0x00000000,
735 XG_SERDES_XAUI_AN_END = 0x00000034,
736 XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
737 XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
738 XG_SERDES_XFI_AN_START = 0x00001000,
739 XG_SERDES_XFI_AN_END = 0x00001034,
740 XG_SERDES_XFI_TRAIN_START = 0x10001050,
741 XG_SERDES_XFI_TRAIN_END = 0x1000107C,
742 XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
743 XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
744 XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
745 XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
746 XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
747 XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
748 XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
749 XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
750};
751
752/*
753 * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
754 */
755enum {
756 PRB_MX_ADDR_ARE = (1 << 16),
757 PRB_MX_ADDR_UP = (1 << 15),
758 PRB_MX_ADDR_SWP = (1 << 14),
759
760 /* Module select values. */
761 PRB_MX_ADDR_MAX_MODS = 21,
762 PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
763 PRB_MX_ADDR_MOD_SEL_TBD = 0,
764 PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
765 PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
766 PRB_MX_ADDR_MOD_SEL_FRB = 3,
767 PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
768 PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
769 PRB_MX_ADDR_MOD_SEL_DA1 = 6,
770 PRB_MX_ADDR_MOD_SEL_DA2 = 7,
771 PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
772 PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
773 PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
774 PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
775 PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
776 PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
777 PRB_MX_ADDR_MOD_SEL_REG = 14,
778 PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
779 PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
780 PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
781 PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
782 PRB_MX_ADDR_MOD_SEL_MOP = 20,
783 /* Bit fields indicating which modules
784 * are valid for each clock domain.
785 */
786 PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
787 PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
788 PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
789 PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
790 PRB_MX_ADDR_VALID_TOTAL = 34,
791
792 /* Clock domain values. */
793 PRB_MX_ADDR_CLOCK_SHIFT = 6,
794 PRB_MX_ADDR_SYS_CLOCK = 0,
795 PRB_MX_ADDR_PCI_CLOCK = 2,
796 PRB_MX_ADDR_FC_CLOCK = 5,
797 PRB_MX_ADDR_XGM_CLOCK = 6,
798
799 PRB_MX_ADDR_MAX_MUX = 64,
668}; 800};
669 801
670/* 802/*
@@ -737,6 +869,21 @@ enum {
737 PRB_MX_DATA = 0xfc, /* Use semaphore */ 869 PRB_MX_DATA = 0xfc, /* Use semaphore */
738}; 870};
739 871
872#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
873#define SMALL_BUFFER_SIZE 256
874#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
875#define SPLT_SETTING FSC_DBRST_1024
876#define SPLT_LEN 0
877#define QLGE_SB_PAD 0
878#else
879#define SMALL_BUFFER_SIZE 512
880#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
881#define SPLT_SETTING FSC_SH
882#define SPLT_LEN (SPLT_HDR_EP | \
883 min(SMALL_BUF_MAP_SIZE, 1023))
884#define QLGE_SB_PAD 32
885#endif
886
740/* 887/*
741 * CAM output format. 888 * CAM output format.
742 */ 889 */
@@ -1421,7 +1568,7 @@ struct nic_stats {
1421 u64 rx_nic_fifo_drop; 1568 u64 rx_nic_fifo_drop;
1422}; 1569};
1423 1570
1424/* Address/Length pairs for the coredump. */ 1571/* Firmware coredump internal register address/length pairs. */
1425enum { 1572enum {
1426 MPI_CORE_REGS_ADDR = 0x00030000, 1573 MPI_CORE_REGS_ADDR = 0x00030000,
1427 MPI_CORE_REGS_CNT = 127, 1574 MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1623,7 @@ struct mpi_coredump_segment_header {
1476 u8 description[16]; 1623 u8 description[16];
1477}; 1624};
1478 1625
1479/* Reg dump segment numbers. */ 1626/* Firmware coredump header segment numbers. */
1480enum { 1627enum {
1481 CORE_SEG_NUM = 1, 1628 CORE_SEG_NUM = 1,
1482 TEST_LOGIC_SEG_NUM = 2, 1629 TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1674,67 @@ enum {
1527 1674
1528}; 1675};
1529 1676
1677/* There are 64 generic NIC registers. */
1678#define NIC_REGS_DUMP_WORD_COUNT 64
1679/* XGMAC word count. */
1680#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
1681/* Word counts for the SERDES blocks. */
1682#define XG_SERDES_XAUI_AN_COUNT 14
1683#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
1684#define XG_SERDES_XFI_AN_COUNT 14
1685#define XG_SERDES_XFI_TRAIN_COUNT 12
1686#define XG_SERDES_XFI_HSS_PCS_COUNT 15
1687#define XG_SERDES_XFI_HSS_TX_COUNT 32
1688#define XG_SERDES_XFI_HSS_RX_COUNT 32
1689#define XG_SERDES_XFI_HSS_PLL_COUNT 32
1690
1691/* There are 2 CNA ETS and 8 NIC ETS registers. */
1692#define ETS_REGS_DUMP_WORD_COUNT 10
1693
1694/* Each probe mux entry stores the probe type plus 64 entries
1695 * that are each 64 bits in length. There are a total of
1696 * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
1697 */
1698#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
1699#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
1700 PRB_MX_ADDR_VALID_TOTAL)
1701/* Each routing entry consists of 4 32-bit words.
1702 * They are route type, index, index word, and result.
1703 * There are 2 route blocks with 8 entries each and
1704 * 2 NIC blocks with 16 entries each.
1705 * The total number of entries is 48 with 4 words each.
1706 */
1707#define RT_IDX_DUMP_ENTRIES 48
1708#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
1709#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
1710 RT_IDX_DUMP_WORDS_PER_ENTRY)
1711/* There are 10 address blocks in filter, each with
1712 * different entry counts and different word-count-per-entry.
1713 */
1714#define MAC_ADDR_DUMP_ENTRIES \
1715 ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
1716 (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
1717 (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
1718 (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
1719 (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
1720 (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
1721 (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
1722 (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
1723 (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
1724 (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
1725#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
1726#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
1727 MAC_ADDR_DUMP_WORDS_PER_ENTRY)
1728/* Maximum of 4 functions whose semaphore registers are
1729 * in the coredump.
1730 */
1731#define MAX_SEMAPHORE_FUNCTIONS 4
1732/* Defines for access the MPI shadow registers. */
1733#define RISC_124 0x0003007c
1734#define RISC_127 0x0003007f
1735#define SHADOW_OFFSET 0xb0000000
1736#define SHADOW_REG_SHIFT 20
1737
1530struct ql_nic_misc { 1738struct ql_nic_misc {
1531 u32 rx_ring_count; 1739 u32 rx_ring_count;
1532 u32 tx_ring_count; 1740 u32 tx_ring_count;
@@ -1568,6 +1776,199 @@ struct ql_reg_dump {
1568 u32 ets[8+2]; 1776 u32 ets[8+2];
1569}; 1777};
1570 1778
1779struct ql_mpi_coredump {
1780 /* segment 0 */
1781 struct mpi_coredump_global_header mpi_global_header;
1782
1783 /* segment 1 */
1784 struct mpi_coredump_segment_header core_regs_seg_hdr;
1785 u32 mpi_core_regs[MPI_CORE_REGS_CNT];
1786 u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
1787
1788 /* segment 2 */
1789 struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
1790 u32 test_logic_regs[TEST_REGS_CNT];
1791
1792 /* segment 3 */
1793 struct mpi_coredump_segment_header rmii_regs_seg_hdr;
1794 u32 rmii_regs[RMII_REGS_CNT];
1795
1796 /* segment 4 */
1797 struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
1798 u32 fcmac1_regs[FCMAC_REGS_CNT];
1799
1800 /* segment 5 */
1801 struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
1802 u32 fcmac2_regs[FCMAC_REGS_CNT];
1803
1804 /* segment 6 */
1805 struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
1806 u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
1807
1808 /* segment 7 */
1809 struct mpi_coredump_segment_header ide_regs_seg_hdr;
1810 u32 ide_regs[IDE_REGS_CNT];
1811
1812 /* segment 8 */
1813 struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
1814 u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
1815
1816 /* segment 9 */
1817 struct mpi_coredump_segment_header smbus_regs_seg_hdr;
1818 u32 smbus_regs[SMBUS_REGS_CNT];
1819
1820 /* segment 10 */
1821 struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
1822 u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
1823
1824 /* segment 11 */
1825 struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
1826 u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
1827
1828 /* segment 12 */
1829 struct mpi_coredump_segment_header i2c_regs_seg_hdr;
1830 u32 i2c_regs[I2C_REGS_CNT];
1831 /* segment 13 */
1832 struct mpi_coredump_segment_header memc_regs_seg_hdr;
1833 u32 memc_regs[MEMC_REGS_CNT];
1834
1835 /* segment 14 */
1836 struct mpi_coredump_segment_header pbus_regs_seg_hdr;
1837 u32 pbus_regs[PBUS_REGS_CNT];
1838
1839 /* segment 15 */
1840 struct mpi_coredump_segment_header mde_regs_seg_hdr;
1841 u32 mde_regs[MDE_REGS_CNT];
1842
1843 /* segment 16 */
1844 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1845 u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
1846
1847 /* segment 17 */
1848 struct mpi_coredump_segment_header nic2_regs_seg_hdr;
1849 u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
1850
1851 /* segment 18 */
1852 struct mpi_coredump_segment_header xgmac1_seg_hdr;
1853 u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
1854
1855 /* segment 19 */
1856 struct mpi_coredump_segment_header xgmac2_seg_hdr;
1857 u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
1858
1859 /* segment 20 */
1860 struct mpi_coredump_segment_header code_ram_seg_hdr;
1861 u32 code_ram[CODE_RAM_CNT];
1862
1863 /* segment 21 */
1864 struct mpi_coredump_segment_header memc_ram_seg_hdr;
1865 u32 memc_ram[MEMC_RAM_CNT];
1866
1867 /* segment 22 */
1868 struct mpi_coredump_segment_header xaui_an_hdr;
1869 u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1870
1871 /* segment 23 */
1872 struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
1873 u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1874
1875 /* segment 24 */
1876 struct mpi_coredump_segment_header xfi_an_hdr;
1877 u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
1878
1879 /* segment 25 */
1880 struct mpi_coredump_segment_header xfi_train_hdr;
1881 u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1882
1883 /* segment 26 */
1884 struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
1885 u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1886
1887 /* segment 27 */
1888 struct mpi_coredump_segment_header xfi_hss_tx_hdr;
1889 u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1890
1891 /* segment 28 */
1892 struct mpi_coredump_segment_header xfi_hss_rx_hdr;
1893 u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1894
1895 /* segment 29 */
1896 struct mpi_coredump_segment_header xfi_hss_pll_hdr;
1897 u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1898
1899 /* segment 30 */
1900 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1901 struct ql_nic_misc misc_nic_info;
1902
1903 /* segment 31 */
1904 /* one interrupt state for each CQ */
1905 struct mpi_coredump_segment_header intr_states_seg_hdr;
1906 u32 intr_states[MAX_RX_RINGS];
1907
1908 /* segment 32 */
1909 /* 3 cam words each for 16 unicast,
1910 * 2 cam words for each of 32 multicast.
1911 */
1912 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1913 u32 cam_entries[(16 * 3) + (32 * 3)];
1914
1915 /* segment 33 */
1916 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1917 u32 nic_routing_words[16];
1918 /* segment 34 */
1919 struct mpi_coredump_segment_header ets_seg_hdr;
1920 u32 ets[ETS_REGS_DUMP_WORD_COUNT];
1921
1922 /* segment 35 */
1923 struct mpi_coredump_segment_header probe_dump_seg_hdr;
1924 u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
1925
1926 /* segment 36 */
1927 struct mpi_coredump_segment_header routing_reg_seg_hdr;
1928 u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
1929
1930 /* segment 37 */
1931 struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
1932 u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
1933
1934 /* segment 38 */
1935 struct mpi_coredump_segment_header xaui2_an_hdr;
1936 u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1937
1938 /* segment 39 */
1939 struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
1940 u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1941
1942 /* segment 40 */
1943 struct mpi_coredump_segment_header xfi2_an_hdr;
1944 u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
1945
1946 /* segment 41 */
1947 struct mpi_coredump_segment_header xfi2_train_hdr;
1948 u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1949
1950 /* segment 42 */
1951 struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
1952 u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1953
1954 /* segment 43 */
1955 struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
1956 u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1957
1958 /* segment 44 */
1959 struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
1960 u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1961
1962 /* segment 45 */
1963 struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
1964 u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1965
1966 /* segment 50 */
1967 /* semaphore register for all 5 functions */
1968 struct mpi_coredump_segment_header sem_regs_seg_hdr;
1969 u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
1970};
1971
1571/* 1972/*
1572 * intr_context structure is used during initialization 1973 * intr_context structure is used during initialization
1573 * to hook the interrupts. It is also used in a single 1974 * to hook the interrupts. It is also used in a single
@@ -1603,6 +2004,7 @@ enum {
1603 QL_CAM_RT_SET = 8, 2004 QL_CAM_RT_SET = 8,
1604 QL_SELFTEST = 9, 2005 QL_SELFTEST = 9,
1605 QL_LB_LINK_UP = 10, 2006 QL_LB_LINK_UP = 10,
2007 QL_FRC_COREDUMP = 11,
1606}; 2008};
1607 2009
1608/* link_status bit definitions */ 2010/* link_status bit definitions */
@@ -1724,6 +2126,8 @@ struct ql_adapter {
1724 u32 port_link_up; 2126 u32 port_link_up;
1725 u32 port_init; 2127 u32 port_init;
1726 u32 link_status; 2128 u32 link_status;
2129 struct ql_mpi_coredump *mpi_coredump;
2130 u32 core_is_dumped;
1727 u32 link_config; 2131 u32 link_config;
1728 u32 led_config; 2132 u32 led_config;
1729 u32 max_frame_size; 2133 u32 max_frame_size;
@@ -1736,6 +2140,7 @@ struct ql_adapter {
1736 struct delayed_work mpi_work; 2140 struct delayed_work mpi_work;
1737 struct delayed_work mpi_port_cfg_work; 2141 struct delayed_work mpi_port_cfg_work;
1738 struct delayed_work mpi_idc_work; 2142 struct delayed_work mpi_idc_work;
2143 struct delayed_work mpi_core_to_log;
1739 struct completion ide_completion; 2144 struct completion ide_completion;
1740 struct nic_operations *nic_ops; 2145 struct nic_operations *nic_ops;
1741 u16 device_id; 2146 u16 device_id;
@@ -1807,6 +2212,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
1807void ql_queue_fw_error(struct ql_adapter *qdev); 2212void ql_queue_fw_error(struct ql_adapter *qdev);
1808void ql_mpi_work(struct work_struct *work); 2213void ql_mpi_work(struct work_struct *work);
1809void ql_mpi_reset_work(struct work_struct *work); 2214void ql_mpi_reset_work(struct work_struct *work);
2215void ql_mpi_core_to_log(struct work_struct *work);
1810int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); 2216int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1811void ql_queue_asic_error(struct ql_adapter *qdev); 2217void ql_queue_asic_error(struct ql_adapter *qdev);
1812u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); 2218u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2223,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
1817int ql_mb_get_fw_state(struct ql_adapter *qdev); 2223int ql_mb_get_fw_state(struct ql_adapter *qdev);
1818int ql_cam_route_initialize(struct ql_adapter *qdev); 2224int ql_cam_route_initialize(struct ql_adapter *qdev);
1819int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 2225int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
2226int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2227int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2228int ql_pause_mpi_risc(struct ql_adapter *qdev);
2229int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2230int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2231 u32 ram_addr, int word_count);
2232int ql_core_dump(struct ql_adapter *qdev,
2233 struct ql_mpi_coredump *mpi_coredump);
2234int ql_mb_sys_err(struct ql_adapter *qdev);
1820int ql_mb_about_fw(struct ql_adapter *qdev); 2235int ql_mb_about_fw(struct ql_adapter *qdev);
1821int ql_wol(struct ql_adapter *qdev); 2236int ql_wol(struct ql_adapter *qdev);
1822int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2237int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2248,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1833 struct ql_reg_dump *mpi_coredump); 2248 struct ql_reg_dump *mpi_coredump);
1834netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2249netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
1835void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); 2250void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2251int ql_own_firmware(struct ql_adapter *qdev);
1836int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2252int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
1837 2253
1838#if 1 2254#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c471076..57df835147e 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,405 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3/* Read a NIC register from the alternate function. */
4static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
5 u32 reg)
6{
7 u32 register_to_read;
8 u32 reg_val;
9 unsigned int status = 0;
10
11 register_to_read = MPI_NIC_REG_BLOCK
12 | MPI_NIC_READ
13 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
14 | reg;
15 status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
16 if (status != 0)
17 return 0xffffffff;
18
19 return reg_val;
20}
21
22/* Write a NIC register from the alternate function. */
23static int ql_write_other_func_reg(struct ql_adapter *qdev,
24 u32 reg, u32 reg_val)
25{
26 u32 register_to_read;
27 int status = 0;
28
29 register_to_read = MPI_NIC_REG_BLOCK
30 | MPI_NIC_READ
31 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
32 | reg;
33 status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
34
35 return status;
36}
37
38static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
39 u32 bit, u32 err_bit)
40{
41 u32 temp;
42 int count = 10;
43
44 while (count) {
45 temp = ql_read_other_func_reg(qdev, reg);
46
47 /* check for errors */
48 if (temp & err_bit)
49 return -1;
50 else if (temp & bit)
51 return 0;
52 mdelay(10);
53 count--;
54 }
55 return -1;
56}
57
58static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
59 u32 *data)
60{
61 int status;
62
63 /* wait for reg to come ready */
64 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
65 XG_SERDES_ADDR_RDY, 0);
66 if (status)
67 goto exit;
68
69 /* set up for reg read */
70 ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
71
72 /* wait for reg to come ready */
73 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
74 XG_SERDES_ADDR_RDY, 0);
75 if (status)
76 goto exit;
77
78 /* get the data */
79 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
80exit:
81 return status;
82}
83
84/* Read out the SERDES registers */
85static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
86{
87 int status;
88
89 /* wait for reg to come ready */
90 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
91 if (status)
92 goto exit;
93
94 /* set up for reg read */
95 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
96
97 /* wait for reg to come ready */
98 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
99 if (status)
100 goto exit;
101
102 /* get the data */
103 *data = ql_read32(qdev, XG_SERDES_DATA);
104exit:
105 return status;
106}
107
108static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
109 u32 *direct_ptr, u32 *indirect_ptr,
110 unsigned int direct_valid, unsigned int indirect_valid)
111{
112 unsigned int status;
113
114 status = 1;
115 if (direct_valid)
116 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
117 /* Dead fill any failures or invalids. */
118 if (status)
119 *direct_ptr = 0xDEADBEEF;
120
121 status = 1;
122 if (indirect_valid)
123 status = ql_read_other_func_serdes_reg(
124 qdev, addr, indirect_ptr);
125 /* Dead fill any failures or invalids. */
126 if (status)
127 *indirect_ptr = 0xDEADBEEF;
128}
129
130static int ql_get_serdes_regs(struct ql_adapter *qdev,
131 struct ql_mpi_coredump *mpi_coredump)
132{
133 int status;
134 unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
135 unsigned int xaui_indirect_valid, i;
136 u32 *direct_ptr, temp;
137 u32 *indirect_ptr;
138
139 xfi_direct_valid = xfi_indirect_valid = 0;
140 xaui_direct_valid = xaui_indirect_valid = 1;
141
142 /* The XAUI needs to be read out per port */
143 if (qdev->func & 1) {
144 /* We are NIC 2 */
145 status = ql_read_other_func_serdes_reg(qdev,
146 XG_SERDES_XAUI_HSS_PCS_START, &temp);
147 if (status)
148 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
149 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
150 XG_SERDES_ADDR_XAUI_PWR_DOWN)
151 xaui_indirect_valid = 0;
152
153 status = ql_read_serdes_reg(qdev,
154 XG_SERDES_XAUI_HSS_PCS_START, &temp);
155 if (status)
156 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
157
158 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
159 XG_SERDES_ADDR_XAUI_PWR_DOWN)
160 xaui_direct_valid = 0;
161 } else {
162 /* We are NIC 1 */
163 status = ql_read_other_func_serdes_reg(qdev,
164 XG_SERDES_XAUI_HSS_PCS_START, &temp);
165 if (status)
166 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
167 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
168 XG_SERDES_ADDR_XAUI_PWR_DOWN)
169 xaui_indirect_valid = 0;
170
171 status = ql_read_serdes_reg(qdev,
172 XG_SERDES_XAUI_HSS_PCS_START, &temp);
173 if (status)
174 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
175 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
176 XG_SERDES_ADDR_XAUI_PWR_DOWN)
177 xaui_direct_valid = 0;
178 }
179
180 /*
181 * XFI register is shared so only need to read one
182 * functions and then check the bits.
183 */
184 status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
185 if (status)
186 temp = 0;
187
188 if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
189 XG_SERDES_ADDR_XFI1_PWR_UP) {
190 /* now see if i'm NIC 1 or NIC 2 */
191 if (qdev->func & 1)
192 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
193 xfi_indirect_valid = 1;
194 else
195 xfi_direct_valid = 1;
196 }
197 if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
198 XG_SERDES_ADDR_XFI2_PWR_UP) {
199 /* now see if i'm NIC 1 or NIC 2 */
200 if (qdev->func & 1)
201 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
202 xfi_direct_valid = 1;
203 else
204 xfi_indirect_valid = 1;
205 }
206
207 /* Get XAUI_AN register block. */
208 if (qdev->func & 1) {
209 /* Function 2 is direct */
210 direct_ptr = mpi_coredump->serdes2_xaui_an;
211 indirect_ptr = mpi_coredump->serdes_xaui_an;
212 } else {
213 /* Function 1 is direct */
214 direct_ptr = mpi_coredump->serdes_xaui_an;
215 indirect_ptr = mpi_coredump->serdes2_xaui_an;
216 }
217
218 for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
219 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
220 xaui_direct_valid, xaui_indirect_valid);
221
222 /* Get XAUI_HSS_PCS register block. */
223 if (qdev->func & 1) {
224 direct_ptr =
225 mpi_coredump->serdes2_xaui_hss_pcs;
226 indirect_ptr =
227 mpi_coredump->serdes_xaui_hss_pcs;
228 } else {
229 direct_ptr =
230 mpi_coredump->serdes_xaui_hss_pcs;
231 indirect_ptr =
232 mpi_coredump->serdes2_xaui_hss_pcs;
233 }
234
235 for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
236 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
237 xaui_direct_valid, xaui_indirect_valid);
238
239 /* Get XAUI_XFI_AN register block. */
240 if (qdev->func & 1) {
241 direct_ptr = mpi_coredump->serdes2_xfi_an;
242 indirect_ptr = mpi_coredump->serdes_xfi_an;
243 } else {
244 direct_ptr = mpi_coredump->serdes_xfi_an;
245 indirect_ptr = mpi_coredump->serdes2_xfi_an;
246 }
247
248 for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
249 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
250 xfi_direct_valid, xfi_indirect_valid);
251
252 /* Get XAUI_XFI_TRAIN register block. */
253 if (qdev->func & 1) {
254 direct_ptr = mpi_coredump->serdes2_xfi_train;
255 indirect_ptr =
256 mpi_coredump->serdes_xfi_train;
257 } else {
258 direct_ptr = mpi_coredump->serdes_xfi_train;
259 indirect_ptr =
260 mpi_coredump->serdes2_xfi_train;
261 }
262
263 for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
264 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
265 xfi_direct_valid, xfi_indirect_valid);
266
267 /* Get XAUI_XFI_HSS_PCS register block. */
268 if (qdev->func & 1) {
269 direct_ptr =
270 mpi_coredump->serdes2_xfi_hss_pcs;
271 indirect_ptr =
272 mpi_coredump->serdes_xfi_hss_pcs;
273 } else {
274 direct_ptr =
275 mpi_coredump->serdes_xfi_hss_pcs;
276 indirect_ptr =
277 mpi_coredump->serdes2_xfi_hss_pcs;
278 }
279
280 for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
281 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
282 xfi_direct_valid, xfi_indirect_valid);
283
284 /* Get XAUI_XFI_HSS_TX register block. */
285 if (qdev->func & 1) {
286 direct_ptr =
287 mpi_coredump->serdes2_xfi_hss_tx;
288 indirect_ptr =
289 mpi_coredump->serdes_xfi_hss_tx;
290 } else {
291 direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
292 indirect_ptr =
293 mpi_coredump->serdes2_xfi_hss_tx;
294 }
295 for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
296 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
297 xfi_direct_valid, xfi_indirect_valid);
298
299 /* Get XAUI_XFI_HSS_RX register block. */
300 if (qdev->func & 1) {
301 direct_ptr =
302 mpi_coredump->serdes2_xfi_hss_rx;
303 indirect_ptr =
304 mpi_coredump->serdes_xfi_hss_rx;
305 } else {
306 direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
307 indirect_ptr =
308 mpi_coredump->serdes2_xfi_hss_rx;
309 }
310
311 for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
312 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
313 xfi_direct_valid, xfi_indirect_valid);
314
315
316 /* Get XAUI_XFI_HSS_PLL register block. */
317 if (qdev->func & 1) {
318 direct_ptr =
319 mpi_coredump->serdes2_xfi_hss_pll;
320 indirect_ptr =
321 mpi_coredump->serdes_xfi_hss_pll;
322 } else {
323 direct_ptr =
324 mpi_coredump->serdes_xfi_hss_pll;
325 indirect_ptr =
326 mpi_coredump->serdes2_xfi_hss_pll;
327 }
328 for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
329 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
330 xfi_direct_valid, xfi_indirect_valid);
331 return 0;
332}
333
334static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
335 u32 *data)
336{
337 int status = 0;
338
339 /* wait for reg to come ready */
340 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
341 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
342 if (status)
343 goto exit;
344
345 /* set up for reg read */
346 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
347
348 /* wait for reg to come ready */
349 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
350 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
351 if (status)
352 goto exit;
353
354 /* get the data */
355 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
356exit:
357 return status;
358}
359
360/* Read the 400 xgmac control/statistics registers
361 * skipping unused locations.
362 */
363static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
364 unsigned int other_function)
365{
366 int status = 0;
367 int i;
368
369 for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
370 /* We're reading 400 xgmac registers, but we filter out
371 * serveral locations that are non-responsive to reads.
372 */
373 if ((i == 0x00000114) ||
374 (i == 0x00000118) ||
375 (i == 0x0000013c) ||
376 (i == 0x00000140) ||
377 (i > 0x00000150 && i < 0x000001fc) ||
378 (i > 0x00000278 && i < 0x000002a0) ||
379 (i > 0x000002c0 && i < 0x000002cf) ||
380 (i > 0x000002dc && i < 0x000002f0) ||
381 (i > 0x000003c8 && i < 0x00000400) ||
382 (i > 0x00000400 && i < 0x00000410) ||
383 (i > 0x00000410 && i < 0x00000420) ||
384 (i > 0x00000420 && i < 0x00000430) ||
385 (i > 0x00000430 && i < 0x00000440) ||
386 (i > 0x00000440 && i < 0x00000450) ||
387 (i > 0x00000450 && i < 0x00000500) ||
388 (i > 0x0000054c && i < 0x00000568) ||
389 (i > 0x000005c8 && i < 0x00000600)) {
390 if (other_function)
391 status =
392 ql_read_other_func_xgmac_reg(qdev, i, buf);
393 else
394 status = ql_read_xgmac_reg(qdev, i, buf);
395
396 if (status)
397 *buf = 0xdeadbeef;
398 break;
399 }
400 }
401 return status;
402}
3 403
4static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) 404static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
5{ 405{
@@ -91,6 +491,226 @@ err:
91 return status; 491 return status;
92} 492}
93 493
494/* Read the MPI Processor shadow registers */
495static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
496{
497 u32 i;
498 int status;
499
500 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
501 status = ql_write_mpi_reg(qdev, RISC_124,
502 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
503 if (status)
504 goto end;
505 status = ql_read_mpi_reg(qdev, RISC_127, buf);
506 if (status)
507 goto end;
508 }
509end:
510 return status;
511}
512
513/* Read the MPI Processor core registers */
514static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
515 u32 offset, u32 count)
516{
517 int i, status = 0;
518 for (i = 0; i < count; i++, buf++) {
519 status = ql_read_mpi_reg(qdev, offset + i, buf);
520 if (status)
521 return status;
522 }
523 return status;
524}
525
526/* Read the ASIC probe dump */
527static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
528 u32 valid, u32 *buf)
529{
530 u32 module, mux_sel, probe, lo_val, hi_val;
531
532 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
533 if (!((valid >> module) & 1))
534 continue;
535 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
536 probe = clock
537 | PRB_MX_ADDR_ARE
538 | mux_sel
539 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
540 ql_write32(qdev, PRB_MX_ADDR, probe);
541 lo_val = ql_read32(qdev, PRB_MX_DATA);
542 if (mux_sel == 0) {
543 *buf = probe;
544 buf++;
545 }
546 probe |= PRB_MX_ADDR_UP;
547 ql_write32(qdev, PRB_MX_ADDR, probe);
548 hi_val = ql_read32(qdev, PRB_MX_DATA);
549 *buf = lo_val;
550 buf++;
551 *buf = hi_val;
552 buf++;
553 }
554 }
555 return buf;
556}
557
558static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
559{
560 /* First we have to enable the probe mux */
561 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
562 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
563 PRB_MX_ADDR_VALID_SYS_MOD, buf);
564 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
565 PRB_MX_ADDR_VALID_PCI_MOD, buf);
566 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
567 PRB_MX_ADDR_VALID_XGM_MOD, buf);
568 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
569 PRB_MX_ADDR_VALID_FC_MOD, buf);
570 return 0;
571
572}
573
574/* Read out the routing index registers */
575static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
576{
577 int status;
578 u32 type, index, index_max;
579 u32 result_index;
580 u32 result_data;
581 u32 val;
582
583 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
584 if (status)
585 return status;
586
587 for (type = 0; type < 4; type++) {
588 if (type < 2)
589 index_max = 8;
590 else
591 index_max = 16;
592 for (index = 0; index < index_max; index++) {
593 val = RT_IDX_RS
594 | (type << RT_IDX_TYPE_SHIFT)
595 | (index << RT_IDX_IDX_SHIFT);
596 ql_write32(qdev, RT_IDX, val);
597 result_index = 0;
598 while ((result_index & RT_IDX_MR) == 0)
599 result_index = ql_read32(qdev, RT_IDX);
600 result_data = ql_read32(qdev, RT_DATA);
601 *buf = type;
602 buf++;
603 *buf = index;
604 buf++;
605 *buf = result_index;
606 buf++;
607 *buf = result_data;
608 buf++;
609 }
610 }
611 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
612 return status;
613}
614
615/* Read out the MAC protocol registers */
616static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
617{
618 u32 result_index, result_data;
619 u32 type;
620 u32 index;
621 u32 offset;
622 u32 val;
623 u32 initial_val = MAC_ADDR_RS;
624 u32 max_index;
625 u32 max_offset;
626
627 for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
628 switch (type) {
629
630 case 0: /* CAM */
631 initial_val |= MAC_ADDR_ADR;
632 max_index = MAC_ADDR_MAX_CAM_ENTRIES;
633 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
634 break;
635 case 1: /* Multicast MAC Address */
636 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
637 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
638 break;
639 case 2: /* VLAN filter mask */
640 case 3: /* MC filter mask */
641 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
642 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
643 break;
644 case 4: /* FC MAC addresses */
645 max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
646 max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
647 break;
648 case 5: /* Mgmt MAC addresses */
649 max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
650 max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
651 break;
652 case 6: /* Mgmt VLAN addresses */
653 max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
654 max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
655 break;
656 case 7: /* Mgmt IPv4 address */
657 max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
658 max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
659 break;
660 case 8: /* Mgmt IPv6 address */
661 max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
662 max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
663 break;
664 case 9: /* Mgmt TCP/UDP Dest port */
665 max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
666 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
667 break;
668 default:
669 printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
670 max_index = 0;
671 max_offset = 0;
672 break;
673 }
674 for (index = 0; index < max_index; index++) {
675 for (offset = 0; offset < max_offset; offset++) {
676 val = initial_val
677 | (type << MAC_ADDR_TYPE_SHIFT)
678 | (index << MAC_ADDR_IDX_SHIFT)
679 | (offset);
680 ql_write32(qdev, MAC_ADDR_IDX, val);
681 result_index = 0;
682 while ((result_index & MAC_ADDR_MR) == 0) {
683 result_index = ql_read32(qdev,
684 MAC_ADDR_IDX);
685 }
686 result_data = ql_read32(qdev, MAC_ADDR_DATA);
687 *buf = result_index;
688 buf++;
689 *buf = result_data;
690 buf++;
691 }
692 }
693 }
694}
695
696static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
697{
698 u32 func_num, reg, reg_val;
699 int status;
700
701 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
702 reg = MPI_NIC_REG_BLOCK
703 | (func_num << MPI_NIC_FUNCTION_SHIFT)
704 | (SEM / 4);
705 status = ql_read_mpi_reg(qdev, reg, &reg_val);
706 *buf = reg_val;
707 /* if the read failed then dead fill the element. */
708 if (!status)
709 *buf = 0xdeadbeef;
710 buf++;
711 }
712}
713
94/* Create a coredump segment header */ 714/* Create a coredump segment header */
95static void ql_build_coredump_seg_header( 715static void ql_build_coredump_seg_header(
96 struct mpi_coredump_segment_header *seg_hdr, 716 struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +723,527 @@ static void ql_build_coredump_seg_header(
103 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); 723 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
104} 724}
105 725
726/*
727 * This function should be called when a coredump / probedump
728 * is to be extracted from the HBA. It is assumed there is a
729 * qdev structure that contains the base address of the register
730 * space for this function as well as a coredump structure that
731 * will contain the dump.
732 */
733int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
734{
735 int status;
736 int i;
737
738 if (!mpi_coredump) {
739 QPRINTK(qdev, DRV, ERR,
740 "No memory available.\n");
741 return -ENOMEM;
742 }
743
744 /* Try to get the spinlock, but dont worry if
745 * it isn't available. If the firmware died it
746 * might be holding the sem.
747 */
748 ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
749
750 status = ql_pause_mpi_risc(qdev);
751 if (status) {
752 QPRINTK(qdev, DRV, ERR,
753 "Failed RISC pause. Status = 0x%.08x\n", status);
754 goto err;
755 }
756
757 /* Insert the global header */
758 memset(&(mpi_coredump->mpi_global_header), 0,
759 sizeof(struct mpi_coredump_global_header));
760 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
761 mpi_coredump->mpi_global_header.headerSize =
762 sizeof(struct mpi_coredump_global_header);
763 mpi_coredump->mpi_global_header.imageSize =
764 sizeof(struct ql_mpi_coredump);
765 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
766 sizeof(mpi_coredump->mpi_global_header.idString));
767
768 /* Get generic NIC reg dump */
769 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
770 NIC1_CONTROL_SEG_NUM,
771 sizeof(struct mpi_coredump_segment_header) +
772 sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
773
774 ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
775 NIC2_CONTROL_SEG_NUM,
776 sizeof(struct mpi_coredump_segment_header) +
777 sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
778
779 /* Get XGMac registers. (Segment 18, Rev C. step 21) */
780 ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
781 NIC1_XGMAC_SEG_NUM,
782 sizeof(struct mpi_coredump_segment_header) +
783 sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
784
785 ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
786 NIC2_XGMAC_SEG_NUM,
787 sizeof(struct mpi_coredump_segment_header) +
788 sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
789
790 if (qdev->func & 1) {
791 /* Odd means our function is NIC 2 */
792 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
793 mpi_coredump->nic2_regs[i] =
794 ql_read32(qdev, i * sizeof(u32));
795
796 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
797 mpi_coredump->nic_regs[i] =
798 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
799
800 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
801 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
802 } else {
803 /* Even means our function is NIC 1 */
804 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
805 mpi_coredump->nic_regs[i] =
806 ql_read32(qdev, i * sizeof(u32));
807 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
808 mpi_coredump->nic2_regs[i] =
809 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
810
811 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
812 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
813 }
814
815 /* Rev C. Step 20a */
816 ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
817 XAUI_AN_SEG_NUM,
818 sizeof(struct mpi_coredump_segment_header) +
819 sizeof(mpi_coredump->serdes_xaui_an),
820 "XAUI AN Registers");
821
822 /* Rev C. Step 20b */
823 ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
824 XAUI_HSS_PCS_SEG_NUM,
825 sizeof(struct mpi_coredump_segment_header) +
826 sizeof(mpi_coredump->serdes_xaui_hss_pcs),
827 "XAUI HSS PCS Registers");
828
829 ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
830 sizeof(struct mpi_coredump_segment_header) +
831 sizeof(mpi_coredump->serdes_xfi_an),
832 "XFI AN Registers");
833
834 ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
835 XFI_TRAIN_SEG_NUM,
836 sizeof(struct mpi_coredump_segment_header) +
837 sizeof(mpi_coredump->serdes_xfi_train),
838 "XFI TRAIN Registers");
839
840 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
841 XFI_HSS_PCS_SEG_NUM,
842 sizeof(struct mpi_coredump_segment_header) +
843 sizeof(mpi_coredump->serdes_xfi_hss_pcs),
844 "XFI HSS PCS Registers");
845
846 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
847 XFI_HSS_TX_SEG_NUM,
848 sizeof(struct mpi_coredump_segment_header) +
849 sizeof(mpi_coredump->serdes_xfi_hss_tx),
850 "XFI HSS TX Registers");
851
852 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
853 XFI_HSS_RX_SEG_NUM,
854 sizeof(struct mpi_coredump_segment_header) +
855 sizeof(mpi_coredump->serdes_xfi_hss_rx),
856 "XFI HSS RX Registers");
857
858 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
859 XFI_HSS_PLL_SEG_NUM,
860 sizeof(struct mpi_coredump_segment_header) +
861 sizeof(mpi_coredump->serdes_xfi_hss_pll),
862 "XFI HSS PLL Registers");
863
864 ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
865 XAUI2_AN_SEG_NUM,
866 sizeof(struct mpi_coredump_segment_header) +
867 sizeof(mpi_coredump->serdes2_xaui_an),
868 "XAUI2 AN Registers");
869
870 ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
871 XAUI2_HSS_PCS_SEG_NUM,
872 sizeof(struct mpi_coredump_segment_header) +
873 sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
874 "XAUI2 HSS PCS Registers");
875
876 ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
877 XFI2_AN_SEG_NUM,
878 sizeof(struct mpi_coredump_segment_header) +
879 sizeof(mpi_coredump->serdes2_xfi_an),
880 "XFI2 AN Registers");
881
882 ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
883 XFI2_TRAIN_SEG_NUM,
884 sizeof(struct mpi_coredump_segment_header) +
885 sizeof(mpi_coredump->serdes2_xfi_train),
886 "XFI2 TRAIN Registers");
887
888 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
889 XFI2_HSS_PCS_SEG_NUM,
890 sizeof(struct mpi_coredump_segment_header) +
891 sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
892 "XFI2 HSS PCS Registers");
893
894 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
895 XFI2_HSS_TX_SEG_NUM,
896 sizeof(struct mpi_coredump_segment_header) +
897 sizeof(mpi_coredump->serdes2_xfi_hss_tx),
898 "XFI2 HSS TX Registers");
899
900 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
901 XFI2_HSS_RX_SEG_NUM,
902 sizeof(struct mpi_coredump_segment_header) +
903 sizeof(mpi_coredump->serdes2_xfi_hss_rx),
904 "XFI2 HSS RX Registers");
905
906 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
907 XFI2_HSS_PLL_SEG_NUM,
908 sizeof(struct mpi_coredump_segment_header) +
909 sizeof(mpi_coredump->serdes2_xfi_hss_pll),
910 "XFI2 HSS PLL Registers");
911
912 status = ql_get_serdes_regs(qdev, mpi_coredump);
913 if (status) {
914 QPRINTK(qdev, DRV, ERR,
915 "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
916 status);
917 goto err;
918 }
919
920 ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
921 CORE_SEG_NUM,
922 sizeof(mpi_coredump->core_regs_seg_hdr) +
923 sizeof(mpi_coredump->mpi_core_regs) +
924 sizeof(mpi_coredump->mpi_core_sh_regs),
925 "Core Registers");
926
927 /* Get the MPI Core Registers */
928 status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
929 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
930 if (status)
931 goto err;
932 /* Get the 16 MPI shadow registers */
933 status = ql_get_mpi_shadow_regs(qdev,
934 &mpi_coredump->mpi_core_sh_regs[0]);
935 if (status)
936 goto err;
937
938 /* Get the Test Logic Registers */
939 ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
940 TEST_LOGIC_SEG_NUM,
941 sizeof(struct mpi_coredump_segment_header)
942 + sizeof(mpi_coredump->test_logic_regs),
943 "Test Logic Regs");
944 status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
945 TEST_REGS_ADDR, TEST_REGS_CNT);
946 if (status)
947 goto err;
948
949 /* Get the RMII Registers */
950 ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
951 RMII_SEG_NUM,
952 sizeof(struct mpi_coredump_segment_header)
953 + sizeof(mpi_coredump->rmii_regs),
954 "RMII Registers");
955 status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
956 RMII_REGS_ADDR, RMII_REGS_CNT);
957 if (status)
958 goto err;
959
960 /* Get the FCMAC1 Registers */
961 ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
962 FCMAC1_SEG_NUM,
963 sizeof(struct mpi_coredump_segment_header)
964 + sizeof(mpi_coredump->fcmac1_regs),
965 "FCMAC1 Registers");
966 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
967 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
968 if (status)
969 goto err;
970
971 /* Get the FCMAC2 Registers */
972
973 ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
974 FCMAC2_SEG_NUM,
975 sizeof(struct mpi_coredump_segment_header)
976 + sizeof(mpi_coredump->fcmac2_regs),
977 "FCMAC2 Registers");
978
979 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
980 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
981 if (status)
982 goto err;
983
984 /* Get the FC1 MBX Registers */
985 ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
986 FC1_MBOX_SEG_NUM,
987 sizeof(struct mpi_coredump_segment_header)
988 + sizeof(mpi_coredump->fc1_mbx_regs),
989 "FC1 MBox Regs");
990 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
991 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
992 if (status)
993 goto err;
994
995 /* Get the IDE Registers */
996 ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
997 IDE_SEG_NUM,
998 sizeof(struct mpi_coredump_segment_header)
999 + sizeof(mpi_coredump->ide_regs),
1000 "IDE Registers");
1001 status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
1002 IDE_REGS_ADDR, IDE_REGS_CNT);
1003 if (status)
1004 goto err;
1005
1006 /* Get the NIC1 MBX Registers */
1007 ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
1008 NIC1_MBOX_SEG_NUM,
1009 sizeof(struct mpi_coredump_segment_header)
1010 + sizeof(mpi_coredump->nic1_mbx_regs),
1011 "NIC1 MBox Regs");
1012 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
1013 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1014 if (status)
1015 goto err;
1016
1017 /* Get the SMBus Registers */
1018 ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
1019 SMBUS_SEG_NUM,
1020 sizeof(struct mpi_coredump_segment_header)
1021 + sizeof(mpi_coredump->smbus_regs),
1022 "SMBus Registers");
1023 status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1024 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1025 if (status)
1026 goto err;
1027
1028 /* Get the FC2 MBX Registers */
1029 ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1030 FC2_MBOX_SEG_NUM,
1031 sizeof(struct mpi_coredump_segment_header)
1032 + sizeof(mpi_coredump->fc2_mbx_regs),
1033 "FC2 MBox Regs");
1034 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1035 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1036 if (status)
1037 goto err;
1038
1039 /* Get the NIC2 MBX Registers */
1040 ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1041 NIC2_MBOX_SEG_NUM,
1042 sizeof(struct mpi_coredump_segment_header)
1043 + sizeof(mpi_coredump->nic2_mbx_regs),
1044 "NIC2 MBox Regs");
1045 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1046 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1047 if (status)
1048 goto err;
1049
1050 /* Get the I2C Registers */
1051 ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1052 I2C_SEG_NUM,
1053 sizeof(struct mpi_coredump_segment_header)
1054 + sizeof(mpi_coredump->i2c_regs),
1055 "I2C Registers");
1056 status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1057 I2C_REGS_ADDR, I2C_REGS_CNT);
1058 if (status)
1059 goto err;
1060
1061 /* Get the MEMC Registers */
1062 ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1063 MEMC_SEG_NUM,
1064 sizeof(struct mpi_coredump_segment_header)
1065 + sizeof(mpi_coredump->memc_regs),
1066 "MEMC Registers");
1067 status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1068 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1069 if (status)
1070 goto err;
1071
1072 /* Get the PBus Registers */
1073 ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1074 PBUS_SEG_NUM,
1075 sizeof(struct mpi_coredump_segment_header)
1076 + sizeof(mpi_coredump->pbus_regs),
1077 "PBUS Registers");
1078 status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1079 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1080 if (status)
1081 goto err;
1082
1083 /* Get the MDE Registers */
1084 ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1085 MDE_SEG_NUM,
1086 sizeof(struct mpi_coredump_segment_header)
1087 + sizeof(mpi_coredump->mde_regs),
1088 "MDE Registers");
1089 status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1090 MDE_REGS_ADDR, MDE_REGS_CNT);
1091 if (status)
1092 goto err;
1093
1094 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1095 MISC_NIC_INFO_SEG_NUM,
1096 sizeof(struct mpi_coredump_segment_header)
1097 + sizeof(mpi_coredump->misc_nic_info),
1098 "MISC NIC INFO");
1099 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1100 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1101 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1102 mpi_coredump->misc_nic_info.function = qdev->func;
1103
1104 /* Segment 31 */
1105 /* Get indexed register values. */
1106 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1107 INTR_STATES_SEG_NUM,
1108 sizeof(struct mpi_coredump_segment_header)
1109 + sizeof(mpi_coredump->intr_states),
1110 "INTR States");
1111 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1112
1113 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1114 CAM_ENTRIES_SEG_NUM,
1115 sizeof(struct mpi_coredump_segment_header)
1116 + sizeof(mpi_coredump->cam_entries),
1117 "CAM Entries");
1118 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1119 if (status)
1120 goto err;
1121
1122 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1123 ROUTING_WORDS_SEG_NUM,
1124 sizeof(struct mpi_coredump_segment_header)
1125 + sizeof(mpi_coredump->nic_routing_words),
1126 "Routing Words");
1127 status = ql_get_routing_entries(qdev,
1128 &mpi_coredump->nic_routing_words[0]);
1129 if (status)
1130 goto err;
1131
1132 /* Segment 34 (Rev C. step 23) */
1133 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1134 ETS_SEG_NUM,
1135 sizeof(struct mpi_coredump_segment_header)
1136 + sizeof(mpi_coredump->ets),
1137 "ETS Registers");
1138 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1139 if (status)
1140 goto err;
1141
1142 ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1143 PROBE_DUMP_SEG_NUM,
1144 sizeof(struct mpi_coredump_segment_header)
1145 + sizeof(mpi_coredump->probe_dump),
1146 "Probe Dump");
1147 ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1148
1149 ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1150 ROUTING_INDEX_SEG_NUM,
1151 sizeof(struct mpi_coredump_segment_header)
1152 + sizeof(mpi_coredump->routing_regs),
1153 "Routing Regs");
1154 status = ql_get_routing_index_registers(qdev,
1155 &mpi_coredump->routing_regs[0]);
1156 if (status)
1157 goto err;
1158
1159 ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1160 MAC_PROTOCOL_SEG_NUM,
1161 sizeof(struct mpi_coredump_segment_header)
1162 + sizeof(mpi_coredump->mac_prot_regs),
1163 "MAC Prot Regs");
1164 ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1165
1166 /* Get the semaphore registers for all 5 functions */
1167 ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1168 SEM_REGS_SEG_NUM,
1169 sizeof(struct mpi_coredump_segment_header) +
1170 sizeof(mpi_coredump->sem_regs), "Sem Registers");
1171
1172 ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1173
1174 /* Prevent the mpi restarting while we dump the memory.*/
1175 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1176
1177 /* clear the pause */
1178 status = ql_unpause_mpi_risc(qdev);
1179 if (status) {
1180 QPRINTK(qdev, DRV, ERR,
1181 "Failed RISC unpause. Status = 0x%.08x\n", status);
1182 goto err;
1183 }
1184
1185 /* Reset the RISC so we can dump RAM */
1186 status = ql_hard_reset_mpi_risc(qdev);
1187 if (status) {
1188 QPRINTK(qdev, DRV, ERR,
1189 "Failed RISC reset. Status = 0x%.08x\n", status);
1190 goto err;
1191 }
1192
1193 ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1194 WCS_RAM_SEG_NUM,
1195 sizeof(struct mpi_coredump_segment_header)
1196 + sizeof(mpi_coredump->code_ram),
1197 "WCS RAM");
1198 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1199 CODE_RAM_ADDR, CODE_RAM_CNT);
1200 if (status) {
1201 QPRINTK(qdev, DRV, ERR,
1202 "Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
1203 goto err;
1204 }
1205
1206 /* Insert the segment header */
1207 ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1208 MEMC_RAM_SEG_NUM,
1209 sizeof(struct mpi_coredump_segment_header)
1210 + sizeof(mpi_coredump->memc_ram),
1211 "MEMC RAM");
1212 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1213 MEMC_RAM_ADDR, MEMC_RAM_CNT);
1214 if (status) {
1215 QPRINTK(qdev, DRV, ERR,
1216 "Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
1217 goto err;
1218 }
1219err:
1220 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1221 return status;
1222
1223}
1224
1225static void ql_get_core_dump(struct ql_adapter *qdev)
1226{
1227 if (!ql_own_firmware(qdev)) {
1228 QPRINTK(qdev, DRV, ERR, "%s: Don't own firmware!\n",
1229 qdev->ndev->name);
1230 return;
1231 }
1232
1233 if (!netif_running(qdev->ndev)) {
1234 QPRINTK(qdev, IFUP, ERR,
1235 "Force Coredump can only be done from interface "
1236 "that is up.\n");
1237 return;
1238 }
1239
1240 if (ql_mb_sys_err(qdev)) {
1241 QPRINTK(qdev, IFUP, ERR,
1242 "Fail force coredump with ql_mb_sys_err().\n");
1243 return;
1244 }
1245}
1246
106void ql_gen_reg_dump(struct ql_adapter *qdev, 1247void ql_gen_reg_dump(struct ql_adapter *qdev,
107 struct ql_reg_dump *mpi_coredump) 1248 struct ql_reg_dump *mpi_coredump)
108{ 1249{
@@ -178,6 +1319,36 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
178 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1319 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
179 if (status) 1320 if (status)
180 return; 1321 return;
1322
1323 if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
1324 ql_get_core_dump(qdev);
1325}
1326
1327/* Coredump to messages log file using separate worker thread */
1328void ql_mpi_core_to_log(struct work_struct *work)
1329{
1330 struct ql_adapter *qdev =
1331 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1332 u32 *tmp, count;
1333 int i;
1334
1335 count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1336 tmp = (u32 *)qdev->mpi_coredump;
1337 QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
1338
1339 for (i = 0; i < count; i += 8) {
1340 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
1341 "%.08x %.08x %.08x \n", i,
1342 tmp[i + 0],
1343 tmp[i + 1],
1344 tmp[i + 2],
1345 tmp[i + 3],
1346 tmp[i + 4],
1347 tmp[i + 5],
1348 tmp[i + 6],
1349 tmp[i + 7]);
1350 msleep(5);
1351 }
181} 1352}
182 1353
183#ifdef QL_REG_DUMP 1354#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 894a7c84fae..4adca94a521 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static int qlge_mpi_coredump;
77module_param(qlge_mpi_coredump, int, 0);
78MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
81
82static int qlge_force_coredump;
83module_param(qlge_force_coredump, int, 0);
84MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
87
88static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, 90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */ 91 /* required last entry */
@@ -452,9 +464,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 if (set) { 464 if (set) {
453 addr = &qdev->ndev->dev_addr[0]; 465 addr = &qdev->ndev->dev_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG, 466 QPRINTK(qdev, IFUP, DEBUG,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n", 467 "Set Mac addr %pM\n", addr);
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else { 468 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN); 469 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0]; 470 addr = &zero_mac_addr[0];
@@ -1433,6 +1443,254 @@ map_error:
1433 return NETDEV_TX_BUSY; 1443 return NETDEV_TX_BUSY;
1434} 1444}
1435 1445
1446/* Process an inbound completion from an rx ring. */
1447static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1448 struct rx_ring *rx_ring,
1449 struct ib_mac_iocb_rsp *ib_mac_rsp,
1450 u32 length,
1451 u16 vlan_id)
1452{
1453 struct sk_buff *skb;
1454 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1455 struct skb_frag_struct *rx_frag;
1456 int nr_frags;
1457 struct napi_struct *napi = &rx_ring->napi;
1458
1459 napi->dev = qdev->ndev;
1460
1461 skb = napi_get_frags(napi);
1462 if (!skb) {
1463 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
1464 rx_ring->rx_dropped++;
1465 put_page(lbq_desc->p.pg_chunk.page);
1466 return;
1467 }
1468 prefetch(lbq_desc->p.pg_chunk.va);
1469 rx_frag = skb_shinfo(skb)->frags;
1470 nr_frags = skb_shinfo(skb)->nr_frags;
1471 rx_frag += nr_frags;
1472 rx_frag->page = lbq_desc->p.pg_chunk.page;
1473 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1474 rx_frag->size = length;
1475
1476 skb->len += length;
1477 skb->data_len += length;
1478 skb->truesize += length;
1479 skb_shinfo(skb)->nr_frags++;
1480
1481 rx_ring->rx_packets++;
1482 rx_ring->rx_bytes += length;
1483 skb->ip_summed = CHECKSUM_UNNECESSARY;
1484 skb_record_rx_queue(skb, rx_ring->cq_id);
1485 if (qdev->vlgrp && (vlan_id != 0xffff))
1486 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1487 else
1488 napi_gro_frags(napi);
1489}
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1497{
1498 struct net_device *ndev = qdev->ndev;
1499 struct sk_buff *skb = NULL;
1500 void *addr;
1501 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1502 struct napi_struct *napi = &rx_ring->napi;
1503
1504 skb = netdev_alloc_skb(ndev, length);
1505 if (!skb) {
1506 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
1507 "need to unwind!.\n");
1508 rx_ring->rx_dropped++;
1509 put_page(lbq_desc->p.pg_chunk.page);
1510 return;
1511 }
1512
1513 addr = lbq_desc->p.pg_chunk.va;
1514 prefetch(addr);
1515
1516
1517 /* Frame error, so drop the packet. */
1518 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1519 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1520 ib_mac_rsp->flags2);
1521 rx_ring->rx_errors++;
1522 goto err_out;
1523 }
1524
1525 /* The max framesize filter on this chip is set higher than
1526 * MTU since FCoE uses 2k frames.
1527 */
1528 if (skb->len > ndev->mtu + ETH_HLEN) {
1529 QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
1530 rx_ring->rx_dropped++;
1531 goto err_out;
1532 }
1533 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1534 QPRINTK(qdev, RX_STATUS, DEBUG,
1535 "%d bytes of headers and data in large. Chain "
1536 "page to new skb and pull tail.\n", length);
1537 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1538 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1539 length-ETH_HLEN);
1540 skb->len += length-ETH_HLEN;
1541 skb->data_len += length-ETH_HLEN;
1542 skb->truesize += length-ETH_HLEN;
1543
1544 rx_ring->rx_packets++;
1545 rx_ring->rx_bytes += skb->len;
1546 skb->protocol = eth_type_trans(skb, ndev);
1547 skb->ip_summed = CHECKSUM_NONE;
1548
1549 if (qdev->rx_csum &&
1550 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1551 /* TCP frame. */
1552 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1553 QPRINTK(qdev, RX_STATUS, DEBUG,
1554 "TCP checksum done!\n");
1555 skb->ip_summed = CHECKSUM_UNNECESSARY;
1556 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1557 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1558 /* Unfragmented ipv4 UDP frame. */
1559 struct iphdr *iph = (struct iphdr *) skb->data;
1560 if (!(iph->frag_off &
1561 cpu_to_be16(IP_MF|IP_OFFSET))) {
1562 skb->ip_summed = CHECKSUM_UNNECESSARY;
1563 QPRINTK(qdev, RX_STATUS, DEBUG,
1564 "TCP checksum done!\n");
1565 }
1566 }
1567 }
1568
1569 skb_record_rx_queue(skb, rx_ring->cq_id);
1570 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1571 if (qdev->vlgrp && (vlan_id != 0xffff))
1572 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1573 else
1574 napi_gro_receive(napi, skb);
1575 } else {
1576 if (qdev->vlgrp && (vlan_id != 0xffff))
1577 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1578 else
1579 netif_receive_skb(skb);
1580 }
1581 return;
1582err_out:
1583 dev_kfree_skb_any(skb);
1584 put_page(lbq_desc->p.pg_chunk.page);
1585}
1586
1587/* Process an inbound completion from an rx ring. */
1588static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1589 struct rx_ring *rx_ring,
1590 struct ib_mac_iocb_rsp *ib_mac_rsp,
1591 u32 length,
1592 u16 vlan_id)
1593{
1594 struct net_device *ndev = qdev->ndev;
1595 struct sk_buff *skb = NULL;
1596 struct sk_buff *new_skb = NULL;
1597 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1598
1599 skb = sbq_desc->p.skb;
1600 /* Allocate new_skb and copy */
1601 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1602 if (new_skb == NULL) {
1603 QPRINTK(qdev, PROBE, ERR,
1604 "No skb available, drop the packet.\n");
1605 rx_ring->rx_dropped++;
1606 return;
1607 }
1608 skb_reserve(new_skb, NET_IP_ALIGN);
1609 memcpy(skb_put(new_skb, length), skb->data, length);
1610 skb = new_skb;
1611
1612 /* Frame error, so drop the packet. */
1613 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1614 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1615 ib_mac_rsp->flags2);
1616 dev_kfree_skb_any(skb);
1617 rx_ring->rx_errors++;
1618 return;
1619 }
1620
1621 /* loopback self test for ethtool */
1622 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1623 ql_check_lb_frame(qdev, skb);
1624 dev_kfree_skb_any(skb);
1625 return;
1626 }
1627
1628 /* The max framesize filter on this chip is set higher than
1629 * MTU since FCoE uses 2k frames.
1630 */
1631 if (skb->len > ndev->mtu + ETH_HLEN) {
1632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_dropped++;
1634 return;
1635 }
1636
1637 prefetch(skb->data);
1638 skb->dev = ndev;
1639 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1640 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1645 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1646 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1647 }
1648 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1649 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1650
1651 rx_ring->rx_packets++;
1652 rx_ring->rx_bytes += skb->len;
1653 skb->protocol = eth_type_trans(skb, ndev);
1654 skb->ip_summed = CHECKSUM_NONE;
1655
1656 /* If rx checksum is on, and there are no
1657 * csum or frame errors.
1658 */
1659 if (qdev->rx_csum &&
1660 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1661 /* TCP frame. */
1662 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1663 QPRINTK(qdev, RX_STATUS, DEBUG,
1664 "TCP checksum done!\n");
1665 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1667 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1668 /* Unfragmented ipv4 UDP frame. */
1669 struct iphdr *iph = (struct iphdr *) skb->data;
1670 if (!(iph->frag_off &
1671 cpu_to_be16(IP_MF|IP_OFFSET))) {
1672 skb->ip_summed = CHECKSUM_UNNECESSARY;
1673 QPRINTK(qdev, RX_STATUS, DEBUG,
1674 "TCP checksum done!\n");
1675 }
1676 }
1677 }
1678
1679 skb_record_rx_queue(skb, rx_ring->cq_id);
1680 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1681 if (qdev->vlgrp && (vlan_id != 0xffff))
1682 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1683 vlan_id, skb);
1684 else
1685 napi_gro_receive(&rx_ring->napi, skb);
1686 } else {
1687 if (qdev->vlgrp && (vlan_id != 0xffff))
1688 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1689 else
1690 netif_receive_skb(skb);
1691 }
1692}
1693
1436static void ql_realign_skb(struct sk_buff *skb, int len) 1694static void ql_realign_skb(struct sk_buff *skb, int len)
1437{ 1695{
1438 void *temp_addr = skb->data; 1696 void *temp_addr = skb->data;
@@ -1646,14 +1904,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1646} 1904}
1647 1905
1648/* Process an inbound completion from an rx ring. */ 1906/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev, 1907static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring, 1908 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp) 1909 struct ib_mac_iocb_rsp *ib_mac_rsp,
1910 u16 vlan_id)
1652{ 1911{
1653 struct net_device *ndev = qdev->ndev; 1912 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL; 1913 struct sk_buff *skb = NULL;
1655 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK)
1657 1914
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp); 1915 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659 1916
@@ -1753,6 +2010,65 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1753 } 2010 }
1754} 2011}
1755 2012
2013/* Process an inbound completion from an rx ring. */
2014static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2015 struct rx_ring *rx_ring,
2016 struct ib_mac_iocb_rsp *ib_mac_rsp)
2017{
2018 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2019 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2020 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2021 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2022
2023 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2024
2025 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2026 /* The data and headers are split into
2027 * separate buffers.
2028 */
2029 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2030 vlan_id);
2031 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2032 /* The data fit in a single small buffer.
2033 * Allocate a new skb, copy the data and
2034 * return the buffer to the free pool.
2035 */
2036 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2037 length, vlan_id);
2038 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2039 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2041 /* TCP packet in a page chunk that's been checksummed.
2042 * Tack it on to our GRO skb and let it go.
2043 */
2044 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2045 length, vlan_id);
2046 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2047 /* Non-TCP packet in a page chunk. Allocate an
2048 * skb, tack it on frags, and send it up.
2049 */
2050 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2051 length, vlan_id);
2052 } else {
2053 struct bq_desc *lbq_desc;
2054
2055 /* Free small buffer that holds the IAL */
2056 lbq_desc = ql_get_curr_sbuf(rx_ring);
2057 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2058 length, qdev->ndev->mtu);
2059
2060 /* Unwind the large buffers for this frame. */
2061 while (length > 0) {
2062 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2063 length -= (length < rx_ring->lbq_buf_size) ?
2064 length : rx_ring->lbq_buf_size;
2065 put_page(lbq_desc->p.pg_chunk.page);
2066 }
2067 }
2068
2069 return (unsigned long)length;
2070}
2071
1756/* Process an outbound completion from an rx ring. */ 2072/* Process an outbound completion from an rx ring. */
1757static void ql_process_mac_tx_intr(struct ql_adapter *qdev, 2073static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1758 struct ob_mac_iocb_rsp *mac_rsp) 2074 struct ob_mac_iocb_rsp *mac_rsp)
@@ -3332,15 +3648,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3332 3648
3333 /* Enable the function, set pagesize, enable error checking. */ 3649 /* Enable the function, set pagesize, enable error checking. */
3334 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | 3650 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3335 FSC_EC | FSC_VM_PAGE_4K | FSC_SH; 3651 FSC_EC | FSC_VM_PAGE_4K;
3652 value |= SPLT_SETTING;
3336 3653
3337 /* Set/clear header splitting. */ 3654 /* Set/clear header splitting. */
3338 mask = FSC_VM_PAGESIZE_MASK | 3655 mask = FSC_VM_PAGESIZE_MASK |
3339 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); 3656 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3340 ql_write32(qdev, FSC, mask | value); 3657 ql_write32(qdev, FSC, mask | value);
3341 3658
3342 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | 3659 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3343 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
3344 3660
3345 /* Set RX packet routing to use port/pci function on which the 3661 /* Set RX packet routing to use port/pci function on which the
3346 * packet arrived on in addition to usual frame routing. 3662 * packet arrived on in addition to usual frame routing.
@@ -3538,6 +3854,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3538 cancel_delayed_work_sync(&qdev->mpi_reset_work); 3854 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3539 cancel_delayed_work_sync(&qdev->mpi_work); 3855 cancel_delayed_work_sync(&qdev->mpi_work);
3540 cancel_delayed_work_sync(&qdev->mpi_idc_work); 3856 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3857 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3541 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3858 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3542 3859
3543 for (i = 0; i < qdev->rss_ring_count; i++) 3860 for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4094,6 +4411,7 @@ static void ql_release_all(struct pci_dev *pdev)
4094 iounmap(qdev->reg_base); 4411 iounmap(qdev->reg_base);
4095 if (qdev->doorbell_area) 4412 if (qdev->doorbell_area)
4096 iounmap(qdev->doorbell_area); 4413 iounmap(qdev->doorbell_area);
4414 vfree(qdev->mpi_coredump);
4097 pci_release_regions(pdev); 4415 pci_release_regions(pdev);
4098 pci_set_drvdata(pdev, NULL); 4416 pci_set_drvdata(pdev, NULL);
4099} 4417}
@@ -4175,6 +4493,17 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4175 spin_lock_init(&qdev->hw_lock); 4493 spin_lock_init(&qdev->hw_lock);
4176 spin_lock_init(&qdev->stats_lock); 4494 spin_lock_init(&qdev->stats_lock);
4177 4495
4496 if (qlge_mpi_coredump) {
4497 qdev->mpi_coredump =
4498 vmalloc(sizeof(struct ql_mpi_coredump));
4499 if (qdev->mpi_coredump == NULL) {
4500 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4501 err = -ENOMEM;
4502 goto err_out;
4503 }
4504 if (qlge_force_coredump)
4505 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4506 }
4178 /* make sure the EEPROM is good */ 4507 /* make sure the EEPROM is good */
4179 err = qdev->nic_ops->get_flash(qdev); 4508 err = qdev->nic_ops->get_flash(qdev);
4180 if (err) { 4509 if (err) {
@@ -4204,6 +4533,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4204 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 4533 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4205 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 4534 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4206 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4535 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4536 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4207 init_completion(&qdev->ide_completion); 4537 init_completion(&qdev->ide_completion);
4208 4538
4209 if (!cards_found) { 4539 if (!cards_found) {
@@ -4327,6 +4657,7 @@ static void ql_eeh_close(struct net_device *ndev)
4327 cancel_delayed_work_sync(&qdev->mpi_reset_work); 4657 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4328 cancel_delayed_work_sync(&qdev->mpi_work); 4658 cancel_delayed_work_sync(&qdev->mpi_work);
4329 cancel_delayed_work_sync(&qdev->mpi_idc_work); 4659 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4660 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4330 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 4661 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4331 4662
4332 for (i = 0; i < qdev->rss_ring_count; i++) 4663 for (i = 0; i < qdev->rss_ring_count; i++)
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d..e2c846f17fc 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3int ql_unpause_mpi_risc(struct ql_adapter *qdev)
4{
5 u32 tmp;
6
7 /* Un-pause the RISC */
8 tmp = ql_read32(qdev, CSR);
9 if (!(tmp & CSR_RP))
10 return -EIO;
11
12 ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
13 return 0;
14}
15
16int ql_pause_mpi_risc(struct ql_adapter *qdev)
17{
18 u32 tmp;
19 int count = UDELAY_COUNT;
20
21 /* Pause the RISC */
22 ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
23 do {
24 tmp = ql_read32(qdev, CSR);
25 if (tmp & CSR_RP)
26 break;
27 mdelay(UDELAY_DELAY);
28 count--;
29 } while (count);
30 return (count == 0) ? -ETIMEDOUT : 0;
31}
32
33int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
34{
35 u32 tmp;
36 int count = UDELAY_COUNT;
37
38 /* Reset the RISC */
39 ql_write32(qdev, CSR, CSR_CMD_SET_RST);
40 do {
41 tmp = ql_read32(qdev, CSR);
42 if (tmp & CSR_RR) {
43 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
44 break;
45 }
46 mdelay(UDELAY_DELAY);
47 count--;
48 } while (count);
49 return (count == 0) ? -ETIMEDOUT : 0;
50}
51
3int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 52int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{ 53{
5 int status; 54 int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
45 return status; 94 return status;
46} 95}
47 96
97/* Determine if we are in charge of the firwmare. If
98 * we are the lower of the 2 NIC pcie functions, or if
99 * we are the higher function and the lower function
100 * is not enabled.
101 */
102int ql_own_firmware(struct ql_adapter *qdev)
103{
104 u32 temp;
105
106 /* If we are the lower of the 2 NIC functions
107 * on the chip the we are responsible for
108 * core dump and firmware reset after an error.
109 */
110 if (qdev->func < qdev->alt_func)
111 return 1;
112
113 /* If we are the higher of the 2 NIC functions
114 * on the chip and the lower function is not
115 * enabled, then we are responsible for
116 * core dump and firmware reset after an error.
117 */
118 temp = ql_read32(qdev, STS);
119 if (!(temp & (1 << (8 + qdev->alt_func))))
120 return 1;
121
122 return 0;
123
124}
125
48static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) 126static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
49{ 127{
50 int i, status; 128 int i, status;
@@ -529,6 +607,22 @@ end:
529 return status; 607 return status;
530} 608}
531 609
610int ql_mb_sys_err(struct ql_adapter *qdev)
611{
612 struct mbox_params mbc;
613 struct mbox_params *mbcp = &mbc;
614 int status;
615
616 memset(mbcp, 0, sizeof(struct mbox_params));
617
618 mbcp->in_count = 1;
619 mbcp->out_count = 0;
620
621 mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
622
623 status = ql_mailbox_command(qdev, mbcp);
624 return status;
625}
532 626
533/* Get MPI firmware version. This will be used for 627/* Get MPI firmware version. This will be used for
534 * driver banner and for ethtool info. 628 * driver banner and for ethtool info.
@@ -669,6 +763,63 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
669 return status; 763 return status;
670} 764}
671 765
766int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
767 u32 size)
768{
769 int status = 0;
770 struct mbox_params mbc;
771 struct mbox_params *mbcp = &mbc;
772
773 memset(mbcp, 0, sizeof(struct mbox_params));
774
775 mbcp->in_count = 9;
776 mbcp->out_count = 1;
777
778 mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
779 mbcp->mbox_in[1] = LSW(addr);
780 mbcp->mbox_in[2] = MSW(req_dma);
781 mbcp->mbox_in[3] = LSW(req_dma);
782 mbcp->mbox_in[4] = MSW(size);
783 mbcp->mbox_in[5] = LSW(size);
784 mbcp->mbox_in[6] = MSW(MSD(req_dma));
785 mbcp->mbox_in[7] = LSW(MSD(req_dma));
786 mbcp->mbox_in[8] = MSW(addr);
787
788
789 status = ql_mailbox_command(qdev, mbcp);
790 if (status)
791 return status;
792
793 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
794 QPRINTK(qdev, DRV, ERR,
795 "Failed to dump risc RAM.\n");
796 status = -EIO;
797 }
798 return status;
799}
800
801/* Issue a mailbox command to dump RISC RAM. */
802int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
803 u32 ram_addr, int word_count)
804{
805 int status;
806 char *my_buf;
807 dma_addr_t buf_dma;
808
809 my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
810 &buf_dma);
811 if (!my_buf)
812 return -EIO;
813
814 status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
815 if (!status)
816 memcpy(buf, my_buf, word_count * sizeof(u32));
817
818 pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
819 buf_dma);
820 return status;
821}
822
672/* Get link settings and maximum frame size settings 823/* Get link settings and maximum frame size settings
673 * for the current port. 824 * for the current port.
674 * Most likely will block. 825 * Most likely will block.
@@ -1143,5 +1294,19 @@ void ql_mpi_reset_work(struct work_struct *work)
1143 cancel_delayed_work_sync(&qdev->mpi_work); 1294 cancel_delayed_work_sync(&qdev->mpi_work);
1144 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 1295 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
1145 cancel_delayed_work_sync(&qdev->mpi_idc_work); 1296 cancel_delayed_work_sync(&qdev->mpi_idc_work);
1297 /* If we're not the dominant NIC function,
1298 * then there is nothing to do.
1299 */
1300 if (!ql_own_firmware(qdev)) {
1301 QPRINTK(qdev, DRV, ERR, "Don't own firmware!\n");
1302 return;
1303 }
1304
1305 if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
1306 QPRINTK(qdev, DRV, ERR, "Core is dumped!\n");
1307 qdev->core_is_dumped = 1;
1308 queue_delayed_work(qdev->workqueue,
1309 &qdev->mpi_core_to_log, 5 * HZ);
1310 }
1146 ql_soft_reset_mpi_risc(qdev); 1311 ql_soft_reset_mpi_risc(qdev);
1147} 1312}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a..d68ba7a5863 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -1222,7 +1222,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
1222} 1222}
1223 1223
1224 1224
1225static struct pci_device_id r6040_pci_tbl[] = { 1225static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
1226 { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) }, 1226 { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
1227 { 0 } 1227 { 0 }
1228}; 1228};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 60f96c468a2..c1bb24cf079 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
168static void rtl_hw_start_8168(struct net_device *); 168static void rtl_hw_start_8168(struct net_device *);
169static void rtl_hw_start_8101(struct net_device *); 169static void rtl_hw_start_8101(struct net_device *);
170 170
171static struct pci_device_id rtl8169_pci_tbl[] = { 171static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
172 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, 172 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
173 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, 173 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
174 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 174 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
@@ -3188,15 +3188,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3188 if (netif_msg_probe(tp)) { 3188 if (netif_msg_probe(tp)) {
3189 u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff; 3189 u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
3190 3190
3191 printk(KERN_INFO "%s: %s at 0x%lx, " 3191 printk(KERN_INFO "%s: %s at 0x%lx, %pM, XID %08x IRQ %d\n",
3192 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
3193 "XID %08x IRQ %d\n",
3194 dev->name, 3192 dev->name,
3195 rtl_chip_info[tp->chipset].name, 3193 rtl_chip_info[tp->chipset].name,
3196 dev->base_addr, 3194 dev->base_addr, dev->dev_addr, xid, dev->irq);
3197 dev->dev_addr[0], dev->dev_addr[1],
3198 dev->dev_addr[2], dev->dev_addr[3],
3199 dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
3200 } 3195 }
3201 3196
3202 rtl8169_init_phy(dev, tp); 3197 rtl8169_init_phy(dev, tp);
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 1c257098d0a..266baf53496 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1688,7 +1688,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1688 } 1688 }
1689} 1689}
1690 1690
1691static struct pci_device_id rr_pci_tbl[] = { 1691static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
1692 { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER, 1692 { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
1693 PCI_ANY_ID, PCI_ANY_ID, }, 1693 PCI_ANY_ID, PCI_ANY_ID, },
1694 { 0,} 1694 { 0,}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 3c4836d0898..d1664586e8f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -523,7 +523,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
523 * S2IO device table. 523 * S2IO device table.
524 * This table lists all the devices that this driver supports. 524 * This table lists all the devices that this driver supports.
525 */ 525 */
526static struct pci_device_id s2io_tbl[] __devinitdata = { 526static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
527 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, 527 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
528 PCI_ANY_ID, PCI_ANY_ID}, 528 PCI_ANY_ID, PCI_ANY_ID},
529 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, 529 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f9..fd8cb506a2b 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1589,7 +1589,7 @@ out:
1589 return 0; 1589 return 0;
1590} 1590}
1591 1591
1592static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { 1592static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
1593 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, 1593 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, 1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
1595 { PCI_DEVICE(0x1088, 0x2031) }, 1595 { PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 103e8b0e2a0..62d5cd51a9d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1940,7 +1940,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1940 **************************************************************************/ 1940 **************************************************************************/
1941 1941
1942/* PCI device ID table */ 1942/* PCI device ID table */
1943static struct pci_device_id efx_pci_table[] __devinitdata = { 1943static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
1944 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 1944 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1945 .driver_data = (unsigned long) &falcon_a1_nic_type}, 1945 .driver_data = (unsigned long) &falcon_a1_nic_type},
1946 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 1946 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7402b858cab..42a35f086a9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1473,13 +1473,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1473 if (ret) 1473 if (ret)
1474 goto out_unregister; 1474 goto out_unregister;
1475 1475
1476 /* pritnt device infomation */ 1476 /* print device infomation */
1477 pr_info("Base address at 0x%x, ", 1477 pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
1478 (u32)ndev->base_addr); 1478 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1479
1480 for (i = 0; i < 5; i++)
1481 printk("%02X:", ndev->dev_addr[i]);
1482 printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1483 1479
1484 platform_set_drvdata(pdev, ndev); 1480 platform_set_drvdata(pdev, ndev);
1485 1481
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a..626de766443 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -334,7 +334,7 @@ static const struct {
334 { "SiS 191 PCI Gigabit Ethernet adapter" }, 334 { "SiS 191 PCI Gigabit Ethernet adapter" },
335}; 335};
336 336
337static struct pci_device_id sis190_pci_tbl[] = { 337static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
338 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, 338 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
339 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, 339 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
340 { 0, }, 340 { 0, },
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75..20c5ce47489 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
106 "SiS 900 PCI Fast Ethernet", 106 "SiS 900 PCI Fast Ethernet",
107 "SiS 7016 PCI Fast Ethernet" 107 "SiS 7016 PCI Fast Ethernet"
108}; 108};
109static struct pci_device_id sis900_pci_tbl [] = { 109static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
110 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900, 110 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900}, 111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
112 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016, 112 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a72850..6b955a4f19b 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
149extern void mac_drv_clear_rx_queue(struct s_smc *smc); 149extern void mac_drv_clear_rx_queue(struct s_smc *smc);
150extern void enable_tx_irq(struct s_smc *smc, u_short queue); 150extern void enable_tx_irq(struct s_smc *smc, u_short queue);
151 151
152static struct pci_device_id skfddi_pci_tbl[] = { 152static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, }, 153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
154 { } /* Terminating entry */ 154 { } /* Terminating entry */
155}; 155};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
435 goto fail; 435 goto fail;
436 } 436 }
437 read_address(smc, NULL); 437 read_address(smc, NULL);
438 pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n", 438 pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
439 smc->hw.fddi_canon_addr.a[0],
440 smc->hw.fddi_canon_addr.a[1],
441 smc->hw.fddi_canon_addr.a[2],
442 smc->hw.fddi_canon_addr.a[3],
443 smc->hw.fddi_canon_addr.a[4],
444 smc->hw.fddi_canon_addr.a[5]);
445 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6); 439 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
446 440
447 smt_reset_defaults(smc, 0); 441 smt_reset_defaults(smc, 0);
@@ -890,15 +884,8 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
890 (struct fddi_addr *)dmi->dmi_addr, 884 (struct fddi_addr *)dmi->dmi_addr,
891 1); 885 1);
892 886
893 pr_debug(KERN_INFO "ENABLE MC ADDRESS:"); 887 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
894 pr_debug(" %02x %02x %02x ", 888 dmi->dmi_addr);
895 dmi->dmi_addr[0],
896 dmi->dmi_addr[1],
897 dmi->dmi_addr[2]);
898 pr_debug("%02x %02x %02x\n",
899 dmi->dmi_addr[3],
900 dmi->dmi_addr[4],
901 dmi->dmi_addr[5]);
902 dmi = dmi->next; 889 dmi = dmi->next;
903 } // for 890 } // for
904 891
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc0016..5ff46eb18d0 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,7 +78,7 @@ static int debug = -1; /* defaults above */
78module_param(debug, int, 0); 78module_param(debug, int, 0);
79MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 79MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
80 80
81static const struct pci_device_id skge_id_table[] = { 81static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
82 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
83 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) }, 83 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
84 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 84 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d760650c5c0..fecde669d6a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3190,7 +3190,9 @@ static void sky2_reset(struct sky2_hw *hw)
3190static void sky2_detach(struct net_device *dev) 3190static void sky2_detach(struct net_device *dev)
3191{ 3191{
3192 if (netif_running(dev)) { 3192 if (netif_running(dev)) {
3193 netif_tx_lock(dev);
3193 netif_device_detach(dev); /* stop txq */ 3194 netif_device_detach(dev); /* stop txq */
3195 netif_tx_unlock(dev);
3194 sky2_down(dev); 3196 sky2_down(dev);
3195 } 3197 }
3196} 3198}
@@ -3866,6 +3868,50 @@ static int sky2_get_regs_len(struct net_device *dev)
3866 return 0x4000; 3868 return 0x4000;
3867} 3869}
3868 3870
3871static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
3872{
3873 /* This complicated switch statement is to make sure and
3874 * only access regions that are unreserved.
3875 * Some blocks are only valid on dual port cards.
3876 */
3877 switch (b) {
3878 /* second port */
3879 case 5: /* Tx Arbiter 2 */
3880 case 9: /* RX2 */
3881 case 14 ... 15: /* TX2 */
3882 case 17: case 19: /* Ram Buffer 2 */
3883 case 22 ... 23: /* Tx Ram Buffer 2 */
3884 case 25: /* Rx MAC Fifo 1 */
3885 case 27: /* Tx MAC Fifo 2 */
3886 case 31: /* GPHY 2 */
3887 case 40 ... 47: /* Pattern Ram 2 */
3888 case 52: case 54: /* TCP Segmentation 2 */
3889 case 112 ... 116: /* GMAC 2 */
3890 return hw->ports > 1;
3891
3892 case 0: /* Control */
3893 case 2: /* Mac address */
3894 case 4: /* Tx Arbiter 1 */
3895 case 7: /* PCI express reg */
3896 case 8: /* RX1 */
3897 case 12 ... 13: /* TX1 */
3898 case 16: case 18:/* Rx Ram Buffer 1 */
3899 case 20 ... 21: /* Tx Ram Buffer 1 */
3900 case 24: /* Rx MAC Fifo 1 */
3901 case 26: /* Tx MAC Fifo 1 */
3902 case 28 ... 29: /* Descriptor and status unit */
3903 case 30: /* GPHY 1*/
3904 case 32 ... 39: /* Pattern Ram 1 */
3905 case 48: case 50: /* TCP Segmentation 1 */
3906 case 56 ... 60: /* PCI space */
3907 case 80 ... 84: /* GMAC 1 */
3908 return 1;
3909
3910 default:
3911 return 0;
3912 }
3913}
3914
3869/* 3915/*
3870 * Returns copy of control register region 3916 * Returns copy of control register region
3871 * Note: ethtool_get_regs always provides full size (16k) buffer 3917 * Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3880,55 +3926,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3880 regs->version = 1; 3926 regs->version = 1;
3881 3927
3882 for (b = 0; b < 128; b++) { 3928 for (b = 0; b < 128; b++) {
3883 /* This complicated switch statement is to make sure and 3929 /* skip poisonous diagnostic ram region in block 3 */
3884 * only access regions that are unreserved. 3930 if (b == 3)
3885 * Some blocks are only valid on dual port cards.
3886 * and block 3 has some special diagnostic registers that
3887 * are poison.
3888 */
3889 switch (b) {
3890 case 3:
3891 /* skip diagnostic ram region */
3892 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); 3931 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
3893 break; 3932 else if (sky2_reg_access_ok(sky2->hw, b))
3894
3895 /* dual port cards only */
3896 case 5: /* Tx Arbiter 2 */
3897 case 9: /* RX2 */
3898 case 14 ... 15: /* TX2 */
3899 case 17: case 19: /* Ram Buffer 2 */
3900 case 22 ... 23: /* Tx Ram Buffer 2 */
3901 case 25: /* Rx MAC Fifo 1 */
3902 case 27: /* Tx MAC Fifo 2 */
3903 case 31: /* GPHY 2 */
3904 case 40 ... 47: /* Pattern Ram 2 */
3905 case 52: case 54: /* TCP Segmentation 2 */
3906 case 112 ... 116: /* GMAC 2 */
3907 if (sky2->hw->ports == 1)
3908 goto reserved;
3909 /* fall through */
3910 case 0: /* Control */
3911 case 2: /* Mac address */
3912 case 4: /* Tx Arbiter 1 */
3913 case 7: /* PCI express reg */
3914 case 8: /* RX1 */
3915 case 12 ... 13: /* TX1 */
3916 case 16: case 18:/* Rx Ram Buffer 1 */
3917 case 20 ... 21: /* Tx Ram Buffer 1 */
3918 case 24: /* Rx MAC Fifo 1 */
3919 case 26: /* Tx MAC Fifo 1 */
3920 case 28 ... 29: /* Descriptor and status unit */
3921 case 30: /* GPHY 1*/
3922 case 32 ... 39: /* Pattern Ram 1 */
3923 case 48: case 50: /* TCP Segmentation 1 */
3924 case 56 ... 60: /* PCI space */
3925 case 80 ... 84: /* GMAC 1 */
3926 memcpy_fromio(p, io, 128); 3933 memcpy_fromio(p, io, 128);
3927 break; 3934 else
3928 default:
3929reserved:
3930 memset(p, 0, 128); 3935 memset(p, 0, 128);
3931 }
3932 3936
3933 p += 128; 3937 p += 128;
3934 io += 128; 3938 io += 128;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 44ebbaa7457..3c5a4f52345 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -2017,10 +2017,8 @@ static int __devinit smc911x_probe(struct net_device *dev)
2017 "set using ifconfig\n", dev->name); 2017 "set using ifconfig\n", dev->name);
2018 } else { 2018 } else {
2019 /* Print the Ethernet address */ 2019 /* Print the Ethernet address */
2020 printk("%s: Ethernet addr: ", dev->name); 2020 printk("%s: Ethernet addr: %pM\n",
2021 for (i = 0; i < 5; i++) 2021 dev->name, dev->dev_addr);
2022 printk("%2.2x:", dev->dev_addr[i]);
2023 printk("%2.2x\n", dev->dev_addr[5]);
2024 } 2022 }
2025 2023
2026 if (lp->phy_type == 0) { 2024 if (lp->phy_type == 0) {
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3..1495a5dd4b4 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -80,7 +80,7 @@ struct smsc9420_pdata {
80 int last_carrier; 80 int last_carrier;
81}; 81};
82 82
83static const struct pci_device_id smsc9420_id_table[] = { 83static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
84 { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, 84 { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
85 { 0, } 85 { 0, }
86}; 86};
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bf..16191998ac6 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
72 72
73char spider_net_driver_name[] = "spidernet"; 73char spider_net_driver_name[] = "spidernet";
74 74
75static struct pci_device_id spider_net_pci_tbl[] = { 75static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, 76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
78 { 0, } 78 { 0, }
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index f9521136a86..d0556a9b456 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
301 CH_6915 = 0, 301 CH_6915 = 0,
302}; 302};
303 303
304static struct pci_device_id starfire_pci_tbl[] = { 304static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 }, 305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
306 { 0, } 306 { 0, }
307}; 307};
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7..fb287649a30 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -4,8 +4,9 @@ config STMMAC_ETH
4 select PHYLIB 4 select PHYLIB
5 depends on NETDEVICES && CPU_SUBTYPE_ST40 5 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help 6 help
7 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet 7 This is the driver for the Ethernet IPs are built around a
8 controllers. ST Ethernet IPs are built around a Synopsys IP Core. 8 Synopsys IP Core and fully tested on the STMicroelectronics
9 platforms.
9 10
10if STMMAC_ETH 11if STMMAC_ETH
11 12
@@ -32,7 +33,8 @@ config STMMAC_TIMER
32 default n 33 default n
33 help 34 help
34 Use an external timer for mitigating the number of network 35 Use an external timer for mitigating the number of network
35 interrupts. 36 interrupts. Currently, for SH architectures, it is possible
37 to use the TMU channel 2 and the SH-RTC device.
36 38
37choice 39choice
38 prompt "Select Timer device" 40 prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564df..c776af15fe1 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o 1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o 2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ 3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 mac100.o gmac.o $(stmmac-y) 4 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
5 dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e88..7267bcd43d0 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include "descs.h" 25#include "descs.h"
26#include <linux/io.h> 26#include <linux/netdevice.h>
27
28/* *********************************************
29 DMA CRS Control and Status Register Mapping
30 * *********************************************/
31#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
32#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
33#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
34#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
35#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
36#define DMA_STATUS 0x00001014 /* Status Register */
37#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
38#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
39#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
40#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
41#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
42
43/* ********************************
44 DMA Control register defines
45 * ********************************/
46#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
47#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
48
49/* **************************************
50 DMA Interrupt Enable register defines
51 * **************************************/
52/**** NORMAL INTERRUPT ****/
53#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
54#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
55#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
56#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
57#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
58
59#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
60 DMA_INTR_ENA_TIE)
61
62/**** ABNORMAL INTERRUPT ****/
63#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
64#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
65#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
66#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
67#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
68#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
69#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
70#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
71#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
72#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
73
74#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
75 DMA_INTR_ENA_UNE)
76
77/* DMA default interrupt mask */
78#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
79
80/* ****************************
81 * DMA Status register defines
82 * ****************************/
83#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
84#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
85#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
86#define DMA_STATUS_GMI 0x08000000
87#define DMA_STATUS_GLI 0x04000000
88#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
89#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
90#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
91#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
92#define DMA_STATUS_TS_SHIFT 20
93#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
94#define DMA_STATUS_RS_SHIFT 17
95#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
96#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
97#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
98#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
99#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
100#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
101#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
102#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
103#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
104#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
105#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
106#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
107#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
108#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
109#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
110
111/* Other defines */
112#define HASH_TABLE_SIZE 64
113#define PAUSE_TIME 0x200
114
115/* Flow Control defines */
116#define FLOW_OFF 0
117#define FLOW_RX 1
118#define FLOW_TX 2
119#define FLOW_AUTO (FLOW_TX | FLOW_RX)
120
121/* DMA STORE-AND-FORWARD Operation Mode */
122#define SF_DMA_MODE 1
123
124#define HW_CSUM 1
125#define NO_HW_CSUM 0
126
127/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
128#define BUF_SIZE_16KiB 16384
129#define BUF_SIZE_8KiB 8192
130#define BUF_SIZE_4KiB 4096
131#define BUF_SIZE_2KiB 2048
132
133/* Power Down and WOL */
134#define PMT_NOT_SUPPORTED 0
135#define PMT_SUPPORTED 1
136
137/* Common MAC defines */
138#define MAC_CTRL_REG 0x00000000 /* MAC Control */
139#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
140#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
141
142/* MAC Management Counters register */
143#define MMC_CONTROL 0x00000100 /* MMC Control */
144#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
145#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
146#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
147#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
148
149#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
150#define MMC_CONTROL_MAX_FRM_SHIFT 3
151#define MMC_CONTROL_MAX_FRAME 0x7FF
152 27
153struct stmmac_extra_stats { 28struct stmmac_extra_stats {
154 /* Transmit errors */ 29 /* Transmit errors */
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
198 unsigned long normal_irq_n; 73 unsigned long normal_irq_n;
199}; 74};
200 75
201/* GMAC core can compute the checksums in HW. */ 76#define HASH_TABLE_SIZE 64
202enum rx_frame_status { 77#define PAUSE_TIME 0x200
78
79/* Flow Control defines */
80#define FLOW_OFF 0
81#define FLOW_RX 1
82#define FLOW_TX 2
83#define FLOW_AUTO (FLOW_TX | FLOW_RX)
84
85#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
86
87#define HW_CSUM 1
88#define NO_HW_CSUM 0
89enum rx_frame_status { /* IPC status */
203 good_frame = 0, 90 good_frame = 0,
204 discard_frame = 1, 91 discard_frame = 1,
205 csum_none = 2, 92 csum_none = 2,
206}; 93};
207 94
208static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], 95enum tx_dma_irq_status {
209 unsigned int high, unsigned int low) 96 tx_hard_error = 1,
210{ 97 tx_hard_error_bump_tc = 2,
211 unsigned long data; 98 handle_tx_rx = 3,
212 99};
213 data = (addr[5] << 8) | addr[4];
214 writel(data, ioaddr + high);
215 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
216 writel(data, ioaddr + low);
217 100
218 return; 101/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
219} 102#define BUF_SIZE_16KiB 16384
103#define BUF_SIZE_8KiB 8192
104#define BUF_SIZE_4KiB 4096
105#define BUF_SIZE_2KiB 2048
220 106
221static inline void stmmac_get_mac_addr(unsigned long ioaddr, 107/* Power Down and WOL */
222 unsigned char *addr, unsigned int high, 108#define PMT_NOT_SUPPORTED 0
223 unsigned int low) 109#define PMT_SUPPORTED 1
224{
225 unsigned int hi_addr, lo_addr;
226 110
227 /* Read the MAC address from the hardware */ 111/* Common MAC defines */
228 hi_addr = readl(ioaddr + high); 112#define MAC_CTRL_REG 0x00000000 /* MAC Control */
229 lo_addr = readl(ioaddr + low); 113#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
114#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
230 115
231 /* Extract the MAC address from the high and low words */ 116/* MAC Management Counters register */
232 addr[0] = lo_addr & 0xff; 117#define MMC_CONTROL 0x00000100 /* MMC Control */
233 addr[1] = (lo_addr >> 8) & 0xff; 118#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
234 addr[2] = (lo_addr >> 16) & 0xff; 119#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
235 addr[3] = (lo_addr >> 24) & 0xff; 120#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
236 addr[4] = hi_addr & 0xff; 121#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
237 addr[5] = (hi_addr >> 8) & 0xff;
238 122
239 return; 123#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
240} 124#define MMC_CONTROL_MAX_FRM_SHIFT 3
125#define MMC_CONTROL_MAX_FRAME 0x7FF
241 126
242struct stmmac_ops { 127struct stmmac_desc_ops {
243 /* MAC core initialization */ 128 /* DMA RX descriptor ring initialization */
244 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
245 /* DMA core initialization */
246 int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
247 /* Dump MAC registers */
248 void (*dump_mac_regs) (unsigned long ioaddr);
249 /* Dump DMA registers */
250 void (*dump_dma_regs) (unsigned long ioaddr);
251 /* Set tx/rx threshold in the csr6 register
252 * An invalid value enables the store-and-forward mode */
253 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
254 /* To track extra statistic (if supported) */
255 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
256 unsigned long ioaddr);
257 /* RX descriptor ring initialization */
258 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, 129 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
259 int disable_rx_ic); 130 int disable_rx_ic);
260 /* TX descriptor ring initialization */ 131 /* DMA TX descriptor ring initialization */
261 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); 132 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
262 133
263 /* Invoked by the xmit function to prepare the tx descriptor */ 134 /* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
281 /* Get the buffer size from the descriptor */ 152 /* Get the buffer size from the descriptor */
282 int (*get_tx_len) (struct dma_desc *p); 153 int (*get_tx_len) (struct dma_desc *p);
283 /* Handle extra events on specific interrupts hw dependent */ 154 /* Handle extra events on specific interrupts hw dependent */
284 void (*host_irq_status) (unsigned long ioaddr);
285 int (*get_rx_owner) (struct dma_desc *p); 155 int (*get_rx_owner) (struct dma_desc *p);
286 void (*set_rx_owner) (struct dma_desc *p); 156 void (*set_rx_owner) (struct dma_desc *p);
287 /* Get the receive frame size */ 157 /* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
289 /* Return the reception status looking at the RDES1 */ 159 /* Return the reception status looking at the RDES1 */
290 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 160 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
291 struct dma_desc *p); 161 struct dma_desc *p);
162};
163
164struct stmmac_dma_ops {
165 /* DMA core initialization */
166 int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
167 /* Dump DMA registers */
168 void (*dump_regs) (unsigned long ioaddr);
169 /* Set tx/rx threshold in the csr6 register
170 * An invalid value enables the store-and-forward mode */
171 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
172 /* To track extra statistic (if supported) */
173 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
174 unsigned long ioaddr);
175 void (*enable_dma_transmission) (unsigned long ioaddr);
176 void (*enable_dma_irq) (unsigned long ioaddr);
177 void (*disable_dma_irq) (unsigned long ioaddr);
178 void (*start_tx) (unsigned long ioaddr);
179 void (*stop_tx) (unsigned long ioaddr);
180 void (*start_rx) (unsigned long ioaddr);
181 void (*stop_rx) (unsigned long ioaddr);
182 int (*dma_interrupt) (unsigned long ioaddr,
183 struct stmmac_extra_stats *x);
184};
185
186struct stmmac_ops {
187 /* MAC core initialization */
188 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
189 /* Dump MAC registers */
190 void (*dump_regs) (unsigned long ioaddr);
191 /* Handle extra events on specific interrupts hw dependent */
192 void (*host_irq_status) (unsigned long ioaddr);
292 /* Multicast filter setting */ 193 /* Multicast filter setting */
293 void (*set_filter) (struct net_device *dev); 194 void (*set_filter) (struct net_device *dev);
294 /* Flow control setting */ 195 /* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
298 void (*pmt) (unsigned long ioaddr, unsigned long mode); 199 void (*pmt) (unsigned long ioaddr, unsigned long mode);
299 /* Set/Get Unicast MAC addresses */ 200 /* Set/Get Unicast MAC addresses */
300 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, 201 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
301 unsigned int reg_n); 202 unsigned int reg_n);
302 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, 203 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
303 unsigned int reg_n); 204 unsigned int reg_n);
304}; 205};
305 206
306struct mac_link { 207struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
314 unsigned int data; /* MII Data */ 215 unsigned int data; /* MII Data */
315}; 216};
316 217
317struct hw_cap { 218struct mac_device_info {
318 unsigned int version; /* Core Version register (GMAC) */ 219 struct stmmac_ops *mac;
319 unsigned int pmt; /* Power-Down mode (GMAC) */ 220 struct stmmac_desc_ops *desc;
221 struct stmmac_dma_ops *dma;
222 unsigned int pmt; /* support Power-Down */
223 struct mii_regs mii; /* MII register Addresses */
320 struct mac_link link; 224 struct mac_link link;
321 struct mii_regs mii;
322}; 225};
323 226
324struct mac_device_info { 227struct mac_device_info *dwmac1000_setup(unsigned long addr);
325 struct hw_cap hw; 228struct mac_device_info *dwmac100_setup(unsigned long addr);
326 struct stmmac_ops *ops;
327};
328 229
329struct mac_device_info *gmac_setup(unsigned long addr); 230extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
330struct mac_device_info *mac100_setup(unsigned long addr); 231 unsigned int high, unsigned int low);
232extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
233 unsigned int high, unsigned int low);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e5..63a03e26469 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
1/******************************************************************************* 1/*******************************************************************************
2 Header File to describe the DMA descriptors 2 Header File to describe the DMA descriptors.
3 Use enhanced descriptors in case of GMAC Cores. 3 Enhanced descriptors have been in case of DWMAC1000 Cores.
4 4
5 This program is free software; you can redistribute it and/or modify it 5 This program is free software; you can redistribute it and/or modify it
6 under the terms and conditions of the GNU General Public License, 6 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062..82dde774d4c 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,23 @@
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h>
30#include <linux/crc32.h> 29#include <linux/crc32.h>
31#include <linux/mii.h> 30#include <linux/mii.h>
32#include <linux/phy.h> 31#include <linux/phy.h>
33 32
34#include "common.h" 33#include "common.h"
35#include "mac100.h" 34#include "dwmac100.h"
35#include "dwmac_dma.h"
36 36
37#undef MAC100_DEBUG 37#undef DWMAC100_DEBUG
38/*#define MAC100_DEBUG*/ 38/*#define DWMAC100_DEBUG*/
39#ifdef MAC100_DEBUG 39#ifdef DWMAC100_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args) 40#define DBG(fmt, args...) printk(fmt, ## args)
41#else 41#else
42#define DBG(fmt, args...) do { } while (0) 42#define DBG(fmt, args...) do { } while (0)
43#endif 43#endif
44 44
45static void mac100_core_init(unsigned long ioaddr) 45static void dwmac100_core_init(unsigned long ioaddr)
46{ 46{
47 u32 value = readl(ioaddr + MAC_CONTROL); 47 u32 value = readl(ioaddr + MAC_CONTROL);
48 48
@@ -54,43 +54,43 @@ static void mac100_core_init(unsigned long ioaddr)
54 return; 54 return;
55} 55}
56 56
57static void mac100_dump_mac_regs(unsigned long ioaddr) 57static void dwmac100_dump_mac_regs(unsigned long ioaddr)
58{ 58{
59 pr_info("\t----------------------------------------------\n" 59 pr_info("\t----------------------------------------------\n"
60 "\t MAC100 CSR (base addr = 0x%8x)\n" 60 "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
61 "\t----------------------------------------------\n", 61 "\t----------------------------------------------\n",
62 (unsigned int)ioaddr); 62 (unsigned int)ioaddr);
63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, 63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
64 readl(ioaddr + MAC_CONTROL)); 64 readl(ioaddr + MAC_CONTROL));
65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, 65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
66 readl(ioaddr + MAC_ADDR_HIGH)); 66 readl(ioaddr + MAC_ADDR_HIGH));
67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, 67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
68 readl(ioaddr + MAC_ADDR_LOW)); 68 readl(ioaddr + MAC_ADDR_LOW));
69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", 69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); 70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", 71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); 72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
73 pr_info("\tflow control (offset 0x%x): 0x%08x\n", 73 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); 74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, 75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
76 readl(ioaddr + MAC_VLAN1)); 76 readl(ioaddr + MAC_VLAN1));
77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, 77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
78 readl(ioaddr + MAC_VLAN2)); 78 readl(ioaddr + MAC_VLAN2));
79 pr_info("\n\tMAC management counter registers\n"); 79 pr_info("\n\tMAC management counter registers\n");
80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", 80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); 81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", 82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); 83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", 84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); 85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", 86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); 87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", 88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); 89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
90 return; 90 return;
91} 91}
92 92
93static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, 93static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
94 u32 dma_rx) 94 u32 dma_rx)
95{ 95{
96 u32 value = readl(ioaddr + DMA_BUS_MODE); 96 u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +117,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
117/* Store and Forward capability is not used at all.. 117/* Store and Forward capability is not used at all..
118 * The transmit threshold can be programmed by 118 * The transmit threshold can be programmed by
119 * setting the TTC bits in the DMA control register.*/ 119 * setting the TTC bits in the DMA control register.*/
120static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode, 120static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
121 int rxmode) 121 int rxmode)
122{ 122{
123 u32 csr6 = readl(ioaddr + DMA_CONTROL); 123 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +134,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
134 return; 134 return;
135} 135}
136 136
137static void mac100_dump_dma_regs(unsigned long ioaddr) 137static void dwmac100_dump_dma_regs(unsigned long ioaddr)
138{ 138{
139 int i; 139 int i;
140 140
141 DBG(KERN_DEBUG "MAC100 DMA CSR \n"); 141 DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
142 for (i = 0; i < 9; i++) 142 for (i = 0; i < 9; i++)
143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, 143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
144 (DMA_BUS_MODE + i * 4), 144 (DMA_BUS_MODE + i * 4),
@@ -151,8 +151,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
151} 151}
152 152
153/* DMA controller has two counters to track the number of 153/* DMA controller has two counters to track the number of
154 the receive missed frames. */ 154 * the receive missed frames. */
155static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 155static void dwmac100_dma_diagnostic_fr(void *data,
156 struct stmmac_extra_stats *x,
156 unsigned long ioaddr) 157 unsigned long ioaddr)
157{ 158{
158 struct net_device_stats *stats = (struct net_device_stats *)data; 159 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +182,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
181 return; 182 return;
182} 183}
183 184
184static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, 185static int dwmac100_get_tx_frame_status(void *data,
186 struct stmmac_extra_stats *x,
185 struct dma_desc *p, unsigned long ioaddr) 187 struct dma_desc *p, unsigned long ioaddr)
186{ 188{
187 int ret = 0; 189 int ret = 0;
@@ -217,7 +219,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
217 return ret; 219 return ret;
218} 220}
219 221
220static int mac100_get_tx_len(struct dma_desc *p) 222static int dwmac100_get_tx_len(struct dma_desc *p)
221{ 223{
222 return p->des01.tx.buffer1_size; 224 return p->des01.tx.buffer1_size;
223} 225}
@@ -226,14 +228,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
226 * and, if required, updates the multicast statistics. 228 * and, if required, updates the multicast statistics.
227 * In case of success, it returns csum_none becasue the device 229 * In case of success, it returns csum_none becasue the device
228 * is not able to compute the csum in HW. */ 230 * is not able to compute the csum in HW. */
229static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, 231static int dwmac100_get_rx_frame_status(void *data,
232 struct stmmac_extra_stats *x,
230 struct dma_desc *p) 233 struct dma_desc *p)
231{ 234{
232 int ret = csum_none; 235 int ret = csum_none;
233 struct net_device_stats *stats = (struct net_device_stats *)data; 236 struct net_device_stats *stats = (struct net_device_stats *)data;
234 237
235 if (unlikely(p->des01.rx.last_descriptor == 0)) { 238 if (unlikely(p->des01.rx.last_descriptor == 0)) {
236 pr_warning("mac100 Error: Oversized Ethernet " 239 pr_warning("dwmac100 Error: Oversized Ethernet "
237 "frame spanned multiple buffers\n"); 240 "frame spanned multiple buffers\n");
238 stats->rx_length_errors++; 241 stats->rx_length_errors++;
239 return discard_frame; 242 return discard_frame;
@@ -276,24 +279,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
276 return ret; 279 return ret;
277} 280}
278 281
279static void mac100_irq_status(unsigned long ioaddr) 282static void dwmac100_irq_status(unsigned long ioaddr)
280{ 283{
281 return; 284 return;
282} 285}
283 286
284static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, 287static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
285 unsigned int reg_n) 288 unsigned int reg_n)
286{ 289{
287 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 290 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
288} 291}
289 292
290static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, 293static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
291 unsigned int reg_n) 294 unsigned int reg_n)
292{ 295{
293 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 296 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
294} 297}
295 298
296static void mac100_set_filter(struct net_device *dev) 299static void dwmac100_set_filter(struct net_device *dev)
297{ 300{
298 unsigned long ioaddr = dev->base_addr; 301 unsigned long ioaddr = dev->base_addr;
299 u32 value = readl(ioaddr + MAC_CONTROL); 302 u32 value = readl(ioaddr + MAC_CONTROL);
@@ -319,8 +322,8 @@ static void mac100_set_filter(struct net_device *dev)
319 /* Perfect filter mode for physical address and Hash 322 /* Perfect filter mode for physical address and Hash
320 filter for multicast */ 323 filter for multicast */
321 value |= MAC_CONTROL_HP; 324 value |= MAC_CONTROL_HP;
322 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF 325 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
323 | MAC_CONTROL_HO); 326 MAC_CONTROL_IF | MAC_CONTROL_HO);
324 327
325 memset(mc_filter, 0, sizeof(mc_filter)); 328 memset(mc_filter, 0, sizeof(mc_filter));
326 for (i = 0, mclist = dev->mc_list; 329 for (i = 0, mclist = dev->mc_list;
@@ -347,7 +350,7 @@ static void mac100_set_filter(struct net_device *dev)
347 return; 350 return;
348} 351}
349 352
350static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, 353static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
351 unsigned int fc, unsigned int pause_time) 354 unsigned int fc, unsigned int pause_time)
352{ 355{
353 unsigned int flow = MAC_FLOW_CTRL_ENABLE; 356 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +362,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
359 return; 362 return;
360} 363}
361 364
362/* No PMT module supported in our SoC for the Ethernet Controller. */ 365/* No PMT module supported for this Ethernet Controller.
363static void mac100_pmt(unsigned long ioaddr, unsigned long mode) 366 * Tested on ST platforms only.
367 */
368static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
364{ 369{
365 return; 370 return;
366} 371}
367 372
368static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size, 373static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
369 int disable_rx_ic) 374 int disable_rx_ic)
370{ 375{
371 int i; 376 int i;
@@ -381,7 +386,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
381 return; 386 return;
382} 387}
383 388
384static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 389static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
385{ 390{
386 int i; 391 int i;
387 for (i = 0; i < ring_size; i++) { 392 for (i = 0; i < ring_size; i++) {
@@ -393,32 +398,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
393 return; 398 return;
394} 399}
395 400
396static int mac100_get_tx_owner(struct dma_desc *p) 401static int dwmac100_get_tx_owner(struct dma_desc *p)
397{ 402{
398 return p->des01.tx.own; 403 return p->des01.tx.own;
399} 404}
400 405
401static int mac100_get_rx_owner(struct dma_desc *p) 406static int dwmac100_get_rx_owner(struct dma_desc *p)
402{ 407{
403 return p->des01.rx.own; 408 return p->des01.rx.own;
404} 409}
405 410
406static void mac100_set_tx_owner(struct dma_desc *p) 411static void dwmac100_set_tx_owner(struct dma_desc *p)
407{ 412{
408 p->des01.tx.own = 1; 413 p->des01.tx.own = 1;
409} 414}
410 415
411static void mac100_set_rx_owner(struct dma_desc *p) 416static void dwmac100_set_rx_owner(struct dma_desc *p)
412{ 417{
413 p->des01.rx.own = 1; 418 p->des01.rx.own = 1;
414} 419}
415 420
416static int mac100_get_tx_ls(struct dma_desc *p) 421static int dwmac100_get_tx_ls(struct dma_desc *p)
417{ 422{
418 return p->des01.tx.last_segment; 423 return p->des01.tx.last_segment;
419} 424}
420 425
421static void mac100_release_tx_desc(struct dma_desc *p) 426static void dwmac100_release_tx_desc(struct dma_desc *p)
422{ 427{
423 int ter = p->des01.tx.end_ring; 428 int ter = p->des01.tx.end_ring;
424 429
@@ -444,74 +449,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
444 return; 449 return;
445} 450}
446 451
447static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 452static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
448 int csum_flag) 453 int csum_flag)
449{ 454{
450 p->des01.tx.first_segment = is_fs; 455 p->des01.tx.first_segment = is_fs;
451 p->des01.tx.buffer1_size = len; 456 p->des01.tx.buffer1_size = len;
452} 457}
453 458
454static void mac100_clear_tx_ic(struct dma_desc *p) 459static void dwmac100_clear_tx_ic(struct dma_desc *p)
455{ 460{
456 p->des01.tx.interrupt = 0; 461 p->des01.tx.interrupt = 0;
457} 462}
458 463
459static void mac100_close_tx_desc(struct dma_desc *p) 464static void dwmac100_close_tx_desc(struct dma_desc *p)
460{ 465{
461 p->des01.tx.last_segment = 1; 466 p->des01.tx.last_segment = 1;
462 p->des01.tx.interrupt = 1; 467 p->des01.tx.interrupt = 1;
463} 468}
464 469
465static int mac100_get_rx_frame_len(struct dma_desc *p) 470static int dwmac100_get_rx_frame_len(struct dma_desc *p)
466{ 471{
467 return p->des01.rx.frame_length; 472 return p->des01.rx.frame_length;
468} 473}
469 474
470struct stmmac_ops mac100_driver = { 475struct stmmac_ops dwmac100_ops = {
471 .core_init = mac100_core_init, 476 .core_init = dwmac100_core_init,
472 .dump_mac_regs = mac100_dump_mac_regs, 477 .dump_regs = dwmac100_dump_mac_regs,
473 .dma_init = mac100_dma_init, 478 .host_irq_status = dwmac100_irq_status,
474 .dump_dma_regs = mac100_dump_dma_regs, 479 .set_filter = dwmac100_set_filter,
475 .dma_mode = mac100_dma_operation_mode, 480 .flow_ctrl = dwmac100_flow_ctrl,
476 .dma_diagnostic_fr = mac100_dma_diagnostic_fr, 481 .pmt = dwmac100_pmt,
477 .tx_status = mac100_get_tx_frame_status, 482 .set_umac_addr = dwmac100_set_umac_addr,
478 .rx_status = mac100_get_rx_frame_status, 483 .get_umac_addr = dwmac100_get_umac_addr,
479 .get_tx_len = mac100_get_tx_len,
480 .set_filter = mac100_set_filter,
481 .flow_ctrl = mac100_flow_ctrl,
482 .pmt = mac100_pmt,
483 .init_rx_desc = mac100_init_rx_desc,
484 .init_tx_desc = mac100_init_tx_desc,
485 .get_tx_owner = mac100_get_tx_owner,
486 .get_rx_owner = mac100_get_rx_owner,
487 .release_tx_desc = mac100_release_tx_desc,
488 .prepare_tx_desc = mac100_prepare_tx_desc,
489 .clear_tx_ic = mac100_clear_tx_ic,
490 .close_tx_desc = mac100_close_tx_desc,
491 .get_tx_ls = mac100_get_tx_ls,
492 .set_tx_owner = mac100_set_tx_owner,
493 .set_rx_owner = mac100_set_rx_owner,
494 .get_rx_frame_len = mac100_get_rx_frame_len,
495 .host_irq_status = mac100_irq_status,
496 .set_umac_addr = mac100_set_umac_addr,
497 .get_umac_addr = mac100_get_umac_addr,
498}; 484};
499 485
500struct mac_device_info *mac100_setup(unsigned long ioaddr) 486struct stmmac_dma_ops dwmac100_dma_ops = {
487 .init = dwmac100_dma_init,
488 .dump_regs = dwmac100_dump_dma_regs,
489 .dma_mode = dwmac100_dma_operation_mode,
490 .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
491 .enable_dma_transmission = dwmac_enable_dma_transmission,
492 .enable_dma_irq = dwmac_enable_dma_irq,
493 .disable_dma_irq = dwmac_disable_dma_irq,
494 .start_tx = dwmac_dma_start_tx,
495 .stop_tx = dwmac_dma_stop_tx,
496 .start_rx = dwmac_dma_start_rx,
497 .stop_rx = dwmac_dma_stop_rx,
498 .dma_interrupt = dwmac_dma_interrupt,
499};
500
501struct stmmac_desc_ops dwmac100_desc_ops = {
502 .tx_status = dwmac100_get_tx_frame_status,
503 .rx_status = dwmac100_get_rx_frame_status,
504 .get_tx_len = dwmac100_get_tx_len,
505 .init_rx_desc = dwmac100_init_rx_desc,
506 .init_tx_desc = dwmac100_init_tx_desc,
507 .get_tx_owner = dwmac100_get_tx_owner,
508 .get_rx_owner = dwmac100_get_rx_owner,
509 .release_tx_desc = dwmac100_release_tx_desc,
510 .prepare_tx_desc = dwmac100_prepare_tx_desc,
511 .clear_tx_ic = dwmac100_clear_tx_ic,
512 .close_tx_desc = dwmac100_close_tx_desc,
513 .get_tx_ls = dwmac100_get_tx_ls,
514 .set_tx_owner = dwmac100_set_tx_owner,
515 .set_rx_owner = dwmac100_set_rx_owner,
516 .get_rx_frame_len = dwmac100_get_rx_frame_len,
517};
518
519struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
501{ 520{
502 struct mac_device_info *mac; 521 struct mac_device_info *mac;
503 522
504 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 523 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
505 524
506 pr_info("\tMAC 10/100\n"); 525 pr_info("\tDWMAC100\n");
526
527 mac->mac = &dwmac100_ops;
528 mac->desc = &dwmac100_desc_ops;
529 mac->dma = &dwmac100_dma_ops;
507 530
508 mac->ops = &mac100_driver; 531 mac->pmt = PMT_NOT_SUPPORTED;
509 mac->hw.pmt = PMT_NOT_SUPPORTED; 532 mac->link.port = MAC_CONTROL_PS;
510 mac->hw.link.port = MAC_CONTROL_PS; 533 mac->link.duplex = MAC_CONTROL_F;
511 mac->hw.link.duplex = MAC_CONTROL_F; 534 mac->link.speed = 0;
512 mac->hw.link.speed = 0; 535 mac->mii.addr = MAC_MII_ADDR;
513 mac->hw.mii.addr = MAC_MII_ADDR; 536 mac->mii.data = MAC_MII_DATA;
514 mac->hw.mii.data = MAC_MII_DATA;
515 537
516 return mac; 538 return mac;
517} 539}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004..0f8f110d004 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a14..62dca0e384e 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#include <linux/phy.h>
24#include "common.h"
25
23#define GMAC_CONTROL 0x00000000 /* Configuration */ 26#define GMAC_CONTROL 0x00000000 /* Configuration */
24#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ 27#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
25#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ 28#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
32#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ 35#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
33 36
34#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ 37#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
35enum gmac_irq_status { 38enum dwmac1000_irq_status {
36 time_stamp_irq = 0x0200, 39 time_stamp_irq = 0x0200,
37 mmc_rx_csum_offload_irq = 0x0080, 40 mmc_rx_csum_offload_irq = 0x0080,
38 mmc_tx_irq = 0x0040, 41 mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
202#define GMAC_MMC_RX_INTR 0x104 205#define GMAC_MMC_RX_INTR 0x104
203#define GMAC_MMC_TX_INTR 0x108 206#define GMAC_MMC_TX_INTR 0x108
204#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 207#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
208
209#undef DWMAC1000_DEBUG
210/* #define DWMAC1000__DEBUG */
211#undef FRAME_FILTER_DEBUG
212/* #define FRAME_FILTER_DEBUG */
213#ifdef DWMAC1000__DEBUG
214#define DBG(fmt, args...) printk(fmt, ## args)
215#else
216#define DBG(fmt, args...) do { } while (0)
217#endif
218
219extern struct stmmac_dma_ops dwmac1000_dma_ops;
220extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 00000000000..d812e9cdb3d
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,245 @@
1/*******************************************************************************
2 This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code.
5
6 This only implements the mac core functions for this chip.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/crc32.h>
30#include "dwmac1000.h"
31
32static void dwmac1000_core_init(unsigned long ioaddr)
33{
34 u32 value = readl(ioaddr + GMAC_CONTROL);
35 value |= GMAC_CORE_INIT;
36 writel(value, ioaddr + GMAC_CONTROL);
37
38 /* STBus Bridge Configuration */
39 /*writel(0xc5608, ioaddr + 0x00007000);*/
40
41 /* Freeze MMC counters */
42 writel(0x8, ioaddr + GMAC_MMC_CTRL);
43 /* Mask GMAC interrupts */
44 writel(0x207, ioaddr + GMAC_INT_MASK);
45
46#ifdef STMMAC_VLAN_TAG_USED
47 /* Tag detection without filtering */
48 writel(0x0, ioaddr + GMAC_VLAN_TAG);
49#endif
50 return;
51}
52
53static void dwmac1000_dump_regs(unsigned long ioaddr)
54{
55 int i;
56 pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
57
58 for (i = 0; i < 55; i++) {
59 int offset = i * 4;
60 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
61 offset, readl(ioaddr + offset));
62 }
63 return;
64}
65
66static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
67 unsigned int reg_n)
68{
69 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
70 GMAC_ADDR_LOW(reg_n));
71}
72
73static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
74 unsigned int reg_n)
75{
76 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
77 GMAC_ADDR_LOW(reg_n));
78}
79
80static void dwmac1000_set_filter(struct net_device *dev)
81{
82 unsigned long ioaddr = dev->base_addr;
83 unsigned int value = 0;
84
85 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
86 __func__, dev->mc_count, netdev_uc_count(dev));
87
88 if (dev->flags & IFF_PROMISC)
89 value = GMAC_FRAME_FILTER_PR;
90 else if ((dev->mc_count > HASH_TABLE_SIZE)
91 || (dev->flags & IFF_ALLMULTI)) {
92 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
93 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
94 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
95 } else if (dev->mc_count > 0) {
96 int i;
97 u32 mc_filter[2];
98 struct dev_mc_list *mclist;
99
100 /* Hash filter for multicast */
101 value = GMAC_FRAME_FILTER_HMC;
102
103 memset(mc_filter, 0, sizeof(mc_filter));
104 for (i = 0, mclist = dev->mc_list;
105 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
106 /* The upper 6 bits of the calculated CRC are used to
107 index the contens of the hash table */
108 int bit_nr =
109 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
110 /* The most significant bit determines the register to
111 * use (H/L) while the other 5 bits determine the bit
112 * within the register. */
113 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
114 }
115 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
116 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
117 }
118
119 /* Handle multiple unicast addresses (perfect filtering)*/
120 if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
121 /* Switch to promiscuous mode is more than 16 addrs
122 are required */
123 value |= GMAC_FRAME_FILTER_PR;
124 else {
125 int reg = 1;
126 struct netdev_hw_addr *ha;
127
128 netdev_for_each_uc_addr(ha, dev) {
129 dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
130 reg++;
131 }
132 }
133
134#ifdef FRAME_FILTER_DEBUG
135 /* Enable Receive all mode (to debug filtering_fail errors) */
136 value |= GMAC_FRAME_FILTER_RA;
137#endif
138 writel(value, ioaddr + GMAC_FRAME_FILTER);
139
140 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
141 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
142 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
143
144 return;
145}
146
147static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
148 unsigned int fc, unsigned int pause_time)
149{
150 unsigned int flow = 0;
151
152 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
153 if (fc & FLOW_RX) {
154 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
155 flow |= GMAC_FLOW_CTRL_RFE;
156 }
157 if (fc & FLOW_TX) {
158 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
159 flow |= GMAC_FLOW_CTRL_TFE;
160 }
161
162 if (duplex) {
163 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
164 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
165 }
166
167 writel(flow, ioaddr + GMAC_FLOW_CTRL);
168 return;
169}
170
171static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
172{
173 unsigned int pmt = 0;
174
175 if (mode == WAKE_MAGIC) {
176 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
177 pmt |= power_down | magic_pkt_en;
178 } else if (mode == WAKE_UCAST) {
179 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
180 pmt |= global_unicast;
181 }
182
183 writel(pmt, ioaddr + GMAC_PMT);
184 return;
185}
186
187
188static void dwmac1000_irq_status(unsigned long ioaddr)
189{
190 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
191
192 /* Not used events (e.g. MMC interrupts) are not handled. */
193 if ((intr_status & mmc_tx_irq))
194 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
195 readl(ioaddr + GMAC_MMC_TX_INTR));
196 if (unlikely(intr_status & mmc_rx_irq))
197 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
198 readl(ioaddr + GMAC_MMC_RX_INTR));
199 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
200 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
201 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
202 if (unlikely(intr_status & pmt_irq)) {
203 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
204 /* clear the PMT bits 5 and 6 by reading the PMT
205 * status register. */
206 readl(ioaddr + GMAC_PMT);
207 }
208
209 return;
210}
211
212struct stmmac_ops dwmac1000_ops = {
213 .core_init = dwmac1000_core_init,
214 .dump_regs = dwmac1000_dump_regs,
215 .host_irq_status = dwmac1000_irq_status,
216 .set_filter = dwmac1000_set_filter,
217 .flow_ctrl = dwmac1000_flow_ctrl,
218 .pmt = dwmac1000_pmt,
219 .set_umac_addr = dwmac1000_set_umac_addr,
220 .get_umac_addr = dwmac1000_get_umac_addr,
221};
222
223struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
224{
225 struct mac_device_info *mac;
226 u32 uid = readl(ioaddr + GMAC_VERSION);
227
228 pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
229 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
230
231 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
232
233 mac->mac = &dwmac1000_ops;
234 mac->desc = &dwmac1000_desc_ops;
235 mac->dma = &dwmac1000_dma_ops;
236
237 mac->pmt = PMT_SUPPORTED;
238 mac->link.port = GMAC_CONTROL_PS;
239 mac->link.duplex = GMAC_CONTROL_DM;
240 mac->link.speed = GMAC_CONTROL_FES;
241 mac->mii.addr = GMAC_MII_ADDR;
242 mac->mii.data = GMAC_MII_DATA;
243
244 return mac;
245}
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee6895..68245508e2d 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for 3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code. 4 developing this code.
5 5
6 This contains the functions to handle the dma and descriptors.
7
6 Copyright (C) 2007-2009 STMicroelectronics Ltd 8 Copyright (C) 2007-2009 STMicroelectronics Ltd
7 9
8 This program is free software; you can redistribute it and/or modify it 10 This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
24 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/ 27*******************************************************************************/
26 28
27#include <linux/netdevice.h> 29#include "dwmac1000.h"
28#include <linux/crc32.h> 30#include "dwmac_dma.h"
29#include <linux/mii.h>
30#include <linux/phy.h>
31
32#include "stmmac.h"
33#include "gmac.h"
34
35#undef GMAC_DEBUG
36/*#define GMAC_DEBUG*/
37#undef FRAME_FILTER_DEBUG
38/*#define FRAME_FILTER_DEBUG*/
39#ifdef GMAC_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44 31
45static void gmac_dump_regs(unsigned long ioaddr) 32static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
46{ 33 u32 dma_rx)
47 int i;
48 pr_info("\t----------------------------------------------\n"
49 "\t GMAC registers (base addr = 0x%8x)\n"
50 "\t----------------------------------------------\n",
51 (unsigned int)ioaddr);
52
53 for (i = 0; i < 55; i++) {
54 int offset = i * 4;
55 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
56 offset, readl(ioaddr + offset));
57 }
58 return;
59}
60
61static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
62{ 34{
63 u32 value = readl(ioaddr + DMA_BUS_MODE); 35 u32 value = readl(ioaddr + DMA_BUS_MODE);
64 /* DMA SW reset */ 36 /* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
87} 59}
88 60
89/* Transmit FIFO flush operation */ 61/* Transmit FIFO flush operation */
90static void gmac_flush_tx_fifo(unsigned long ioaddr) 62static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
91{ 63{
92 u32 csr6 = readl(ioaddr + DMA_CONTROL); 64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
93 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); 65 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
95 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); 67 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
96} 68}
97 69
98static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode, 70static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
99 int rxmode) 71 int rxmode)
100{ 72{
101 u32 csr6 = readl(ioaddr + DMA_CONTROL); 73 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
148} 120}
149 121
150/* Not yet implemented --- no RMON module */ 122/* Not yet implemented --- no RMON module */
151static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 123static void dwmac1000_dma_diagnostic_fr(void *data,
152 unsigned long ioaddr) 124 struct stmmac_extra_stats *x, unsigned long ioaddr)
153{ 125{
154 return; 126 return;
155} 127}
156 128
157static void gmac_dump_dma_regs(unsigned long ioaddr) 129static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
158{ 130{
159 int i; 131 int i;
160 pr_info(" DMA registers\n"); 132 pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
169 return; 141 return;
170} 142}
171 143
172static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, 144static int dwmac1000_get_tx_frame_status(void *data,
173 struct dma_desc *p, unsigned long ioaddr) 145 struct stmmac_extra_stats *x,
146 struct dma_desc *p, unsigned long ioaddr)
174{ 147{
175 int ret = 0; 148 int ret = 0;
176 struct net_device_stats *stats = (struct net_device_stats *)data; 149 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
185 if (unlikely(p->des01.etx.frame_flushed)) { 158 if (unlikely(p->des01.etx.frame_flushed)) {
186 DBG(KERN_ERR "\tframe_flushed error\n"); 159 DBG(KERN_ERR "\tframe_flushed error\n");
187 x->tx_frame_flushed++; 160 x->tx_frame_flushed++;
188 gmac_flush_tx_fifo(ioaddr); 161 dwmac1000_flush_tx_fifo(ioaddr);
189 } 162 }
190 163
191 if (unlikely(p->des01.etx.loss_carrier)) { 164 if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
213 186
214 if (unlikely(p->des01.etx.underflow_error)) { 187 if (unlikely(p->des01.etx.underflow_error)) {
215 DBG(KERN_ERR "\tunderflow error\n"); 188 DBG(KERN_ERR "\tunderflow error\n");
216 gmac_flush_tx_fifo(ioaddr); 189 dwmac1000_flush_tx_fifo(ioaddr);
217 x->tx_underflow++; 190 x->tx_underflow++;
218 } 191 }
219 192
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
225 if (unlikely(p->des01.etx.payload_error)) { 198 if (unlikely(p->des01.etx.payload_error)) {
226 DBG(KERN_ERR "\tAddr/Payload csum error\n"); 199 DBG(KERN_ERR "\tAddr/Payload csum error\n");
227 x->tx_payload_error++; 200 x->tx_payload_error++;
228 gmac_flush_tx_fifo(ioaddr); 201 dwmac1000_flush_tx_fifo(ioaddr);
229 } 202 }
230 203
231 ret = -1; 204 ret = -1;
@@ -245,12 +218,12 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
245 return ret; 218 return ret;
246} 219}
247 220
248static int gmac_get_tx_len(struct dma_desc *p) 221static int dwmac1000_get_tx_len(struct dma_desc *p)
249{ 222{
250 return p->des01.etx.buffer1_size; 223 return p->des01.etx.buffer1_size;
251} 224}
252 225
253static int gmac_coe_rdes0(int ipc_err, int type, int payload_err) 226static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
254{ 227{
255 int ret = good_frame; 228 int ret = good_frame;
256 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; 229 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
293 return ret; 266 return ret;
294} 267}
295 268
296static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, 269static int dwmac1000_get_rx_frame_status(void *data,
297 struct dma_desc *p) 270 struct stmmac_extra_stats *x, struct dma_desc *p)
298{ 271{
299 int ret = good_frame; 272 int ret = good_frame;
300 struct net_device_stats *stats = (struct net_device_stats *)data; 273 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
339 * It doesn't match with the information reported into the databook. 312 * It doesn't match with the information reported into the databook.
340 * At any rate, we need to understand if the CSUM hw computation is ok 313 * At any rate, we need to understand if the CSUM hw computation is ok
341 * and report this info to the upper layers. */ 314 * and report this info to the upper layers. */
342 ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error, 315 ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
343 p->des01.erx.frame_type, p->des01.erx.payload_csum_error); 316 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
344 317
345 if (unlikely(p->des01.erx.dribbling)) { 318 if (unlikely(p->des01.erx.dribbling)) {
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
370 return ret; 343 return ret;
371} 344}
372 345
373static void gmac_irq_status(unsigned long ioaddr) 346static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
374{
375 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
376
377 /* Not used events (e.g. MMC interrupts) are not handled. */
378 if ((intr_status & mmc_tx_irq))
379 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
380 readl(ioaddr + GMAC_MMC_TX_INTR));
381 if (unlikely(intr_status & mmc_rx_irq))
382 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
383 readl(ioaddr + GMAC_MMC_RX_INTR));
384 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
385 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
386 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
387 if (unlikely(intr_status & pmt_irq)) {
388 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
389 /* clear the PMT bits 5 and 6 by reading the PMT
390 * status register. */
391 readl(ioaddr + GMAC_PMT);
392 }
393
394 return;
395}
396
397static void gmac_core_init(unsigned long ioaddr)
398{
399 u32 value = readl(ioaddr + GMAC_CONTROL);
400 value |= GMAC_CORE_INIT;
401 writel(value, ioaddr + GMAC_CONTROL);
402
403 /* STBus Bridge Configuration */
404 /*writel(0xc5608, ioaddr + 0x00007000);*/
405
406 /* Freeze MMC counters */
407 writel(0x8, ioaddr + GMAC_MMC_CTRL);
408 /* Mask GMAC interrupts */
409 writel(0x207, ioaddr + GMAC_INT_MASK);
410
411#ifdef STMMAC_VLAN_TAG_USED
412 /* Tag detection without filtering */
413 writel(0x0, ioaddr + GMAC_VLAN_TAG);
414#endif
415 return;
416}
417
418static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
419 unsigned int reg_n)
420{
421 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
422 GMAC_ADDR_LOW(reg_n));
423}
424
425static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
426 unsigned int reg_n)
427{
428 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
429 GMAC_ADDR_LOW(reg_n));
430}
431
432static void gmac_set_filter(struct net_device *dev)
433{
434 unsigned long ioaddr = dev->base_addr;
435 unsigned int value = 0;
436
437 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
438 __func__, dev->mc_count, dev->uc_count);
439
440 if (dev->flags & IFF_PROMISC)
441 value = GMAC_FRAME_FILTER_PR;
442 else if ((dev->mc_count > HASH_TABLE_SIZE)
443 || (dev->flags & IFF_ALLMULTI)) {
444 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
445 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
446 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
447 } else if (dev->mc_count > 0) {
448 int i;
449 u32 mc_filter[2];
450 struct dev_mc_list *mclist;
451
452 /* Hash filter for multicast */
453 value = GMAC_FRAME_FILTER_HMC;
454
455 memset(mc_filter, 0, sizeof(mc_filter));
456 for (i = 0, mclist = dev->mc_list;
457 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
458 /* The upper 6 bits of the calculated CRC are used to
459 index the contens of the hash table */
460 int bit_nr =
461 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
462 /* The most significant bit determines the register to
463 * use (H/L) while the other 5 bits determine the bit
464 * within the register. */
465 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
466 }
467 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
468 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
469 }
470
471 /* Handle multiple unicast addresses (perfect filtering)*/
472 if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
473 /* Switch to promiscuous mode is more than 16 addrs
474 are required */
475 value |= GMAC_FRAME_FILTER_PR;
476 else {
477 int i;
478 struct dev_addr_list *uc_ptr = dev->uc_list;
479
480 for (i = 0; i < dev->uc_count; i++) {
481 gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
482 i + 1);
483
484 DBG(KERN_INFO "\t%d "
485 "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
486 "%02x\n", i + 1,
487 uc_ptr->da_addr[0], uc_ptr->da_addr[1],
488 uc_ptr->da_addr[2], uc_ptr->da_addr[3],
489 uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
490 uc_ptr = uc_ptr->next;
491 }
492 }
493
494#ifdef FRAME_FILTER_DEBUG
495 /* Enable Receive all mode (to debug filtering_fail errors) */
496 value |= GMAC_FRAME_FILTER_RA;
497#endif
498 writel(value, ioaddr + GMAC_FRAME_FILTER);
499
500 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
501 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
502 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
503
504 return;
505}
506
507static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
508 unsigned int fc, unsigned int pause_time)
509{
510 unsigned int flow = 0;
511
512 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
513 if (fc & FLOW_RX) {
514 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
515 flow |= GMAC_FLOW_CTRL_RFE;
516 }
517 if (fc & FLOW_TX) {
518 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
519 flow |= GMAC_FLOW_CTRL_TFE;
520 }
521
522 if (duplex) {
523 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
524 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
525 }
526
527 writel(flow, ioaddr + GMAC_FLOW_CTRL);
528 return;
529}
530
531static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
532{
533 unsigned int pmt = 0;
534
535 if (mode == WAKE_MAGIC) {
536 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
537 pmt |= power_down | magic_pkt_en;
538 } else if (mode == WAKE_UCAST) {
539 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
540 pmt |= global_unicast;
541 }
542
543 writel(pmt, ioaddr + GMAC_PMT);
544 return;
545}
546
547static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
548 int disable_rx_ic) 347 int disable_rx_ic)
549{ 348{
550 int i; 349 int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
562 return; 361 return;
563} 362}
564 363
565static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 364static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
566{ 365{
567 int i; 366 int i;
568 367
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
576 return; 375 return;
577} 376}
578 377
579static int gmac_get_tx_owner(struct dma_desc *p) 378static int dwmac1000_get_tx_owner(struct dma_desc *p)
580{ 379{
581 return p->des01.etx.own; 380 return p->des01.etx.own;
582} 381}
583 382
584static int gmac_get_rx_owner(struct dma_desc *p) 383static int dwmac1000_get_rx_owner(struct dma_desc *p)
585{ 384{
586 return p->des01.erx.own; 385 return p->des01.erx.own;
587} 386}
588 387
589static void gmac_set_tx_owner(struct dma_desc *p) 388static void dwmac1000_set_tx_owner(struct dma_desc *p)
590{ 389{
591 p->des01.etx.own = 1; 390 p->des01.etx.own = 1;
592} 391}
593 392
594static void gmac_set_rx_owner(struct dma_desc *p) 393static void dwmac1000_set_rx_owner(struct dma_desc *p)
595{ 394{
596 p->des01.erx.own = 1; 395 p->des01.erx.own = 1;
597} 396}
598 397
599static int gmac_get_tx_ls(struct dma_desc *p) 398static int dwmac1000_get_tx_ls(struct dma_desc *p)
600{ 399{
601 return p->des01.etx.last_segment; 400 return p->des01.etx.last_segment;
602} 401}
603 402
604static void gmac_release_tx_desc(struct dma_desc *p) 403static void dwmac1000_release_tx_desc(struct dma_desc *p)
605{ 404{
606 int ter = p->des01.etx.end_ring; 405 int ter = p->des01.etx.end_ring;
607 406
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
611 return; 410 return;
612} 411}
613 412
614static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 413static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
615 int csum_flag) 414 int csum_flag)
616{ 415{
617 p->des01.etx.first_segment = is_fs; 416 p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
625 p->des01.etx.checksum_insertion = cic_full; 424 p->des01.etx.checksum_insertion = cic_full;
626} 425}
627 426
628static void gmac_clear_tx_ic(struct dma_desc *p) 427static void dwmac1000_clear_tx_ic(struct dma_desc *p)
629{ 428{
630 p->des01.etx.interrupt = 0; 429 p->des01.etx.interrupt = 0;
631} 430}
632 431
633static void gmac_close_tx_desc(struct dma_desc *p) 432static void dwmac1000_close_tx_desc(struct dma_desc *p)
634{ 433{
635 p->des01.etx.last_segment = 1; 434 p->des01.etx.last_segment = 1;
636 p->des01.etx.interrupt = 1; 435 p->des01.etx.interrupt = 1;
637} 436}
638 437
639static int gmac_get_rx_frame_len(struct dma_desc *p) 438static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
640{ 439{
641 return p->des01.erx.frame_length; 440 return p->des01.erx.frame_length;
642} 441}
643 442
644struct stmmac_ops gmac_driver = { 443struct stmmac_dma_ops dwmac1000_dma_ops = {
645 .core_init = gmac_core_init, 444 .init = dwmac1000_dma_init,
646 .dump_mac_regs = gmac_dump_regs, 445 .dump_regs = dwmac1000_dump_dma_regs,
647 .dma_init = gmac_dma_init, 446 .dma_mode = dwmac1000_dma_operation_mode,
648 .dump_dma_regs = gmac_dump_dma_regs, 447 .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
649 .dma_mode = gmac_dma_operation_mode, 448 .enable_dma_transmission = dwmac_enable_dma_transmission,
650 .dma_diagnostic_fr = gmac_dma_diagnostic_fr, 449 .enable_dma_irq = dwmac_enable_dma_irq,
651 .tx_status = gmac_get_tx_frame_status, 450 .disable_dma_irq = dwmac_disable_dma_irq,
652 .rx_status = gmac_get_rx_frame_status, 451 .start_tx = dwmac_dma_start_tx,
653 .get_tx_len = gmac_get_tx_len, 452 .stop_tx = dwmac_dma_stop_tx,
654 .set_filter = gmac_set_filter, 453 .start_rx = dwmac_dma_start_rx,
655 .flow_ctrl = gmac_flow_ctrl, 454 .stop_rx = dwmac_dma_stop_rx,
656 .pmt = gmac_pmt, 455 .dma_interrupt = dwmac_dma_interrupt,
657 .init_rx_desc = gmac_init_rx_desc,
658 .init_tx_desc = gmac_init_tx_desc,
659 .get_tx_owner = gmac_get_tx_owner,
660 .get_rx_owner = gmac_get_rx_owner,
661 .release_tx_desc = gmac_release_tx_desc,
662 .prepare_tx_desc = gmac_prepare_tx_desc,
663 .clear_tx_ic = gmac_clear_tx_ic,
664 .close_tx_desc = gmac_close_tx_desc,
665 .get_tx_ls = gmac_get_tx_ls,
666 .set_tx_owner = gmac_set_tx_owner,
667 .set_rx_owner = gmac_set_rx_owner,
668 .get_rx_frame_len = gmac_get_rx_frame_len,
669 .host_irq_status = gmac_irq_status,
670 .set_umac_addr = gmac_set_umac_addr,
671 .get_umac_addr = gmac_get_umac_addr,
672}; 456};
673 457
674struct mac_device_info *gmac_setup(unsigned long ioaddr) 458struct stmmac_desc_ops dwmac1000_desc_ops = {
675{ 459 .tx_status = dwmac1000_get_tx_frame_status,
676 struct mac_device_info *mac; 460 .rx_status = dwmac1000_get_rx_frame_status,
677 u32 uid = readl(ioaddr + GMAC_VERSION); 461 .get_tx_len = dwmac1000_get_tx_len,
678 462 .init_rx_desc = dwmac1000_init_rx_desc,
679 pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", 463 .init_tx_desc = dwmac1000_init_tx_desc,
680 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); 464 .get_tx_owner = dwmac1000_get_tx_owner,
681 465 .get_rx_owner = dwmac1000_get_rx_owner,
682 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 466 .release_tx_desc = dwmac1000_release_tx_desc,
683 467 .prepare_tx_desc = dwmac1000_prepare_tx_desc,
684 mac->ops = &gmac_driver; 468 .clear_tx_ic = dwmac1000_clear_tx_ic,
685 mac->hw.pmt = PMT_SUPPORTED; 469 .close_tx_desc = dwmac1000_close_tx_desc,
686 mac->hw.link.port = GMAC_CONTROL_PS; 470 .get_tx_ls = dwmac1000_get_tx_ls,
687 mac->hw.link.duplex = GMAC_CONTROL_DM; 471 .set_tx_owner = dwmac1000_set_tx_owner,
688 mac->hw.link.speed = GMAC_CONTROL_FES; 472 .set_rx_owner = dwmac1000_set_rx_owner,
689 mac->hw.mii.addr = GMAC_MII_ADDR; 473 .get_rx_frame_len = dwmac1000_get_rx_frame_len,
690 mac->hw.mii.data = GMAC_MII_DATA; 474};
691
692 return mac;
693}
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 00000000000..de848d9f606
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
1/*******************************************************************************
2 DWMAC DMA Header file.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25/* DMA CRS Control and Status Register Mapping */
26#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
27#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
28#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
29#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
30#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
31#define DMA_STATUS 0x00001014 /* Status Register */
32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
35#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
36#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
37
38/* DMA Control register defines */
39#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
40#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
41
42/* DMA Normal interrupt */
43#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
44#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
45#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
46#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
47#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
48
49#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
50 DMA_INTR_ENA_TIE)
51
52/* DMA Abnormal interrupt */
53#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
54#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
55#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
56#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
57#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
58#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
59#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
60#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
61#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
62#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
63
64#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
65 DMA_INTR_ENA_UNE)
66
67/* DMA default interrupt mask */
68#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
69
70/* DMA Status register defines */
71#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
72#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
73#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
74#define DMA_STATUS_GMI 0x08000000
75#define DMA_STATUS_GLI 0x04000000
76#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
77#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
78#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
79#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
80#define DMA_STATUS_TS_SHIFT 20
81#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
82#define DMA_STATUS_RS_SHIFT 17
83#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
84#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
85#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
86#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
87#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
88#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
89#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
90#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
91#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
92#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
93#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
94#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
95#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
96#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
97#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
98
99extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
100extern void dwmac_enable_dma_irq(unsigned long ioaddr);
101extern void dwmac_disable_dma_irq(unsigned long ioaddr);
102extern void dwmac_dma_start_tx(unsigned long ioaddr);
103extern void dwmac_dma_stop_tx(unsigned long ioaddr);
104extern void dwmac_dma_start_rx(unsigned long ioaddr);
105extern void dwmac_dma_stop_rx(unsigned long ioaddr);
106extern int dwmac_dma_interrupt(unsigned long ioaddr,
107 struct stmmac_extra_stats *x);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 00000000000..d4adb1eaa44
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#include <linux/io.h>
24#include "common.h"
25#include "dwmac_dma.h"
26
27#undef DWMAC_DMA_DEBUG
28#ifdef DWMAC_DMA_DEBUG
29#define DBG(fmt, args...) printk(fmt, ## args)
30#else
31#define DBG(fmt, args...) do { } while (0)
32#endif
33
34/* CSR1 enables the transmit DMA to check for new descriptor */
35void dwmac_enable_dma_transmission(unsigned long ioaddr)
36{
37 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
38}
39
40void dwmac_enable_dma_irq(unsigned long ioaddr)
41{
42 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
43}
44
45void dwmac_disable_dma_irq(unsigned long ioaddr)
46{
47 writel(0, ioaddr + DMA_INTR_ENA);
48}
49
50void dwmac_dma_start_tx(unsigned long ioaddr)
51{
52 u32 value = readl(ioaddr + DMA_CONTROL);
53 value |= DMA_CONTROL_ST;
54 writel(value, ioaddr + DMA_CONTROL);
55 return;
56}
57
58void dwmac_dma_stop_tx(unsigned long ioaddr)
59{
60 u32 value = readl(ioaddr + DMA_CONTROL);
61 value &= ~DMA_CONTROL_ST;
62 writel(value, ioaddr + DMA_CONTROL);
63 return;
64}
65
66void dwmac_dma_start_rx(unsigned long ioaddr)
67{
68 u32 value = readl(ioaddr + DMA_CONTROL);
69 value |= DMA_CONTROL_SR;
70 writel(value, ioaddr + DMA_CONTROL);
71
72 return;
73}
74
75void dwmac_dma_stop_rx(unsigned long ioaddr)
76{
77 u32 value = readl(ioaddr + DMA_CONTROL);
78 value &= ~DMA_CONTROL_SR;
79 writel(value, ioaddr + DMA_CONTROL);
80
81 return;
82}
83
84#ifdef DWMAC_DMA_DEBUG
85static void show_tx_process_state(unsigned int status)
86{
87 unsigned int state;
88 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
89
90 switch (state) {
91 case 0:
92 pr_info("- TX (Stopped): Reset or Stop command\n");
93 break;
94 case 1:
95 pr_info("- TX (Running):Fetching the Tx desc\n");
96 break;
97 case 2:
98 pr_info("- TX (Running): Waiting for end of tx\n");
99 break;
100 case 3:
101 pr_info("- TX (Running): Reading the data "
102 "and queuing the data into the Tx buf\n");
103 break;
104 case 6:
105 pr_info("- TX (Suspended): Tx Buff Underflow "
106 "or an unavailable Transmit descriptor\n");
107 break;
108 case 7:
109 pr_info("- TX (Running): Closing Tx descriptor\n");
110 break;
111 default:
112 break;
113 }
114 return;
115}
116
117static void show_rx_process_state(unsigned int status)
118{
119 unsigned int state;
120 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
121
122 switch (state) {
123 case 0:
124 pr_info("- RX (Stopped): Reset or Stop command\n");
125 break;
126 case 1:
127 pr_info("- RX (Running): Fetching the Rx desc\n");
128 break;
129 case 2:
130 pr_info("- RX (Running):Checking for end of pkt\n");
131 break;
132 case 3:
133 pr_info("- RX (Running): Waiting for Rx pkt\n");
134 break;
135 case 4:
136 pr_info("- RX (Suspended): Unavailable Rx buf\n");
137 break;
138 case 5:
139 pr_info("- RX (Running): Closing Rx descriptor\n");
140 break;
141 case 6:
142 pr_info("- RX(Running): Flushing the current frame"
143 " from the Rx buf\n");
144 break;
145 case 7:
146 pr_info("- RX (Running): Queuing the Rx frame"
147 " from the Rx buf into memory\n");
148 break;
149 default:
150 break;
151 }
152 return;
153}
154#endif
155
156int dwmac_dma_interrupt(unsigned long ioaddr,
157 struct stmmac_extra_stats *x)
158{
159 int ret = 0;
160 /* read the status register (CSR5) */
161 u32 intr_status = readl(ioaddr + DMA_STATUS);
162
163 DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
164#ifdef DWMAC_DMA_DEBUG
165 /* It displays the DMA process states (CSR5 register) */
166 show_tx_process_state(intr_status);
167 show_rx_process_state(intr_status);
168#endif
169 /* ABNORMAL interrupts */
170 if (unlikely(intr_status & DMA_STATUS_AIS)) {
171 DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
172 if (unlikely(intr_status & DMA_STATUS_UNF)) {
173 DBG(INFO, "transmit underflow\n");
174 ret = tx_hard_error_bump_tc;
175 x->tx_undeflow_irq++;
176 }
177 if (unlikely(intr_status & DMA_STATUS_TJT)) {
178 DBG(INFO, "transmit jabber\n");
179 x->tx_jabber_irq++;
180 }
181 if (unlikely(intr_status & DMA_STATUS_OVF)) {
182 DBG(INFO, "recv overflow\n");
183 x->rx_overflow_irq++;
184 }
185 if (unlikely(intr_status & DMA_STATUS_RU)) {
186 DBG(INFO, "receive buffer unavailable\n");
187 x->rx_buf_unav_irq++;
188 }
189 if (unlikely(intr_status & DMA_STATUS_RPS)) {
190 DBG(INFO, "receive process stopped\n");
191 x->rx_process_stopped_irq++;
192 }
193 if (unlikely(intr_status & DMA_STATUS_RWT)) {
194 DBG(INFO, "receive watchdog\n");
195 x->rx_watchdog_irq++;
196 }
197 if (unlikely(intr_status & DMA_STATUS_ETI)) {
198 DBG(INFO, "transmit early interrupt\n");
199 x->tx_early_irq++;
200 }
201 if (unlikely(intr_status & DMA_STATUS_TPS)) {
202 DBG(INFO, "transmit process stopped\n");
203 x->tx_process_stopped_irq++;
204 ret = tx_hard_error;
205 }
206 if (unlikely(intr_status & DMA_STATUS_FBI)) {
207 DBG(INFO, "fatal bus error\n");
208 x->fatal_bus_error_irq++;
209 ret = tx_hard_error;
210 }
211 }
212 /* TX/RX NORMAL interrupts */
213 if (intr_status & DMA_STATUS_NIS) {
214 x->normal_irq_n++;
215 if (likely((intr_status & DMA_STATUS_RI) ||
216 (intr_status & (DMA_STATUS_TI))))
217 ret = handle_tx_rx;
218 }
219 /* Optional hardware blocks, interrupts should be disabled */
220 if (unlikely(intr_status &
221 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
222 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
223 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
224 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
225
226 DBG(INFO, "\n\n");
227 return ret;
228}
229
230
231void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
232 unsigned int high, unsigned int low)
233{
234 unsigned long data;
235
236 data = (addr[5] << 8) | addr[4];
237 writel(data, ioaddr + high);
238 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
239 writel(data, ioaddr + low);
240
241 return;
242}
243
244void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
245 unsigned int high, unsigned int low)
246{
247 unsigned int hi_addr, lo_addr;
248
249 /* Read the MAC address from the hardware */
250 hi_addr = readl(ioaddr + high);
251 lo_addr = readl(ioaddr + low);
252
253 /* Extract the MAC address from the high and low words */
254 addr[0] = lo_addr & 0xff;
255 addr[1] = (lo_addr >> 8) & 0xff;
256 addr[2] = (lo_addr >> 16) & 0xff;
257 addr[3] = (lo_addr >> 24) & 0xff;
258 addr[4] = hi_addr & 0xff;
259 addr[5] = (hi_addr >> 8) & 0xff;
260
261 return;
262}
263
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e..ba35e6943cf 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Oct_09" 23#define DRV_MODULE_VERSION "Jan_2010"
24#include <linux/stmmac.h>
24 25
25#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 26#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
26#define STMMAC_VLAN_TAG_USED 27#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
57 int rx_csum; 58 int rx_csum;
58 unsigned int dma_buf_sz; 59 unsigned int dma_buf_sz;
59 struct device *device; 60 struct device *device;
60 struct mac_device_info *mac_type; 61 struct mac_device_info *hw;
61 62
62 struct stmmac_extra_stats xstats; 63 struct stmmac_extra_stats xstats;
63 struct napi_struct napi; 64 struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
69 int phy_mask; 70 int phy_mask;
70 int (*phy_reset) (void *priv); 71 int (*phy_reset) (void *priv);
71 void (*fix_mac_speed) (void *priv, unsigned int speed); 72 void (*fix_mac_speed) (void *priv, unsigned int speed);
73 void (*bus_setup)(unsigned long ioaddr);
72 void *bsp_priv; 74 void *bsp_priv;
73 75
74 int phy_irq; 76 int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
93#endif 95#endif
94}; 96};
95 97
98#ifdef CONFIG_STM_DRIVERS
99#include <linux/stm/pad.h>
100static inline int stmmac_claim_resource(struct platform_device *pdev)
101{
102 int ret = 0;
103 struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
104
105 /* Pad routing setup */
106 if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
107 dev_name(&pdev->dev)))) {
108 printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
109 ret = -ENODEV;
110 }
111 return ret;
112}
113#else
114static inline int stmmac_claim_resource(struct platform_device *pdev)
115{
116 return 0;
117}
118#endif
119
96extern int stmmac_mdio_unregister(struct net_device *ndev); 120extern int stmmac_mdio_unregister(struct net_device *ndev);
97extern int stmmac_mdio_register(struct net_device *ndev); 121extern int stmmac_mdio_register(struct net_device *ndev);
98extern void stmmac_set_ethtool_ops(struct net_device *netdev); 122extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a075..0abeff6193a 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
28#include <linux/phy.h> 28#include <linux/phy.h>
29 29
30#include "stmmac.h" 30#include "stmmac.h"
31#include "dwmac_dma.h"
31 32
32#define REG_SPACE_SIZE 0x1054 33#define REG_SPACE_SIZE 0x1054
33#define MAC100_ETHTOOL_NAME "st_mac100" 34#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
268 } 269 }
269 } else { 270 } else {
270 unsigned long ioaddr = netdev->base_addr; 271 unsigned long ioaddr = netdev->base_addr;
271 priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex, 272 priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
272 priv->flow_ctrl, priv->pause); 273 priv->flow_ctrl, priv->pause);
273 } 274 }
274 spin_unlock(&priv->lock); 275 spin_unlock(&priv->lock);
275 return ret; 276 return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
283 int i; 284 int i;
284 285
285 /* Update HW stats if supported */ 286 /* Update HW stats if supported */
286 priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats, 287 priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
287 ioaddr); 288 ioaddr);
288 289
289 for (i = 0; i < STMMAC_STATS_LEN; i++) { 290 for (i = 0; i < STMMAC_STATS_LEN; i++) {
290 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; 291 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07..a6733612d64 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
37#include <linux/platform_device.h> 36#include <linux/platform_device.h>
38#include <linux/ip.h> 37#include <linux/ip.h>
@@ -45,7 +44,6 @@
45#include <linux/phy.h> 44#include <linux/phy.h>
46#include <linux/if_vlan.h> 45#include <linux/if_vlan.h>
47#include <linux/dma-mapping.h> 46#include <linux/dma-mapping.h>
48#include <linux/stm/soc.h>
49#include "stmmac.h" 47#include "stmmac.h"
50 48
51#define STMMAC_RESOURCE_NAME "stmmaceth" 49#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +224,38 @@ static void stmmac_adjust_link(struct net_device *dev)
226 if (phydev->duplex != priv->oldduplex) { 224 if (phydev->duplex != priv->oldduplex) {
227 new_state = 1; 225 new_state = 1;
228 if (!(phydev->duplex)) 226 if (!(phydev->duplex))
229 ctrl &= ~priv->mac_type->hw.link.duplex; 227 ctrl &= ~priv->hw->link.duplex;
230 else 228 else
231 ctrl |= priv->mac_type->hw.link.duplex; 229 ctrl |= priv->hw->link.duplex;
232 priv->oldduplex = phydev->duplex; 230 priv->oldduplex = phydev->duplex;
233 } 231 }
234 /* Flow Control operation */ 232 /* Flow Control operation */
235 if (phydev->pause) 233 if (phydev->pause)
236 priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex, 234 priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
237 fc, pause_time); 235 fc, pause_time);
238 236
239 if (phydev->speed != priv->speed) { 237 if (phydev->speed != priv->speed) {
240 new_state = 1; 238 new_state = 1;
241 switch (phydev->speed) { 239 switch (phydev->speed) {
242 case 1000: 240 case 1000:
243 if (likely(priv->is_gmac)) 241 if (likely(priv->is_gmac))
244 ctrl &= ~priv->mac_type->hw.link.port; 242 ctrl &= ~priv->hw->link.port;
245 break; 243 break;
246 case 100: 244 case 100:
247 case 10: 245 case 10:
248 if (priv->is_gmac) { 246 if (priv->is_gmac) {
249 ctrl |= priv->mac_type->hw.link.port; 247 ctrl |= priv->hw->link.port;
250 if (phydev->speed == SPEED_100) { 248 if (phydev->speed == SPEED_100) {
251 ctrl |= 249 ctrl |= priv->hw->link.speed;
252 priv->mac_type->hw.link.
253 speed;
254 } else { 250 } else {
255 ctrl &= 251 ctrl &= ~(priv->hw->link.speed);
256 ~(priv->mac_type->hw.
257 link.speed);
258 } 252 }
259 } else { 253 } else {
260 ctrl &= ~priv->mac_type->hw.link.port; 254 ctrl &= ~priv->hw->link.port;
261 } 255 }
262 priv->fix_mac_speed(priv->bsp_priv, 256 if (likely(priv->fix_mac_speed))
263 phydev->speed); 257 priv->fix_mac_speed(priv->bsp_priv,
258 phydev->speed);
264 break; 259 break;
265 default: 260 default:
266 if (netif_msg_link(priv)) 261 if (netif_msg_link(priv))
@@ -305,8 +300,8 @@ static int stmmac_init_phy(struct net_device *dev)
305{ 300{
306 struct stmmac_priv *priv = netdev_priv(dev); 301 struct stmmac_priv *priv = netdev_priv(dev);
307 struct phy_device *phydev; 302 struct phy_device *phydev;
308 char phy_id[BUS_ID_SIZE]; /* PHY to connect */ 303 char phy_id[MII_BUS_ID_SIZE + 3];
309 char bus_id[BUS_ID_SIZE]; 304 char bus_id[MII_BUS_ID_SIZE];
310 305
311 priv->oldlink = 0; 306 priv->oldlink = 0;
312 priv->speed = 0; 307 priv->speed = 0;
@@ -318,7 +313,8 @@ static int stmmac_init_phy(struct net_device *dev)
318 } 313 }
319 314
320 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 315 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
321 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr); 316 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
317 priv->phy_addr);
322 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 318 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
323 319
324 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, 320 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +504,8 @@ static void init_dma_desc_rings(struct net_device *dev)
508 priv->cur_tx = 0; 504 priv->cur_tx = 0;
509 505
510 /* Clear the Rx/Tx descriptors */ 506 /* Clear the Rx/Tx descriptors */
511 priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic); 507 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
512 priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize); 508 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
513 509
514 if (netif_msg_hw(priv)) { 510 if (netif_msg_hw(priv)) {
515 pr_info("RX descriptor ring:\n"); 511 pr_info("RX descriptor ring:\n");
@@ -544,8 +540,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
544 struct dma_desc *p = priv->dma_tx + i; 540 struct dma_desc *p = priv->dma_tx + i;
545 if (p->des2) 541 if (p->des2)
546 dma_unmap_single(priv->device, p->des2, 542 dma_unmap_single(priv->device, p->des2,
547 priv->mac_type->ops->get_tx_len(p), 543 priv->hw->desc->get_tx_len(p),
548 DMA_TO_DEVICE); 544 DMA_TO_DEVICE);
549 dev_kfree_skb_any(priv->tx_skbuff[i]); 545 dev_kfree_skb_any(priv->tx_skbuff[i]);
550 priv->tx_skbuff[i] = NULL; 546 priv->tx_skbuff[i] = NULL;
551 } 547 }
@@ -575,50 +571,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
575} 571}
576 572
577/** 573/**
578 * stmmac_dma_start_tx
579 * @ioaddr: device I/O address
580 * Description: this function starts the DMA tx process.
581 */
582static void stmmac_dma_start_tx(unsigned long ioaddr)
583{
584 u32 value = readl(ioaddr + DMA_CONTROL);
585 value |= DMA_CONTROL_ST;
586 writel(value, ioaddr + DMA_CONTROL);
587 return;
588}
589
590static void stmmac_dma_stop_tx(unsigned long ioaddr)
591{
592 u32 value = readl(ioaddr + DMA_CONTROL);
593 value &= ~DMA_CONTROL_ST;
594 writel(value, ioaddr + DMA_CONTROL);
595 return;
596}
597
598/**
599 * stmmac_dma_start_rx
600 * @ioaddr: device I/O address
601 * Description: this function starts the DMA rx process.
602 */
603static void stmmac_dma_start_rx(unsigned long ioaddr)
604{
605 u32 value = readl(ioaddr + DMA_CONTROL);
606 value |= DMA_CONTROL_SR;
607 writel(value, ioaddr + DMA_CONTROL);
608
609 return;
610}
611
612static void stmmac_dma_stop_rx(unsigned long ioaddr)
613{
614 u32 value = readl(ioaddr + DMA_CONTROL);
615 value &= ~DMA_CONTROL_SR;
616 writel(value, ioaddr + DMA_CONTROL);
617
618 return;
619}
620
621/**
622 * stmmac_dma_operation_mode - HW DMA operation mode 574 * stmmac_dma_operation_mode - HW DMA operation mode
623 * @priv : pointer to the private device structure. 575 * @priv : pointer to the private device structure.
624 * Description: it sets the DMA operation mode: tx/rx DMA thresholds 576 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +581,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
629{ 581{
630 if (!priv->is_gmac) { 582 if (!priv->is_gmac) {
631 /* MAC 10/100 */ 583 /* MAC 10/100 */
632 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0); 584 priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
633 priv->tx_coe = NO_HW_CSUM; 585 priv->tx_coe = NO_HW_CSUM;
634 } else { 586 } else {
635 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { 587 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
636 priv->mac_type->ops->dma_mode(priv->dev->base_addr, 588 priv->hw->dma->dma_mode(priv->dev->base_addr,
637 SF_DMA_MODE, SF_DMA_MODE); 589 SF_DMA_MODE, SF_DMA_MODE);
638 tc = SF_DMA_MODE; 590 tc = SF_DMA_MODE;
639 priv->tx_coe = HW_CSUM; 591 priv->tx_coe = HW_CSUM;
640 } else { 592 } else {
641 /* Checksum computation is performed in software. */ 593 /* Checksum computation is performed in software. */
642 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 594 priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
643 SF_DMA_MODE); 595 SF_DMA_MODE);
644 priv->tx_coe = NO_HW_CSUM; 596 priv->tx_coe = NO_HW_CSUM;
645 } 597 }
646 } 598 }
@@ -649,88 +601,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
649 return; 601 return;
650} 602}
651 603
652#ifdef STMMAC_DEBUG
653/**
654 * show_tx_process_state
655 * @status: tx descriptor status field
656 * Description: it shows the Transmit Process State for CSR5[22:20]
657 */
658static void show_tx_process_state(unsigned int status)
659{
660 unsigned int state;
661 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
662
663 switch (state) {
664 case 0:
665 pr_info("- TX (Stopped): Reset or Stop command\n");
666 break;
667 case 1:
668 pr_info("- TX (Running):Fetching the Tx desc\n");
669 break;
670 case 2:
671 pr_info("- TX (Running): Waiting for end of tx\n");
672 break;
673 case 3:
674 pr_info("- TX (Running): Reading the data "
675 "and queuing the data into the Tx buf\n");
676 break;
677 case 6:
678 pr_info("- TX (Suspended): Tx Buff Underflow "
679 "or an unavailable Transmit descriptor\n");
680 break;
681 case 7:
682 pr_info("- TX (Running): Closing Tx descriptor\n");
683 break;
684 default:
685 break;
686 }
687 return;
688}
689
690/**
691 * show_rx_process_state
692 * @status: rx descriptor status field
693 * Description: it shows the Receive Process State for CSR5[19:17]
694 */
695static void show_rx_process_state(unsigned int status)
696{
697 unsigned int state;
698 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
699
700 switch (state) {
701 case 0:
702 pr_info("- RX (Stopped): Reset or Stop command\n");
703 break;
704 case 1:
705 pr_info("- RX (Running): Fetching the Rx desc\n");
706 break;
707 case 2:
708 pr_info("- RX (Running):Checking for end of pkt\n");
709 break;
710 case 3:
711 pr_info("- RX (Running): Waiting for Rx pkt\n");
712 break;
713 case 4:
714 pr_info("- RX (Suspended): Unavailable Rx buf\n");
715 break;
716 case 5:
717 pr_info("- RX (Running): Closing Rx descriptor\n");
718 break;
719 case 6:
720 pr_info("- RX(Running): Flushing the current frame"
721 " from the Rx buf\n");
722 break;
723 case 7:
724 pr_info("- RX (Running): Queuing the Rx frame"
725 " from the Rx buf into memory\n");
726 break;
727 default:
728 break;
729 }
730 return;
731}
732#endif
733
734/** 604/**
735 * stmmac_tx: 605 * stmmac_tx:
736 * @priv: private driver structure 606 * @priv: private driver structure
@@ -748,16 +618,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
748 struct dma_desc *p = priv->dma_tx + entry; 618 struct dma_desc *p = priv->dma_tx + entry;
749 619
750 /* Check if the descriptor is owned by the DMA. */ 620 /* Check if the descriptor is owned by the DMA. */
751 if (priv->mac_type->ops->get_tx_owner(p)) 621 if (priv->hw->desc->get_tx_owner(p))
752 break; 622 break;
753 623
754 /* Verify tx error by looking at the last segment */ 624 /* Verify tx error by looking at the last segment */
755 last = priv->mac_type->ops->get_tx_ls(p); 625 last = priv->hw->desc->get_tx_ls(p);
756 if (likely(last)) { 626 if (likely(last)) {
757 int tx_error = 627 int tx_error =
758 priv->mac_type->ops->tx_status(&priv->dev->stats, 628 priv->hw->desc->tx_status(&priv->dev->stats,
759 &priv->xstats, 629 &priv->xstats, p,
760 p, ioaddr); 630 ioaddr);
761 if (likely(tx_error == 0)) { 631 if (likely(tx_error == 0)) {
762 priv->dev->stats.tx_packets++; 632 priv->dev->stats.tx_packets++;
763 priv->xstats.tx_pkt_n++; 633 priv->xstats.tx_pkt_n++;
@@ -769,7 +639,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
769 639
770 if (likely(p->des2)) 640 if (likely(p->des2))
771 dma_unmap_single(priv->device, p->des2, 641 dma_unmap_single(priv->device, p->des2,
772 priv->mac_type->ops->get_tx_len(p), 642 priv->hw->desc->get_tx_len(p),
773 DMA_TO_DEVICE); 643 DMA_TO_DEVICE);
774 if (unlikely(p->des3)) 644 if (unlikely(p->des3))
775 p->des3 = 0; 645 p->des3 = 0;
@@ -790,7 +660,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
790 priv->tx_skbuff[entry] = NULL; 660 priv->tx_skbuff[entry] = NULL;
791 } 661 }
792 662
793 priv->mac_type->ops->release_tx_desc(p); 663 priv->hw->desc->release_tx_desc(p);
794 664
795 entry = (++priv->dirty_tx) % txsize; 665 entry = (++priv->dirty_tx) % txsize;
796 } 666 }
@@ -814,7 +684,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
814 priv->tm->timer_start(tmrate); 684 priv->tm->timer_start(tmrate);
815 else 685 else
816#endif 686#endif
817 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); 687 priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
818} 688}
819 689
820static inline void stmmac_disable_irq(struct stmmac_priv *priv) 690static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +694,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
824 priv->tm->timer_stop(); 694 priv->tm->timer_stop();
825 else 695 else
826#endif 696#endif
827 writel(0, priv->dev->base_addr + DMA_INTR_ENA); 697 priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
828} 698}
829 699
830static int stmmac_has_work(struct stmmac_priv *priv) 700static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +702,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
832 unsigned int has_work = 0; 702 unsigned int has_work = 0;
833 int rxret, tx_work = 0; 703 int rxret, tx_work = 0;
834 704
835 rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx + 705 rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
836 (priv->cur_rx % priv->dma_rx_size)); 706 (priv->cur_rx % priv->dma_rx_size));
837 707
838 if (priv->dirty_tx != priv->cur_tx) 708 if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +753,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
883{ 753{
884 netif_stop_queue(priv->dev); 754 netif_stop_queue(priv->dev);
885 755
886 stmmac_dma_stop_tx(priv->dev->base_addr); 756 priv->hw->dma->stop_tx(priv->dev->base_addr);
887 dma_free_tx_skbufs(priv); 757 dma_free_tx_skbufs(priv);
888 priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 758 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
889 priv->dirty_tx = 0; 759 priv->dirty_tx = 0;
890 priv->cur_tx = 0; 760 priv->cur_tx = 0;
891 stmmac_dma_start_tx(priv->dev->base_addr); 761 priv->hw->dma->start_tx(priv->dev->base_addr);
892 762
893 priv->dev->stats.tx_errors++; 763 priv->dev->stats.tx_errors++;
894 netif_wake_queue(priv->dev); 764 netif_wake_queue(priv->dev);
@@ -896,95 +766,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
896 return; 766 return;
897} 767}
898 768
899/**
900 * stmmac_dma_interrupt - Interrupt handler for the driver
901 * @dev: net device structure
902 * Description: Interrupt handler for the driver (DMA).
903 */
904static void stmmac_dma_interrupt(struct net_device *dev)
905{
906 unsigned long ioaddr = dev->base_addr;
907 struct stmmac_priv *priv = netdev_priv(dev);
908 /* read the status register (CSR5) */
909 u32 intr_status = readl(ioaddr + DMA_STATUS);
910
911 DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
912 769
913#ifdef STMMAC_DEBUG 770static void stmmac_dma_interrupt(struct stmmac_priv *priv)
914 /* It displays the DMA transmit process state (CSR5 register) */ 771{
915 if (netif_msg_tx_done(priv)) 772 unsigned long ioaddr = priv->dev->base_addr;
916 show_tx_process_state(intr_status); 773 int status;
917 if (netif_msg_rx_status(priv)) 774
918 show_rx_process_state(intr_status); 775 status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
919#endif 776 &priv->xstats);
920 /* ABNORMAL interrupts */ 777 if (likely(status == handle_tx_rx))
921 if (unlikely(intr_status & DMA_STATUS_AIS)) { 778 _stmmac_schedule(priv);
922 DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: "); 779
923 if (unlikely(intr_status & DMA_STATUS_UNF)) { 780 else if (unlikely(status == tx_hard_error_bump_tc)) {
924 DBG(intr, INFO, "transmit underflow\n"); 781 /* Try to bump up the dma threshold on this failure */
925 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 782 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
926 /* Try to bump up the threshold */ 783 tc += 64;
927 tc += 64; 784 priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
928 priv->mac_type->ops->dma_mode(ioaddr, tc, 785 priv->xstats.threshold = tc;
929 SF_DMA_MODE);
930 priv->xstats.threshold = tc;
931 }
932 stmmac_tx_err(priv);
933 priv->xstats.tx_undeflow_irq++;
934 }
935 if (unlikely(intr_status & DMA_STATUS_TJT)) {
936 DBG(intr, INFO, "transmit jabber\n");
937 priv->xstats.tx_jabber_irq++;
938 }
939 if (unlikely(intr_status & DMA_STATUS_OVF)) {
940 DBG(intr, INFO, "recv overflow\n");
941 priv->xstats.rx_overflow_irq++;
942 }
943 if (unlikely(intr_status & DMA_STATUS_RU)) {
944 DBG(intr, INFO, "receive buffer unavailable\n");
945 priv->xstats.rx_buf_unav_irq++;
946 }
947 if (unlikely(intr_status & DMA_STATUS_RPS)) {
948 DBG(intr, INFO, "receive process stopped\n");
949 priv->xstats.rx_process_stopped_irq++;
950 }
951 if (unlikely(intr_status & DMA_STATUS_RWT)) {
952 DBG(intr, INFO, "receive watchdog\n");
953 priv->xstats.rx_watchdog_irq++;
954 }
955 if (unlikely(intr_status & DMA_STATUS_ETI)) {
956 DBG(intr, INFO, "transmit early interrupt\n");
957 priv->xstats.tx_early_irq++;
958 }
959 if (unlikely(intr_status & DMA_STATUS_TPS)) {
960 DBG(intr, INFO, "transmit process stopped\n");
961 priv->xstats.tx_process_stopped_irq++;
962 stmmac_tx_err(priv);
963 }
964 if (unlikely(intr_status & DMA_STATUS_FBI)) {
965 DBG(intr, INFO, "fatal bus error\n");
966 priv->xstats.fatal_bus_error_irq++;
967 stmmac_tx_err(priv);
968 } 786 }
969 } 787 stmmac_tx_err(priv);
970 788 } else if (unlikely(status == tx_hard_error))
971 /* TX/RX NORMAL interrupts */ 789 stmmac_tx_err(priv);
972 if (intr_status & DMA_STATUS_NIS) {
973 priv->xstats.normal_irq_n++;
974 if (likely((intr_status & DMA_STATUS_RI) ||
975 (intr_status & (DMA_STATUS_TI))))
976 _stmmac_schedule(priv);
977 }
978
979 /* Optional hardware blocks, interrupts should be disabled */
980 if (unlikely(intr_status &
981 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
982 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
983
984 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
985 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
986
987 DBG(intr, INFO, "\n\n");
988 790
989 return; 791 return;
990} 792}
@@ -1058,17 +860,20 @@ static int stmmac_open(struct net_device *dev)
1058 init_dma_desc_rings(dev); 860 init_dma_desc_rings(dev);
1059 861
1060 /* DMA initialization and SW reset */ 862 /* DMA initialization and SW reset */
1061 if (unlikely(priv->mac_type->ops->dma_init(ioaddr, 863 if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
1062 priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) { 864 priv->dma_rx_phy) < 0)) {
1063 865
1064 pr_err("%s: DMA initialization failed\n", __func__); 866 pr_err("%s: DMA initialization failed\n", __func__);
1065 return -1; 867 return -1;
1066 } 868 }
1067 869
1068 /* Copy the MAC addr into the HW */ 870 /* Copy the MAC addr into the HW */
1069 priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0); 871 priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
872 /* If required, perform hw setup of the bus. */
873 if (priv->bus_setup)
874 priv->bus_setup(ioaddr);
1070 /* Initialize the MAC Core */ 875 /* Initialize the MAC Core */
1071 priv->mac_type->ops->core_init(ioaddr); 876 priv->hw->mac->core_init(ioaddr);
1072 877
1073 priv->shutdown = 0; 878 priv->shutdown = 0;
1074 879
@@ -1089,16 +894,16 @@ static int stmmac_open(struct net_device *dev)
1089 894
1090 /* Start the ball rolling... */ 895 /* Start the ball rolling... */
1091 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 896 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
1092 stmmac_dma_start_tx(ioaddr); 897 priv->hw->dma->start_tx(ioaddr);
1093 stmmac_dma_start_rx(ioaddr); 898 priv->hw->dma->start_rx(ioaddr);
1094 899
1095#ifdef CONFIG_STMMAC_TIMER 900#ifdef CONFIG_STMMAC_TIMER
1096 priv->tm->timer_start(tmrate); 901 priv->tm->timer_start(tmrate);
1097#endif 902#endif
1098 /* Dump DMA/MAC registers */ 903 /* Dump DMA/MAC registers */
1099 if (netif_msg_hw(priv)) { 904 if (netif_msg_hw(priv)) {
1100 priv->mac_type->ops->dump_mac_regs(ioaddr); 905 priv->hw->mac->dump_regs(ioaddr);
1101 priv->mac_type->ops->dump_dma_regs(ioaddr); 906 priv->hw->dma->dump_regs(ioaddr);
1102 } 907 }
1103 908
1104 if (priv->phydev) 909 if (priv->phydev)
@@ -1142,8 +947,8 @@ static int stmmac_release(struct net_device *dev)
1142 free_irq(dev->irq, dev); 947 free_irq(dev->irq, dev);
1143 948
1144 /* Stop TX/RX DMA and clear the descriptors */ 949 /* Stop TX/RX DMA and clear the descriptors */
1145 stmmac_dma_stop_tx(dev->base_addr); 950 priv->hw->dma->stop_tx(dev->base_addr);
1146 stmmac_dma_stop_rx(dev->base_addr); 951 priv->hw->dma->stop_rx(dev->base_addr);
1147 952
1148 /* Release and free the Rx/Tx resources */ 953 /* Release and free the Rx/Tx resources */
1149 free_dma_desc_resources(priv); 954 free_dma_desc_resources(priv);
@@ -1214,8 +1019,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1214 desc->des2 = dma_map_single(priv->device, skb->data, 1019 desc->des2 = dma_map_single(priv->device, skb->data,
1215 BUF_SIZE_8KiB, DMA_TO_DEVICE); 1020 BUF_SIZE_8KiB, DMA_TO_DEVICE);
1216 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1021 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1217 priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB, 1022 priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
1218 csum_insertion); 1023 csum_insertion);
1219 1024
1220 entry = (++priv->cur_tx) % txsize; 1025 entry = (++priv->cur_tx) % txsize;
1221 desc = priv->dma_tx + entry; 1026 desc = priv->dma_tx + entry;
@@ -1224,16 +1029,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1224 skb->data + BUF_SIZE_8KiB, 1029 skb->data + BUF_SIZE_8KiB,
1225 buf2_size, DMA_TO_DEVICE); 1030 buf2_size, DMA_TO_DEVICE);
1226 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1031 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1227 priv->mac_type->ops->prepare_tx_desc(desc, 0, 1032 priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
1228 buf2_size, csum_insertion); 1033 csum_insertion);
1229 priv->mac_type->ops->set_tx_owner(desc); 1034 priv->hw->desc->set_tx_owner(desc);
1230 priv->tx_skbuff[entry] = NULL; 1035 priv->tx_skbuff[entry] = NULL;
1231 } else { 1036 } else {
1232 desc->des2 = dma_map_single(priv->device, skb->data, 1037 desc->des2 = dma_map_single(priv->device, skb->data,
1233 nopaged_len, DMA_TO_DEVICE); 1038 nopaged_len, DMA_TO_DEVICE);
1234 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1039 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1235 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1040 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1236 csum_insertion); 1041 csum_insertion);
1237 } 1042 }
1238 return entry; 1043 return entry;
1239} 1044}
@@ -1301,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1301 unsigned int nopaged_len = skb_headlen(skb); 1106 unsigned int nopaged_len = skb_headlen(skb);
1302 desc->des2 = dma_map_single(priv->device, skb->data, 1107 desc->des2 = dma_map_single(priv->device, skb->data,
1303 nopaged_len, DMA_TO_DEVICE); 1108 nopaged_len, DMA_TO_DEVICE);
1304 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1109 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1305 csum_insertion); 1110 csum_insertion);
1306 } 1111 }
1307 1112
1308 for (i = 0; i < nfrags; i++) { 1113 for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1122,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1317 frag->page_offset, 1122 frag->page_offset,
1318 len, DMA_TO_DEVICE); 1123 len, DMA_TO_DEVICE);
1319 priv->tx_skbuff[entry] = NULL; 1124 priv->tx_skbuff[entry] = NULL;
1320 priv->mac_type->ops->prepare_tx_desc(desc, 0, len, 1125 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
1321 csum_insertion); 1126 priv->hw->desc->set_tx_owner(desc);
1322 priv->mac_type->ops->set_tx_owner(desc);
1323 } 1127 }
1324 1128
1325 /* Interrupt on completition only for the latest segment */ 1129 /* Interrupt on completition only for the latest segment */
1326 priv->mac_type->ops->close_tx_desc(desc); 1130 priv->hw->desc->close_tx_desc(desc);
1327 1131
1328#ifdef CONFIG_STMMAC_TIMER 1132#ifdef CONFIG_STMMAC_TIMER
1329 /* Clean IC while using timer */ 1133 /* Clean IC while using timer */
1330 if (likely(priv->tm->enable)) 1134 if (likely(priv->tm->enable))
1331 priv->mac_type->ops->clear_tx_ic(desc); 1135 priv->hw->desc->clear_tx_ic(desc);
1332#endif 1136#endif
1333 /* To avoid raise condition */ 1137 /* To avoid raise condition */
1334 priv->mac_type->ops->set_tx_owner(first); 1138 priv->hw->desc->set_tx_owner(first);
1335 1139
1336 priv->cur_tx++; 1140 priv->cur_tx++;
1337 1141
@@ -1353,8 +1157,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1353 1157
1354 dev->stats.tx_bytes += skb->len; 1158 dev->stats.tx_bytes += skb->len;
1355 1159
1356 /* CSR1 enables the transmit DMA to check for new descriptor */ 1160 priv->hw->dma->enable_dma_transmission(dev->base_addr);
1357 writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
1358 1161
1359 return NETDEV_TX_OK; 1162 return NETDEV_TX_OK;
1360} 1163}
@@ -1391,7 +1194,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1391 } 1194 }
1392 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); 1195 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1393 } 1196 }
1394 priv->mac_type->ops->set_rx_owner(p + entry); 1197 priv->hw->desc->set_rx_owner(p + entry);
1395 } 1198 }
1396 return; 1199 return;
1397} 1200}
@@ -1412,7 +1215,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1412 } 1215 }
1413#endif 1216#endif
1414 count = 0; 1217 count = 0;
1415 while (!priv->mac_type->ops->get_rx_owner(p)) { 1218 while (!priv->hw->desc->get_rx_owner(p)) {
1416 int status; 1219 int status;
1417 1220
1418 if (count >= limit) 1221 if (count >= limit)
@@ -1425,15 +1228,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1425 prefetch(p_next); 1228 prefetch(p_next);
1426 1229
1427 /* read the status of the incoming frame */ 1230 /* read the status of the incoming frame */
1428 status = (priv->mac_type->ops->rx_status(&priv->dev->stats, 1231 status = (priv->hw->desc->rx_status(&priv->dev->stats,
1429 &priv->xstats, p)); 1232 &priv->xstats, p));
1430 if (unlikely(status == discard_frame)) 1233 if (unlikely(status == discard_frame))
1431 priv->dev->stats.rx_errors++; 1234 priv->dev->stats.rx_errors++;
1432 else { 1235 else {
1433 struct sk_buff *skb; 1236 struct sk_buff *skb;
1434 /* Length should omit the CRC */ 1237 /* Length should omit the CRC */
1435 int frame_len = 1238 int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
1436 priv->mac_type->ops->get_rx_frame_len(p) - 4;
1437 1239
1438#ifdef STMMAC_RX_DEBUG 1240#ifdef STMMAC_RX_DEBUG
1439 if (frame_len > ETH_FRAME_LEN) 1241 if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1371,7 @@ static void stmmac_multicast_list(struct net_device *dev)
1569 struct stmmac_priv *priv = netdev_priv(dev); 1371 struct stmmac_priv *priv = netdev_priv(dev);
1570 1372
1571 spin_lock(&priv->lock); 1373 spin_lock(&priv->lock);
1572 priv->mac_type->ops->set_filter(dev); 1374 priv->hw->mac->set_filter(dev);
1573 spin_unlock(&priv->lock); 1375 spin_unlock(&priv->lock);
1574 return; 1376 return;
1575} 1377}
@@ -1623,9 +1425,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1623 if (priv->is_gmac) { 1425 if (priv->is_gmac) {
1624 unsigned long ioaddr = dev->base_addr; 1426 unsigned long ioaddr = dev->base_addr;
1625 /* To handle GMAC own interrupts */ 1427 /* To handle GMAC own interrupts */
1626 priv->mac_type->ops->host_irq_status(ioaddr); 1428 priv->hw->mac->host_irq_status(ioaddr);
1627 } 1429 }
1628 stmmac_dma_interrupt(dev); 1430
1431 stmmac_dma_interrupt(priv);
1629 1432
1630 return IRQ_HANDLED; 1433 return IRQ_HANDLED;
1631} 1434}
@@ -1744,7 +1547,7 @@ static int stmmac_probe(struct net_device *dev)
1744 netif_napi_add(dev, &priv->napi, stmmac_poll, 64); 1547 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1745 1548
1746 /* Get the MAC address */ 1549 /* Get the MAC address */
1747 priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0); 1550 priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
1748 1551
1749 if (!is_valid_ether_addr(dev->dev_addr)) 1552 if (!is_valid_ether_addr(dev->dev_addr))
1750 pr_warning("\tno valid MAC address;" 1553 pr_warning("\tno valid MAC address;"
@@ -1779,16 +1582,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1779 struct mac_device_info *device; 1582 struct mac_device_info *device;
1780 1583
1781 if (priv->is_gmac) 1584 if (priv->is_gmac)
1782 device = gmac_setup(ioaddr); 1585 device = dwmac1000_setup(ioaddr);
1783 else 1586 else
1784 device = mac100_setup(ioaddr); 1587 device = dwmac100_setup(ioaddr);
1785 1588
1786 if (!device) 1589 if (!device)
1787 return -ENOMEM; 1590 return -ENOMEM;
1788 1591
1789 priv->mac_type = device; 1592 priv->hw = device;
1790 1593
1791 priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */ 1594 priv->wolenabled = priv->hw->pmt; /* PMT supported */
1792 if (priv->wolenabled == PMT_SUPPORTED) 1595 if (priv->wolenabled == PMT_SUPPORTED)
1793 priv->wolopts = WAKE_MAGIC; /* Magic Frame */ 1596 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1794 1597
@@ -1797,8 +1600,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1797 1600
1798static int stmmacphy_dvr_probe(struct platform_device *pdev) 1601static int stmmacphy_dvr_probe(struct platform_device *pdev)
1799{ 1602{
1800 struct plat_stmmacphy_data *plat_dat; 1603 struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
1801 plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
1802 1604
1803 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n", 1605 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
1804 plat_dat->bus_id); 1606 plat_dat->bus_id);
@@ -1830,9 +1632,7 @@ static struct platform_driver stmmacphy_driver = {
1830static int stmmac_associate_phy(struct device *dev, void *data) 1632static int stmmac_associate_phy(struct device *dev, void *data)
1831{ 1633{
1832 struct stmmac_priv *priv = (struct stmmac_priv *)data; 1634 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1833 struct plat_stmmacphy_data *plat_dat; 1635 struct plat_stmmacphy_data *plat_dat = dev->platform_data;
1834
1835 plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
1836 1636
1837 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__, 1637 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
1838 plat_dat->bus_id); 1638 plat_dat->bus_id);
@@ -1922,7 +1722,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1922 priv = netdev_priv(ndev); 1722 priv = netdev_priv(ndev);
1923 priv->device = &(pdev->dev); 1723 priv->device = &(pdev->dev);
1924 priv->dev = ndev; 1724 priv->dev = ndev;
1925 plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data); 1725 plat_dat = pdev->dev.platform_data;
1926 priv->bus_id = plat_dat->bus_id; 1726 priv->bus_id = plat_dat->bus_id;
1927 priv->pbl = plat_dat->pbl; /* TLI */ 1727 priv->pbl = plat_dat->pbl; /* TLI */
1928 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ 1728 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1732,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1932 /* Set the I/O base addr */ 1732 /* Set the I/O base addr */
1933 ndev->base_addr = (unsigned long)addr; 1733 ndev->base_addr = (unsigned long)addr;
1934 1734
1735 /* Verify embedded resource for the platform */
1736 ret = stmmac_claim_resource(pdev);
1737 if (ret < 0)
1738 goto out;
1739
1935 /* MAC HW revice detection */ 1740 /* MAC HW revice detection */
1936 ret = stmmac_mac_device_setup(ndev); 1741 ret = stmmac_mac_device_setup(ndev);
1937 if (ret < 0) 1742 if (ret < 0)
@@ -1952,6 +1757,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1952 } 1757 }
1953 1758
1954 priv->fix_mac_speed = plat_dat->fix_mac_speed; 1759 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1760 priv->bus_setup = plat_dat->bus_setup;
1955 priv->bsp_priv = plat_dat->bsp_priv; 1761 priv->bsp_priv = plat_dat->bsp_priv;
1956 1762
1957 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1763 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1986,12 +1792,13 @@ out:
1986static int stmmac_dvr_remove(struct platform_device *pdev) 1792static int stmmac_dvr_remove(struct platform_device *pdev)
1987{ 1793{
1988 struct net_device *ndev = platform_get_drvdata(pdev); 1794 struct net_device *ndev = platform_get_drvdata(pdev);
1795 struct stmmac_priv *priv = netdev_priv(ndev);
1989 struct resource *res; 1796 struct resource *res;
1990 1797
1991 pr_info("%s:\n\tremoving driver", __func__); 1798 pr_info("%s:\n\tremoving driver", __func__);
1992 1799
1993 stmmac_dma_stop_rx(ndev->base_addr); 1800 priv->hw->dma->stop_rx(ndev->base_addr);
1994 stmmac_dma_stop_tx(ndev->base_addr); 1801 priv->hw->dma->stop_tx(ndev->base_addr);
1995 1802
1996 stmmac_mac_disable_rx(ndev->base_addr); 1803 stmmac_mac_disable_rx(ndev->base_addr);
1997 stmmac_mac_disable_tx(ndev->base_addr); 1804 stmmac_mac_disable_tx(ndev->base_addr);
@@ -2038,21 +1845,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
2038 napi_disable(&priv->napi); 1845 napi_disable(&priv->napi);
2039 1846
2040 /* Stop TX/RX DMA */ 1847 /* Stop TX/RX DMA */
2041 stmmac_dma_stop_tx(dev->base_addr); 1848 priv->hw->dma->stop_tx(dev->base_addr);
2042 stmmac_dma_stop_rx(dev->base_addr); 1849 priv->hw->dma->stop_rx(dev->base_addr);
2043 /* Clear the Rx/Tx descriptors */ 1850 /* Clear the Rx/Tx descriptors */
2044 priv->mac_type->ops->init_rx_desc(priv->dma_rx, 1851 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
2045 priv->dma_rx_size, dis_ic); 1852 dis_ic);
2046 priv->mac_type->ops->init_tx_desc(priv->dma_tx, 1853 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
2047 priv->dma_tx_size);
2048 1854
2049 stmmac_mac_disable_tx(dev->base_addr); 1855 stmmac_mac_disable_tx(dev->base_addr);
2050 1856
2051 if (device_may_wakeup(&(pdev->dev))) { 1857 if (device_may_wakeup(&(pdev->dev))) {
2052 /* Enable Power down mode by programming the PMT regs */ 1858 /* Enable Power down mode by programming the PMT regs */
2053 if (priv->wolenabled == PMT_SUPPORTED) 1859 if (priv->wolenabled == PMT_SUPPORTED)
2054 priv->mac_type->ops->pmt(dev->base_addr, 1860 priv->hw->mac->pmt(dev->base_addr,
2055 priv->wolopts); 1861 priv->wolopts);
2056 } else { 1862 } else {
2057 stmmac_mac_disable_rx(dev->base_addr); 1863 stmmac_mac_disable_rx(dev->base_addr);
2058 } 1864 }
@@ -2093,15 +1899,15 @@ static int stmmac_resume(struct platform_device *pdev)
2093 * from another devices (e.g. serial console). */ 1899 * from another devices (e.g. serial console). */
2094 if (device_may_wakeup(&(pdev->dev))) 1900 if (device_may_wakeup(&(pdev->dev)))
2095 if (priv->wolenabled == PMT_SUPPORTED) 1901 if (priv->wolenabled == PMT_SUPPORTED)
2096 priv->mac_type->ops->pmt(dev->base_addr, 0); 1902 priv->hw->mac->pmt(dev->base_addr, 0);
2097 1903
2098 netif_device_attach(dev); 1904 netif_device_attach(dev);
2099 1905
2100 /* Enable the MAC and DMA */ 1906 /* Enable the MAC and DMA */
2101 stmmac_mac_enable_rx(ioaddr); 1907 stmmac_mac_enable_rx(ioaddr);
2102 stmmac_mac_enable_tx(ioaddr); 1908 stmmac_mac_enable_tx(ioaddr);
2103 stmmac_dma_start_tx(ioaddr); 1909 priv->hw->dma->start_tx(ioaddr);
2104 stmmac_dma_start_rx(ioaddr); 1910 priv->hw->dma->start_rx(ioaddr);
2105 1911
2106#ifdef CONFIG_STMMAC_TIMER 1912#ifdef CONFIG_STMMAC_TIMER
2107 priv->tm->timer_start(tmrate); 1913 priv->tm->timer_start(tmrate);
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22f..fffe1d037fe 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,7 +24,6 @@
24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> 24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/ 25*******************************************************************************/
26 26
27#include <linux/netdevice.h>
28#include <linux/mii.h> 27#include <linux/mii.h>
29#include <linux/phy.h> 28#include <linux/phy.h>
30 29
@@ -48,8 +47,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
48 struct net_device *ndev = bus->priv; 47 struct net_device *ndev = bus->priv;
49 struct stmmac_priv *priv = netdev_priv(ndev); 48 struct stmmac_priv *priv = netdev_priv(ndev);
50 unsigned long ioaddr = ndev->base_addr; 49 unsigned long ioaddr = ndev->base_addr;
51 unsigned int mii_address = priv->mac_type->hw.mii.addr; 50 unsigned int mii_address = priv->hw->mii.addr;
52 unsigned int mii_data = priv->mac_type->hw.mii.data; 51 unsigned int mii_data = priv->hw->mii.data;
53 52
54 int data; 53 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +79,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
80 struct net_device *ndev = bus->priv; 79 struct net_device *ndev = bus->priv;
81 struct stmmac_priv *priv = netdev_priv(ndev); 80 struct stmmac_priv *priv = netdev_priv(ndev);
82 unsigned long ioaddr = ndev->base_addr; 81 unsigned long ioaddr = ndev->base_addr;
83 unsigned int mii_address = priv->mac_type->hw.mii.addr; 82 unsigned int mii_address = priv->hw->mii.addr;
84 unsigned int mii_data = priv->mac_type->hw.mii.data; 83 unsigned int mii_data = priv->hw->mii.data;
85 84
86 u16 value = 85 u16 value =
87 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 86 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +111,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
112 struct net_device *ndev = bus->priv; 111 struct net_device *ndev = bus->priv;
113 struct stmmac_priv *priv = netdev_priv(ndev); 112 struct stmmac_priv *priv = netdev_priv(ndev);
114 unsigned long ioaddr = ndev->base_addr; 113 unsigned long ioaddr = ndev->base_addr;
115 unsigned int mii_address = priv->mac_type->hw.mii.addr; 114 unsigned int mii_address = priv->hw->mii.addr;
116 115
117 if (priv->phy_reset) { 116 if (priv->phy_reset) {
118 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 117 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca6..0c972e560cf 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -206,7 +206,7 @@ IVc. Errata
206#define USE_IO_OPS 1 206#define USE_IO_OPS 1
207#endif 207#endif
208 208
209static const struct pci_device_id sundance_pci_tbl[] = { 209static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, 210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, 211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, 212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab..b55ceb88d93 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
107#define GEM_MODULE_NAME "gem" 107#define GEM_MODULE_NAME "gem"
108#define PFX GEM_MODULE_NAME ": " 108#define PFX GEM_MODULE_NAME ": "
109 109
110static struct pci_device_id gem_pci_tbl[] = { 110static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
111 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 111 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
113 113
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8..76ccd31cbf5 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3211,7 +3211,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
3211 dev_set_drvdata(&pdev->dev, NULL); 3211 dev_set_drvdata(&pdev->dev, NULL);
3212} 3212}
3213 3213
3214static struct pci_device_id happymeal_pci_ids[] = { 3214static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
3215 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, 3215 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3216 { } /* Terminating entry */ 3216 { } /* Terminating entry */
3217}; 3217};
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index bc74db0d12f..d65764ea1d8 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1062,10 +1062,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
1062 goto err_out_free_dev; 1062 goto err_out_free_dev;
1063 } 1063 }
1064 1064
1065 printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); 1065 printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
1066
1067 for (i = 0; i < 6; i++)
1068 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
1069 1066
1070 list_add(&vp->list, &vnet_list); 1067 list_add(&vp->list, &vnet_list);
1071 1068
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5..033408f589f 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
65 { "TOSHIBA TC35815/TX4939" }, 65 { "TOSHIBA TC35815/TX4939" },
66}; 66};
67 67
68static const struct pci_device_id tc35815_pci_tbl[] = { 68static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, 69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU }, 70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, 71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b93..b907bee31fd 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -64,7 +64,7 @@
64 64
65#include "tehuti.h" 65#include "tehuti.h"
66 66
67static struct pci_device_id __devinitdata bdx_pci_tbl[] = { 67static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
68 {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 68 {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
69 {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 69 {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
70 {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 70 {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7f82b0238e0..7195bdec17f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -174,7 +174,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
174module_param(tg3_debug, int, 0); 174module_param(tg3_debug, int, 0);
175MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); 175MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
176 176
177static struct pci_device_id tg3_pci_tbl[] = { 177static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, 178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, 179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, 180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
@@ -244,6 +244,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, 244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, 245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, 246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 253 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 254 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 255 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1564,7 +1570,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1564{ 1570{
1565 u32 reg; 1571 u32 reg;
1566 1572
1567 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 1573 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1574 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1575 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1568 return; 1576 return;
1569 1577
1570 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 1578 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
@@ -1939,6 +1947,10 @@ static int tg3_phy_reset(struct tg3 *tp)
1939 } 1947 }
1940 } 1948 }
1941 1949
1950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1951 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1952 return 0;
1953
1942 tg3_phy_apply_otp(tp); 1954 tg3_phy_apply_otp(tp);
1943 1955
1944 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 1956 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
@@ -2019,7 +2031,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2019{ 2031{
2020 struct tg3 *tp_peer = tp; 2032 struct tg3 *tp_peer = tp;
2021 2033
2022 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) 2034 /* The GPIOs do something completely different on 57765. */
2035 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2023 return; 2037 return;
2024 2038
2025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
@@ -7439,10 +7453,13 @@ static void tg3_rings_reset(struct tg3 *tp)
7439 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { 7453 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7440 tp->napi[i].tx_prod = 0; 7454 tp->napi[i].tx_prod = 0;
7441 tp->napi[i].tx_cons = 0; 7455 tp->napi[i].tx_cons = 0;
7442 tw32_mailbox(tp->napi[i].prodmbox, 0); 7456 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7457 tw32_mailbox(tp->napi[i].prodmbox, 0);
7443 tw32_rx_mbox(tp->napi[i].consmbox, 0); 7458 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7444 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 7459 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7445 } 7460 }
7461 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7462 tw32_mailbox(tp->napi[0].prodmbox, 0);
7446 } else { 7463 } else {
7447 tp->napi[0].tx_prod = 0; 7464 tp->napi[0].tx_prod = 0;
7448 tp->napi[0].tx_cons = 0; 7465 tp->napi[0].tx_cons = 0;
@@ -7574,6 +7591,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7574 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 7591 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7575 } 7592 }
7576 7593
7594 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7595 u32 grc_mode = tr32(GRC_MODE);
7596
7597 /* Access the lower 1K of PL PCIE block registers. */
7598 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7599 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7600
7601 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7602 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7603 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7604
7605 tw32(GRC_MODE, grc_mode);
7606 }
7607
7577 /* This works around an issue with Athlon chipsets on 7608 /* This works around an issue with Athlon chipsets on
7578 * B3 tigon3 silicon. This bit has no effect on any 7609 * B3 tigon3 silicon. This bit has no effect on any
7579 * other revision. But do not set this on PCI Express 7610 * other revision. But do not set this on PCI Express
@@ -7772,7 +7803,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7772 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7803 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7773 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7804 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7774 BDINFO_FLAGS_USE_EXT_RECV); 7805 BDINFO_FLAGS_USE_EXT_RECV);
7775 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7806 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7776 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7807 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7777 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7808 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7778 } else { 7809 } else {
@@ -8143,7 +8174,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8143 /* Prevent chip from dropping frames when flow control 8174 /* Prevent chip from dropping frames when flow control
8144 * is enabled. 8175 * is enabled.
8145 */ 8176 */
8146 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); 8177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8178 val = 1;
8179 else
8180 val = 2;
8181 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8147 8182
8148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 8183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8149 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 8184 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
@@ -10640,12 +10675,27 @@ static int tg3_test_memory(struct tg3 *tp)
10640 { 0x00008000, 0x01000}, 10675 { 0x00008000, 0x01000},
10641 { 0x00010000, 0x01000}, 10676 { 0x00010000, 0x01000},
10642 { 0xffffffff, 0x00000} 10677 { 0xffffffff, 0x00000}
10678 }, mem_tbl_5717[] = {
10679 { 0x00000200, 0x00008},
10680 { 0x00010000, 0x0a000},
10681 { 0x00020000, 0x13c00},
10682 { 0xffffffff, 0x00000}
10683 }, mem_tbl_57765[] = {
10684 { 0x00000200, 0x00008},
10685 { 0x00004000, 0x00800},
10686 { 0x00006000, 0x09800},
10687 { 0x00010000, 0x0a000},
10688 { 0xffffffff, 0x00000}
10643 }; 10689 };
10644 struct mem_entry *mem_tbl; 10690 struct mem_entry *mem_tbl;
10645 int err = 0; 10691 int err = 0;
10646 int i; 10692 int i;
10647 10693
10648 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 10694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
10695 mem_tbl = mem_tbl_5717;
10696 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10697 mem_tbl = mem_tbl_57765;
10698 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10649 mem_tbl = mem_tbl_5755; 10699 mem_tbl = mem_tbl_5755;
10650 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 10700 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10651 mem_tbl = mem_tbl_5906; 10701 mem_tbl = mem_tbl_5906;
@@ -13102,6 +13152,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13102 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || 13152 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13103 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) 13153 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13104 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; 13154 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13155 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13156 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13105 } 13157 }
13106 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 13158 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13107 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13159 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
@@ -13290,7 +13342,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13290 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 13342 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13291 13343
13292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13294 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 13347 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13295 13348
13296 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 13349 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14086,9 +14139,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
14086 14139
14087static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14140static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14088{ 14141{
14089 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS && 14142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14090 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 14143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { 14144 tp->bufmgr_config.mbuf_read_dma_low_water =
14145 DEFAULT_MB_RDMA_LOW_WATER_5705;
14146 tp->bufmgr_config.mbuf_mac_rx_low_water =
14147 DEFAULT_MB_MACRX_LOW_WATER_57765;
14148 tp->bufmgr_config.mbuf_high_water =
14149 DEFAULT_MB_HIGH_WATER_57765;
14150
14151 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14152 DEFAULT_MB_RDMA_LOW_WATER_5705;
14153 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14154 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14155 tp->bufmgr_config.mbuf_high_water_jumbo =
14156 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14157 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14092 tp->bufmgr_config.mbuf_read_dma_low_water = 14158 tp->bufmgr_config.mbuf_read_dma_low_water =
14093 DEFAULT_MB_RDMA_LOW_WATER_5705; 14159 DEFAULT_MB_RDMA_LOW_WATER_5705;
14094 tp->bufmgr_config.mbuf_mac_rx_low_water = 14160 tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14148,7 +14214,9 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
14148 case PHY_ID_BCM5756: return "5722/5756"; 14214 case PHY_ID_BCM5756: return "5722/5756";
14149 case PHY_ID_BCM5906: return "5906"; 14215 case PHY_ID_BCM5906: return "5906";
14150 case PHY_ID_BCM5761: return "5761"; 14216 case PHY_ID_BCM5761: return "5761";
14151 case PHY_ID_BCM5717: return "5717"; 14217 case PHY_ID_BCM5718C: return "5718C";
14218 case PHY_ID_BCM5718S: return "5718S";
14219 case PHY_ID_BCM57765: return "57765";
14152 case PHY_ID_BCM8002: return "8002/serdes"; 14220 case PHY_ID_BCM8002: return "8002/serdes";
14153 case 0: return "serdes"; 14221 case 0: return "serdes";
14154 default: return "unknown"; 14222 default: return "unknown";
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8a167912902..e7f6214a168 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1206,14 +1206,18 @@
1206#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020 1206#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
1207#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010 1207#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
1208#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004 1208#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
1209#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
1209#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098 1210#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
1210#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b 1211#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
1212#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
1211#define BUFMGR_MB_HIGH_WATER 0x00004418 1213#define BUFMGR_MB_HIGH_WATER 0x00004418
1212#define DEFAULT_MB_HIGH_WATER 0x00000060 1214#define DEFAULT_MB_HIGH_WATER 0x00000060
1213#define DEFAULT_MB_HIGH_WATER_5705 0x00000060 1215#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
1214#define DEFAULT_MB_HIGH_WATER_5906 0x00000010 1216#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
1217#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
1215#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c 1218#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
1216#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096 1219#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
1220#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
1217#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c 1221#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
1218#define BUFMGR_MB_ALLOC_BIT 0x10000000 1222#define BUFMGR_MB_ALLOC_BIT 0x10000000
1219#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420 1223#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
@@ -1543,6 +1547,8 @@
1543#define GRC_MODE_HOST_SENDBDS 0x00020000 1547#define GRC_MODE_HOST_SENDBDS 0x00020000
1544#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 1548#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
1545#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 1549#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
1550#define GRC_MODE_PCIE_TL_SEL 0x00000000
1551#define GRC_MODE_PCIE_PL_SEL 0x00400000
1546#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000 1552#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
1547#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000 1553#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
1548#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000 1554#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
@@ -1550,7 +1556,13 @@
1550#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000 1556#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
1551#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000 1557#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
1552#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000 1558#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
1559#define GRC_MODE_PCIE_DL_SEL 0x20000000
1553#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000 1560#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
1561#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
1562#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
1563 GRC_MODE_PCIE_PL_SEL | \
1564 GRC_MODE_PCIE_DL_SEL | \
1565 GRC_MODE_PCIE_HI_1K_EN)
1554#define GRC_MISC_CFG 0x00006804 1566#define GRC_MISC_CFG 0x00006804
1555#define GRC_MISC_CFG_CORECLK_RESET 0x00000001 1567#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
1556#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe 1568#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
@@ -1804,6 +1816,11 @@
1804/* 0x7e74 --> 0x8000 unused */ 1816/* 0x7e74 --> 0x8000 unused */
1805 1817
1806 1818
1819/* Alternate PCIE definitions */
1820#define TG3_PCIE_TLDLPL_PORT 0x00007c00
1821#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
1822#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
1823
1807/* OTP bit definitions */ 1824/* OTP bit definitions */
1808#define TG3_OTP_AGCTGT_MASK 0x000000e0 1825#define TG3_OTP_AGCTGT_MASK 0x000000e0
1809#define TG3_OTP_AGCTGT_SHIFT 1 1826#define TG3_OTP_AGCTGT_SHIFT 1
@@ -2812,6 +2829,7 @@ struct tg3 {
2812#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000 2829#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
2813#define TG3_FLG3_SHORT_DMA_BUG 0x00200000 2830#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2814#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 2831#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2832#define TG3_FLG3_L1PLLPD_EN 0x00800000
2815 2833
2816 struct timer_list timer; 2834 struct timer_list timer;
2817 u16 timer_counter; 2835 u16 timer_counter;
@@ -2878,7 +2896,9 @@ struct tg3 {
2878#define PHY_ID_BCM5756 0xbc050ed0 2896#define PHY_ID_BCM5756 0xbc050ed0
2879#define PHY_ID_BCM5784 0xbc050fa0 2897#define PHY_ID_BCM5784 0xbc050fa0
2880#define PHY_ID_BCM5761 0xbc050fd0 2898#define PHY_ID_BCM5761 0xbc050fd0
2881#define PHY_ID_BCM5717 0x5c0d8a00 2899#define PHY_ID_BCM5718C 0x5c0d8a00
2900#define PHY_ID_BCM5718S 0xbc050ff0
2901#define PHY_ID_BCM57765 0x5c0d8a40
2882#define PHY_ID_BCM5906 0xdc00ac40 2902#define PHY_ID_BCM5906 0xdc00ac40
2883#define PHY_ID_BCM8002 0x60010140 2903#define PHY_ID_BCM8002 0x60010140
2884#define PHY_ID_INVALID 0xffffffff 2904#define PHY_ID_INVALID 0xffffffff
@@ -2921,7 +2941,8 @@ struct tg3 {
2921 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ 2941 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2922 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ 2942 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
2923 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ 2943 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
2924 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002) 2944 (X) == PHY_ID_BCM5718C || (X) == PHY_ID_BCM5718S || \
2945 (X) == PHY_ID_BCM57765 || (X) == PHY_ID_BCM8002)
2925 2946
2926 struct tg3_hw_stats *hw_stats; 2947 struct tg3_hw_stats *hw_stats;
2927 dma_addr_t stats_mapping; 2948 dma_addr_t stats_mapping;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb315..3ec31dce99f 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
255}; 255};
256 256
257static struct pci_device_id tlan_pci_tbl[] = { 257static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, 258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, 260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
@@ -338,7 +338,7 @@ static int TLan_PhyInternalService( struct net_device * );
338static int TLan_PhyDp83840aCheck( struct net_device * ); 338static int TLan_PhyDp83840aCheck( struct net_device * );
339*/ 339*/
340 340
341static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); 341static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
342static void TLan_MiiSendData( u16, u32, unsigned ); 342static void TLan_MiiSendData( u16, u32, unsigned );
343static void TLan_MiiSync( u16 ); 343static void TLan_MiiSync( u16 );
344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); 344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
@@ -2204,7 +2204,7 @@ TLan_ResetAdapter( struct net_device *dev )
2204 u32 data; 2204 u32 data;
2205 u8 data8; 2205 u8 data8;
2206 2206
2207 priv->tlanFullDuplex = FALSE; 2207 priv->tlanFullDuplex = false;
2208 priv->phyOnline=0; 2208 priv->phyOnline=0;
2209 netif_carrier_off(dev); 2209 netif_carrier_off(dev);
2210 2210
@@ -2259,7 +2259,7 @@ TLan_ResetAdapter( struct net_device *dev )
2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); 2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); 2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
2262 priv->tlanFullDuplex = TRUE; 2262 priv->tlanFullDuplex = true;
2263 } else { 2263 } else {
2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); 2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
2265 } 2265 }
@@ -2651,14 +2651,14 @@ static void TLan_PhyStartLink( struct net_device *dev )
2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); 2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
2652 } else if ( priv->speed == TLAN_SPEED_10 && 2652 } else if ( priv->speed == TLAN_SPEED_10 &&
2653 priv->duplex == TLAN_DUPLEX_FULL) { 2653 priv->duplex == TLAN_DUPLEX_FULL) {
2654 priv->tlanFullDuplex = TRUE; 2654 priv->tlanFullDuplex = true;
2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); 2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
2656 } else if ( priv->speed == TLAN_SPEED_100 && 2656 } else if ( priv->speed == TLAN_SPEED_100 &&
2657 priv->duplex == TLAN_DUPLEX_HALF) { 2657 priv->duplex == TLAN_DUPLEX_HALF) {
2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); 2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
2659 } else if ( priv->speed == TLAN_SPEED_100 && 2659 } else if ( priv->speed == TLAN_SPEED_100 &&
2660 priv->duplex == TLAN_DUPLEX_FULL) { 2660 priv->duplex == TLAN_DUPLEX_FULL) {
2661 priv->tlanFullDuplex = TRUE; 2661 priv->tlanFullDuplex = true;
2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); 2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
2663 } else { 2663 } else {
2664 2664
@@ -2695,7 +2695,7 @@ static void TLan_PhyStartLink( struct net_device *dev )
2695 tctl &= ~TLAN_TC_AUISEL; 2695 tctl &= ~TLAN_TC_AUISEL;
2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) {
2697 control |= MII_GC_DUPLEX; 2697 control |= MII_GC_DUPLEX;
2698 priv->tlanFullDuplex = TRUE; 2698 priv->tlanFullDuplex = true;
2699 } 2699 }
2700 if ( priv->speed == TLAN_SPEED_100 ) { 2700 if ( priv->speed == TLAN_SPEED_100 ) {
2701 control |= MII_GC_SPEEDSEL; 2701 control |= MII_GC_SPEEDSEL;
@@ -2750,9 +2750,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); 2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
2751 mode = an_adv & an_lpa & 0x03E0; 2751 mode = an_adv & an_lpa & 0x03E0;
2752 if ( mode & 0x0100 ) { 2752 if ( mode & 0x0100 ) {
2753 priv->tlanFullDuplex = TRUE; 2753 priv->tlanFullDuplex = true;
2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { 2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
2755 priv->tlanFullDuplex = TRUE; 2755 priv->tlanFullDuplex = true;
2756 } 2756 }
2757 2757
2758 if ( ( ! ( mode & 0x0180 ) ) && 2758 if ( ( ! ( mode & 0x0180 ) ) &&
@@ -2855,8 +2855,8 @@ void TLan_PhyMonitor( struct net_device *dev )
2855 * TLan_MiiReadReg 2855 * TLan_MiiReadReg
2856 * 2856 *
2857 * Returns: 2857 * Returns:
2858 * 0 if ack received ok 2858 * false if ack received ok
2859 * 1 otherwise. 2859 * true if no ack received or other error
2860 * 2860 *
2861 * Parms: 2861 * Parms:
2862 * dev The device structure containing 2862 * dev The device structure containing
@@ -2875,17 +2875,17 @@ void TLan_PhyMonitor( struct net_device *dev )
2875 * 2875 *
2876 **************************************************************/ 2876 **************************************************************/
2877 2877
2878static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) 2878static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
2879{ 2879{
2880 u8 nack; 2880 u8 nack;
2881 u16 sio, tmp; 2881 u16 sio, tmp;
2882 u32 i; 2882 u32 i;
2883 int err; 2883 bool err;
2884 int minten; 2884 int minten;
2885 TLanPrivateInfo *priv = netdev_priv(dev); 2885 TLanPrivateInfo *priv = netdev_priv(dev);
2886 unsigned long flags = 0; 2886 unsigned long flags = 0;
2887 2887
2888 err = FALSE; 2888 err = false;
2889 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); 2889 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
2890 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2890 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
2891 2891
@@ -2918,7 +2918,7 @@ static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
2919 } 2919 }
2920 tmp = 0xffff; 2920 tmp = 0xffff;
2921 err = TRUE; 2921 err = true;
2922 } else { /* ACK, so read data */ 2922 } else { /* ACK, so read data */
2923 for (tmp = 0, i = 0x8000; i; i >>= 1) { 2923 for (tmp = 0, i = 0x8000; i; i >>= 1) {
2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 4b82f283e98..d13ff12d750 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -31,9 +31,6 @@
31 * 31 *
32 ****************************************************************/ 32 ****************************************************************/
33 33
34#define FALSE 0
35#define TRUE 1
36
37#define TLAN_MIN_FRAME_SIZE 64 34#define TLAN_MIN_FRAME_SIZE 64
38#define TLAN_MAX_FRAME_SIZE 1600 35#define TLAN_MAX_FRAME_SIZE 1600
39 36
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d962..b0d7db9d8bb 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
117 * will be stuck with 1555 lines of hex #'s in the code. 117 * will be stuck with 1555 lines of hex #'s in the code.
118 */ 118 */
119 119
120static struct pci_device_id xl_pci_tbl[] = 120static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
121{ 121{
122 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, }, 122 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
123 { } /* terminate list */ 123 { } /* terminate list */
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a..515f122777a 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
45 45
46#define ABYSS_IO_EXTENT 64 46#define ABYSS_IO_EXTENT 64
47 47
48static struct pci_device_id abyss_pci_tbl[] = { 48static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
49 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2, 49 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
50 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, }, 50 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
51 { } /* Terminating entry */ 51 { } /* Terminating entry */
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d0..3f9d5a25562 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -146,7 +146,7 @@
146static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n" 146static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
147 " v0.5.3 11/13/02 - Kent Yoder"; 147 " v0.5.3 11/13/02 - Kent Yoder";
148 148
149static struct pci_device_id streamer_pci_tbl[] = { 149static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
150 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,}, 150 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
151 {} /* terminating entry */ 151 {} /* terminating entry */
152}; 152};
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c513..f010a4dc5f1 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,}; 172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173module_param_array(network_monitor, int, NULL, 0); 173module_param_array(network_monitor, int, NULL, 0);
174 174
175static struct pci_device_id olympic_pci_tbl[] = { 175static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,}, 176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177 { } /* Terminating Entry */ 177 { } /* Terminating Entry */
178}; 178};
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdca..d4c7c0c0a3d 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
57 { {0x03, 0x01}, "3Com Token Link Velocity"}, 57 { {0x03, 0x01}, "3Com Token Link Velocity"},
58}; 58};
59 59
60static struct pci_device_id tmspci_pci_tbl[] = { 60static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
61 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 61 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
62 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 62 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
63 { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 63 { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb7..87ea39e2037 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -337,7 +337,7 @@ static void de21041_media_timer (unsigned long data);
337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); 337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
338 338
339 339
340static struct pci_device_id de_pci_tbl[] = { 340static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, 341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, 343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 6f44ebf5891..2d9f09c6189 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -2089,7 +2089,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2089 2089
2090 2090
2091 2091
2092static struct pci_device_id dmfe_pci_tbl[] = { 2092static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
2093 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID }, 2093 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2094 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID }, 2094 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2095 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID }, 2095 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 20696b5d60a..da4fc458f90 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -211,7 +211,7 @@ struct tulip_chip_table tulip_tbl[] = {
211}; 211};
212 212
213 213
214static struct pci_device_id tulip_pci_tbl[] = { 214static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
215 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 }, 215 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
216 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 }, 216 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
217 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 }, 217 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc35..d549042a01d 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1783,7 +1783,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1783} 1783}
1784 1784
1785 1785
1786static struct pci_device_id uli526x_pci_tbl[] = { 1786static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
1787 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID }, 1787 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1788 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID }, 1788 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1789 { 0, } 1789 { 0, }
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f..23395e1ff23 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -218,7 +218,7 @@ enum chip_capability_flags {
218 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8, 218 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
219}; 219};
220 220
221static const struct pci_device_id w840_pci_tbl[] = { 221static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
222 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, 222 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
223 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 223 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
224 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 224 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d..c84123fd635 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -144,7 +144,7 @@ static int link_status(struct xircom_private *card);
144 144
145 145
146 146
147static struct pci_device_id xircom_pci_table[] = { 147static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
148 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, 148 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
149 {0,}, 149 {0,},
150}; 150};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2834a01bae2..5adb3d15055 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -144,6 +144,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
144 err = 0; 144 err = 0;
145 tfile->tun = tun; 145 tfile->tun = tun;
146 tun->tfile = tfile; 146 tun->tfile = tfile;
147 tun->socket.file = file;
147 dev_hold(tun->dev); 148 dev_hold(tun->dev);
148 sock_hold(tun->socket.sk); 149 sock_hold(tun->socket.sk);
149 atomic_inc(&tfile->count); 150 atomic_inc(&tfile->count);
@@ -158,6 +159,7 @@ static void __tun_detach(struct tun_struct *tun)
158 /* Detach from net device */ 159 /* Detach from net device */
159 netif_tx_lock_bh(tun->dev); 160 netif_tx_lock_bh(tun->dev);
160 tun->tfile = NULL; 161 tun->tfile = NULL;
162 tun->socket.file = NULL;
161 netif_tx_unlock_bh(tun->dev); 163 netif_tx_unlock_bh(tun->dev);
162 164
163 /* Drop read queue */ 165 /* Drop read queue */
@@ -387,7 +389,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
387 /* Notify and wake up reader process */ 389 /* Notify and wake up reader process */
388 if (tun->flags & TUN_FASYNC) 390 if (tun->flags & TUN_FASYNC)
389 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 391 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
390 wake_up_interruptible(&tun->socket.wait); 392 wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
393 POLLRDNORM | POLLRDBAND);
391 return NETDEV_TX_OK; 394 return NETDEV_TX_OK;
392 395
393drop: 396drop:
@@ -743,7 +746,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
743 len = min_t(int, skb->len, len); 746 len = min_t(int, skb->len, len);
744 747
745 skb_copy_datagram_const_iovec(skb, 0, iv, total, len); 748 skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
746 total += len; 749 total += skb->len;
747 750
748 tun->dev->stats.tx_packets++; 751 tun->dev->stats.tx_packets++;
749 tun->dev->stats.tx_bytes += len; 752 tun->dev->stats.tx_bytes += len;
@@ -751,34 +754,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
751 return total; 754 return total;
752} 755}
753 756
754static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, 757static ssize_t tun_do_read(struct tun_struct *tun,
755 unsigned long count, loff_t pos) 758 struct kiocb *iocb, const struct iovec *iv,
759 ssize_t len, int noblock)
756{ 760{
757 struct file *file = iocb->ki_filp;
758 struct tun_file *tfile = file->private_data;
759 struct tun_struct *tun = __tun_get(tfile);
760 DECLARE_WAITQUEUE(wait, current); 761 DECLARE_WAITQUEUE(wait, current);
761 struct sk_buff *skb; 762 struct sk_buff *skb;
762 ssize_t len, ret = 0; 763 ssize_t ret = 0;
763
764 if (!tun)
765 return -EBADFD;
766 764
767 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 765 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
768 766
769 len = iov_length(iv, count);
770 if (len < 0) {
771 ret = -EINVAL;
772 goto out;
773 }
774
775 add_wait_queue(&tun->socket.wait, &wait); 767 add_wait_queue(&tun->socket.wait, &wait);
776 while (len) { 768 while (len) {
777 current->state = TASK_INTERRUPTIBLE; 769 current->state = TASK_INTERRUPTIBLE;
778 770
779 /* Read frames from the queue */ 771 /* Read frames from the queue */
780 if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) { 772 if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
781 if (file->f_flags & O_NONBLOCK) { 773 if (noblock) {
782 ret = -EAGAIN; 774 ret = -EAGAIN;
783 break; 775 break;
784 } 776 }
@@ -805,6 +797,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
805 current->state = TASK_RUNNING; 797 current->state = TASK_RUNNING;
806 remove_wait_queue(&tun->socket.wait, &wait); 798 remove_wait_queue(&tun->socket.wait, &wait);
807 799
800 return ret;
801}
802
803static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
804 unsigned long count, loff_t pos)
805{
806 struct file *file = iocb->ki_filp;
807 struct tun_file *tfile = file->private_data;
808 struct tun_struct *tun = __tun_get(tfile);
809 ssize_t len, ret;
810
811 if (!tun)
812 return -EBADFD;
813 len = iov_length(iv, count);
814 if (len < 0) {
815 ret = -EINVAL;
816 goto out;
817 }
818
819 ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
820 ret = min_t(ssize_t, ret, len);
808out: 821out:
809 tun_put(tun); 822 tun_put(tun);
810 return ret; 823 return ret;
@@ -847,7 +860,8 @@ static void tun_sock_write_space(struct sock *sk)
847 return; 860 return;
848 861
849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 862 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
850 wake_up_interruptible_sync(sk->sk_sleep); 863 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
864 POLLWRNORM | POLLWRBAND);
851 865
852 tun = tun_sk(sk)->tun; 866 tun = tun_sk(sk)->tun;
853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 867 kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +872,37 @@ static void tun_sock_destruct(struct sock *sk)
858 free_netdev(tun_sk(sk)->tun->dev); 872 free_netdev(tun_sk(sk)->tun->dev);
859} 873}
860 874
875static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
876 struct msghdr *m, size_t total_len)
877{
878 struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
879 return tun_get_user(tun, m->msg_iov, total_len,
880 m->msg_flags & MSG_DONTWAIT);
881}
882
883static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
884 struct msghdr *m, size_t total_len,
885 int flags)
886{
887 struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
888 int ret;
889 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
890 return -EINVAL;
891 ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
892 flags & MSG_DONTWAIT);
893 if (ret > total_len) {
894 m->msg_flags |= MSG_TRUNC;
895 ret = flags & MSG_TRUNC ? ret : total_len;
896 }
897 return ret;
898}
899
900/* Ops structure to mimic raw sockets with tun */
901static const struct proto_ops tun_socket_ops = {
902 .sendmsg = tun_sendmsg,
903 .recvmsg = tun_recvmsg,
904};
905
861static struct proto tun_proto = { 906static struct proto tun_proto = {
862 .name = "tun", 907 .name = "tun",
863 .owner = THIS_MODULE, 908 .owner = THIS_MODULE,
@@ -986,6 +1031,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
986 goto err_free_dev; 1031 goto err_free_dev;
987 1032
988 init_waitqueue_head(&tun->socket.wait); 1033 init_waitqueue_head(&tun->socket.wait);
1034 tun->socket.ops = &tun_socket_ops;
989 sock_init_data(&tun->socket, sk); 1035 sock_init_data(&tun->socket, sk);
990 sk->sk_write_space = tun_sock_write_space; 1036 sk->sk_write_space = tun_sock_write_space;
991 sk->sk_sndbuf = INT_MAX; 1037 sk->sk_sndbuf = INT_MAX;
@@ -1525,6 +1571,23 @@ static void tun_cleanup(void)
1525 rtnl_link_unregister(&tun_link_ops); 1571 rtnl_link_unregister(&tun_link_ops);
1526} 1572}
1527 1573
1574/* Get an underlying socket object from tun file. Returns error unless file is
1575 * attached to a device. The returned object works like a packet socket, it
1576 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
1577 * holding a reference to the file for as long as the socket is in use. */
1578struct socket *tun_get_socket(struct file *file)
1579{
1580 struct tun_struct *tun;
1581 if (file->f_op != &tun_fops)
1582 return ERR_PTR(-EINVAL);
1583 tun = tun_get(file);
1584 if (!tun)
1585 return ERR_PTR(-EBADFD);
1586 tun_put(tun);
1587 return &tun->socket;
1588}
1589EXPORT_SYMBOL_GPL(tun_get_socket);
1590
1528module_init(tun_init); 1591module_init(tun_init);
1529module_exit(tun_cleanup); 1592module_exit(tun_cleanup);
1530MODULE_DESCRIPTION(DRV_DESCRIPTION); 1593MODULE_DESCRIPTION(DRV_DESCRIPTION);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be..6e4f754c4ba 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -215,7 +215,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
215 * bit 8 indicates if this is a (0) copper or (1) fiber card 215 * bit 8 indicates if this is a (0) copper or (1) fiber card
216 * bits 12-16 indicate card type: (0) client and (1) server 216 * bits 12-16 indicate card type: (0) client and (1) server
217 */ 217 */
218static struct pci_device_id typhoon_pci_tbl[] = { 218static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990, 219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX }, 220 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95, 221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index eb8fe7e16c6..225f65812f2 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -37,6 +37,7 @@
37#include <asm/qe.h> 37#include <asm/qe.h>
38#include <asm/ucc.h> 38#include <asm/ucc.h>
39#include <asm/ucc_fast.h> 39#include <asm/ucc_fast.h>
40#include <asm/machdep.h>
40 41
41#include "ucc_geth.h" 42#include "ucc_geth.h"
42#include "fsl_pq_mdio.h" 43#include "fsl_pq_mdio.h"
@@ -1334,7 +1335,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1334 struct ucc_geth __iomem *ug_regs; 1335 struct ucc_geth __iomem *ug_regs;
1335 struct ucc_fast __iomem *uf_regs; 1336 struct ucc_fast __iomem *uf_regs;
1336 int ret_val; 1337 int ret_val;
1337 u32 upsmr, maccfg2, tbiBaseAddress; 1338 u32 upsmr, maccfg2;
1338 u16 value; 1339 u16 value;
1339 1340
1340 ugeth_vdbg("%s: IN", __func__); 1341 ugeth_vdbg("%s: IN", __func__);
@@ -1389,14 +1390,20 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1389 /* Note that this depends on proper setting in utbipar register. */ 1390 /* Note that this depends on proper setting in utbipar register. */
1390 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1391 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1391 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1392 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1392 tbiBaseAddress = in_be32(&ug_regs->utbipar); 1393 struct ucc_geth_info *ug_info = ugeth->ug_info;
1393 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; 1394 struct phy_device *tbiphy;
1394 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; 1395
1395 value = ugeth->phydev->bus->read(ugeth->phydev->bus, 1396 if (!ug_info->tbi_node)
1396 (u8) tbiBaseAddress, ENET_TBI_MII_CR); 1397 ugeth_warn("TBI mode requires that the device "
1398 "tree specify a tbi-handle\n");
1399
1400 tbiphy = of_phy_find_device(ug_info->tbi_node);
1401 if (!tbiphy)
1402 ugeth_warn("Could not get TBI device\n");
1403
1404 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1397 value &= ~0x1000; /* Turn off autonegotiation */ 1405 value &= ~0x1000; /* Turn off autonegotiation */
1398 ugeth->phydev->bus->write(ugeth->phydev->bus, 1406 phy_write(tbiphy, ENET_TBI_MII_CR, value);
1399 (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
1400 } 1407 }
1401 1408
1402 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1409 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22b87e64a81..7d3fa06980c 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -897,11 +897,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
897 f5u011_rxmode(catc, catc->rxmode); 897 f5u011_rxmode(catc, catc->rxmode);
898 } 898 }
899 dbg("Init done."); 899 dbg("Init done.");
900 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ", 900 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
901 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", 901 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
902 usbdev->bus->bus_name, usbdev->devpath); 902 usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
903 for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
904 printk("%2.2x.\n", netdev->dev_addr[i]);
905 usb_set_intfdata(intf, catc); 903 usb_set_intfdata(intf, catc);
906 904
907 SET_NETDEV_DEV(netdev, &intf->dev); 905 SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index fd19db0d250..21ac103fbb7 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -313,20 +313,17 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
313{ 313{
314 struct sockaddr *addr = p; 314 struct sockaddr *addr = p;
315 rtl8150_t *dev = netdev_priv(netdev); 315 rtl8150_t *dev = netdev_priv(netdev);
316 int i;
317 316
318 if (netif_running(netdev)) 317 if (netif_running(netdev))
319 return -EBUSY; 318 return -EBUSY;
320 319
321 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 320 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
322 dbg("%s: Setting MAC address to ", netdev->name); 321 dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
323 for (i = 0; i < 5; i++)
324 dbg("%02X:", netdev->dev_addr[i]);
325 dbg("%02X\n", netdev->dev_addr[i]);
326 /* Set the IDR registers. */ 322 /* Set the IDR registers. */
327 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); 323 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
328#ifdef EEPROM_WRITE 324#ifdef EEPROM_WRITE
329 { 325 {
326 int i;
330 u8 cr; 327 u8 cr;
331 /* Get the CR contents. */ 328 /* Get the CR contents. */
332 get_registers(dev, CR, 1, &cr); 329 get_registers(dev, CR, 1, &cr);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 611b8043595..a7e0c84426e 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -267,7 +267,7 @@ enum rhine_quirks {
267/* Beware of PCI posted writes */ 267/* Beware of PCI posted writes */
268#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) 268#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
269 269
270static const struct pci_device_id rhine_pci_tbl[] = { 270static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
271 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ 271 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
272 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ 272 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
273 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ 273 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c93f58f5c6f..f15485efe40 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
361 * Describe the PCI device identifiers that we support in this 361 * Describe the PCI device identifiers that we support in this
362 * device driver. Used for hotplug autoloading. 362 * device driver. Used for hotplug autoloading.
363 */ 363 */
364static const struct pci_device_id velocity_id_table[] __devinitdata = { 364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, 365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366 { } 366 { }
367}; 367};
@@ -2702,10 +2702,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
2702 struct net_device *dev = vptr->dev; 2702 struct net_device *dev = vptr->dev;
2703 2703
2704 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); 2704 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2705 printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 2705 printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2706 dev->name, 2706 dev->name, dev->dev_addr);
2707 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2708 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2709} 2707}
2710 2708
2711static u32 velocity_get_link(struct net_device *dev) 2709static u32 velocity_get_link(struct net_device *dev)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ead30bd00c..6b92e383c65 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -674,6 +674,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
674 struct virtio_net_ctrl_mac *mac_data; 674 struct virtio_net_ctrl_mac *mac_data;
675 struct dev_addr_list *addr; 675 struct dev_addr_list *addr;
676 struct netdev_hw_addr *ha; 676 struct netdev_hw_addr *ha;
677 int uc_count;
677 void *buf; 678 void *buf;
678 int i; 679 int i;
679 680
@@ -700,8 +701,9 @@ static void virtnet_set_rx_mode(struct net_device *dev)
700 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 701 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
701 allmulti ? "en" : "dis"); 702 allmulti ? "en" : "dis");
702 703
704 uc_count = netdev_uc_count(dev);
703 /* MAC filter - use one buffer for both lists */ 705 /* MAC filter - use one buffer for both lists */
704 mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) + 706 mac_data = buf = kzalloc(((uc_count + dev->mc_count) * ETH_ALEN) +
705 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 707 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
706 if (!buf) { 708 if (!buf) {
707 dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 709 dev_warn(&dev->dev, "No memory for MAC address buffer\n");
@@ -711,16 +713,16 @@ static void virtnet_set_rx_mode(struct net_device *dev)
711 sg_init_table(sg, 2); 713 sg_init_table(sg, 2);
712 714
713 /* Store the unicast list and count in the front of the buffer */ 715 /* Store the unicast list and count in the front of the buffer */
714 mac_data->entries = dev->uc.count; 716 mac_data->entries = uc_count;
715 i = 0; 717 i = 0;
716 list_for_each_entry(ha, &dev->uc.list, list) 718 netdev_for_each_uc_addr(ha, dev)
717 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 719 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
718 720
719 sg_set_buf(&sg[0], mac_data, 721 sg_set_buf(&sg[0], mac_data,
720 sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN)); 722 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
721 723
722 /* multicast list and count fill the end */ 724 /* multicast list and count fill the end */
723 mac_data = (void *)&mac_data->macs[dev->uc.count][0]; 725 mac_data = (void *)&mac_data->macs[uc_count][0];
724 726
725 mac_data->entries = dev->mc_count; 727 mac_data->entries = dev->mc_count;
726 addr = dev->mc_list; 728 addr = dev->mc_list;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d7..b896f938611 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
35 * PCI Device ID Table 35 * PCI Device ID Table
36 * Last entry must be all 0s 36 * Last entry must be all 0s
37 */ 37 */
38static const struct pci_device_id vmxnet3_pciid_table[] = { 38static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
39 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, 39 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
40 {0} 40 {0}
41}; 41};
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b9685e82f7b..a6606b8948e 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("Dual BSD/GPL");
54MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O" 54MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
55 "Virtualized Server Adapter"); 55 "Virtualized Server Adapter");
56 56
57static struct pci_device_id vxge_id_table[] __devinitdata = { 57static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
58 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID, 58 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
59 PCI_ANY_ID}, 59 PCI_ANY_ID},
60 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID, 60 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -4297,10 +4297,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4297 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", 4297 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4298 vdev->ndev->name, ll_config.device_hw_info.product_desc); 4298 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4299 4299
4300 vxge_debug_init(VXGE_TRACE, 4300 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4301 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X", 4301 vdev->ndev->name, macaddr);
4302 vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
4303 macaddr[3], macaddr[4], macaddr[5]);
4304 4302
4305 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", 4303 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4306 vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); 4304 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca..f88c07c1319 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2050,7 +2050,7 @@ static int __init dscc4_setup(char *str)
2050__setup("dscc4.setup=", dscc4_setup); 2050__setup("dscc4.setup=", dscc4_setup);
2051#endif 2051#endif
2052 2052
2053static struct pci_device_id dscc4_pci_tbl[] = { 2053static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
2054 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4, 2054 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
2055 PCI_ANY_ID, PCI_ANY_ID, }, 2055 PCI_ANY_ID, PCI_ANY_ID, },
2056 { 0,} 2056 { 0,}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e364915..40d724a8e02 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -528,7 +528,7 @@ static int fst_debug_mask = { FST_DEBUG };
528/* 528/*
529 * PCI ID lookup table 529 * PCI ID lookup table
530 */ 530 */
531static struct pci_device_id fst_pci_dev_id[] __devinitdata = { 531static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
532 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID, 532 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
533 PCI_ANY_ID, 0, 0, FST_TYPE_T2P}, 533 PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
534 534
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c82..b2785037712 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
77 77
78static int LMC_PKT_BUF_SZ = 1542; 78static int LMC_PKT_BUF_SZ = 1542;
79 79
80static struct pci_device_id lmc_pci_tbl[] = { 80static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
81 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, 81 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
82 PCI_VENDOR_ID_LMC, PCI_ANY_ID }, 82 PCI_VENDOR_ID_LMC, PCI_ANY_ID },
83 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, 83 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d395542..f4f1c00d0d2 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -251,7 +251,7 @@ static char rcsid[] =
251#undef PC300_DEBUG_RX 251#undef PC300_DEBUG_RX
252#undef PC300_DEBUG_OTHER 252#undef PC300_DEBUG_OTHER
253 253
254static struct pci_device_id cpc_pci_dev_id[] __devinitdata = { 254static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
255 /* PC300/RSV or PC300/X21, 2 chan */ 255 /* PC300/RSV or PC300/X21, 2 chan */
256 {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300}, 256 {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
257 /* PC300/RSV or PC300/X21, 1 chan */ 257 /* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd9..c7ab3becd26 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
481 481
482 482
483 483
484static struct pci_device_id pc300_pci_tbl[] __devinitdata = { 484static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
485 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID, 485 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
486 PCI_ANY_ID, 0, 0, 0 }, 486 PCI_ANY_ID, 0, 0, 0 },
487 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID, 487 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf02..e2cff64a446 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
417 417
418 418
419 419
420static struct pci_device_id pci200_pci_tbl[] __devinitdata = { 420static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
421 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, 421 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
422 PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 }, 422 PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
423 { 0, } 423 { 0, }
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624e..541c700dcee 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
814 return 0; 814 return 0;
815} 815}
816 816
817static struct pci_device_id wanxl_pci_tbl[] __devinitdata = { 817static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
818 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID, 818 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
819 PCI_ANY_ID, 0, 0, 0 }, 819 PCI_ANY_ID, 0, 0, 0 },
820 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID, 820 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09d..6cead321bc1 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -301,24 +301,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
301 /* Extract MAC addresss */ 301 /* Extract MAC addresss */
302 ddi = (void *) skb->data; 302 ddi = (void *) skb->data;
303 BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); 303 BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
304 d_printf(2, dev, "GET DEVICE INFO: mac addr " 304 d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
305 "%02x:%02x:%02x:%02x:%02x:%02x\n", 305 ddi->mac_address);
306 ddi->mac_address[0], ddi->mac_address[1],
307 ddi->mac_address[2], ddi->mac_address[3],
308 ddi->mac_address[4], ddi->mac_address[5]);
309 if (!memcmp(net_dev->perm_addr, ddi->mac_address, 306 if (!memcmp(net_dev->perm_addr, ddi->mac_address,
310 sizeof(ddi->mac_address))) 307 sizeof(ddi->mac_address)))
311 goto ok; 308 goto ok;
312 dev_warn(dev, "warning: device reports a different MAC address " 309 dev_warn(dev, "warning: device reports a different MAC address "
313 "to that of boot mode's\n"); 310 "to that of boot mode's\n");
314 dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n", 311 dev_warn(dev, "device reports %pM\n", ddi->mac_address);
315 ddi->mac_address[0], ddi->mac_address[1], 312 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
316 ddi->mac_address[2], ddi->mac_address[3],
317 ddi->mac_address[4], ddi->mac_address[5]);
318 dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
319 net_dev->perm_addr[0], net_dev->perm_addr[1],
320 net_dev->perm_addr[2], net_dev->perm_addr[3],
321 net_dev->perm_addr[4], net_dev->perm_addr[5]);
322 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) 313 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
323 dev_err(dev, "device reports an invalid MAC address, " 314 dev_err(dev, "device reports an invalid MAC address, "
324 "not updating\n"); 315 "not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299c..e803a7dc650 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1041,21 +1041,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
1041 dev_err(dev, "BM: read mac addr failed: %d\n", result); 1041 dev_err(dev, "BM: read mac addr failed: %d\n", result);
1042 goto error_read_mac; 1042 goto error_read_mac;
1043 } 1043 }
1044 d_printf(2, dev, 1044 d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
1045 "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
1046 ack_buf.ack_pl[0], ack_buf.ack_pl[1],
1047 ack_buf.ack_pl[2], ack_buf.ack_pl[3],
1048 ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
1049 if (i2400m->bus_bm_mac_addr_impaired == 1) { 1045 if (i2400m->bus_bm_mac_addr_impaired == 1) {
1050 ack_buf.ack_pl[0] = 0x00; 1046 ack_buf.ack_pl[0] = 0x00;
1051 ack_buf.ack_pl[1] = 0x16; 1047 ack_buf.ack_pl[1] = 0x16;
1052 ack_buf.ack_pl[2] = 0xd3; 1048 ack_buf.ack_pl[2] = 0xd3;
1053 get_random_bytes(&ack_buf.ack_pl[3], 3); 1049 get_random_bytes(&ack_buf.ack_pl[3], 3);
1054 dev_err(dev, "BM is MAC addr impaired, faking MAC addr to " 1050 dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
1055 "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n", 1051 "mac addr is %pM\n", ack_buf.ack_pl);
1056 ack_buf.ack_pl[0], ack_buf.ack_pl[1],
1057 ack_buf.ack_pl[2], ack_buf.ack_pl[3],
1058 ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
1059 result = 0; 1052 result = 0;
1060 } 1053 }
1061 net_dev->addr_len = ETH_ALEN; 1054 net_dev->addr_len = ETH_ALEN;
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 39410016b4f..e6ca3eb4c0d 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -39,7 +39,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
39module_param(tx_ring_size, uint, 0); 39module_param(tx_ring_size, uint, 0);
40module_param(rx_ring_size, uint, 0); 40module_param(rx_ring_size, uint, 0);
41 41
42static struct pci_device_id adm8211_pci_id_table[] __devinitdata = { 42static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
43 /* ADMtek ADM8211 */ 43 /* ADMtek ADM8211 */
44 { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ 44 { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
45 { PCI_DEVICE(0x1200, 0x8201) }, /* ? */ 45 { PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -1400,15 +1400,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
1400} 1400}
1401 1401
1402static int adm8211_add_interface(struct ieee80211_hw *dev, 1402static int adm8211_add_interface(struct ieee80211_hw *dev,
1403 struct ieee80211_if_init_conf *conf) 1403 struct ieee80211_vif *vif)
1404{ 1404{
1405 struct adm8211_priv *priv = dev->priv; 1405 struct adm8211_priv *priv = dev->priv;
1406 if (priv->mode != NL80211_IFTYPE_MONITOR) 1406 if (priv->mode != NL80211_IFTYPE_MONITOR)
1407 return -EOPNOTSUPP; 1407 return -EOPNOTSUPP;
1408 1408
1409 switch (conf->type) { 1409 switch (vif->type) {
1410 case NL80211_IFTYPE_STATION: 1410 case NL80211_IFTYPE_STATION:
1411 priv->mode = conf->type; 1411 priv->mode = vif->type;
1412 break; 1412 break;
1413 default: 1413 default:
1414 return -EOPNOTSUPP; 1414 return -EOPNOTSUPP;
@@ -1416,8 +1416,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1416 1416
1417 ADM8211_IDLE(); 1417 ADM8211_IDLE();
1418 1418
1419 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr)); 1419 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
1420 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); 1420 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
1421 1421
1422 adm8211_update_mode(dev); 1422 adm8211_update_mode(dev);
1423 1423
@@ -1427,7 +1427,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1427} 1427}
1428 1428
1429static void adm8211_remove_interface(struct ieee80211_hw *dev, 1429static void adm8211_remove_interface(struct ieee80211_hw *dev,
1430 struct ieee80211_if_init_conf *conf) 1430 struct ieee80211_vif *vif)
1431{ 1431{
1432 struct adm8211_priv *priv = dev->priv; 1432 struct adm8211_priv *priv = dev->priv;
1433 priv->mode = NL80211_IFTYPE_MONITOR; 1433 priv->mode = NL80211_IFTYPE_MONITOR;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc..37e4ab737f2 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -57,7 +57,7 @@
57#define DRV_NAME "airo" 57#define DRV_NAME "airo"
58 58
59#ifdef CONFIG_PCI 59#ifdef CONFIG_PCI
60static struct pci_device_id card_ids[] = { 60static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
61 { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, 61 { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
62 { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID }, 62 { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
63 { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, }, 63 { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 2517364d3eb..0fb419936df 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1789,7 +1789,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
1789} 1789}
1790 1790
1791static int at76_add_interface(struct ieee80211_hw *hw, 1791static int at76_add_interface(struct ieee80211_hw *hw,
1792 struct ieee80211_if_init_conf *conf) 1792 struct ieee80211_vif *vif)
1793{ 1793{
1794 struct at76_priv *priv = hw->priv; 1794 struct at76_priv *priv = hw->priv;
1795 int ret = 0; 1795 int ret = 0;
@@ -1798,7 +1798,7 @@ static int at76_add_interface(struct ieee80211_hw *hw,
1798 1798
1799 mutex_lock(&priv->mtx); 1799 mutex_lock(&priv->mtx);
1800 1800
1801 switch (conf->type) { 1801 switch (vif->type) {
1802 case NL80211_IFTYPE_STATION: 1802 case NL80211_IFTYPE_STATION:
1803 priv->iw_mode = IW_MODE_INFRA; 1803 priv->iw_mode = IW_MODE_INFRA;
1804 break; 1804 break;
@@ -1814,7 +1814,7 @@ exit:
1814} 1814}
1815 1815
1816static void at76_remove_interface(struct ieee80211_hw *hw, 1816static void at76_remove_interface(struct ieee80211_hw *hw,
1817 struct ieee80211_if_init_conf *conf) 1817 struct ieee80211_vif *vif)
1818{ 1818{
1819 at76_dbg(DBG_MAC80211, "%s()", __func__); 1819 at76_dbg(DBG_MAC80211, "%s()", __func__);
1820} 1820}
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 9f9459860d8..b99a8c2053d 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,7 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
109 bool has_plcp; 109 bool has_plcp;
110}; 110};
111 111
112#define AR9170_NUM_MAX_BA_RETRY 5
113#define AR9170_NUM_TID 16 112#define AR9170_NUM_TID 16
114#define WME_BA_BMP_SIZE 64 113#define WME_BA_BMP_SIZE 64
115#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE) 114#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
@@ -143,7 +142,6 @@ struct ar9170_sta_tid {
143 u16 tid; 142 u16 tid;
144 enum ar9170_tid_state state; 143 enum ar9170_tid_state state;
145 bool active; 144 bool active;
146 u8 retry;
147}; 145};
148 146
149#define AR9170_QUEUE_TIMEOUT 64 147#define AR9170_QUEUE_TIMEOUT 64
@@ -154,6 +152,8 @@ struct ar9170_sta_tid {
154 152
155#define AR9170_NUM_TX_STATUS 128 153#define AR9170_NUM_TX_STATUS 128
156#define AR9170_NUM_TX_AGG_MAX 30 154#define AR9170_NUM_TX_AGG_MAX 30
155#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
156#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
157 157
158struct ar9170 { 158struct ar9170 {
159 struct ieee80211_hw *hw; 159 struct ieee80211_hw *hw;
@@ -248,13 +248,8 @@ struct ar9170_sta_info {
248 unsigned int ampdu_max_len; 248 unsigned int ampdu_max_len;
249}; 249};
250 250
251#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
252#define AR9170_TX_FLAG_NO_ACK BIT(1)
253#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
254
255struct ar9170_tx_info { 251struct ar9170_tx_info {
256 unsigned long timeout; 252 unsigned long timeout;
257 unsigned int flags;
258}; 253};
259 254
260#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED) 255#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 701ddb7d840..0a1d4c28e68 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -276,6 +276,7 @@ struct ar9170_tx_control {
276#define AR9170_TX_MAC_RATE_PROBE 0x8000 276#define AR9170_TX_MAC_RATE_PROBE 0x8000
277 277
278/* either-or */ 278/* either-or */
279#define AR9170_TX_PHY_MOD_MASK 0x00000003
279#define AR9170_TX_PHY_MOD_CCK 0x00000000 280#define AR9170_TX_PHY_MOD_CCK 0x00000000
280#define AR9170_TX_PHY_MOD_OFDM 0x00000001 281#define AR9170_TX_PHY_MOD_OFDM 0x00000001
281#define AR9170_TX_PHY_MOD_HT 0x00000002 282#define AR9170_TX_PHY_MOD_HT 0x00000002
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index ddc8c09dc79..857e8610429 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -117,7 +117,7 @@ int ar9170_set_qos(struct ar9170 *ar)
117 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP, 117 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
118 ar->edcf[0].txop | ar->edcf[1].txop << 16); 118 ar->edcf[0].txop | ar->edcf[1].txop << 16);
119 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP, 119 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
120 ar->edcf[1].txop | ar->edcf[3].txop << 16); 120 ar->edcf[2].txop | ar->edcf[3].txop << 16);
121 121
122 ar9170_regwrite_finish(); 122 ar9170_regwrite_finish();
123 123
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f9d6db8d013..4d27f7f67c7 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -194,12 +194,15 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
194 return ar9170_get_seq_h((void *) txc->frame_data); 194 return ar9170_get_seq_h((void *) txc->frame_data);
195} 195}
196 196
197static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
198{
199 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
200}
201
197static inline u16 ar9170_get_tid(struct sk_buff *skb) 202static inline u16 ar9170_get_tid(struct sk_buff *skb)
198{ 203{
199 struct ar9170_tx_control *txc = (void *) skb->data; 204 struct ar9170_tx_control *txc = (void *) skb->data;
200 struct ieee80211_hdr *hdr = (void *) txc->frame_data; 205 return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
201
202 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
203} 206}
204 207
205#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff) 208#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
@@ -213,10 +216,10 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
213 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; 216 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
214 struct ieee80211_hdr *hdr = (void *) txc->frame_data; 217 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
215 218
216 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d " 219 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
217 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n", 220 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
218 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), 221 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
219 ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr), 222 ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
220 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control), 223 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
221 jiffies_to_msecs(arinfo->timeout - jiffies)); 224 jiffies_to_msecs(arinfo->timeout - jiffies));
222} 225}
@@ -430,7 +433,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
430 spin_lock_irqsave(&ar->tx_stats_lock, flags); 433 spin_lock_irqsave(&ar->tx_stats_lock, flags);
431 ar->tx_stats[queue].len--; 434 ar->tx_stats[queue].len--;
432 435
433 if (skb_queue_empty(&ar->tx_pending[queue])) { 436 if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
434#ifdef AR9170_QUEUE_STOP_DEBUG 437#ifdef AR9170_QUEUE_STOP_DEBUG
435 printk(KERN_DEBUG "%s: wake queue %d\n", 438 printk(KERN_DEBUG "%s: wake queue %d\n",
436 wiphy_name(ar->hw->wiphy), queue); 439 wiphy_name(ar->hw->wiphy), queue);
@@ -440,22 +443,17 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
440 } 443 }
441 spin_unlock_irqrestore(&ar->tx_stats_lock, flags); 444 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
442 445
443 if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) { 446 if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
444 ar9170_tx_ampdu_callback(ar, skb);
445 } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
446 arinfo->timeout = jiffies +
447 msecs_to_jiffies(AR9170_TX_TIMEOUT);
448
449 skb_queue_tail(&ar->tx_status[queue], skb);
450 } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
451 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED); 447 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
452 } else { 448 } else {
453#ifdef AR9170_QUEUE_DEBUG 449 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
454 printk(KERN_DEBUG "%s: unsupported frame flags!\n", 450 ar9170_tx_ampdu_callback(ar, skb);
455 wiphy_name(ar->hw->wiphy)); 451 } else {
456 ar9170_print_txheader(ar, skb); 452 arinfo->timeout = jiffies +
457#endif /* AR9170_QUEUE_DEBUG */ 453 msecs_to_jiffies(AR9170_TX_TIMEOUT);
458 dev_kfree_skb_any(skb); 454
455 skb_queue_tail(&ar->tx_status[queue], skb);
456 }
459 } 457 }
460 458
461 if (!ar->tx_stats[queue].len && 459 if (!ar->tx_stats[queue].len &&
@@ -1407,17 +1405,6 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1407 1405
1408 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && 1406 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
1409 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) { 1407 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
1410 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1411 if (unlikely(!info->control.sta))
1412 goto err_out;
1413
1414 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1415 arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
1416
1417 goto out;
1418 }
1419
1420 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1421 /* 1408 /*
1422 * WARNING: 1409 * WARNING:
1423 * Putting the QoS queue bits into an unexplored territory is 1410 * Putting the QoS queue bits into an unexplored territory is
@@ -1431,12 +1418,17 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1431 1418
1432 txc->phy_control |= 1419 txc->phy_control |=
1433 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT); 1420 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1434 arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK; 1421
1435 } else { 1422 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1436 arinfo->flags = AR9170_TX_FLAG_NO_ACK; 1423 if (unlikely(!info->control.sta))
1424 goto err_out;
1425
1426 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1427 } else {
1428 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1429 }
1437 } 1430 }
1438 1431
1439out:
1440 return 0; 1432 return 0;
1441 1433
1442err_out: 1434err_out:
@@ -1671,8 +1663,7 @@ static bool ar9170_tx_ampdu(struct ar9170 *ar)
1671 * tell the FW/HW that this is the last frame, 1663 * tell the FW/HW that this is the last frame,
1672 * that way it will wait for the immediate block ack. 1664 * that way it will wait for the immediate block ack.
1673 */ 1665 */
1674 if (likely(skb_peek_tail(&agg))) 1666 ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
1675 ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
1676 1667
1677#ifdef AR9170_TXAGG_DEBUG 1668#ifdef AR9170_TXAGG_DEBUG
1678 printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n", 1669 printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@ -1716,6 +1707,21 @@ static void ar9170_tx(struct ar9170 *ar)
1716 1707
1717 for (i = 0; i < __AR9170_NUM_TXQ; i++) { 1708 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1718 spin_lock_irqsave(&ar->tx_stats_lock, flags); 1709 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1710 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1711 skb_queue_len(&ar->tx_pending[i]));
1712
1713 if (remaining_space < frames) {
1714#ifdef AR9170_QUEUE_DEBUG
1715 printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
1716 "remaining slots:%d, needed:%d\n",
1717 wiphy_name(ar->hw->wiphy), i, remaining_space,
1718 frames);
1719#endif /* AR9170_QUEUE_DEBUG */
1720 frames = remaining_space;
1721 }
1722
1723 ar->tx_stats[i].len += frames;
1724 ar->tx_stats[i].count += frames;
1719 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) { 1725 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
1720#ifdef AR9170_QUEUE_DEBUG 1726#ifdef AR9170_QUEUE_DEBUG
1721 printk(KERN_DEBUG "%s: queue %d full\n", 1727 printk(KERN_DEBUG "%s: queue %d full\n",
@@ -1733,25 +1739,8 @@ static void ar9170_tx(struct ar9170 *ar)
1733 __ar9170_dump_txstats(ar); 1739 __ar9170_dump_txstats(ar);
1734#endif /* AR9170_QUEUE_STOP_DEBUG */ 1740#endif /* AR9170_QUEUE_STOP_DEBUG */
1735 ieee80211_stop_queue(ar->hw, i); 1741 ieee80211_stop_queue(ar->hw, i);
1736 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1737 continue;
1738 } 1742 }
1739 1743
1740 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1741 skb_queue_len(&ar->tx_pending[i]));
1742
1743 if (remaining_space < frames) {
1744#ifdef AR9170_QUEUE_DEBUG
1745 printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
1746 "remaining slots:%d, needed:%d\n",
1747 wiphy_name(ar->hw->wiphy), i, remaining_space,
1748 frames);
1749#endif /* AR9170_QUEUE_DEBUG */
1750 frames = remaining_space;
1751 }
1752
1753 ar->tx_stats[i].len += frames;
1754 ar->tx_stats[i].count += frames;
1755 spin_unlock_irqrestore(&ar->tx_stats_lock, flags); 1744 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1756 1745
1757 if (!frames) 1746 if (!frames)
@@ -1773,7 +1762,7 @@ static void ar9170_tx(struct ar9170 *ar)
1773 arinfo->timeout = jiffies + 1762 arinfo->timeout = jiffies +
1774 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1763 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1775 1764
1776 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1765 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1777 atomic_inc(&ar->tx_ampdu_pending); 1766 atomic_inc(&ar->tx_ampdu_pending);
1778 1767
1779#ifdef AR9170_QUEUE_DEBUG 1768#ifdef AR9170_QUEUE_DEBUG
@@ -1784,7 +1773,7 @@ static void ar9170_tx(struct ar9170 *ar)
1784 1773
1785 err = ar->tx(ar, skb); 1774 err = ar->tx(ar, skb);
1786 if (unlikely(err)) { 1775 if (unlikely(err)) {
1787 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1776 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1788 atomic_dec(&ar->tx_ampdu_pending); 1777 atomic_dec(&ar->tx_ampdu_pending);
1789 1778
1790 frames_failed++; 1779 frames_failed++;
@@ -1950,7 +1939,7 @@ err_free:
1950} 1939}
1951 1940
1952static int ar9170_op_add_interface(struct ieee80211_hw *hw, 1941static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1953 struct ieee80211_if_init_conf *conf) 1942 struct ieee80211_vif *vif)
1954{ 1943{
1955 struct ar9170 *ar = hw->priv; 1944 struct ar9170 *ar = hw->priv;
1956 struct ath_common *common = &ar->common; 1945 struct ath_common *common = &ar->common;
@@ -1963,8 +1952,8 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1963 goto unlock; 1952 goto unlock;
1964 } 1953 }
1965 1954
1966 ar->vif = conf->vif; 1955 ar->vif = vif;
1967 memcpy(common->macaddr, conf->mac_addr, ETH_ALEN); 1956 memcpy(common->macaddr, vif->addr, ETH_ALEN);
1968 1957
1969 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) { 1958 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1970 ar->rx_software_decryption = true; 1959 ar->rx_software_decryption = true;
@@ -1984,7 +1973,7 @@ unlock:
1984} 1973}
1985 1974
1986static void ar9170_op_remove_interface(struct ieee80211_hw *hw, 1975static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
1987 struct ieee80211_if_init_conf *conf) 1976 struct ieee80211_vif *vif)
1988{ 1977{
1989 struct ar9170 *ar = hw->priv; 1978 struct ar9170 *ar = hw->priv;
1990 1979
@@ -2366,7 +2355,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
2366 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN; 2355 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
2367 sta_info->agg[i].active = false; 2356 sta_info->agg[i].active = false;
2368 sta_info->agg[i].ssn = 0; 2357 sta_info->agg[i].ssn = 0;
2369 sta_info->agg[i].retry = 0;
2370 sta_info->agg[i].tid = i; 2358 sta_info->agg[i].tid = i;
2371 INIT_LIST_HEAD(&sta_info->agg[i].list); 2359 INIT_LIST_HEAD(&sta_info->agg[i].list);
2372 skb_queue_head_init(&sta_info->agg[i].queue); 2360 skb_queue_head_init(&sta_info->agg[i].queue);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d92405..0f361186b78 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -84,6 +84,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
84 { USB_DEVICE(0x0cde, 0x0023) }, 84 { USB_DEVICE(0x0cde, 0x0023) },
85 /* Z-Com UB82 ABG */ 85 /* Z-Com UB82 ABG */
86 { USB_DEVICE(0x0cde, 0x0026) }, 86 { USB_DEVICE(0x0cde, 0x0026) },
87 /* Sphairon Homelink 1202 */
88 { USB_DEVICE(0x0cde, 0x0027) },
87 /* Arcadyan WN7512 */ 89 /* Arcadyan WN7512 */
88 { USB_DEVICE(0x083a, 0xf522) }, 90 { USB_DEVICE(0x083a, 0xf522) },
89 /* Planex GWUS300 */ 91 /* Planex GWUS300 */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a9676111..66bcb506a11 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1063,6 +1063,7 @@ struct ath5k_hw {
1063 u32 ah_cw_min; 1063 u32 ah_cw_min;
1064 u32 ah_cw_max; 1064 u32 ah_cw_max;
1065 u32 ah_limit_tx_retries; 1065 u32 ah_limit_tx_retries;
1066 u8 ah_coverage_class;
1066 1067
1067 /* Antenna Control */ 1068 /* Antenna Control */
1068 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 1069 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1201,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1200 1201
1201/* Protocol Control Unit Functions */ 1202/* Protocol Control Unit Functions */
1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1203extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
1204extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1203/* BSSID Functions */ 1205/* BSSID Functions */
1204extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1206extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1205extern void ath5k_hw_set_associd(struct ath5k_hw *ah); 1207extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1233,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
1231extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah); 1233extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
1232extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout); 1234extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
1233extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah); 1235extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
1236/* Clock rate related functions */
1237unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1238unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1239unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
1234/* Key table (WEP) functions */ 1240/* Key table (WEP) functions */
1235extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry); 1241extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1236extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); 1242extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1316,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
1310 * Functions used internaly 1316 * Functions used internaly
1311 */ 1317 */
1312 1318
1313/*
1314 * Translate usec to hw clock units
1315 * TODO: Half/quarter rate
1316 */
1317static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
1318{
1319 return turbo ? (usec * 80) : (usec * 40);
1320}
1321
1322/*
1323 * Translate hw clock units to usec
1324 * TODO: Half/quarter rate
1325 */
1326static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1327{
1328 return turbo ? (clock / 80) : (clock / 40);
1329}
1330
1331static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah) 1319static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
1332{ 1320{
1333 return &ah->common; 1321 return &ah->common;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index e63b7c40d0e..5577bcc80ea 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -83,7 +83,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
83 83
84 84
85/* Known PCI ids */ 85/* Known PCI ids */
86static const struct pci_device_id ath5k_pci_id_table[] = { 86static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
87 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ 87 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
88 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ 88 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
89 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ 89 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -225,9 +225,9 @@ static int ath5k_reset_wake(struct ath5k_softc *sc);
225static int ath5k_start(struct ieee80211_hw *hw); 225static int ath5k_start(struct ieee80211_hw *hw);
226static void ath5k_stop(struct ieee80211_hw *hw); 226static void ath5k_stop(struct ieee80211_hw *hw);
227static int ath5k_add_interface(struct ieee80211_hw *hw, 227static int ath5k_add_interface(struct ieee80211_hw *hw,
228 struct ieee80211_if_init_conf *conf); 228 struct ieee80211_vif *vif);
229static void ath5k_remove_interface(struct ieee80211_hw *hw, 229static void ath5k_remove_interface(struct ieee80211_hw *hw,
230 struct ieee80211_if_init_conf *conf); 230 struct ieee80211_vif *vif);
231static int ath5k_config(struct ieee80211_hw *hw, u32 changed); 231static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
232static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 232static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
233 int mc_count, struct dev_addr_list *mc_list); 233 int mc_count, struct dev_addr_list *mc_list);
@@ -254,6 +254,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
254 u32 changes); 254 u32 changes);
255static void ath5k_sw_scan_start(struct ieee80211_hw *hw); 255static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
256static void ath5k_sw_scan_complete(struct ieee80211_hw *hw); 256static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
257static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
258 u8 coverage_class);
257 259
258static const struct ieee80211_ops ath5k_hw_ops = { 260static const struct ieee80211_ops ath5k_hw_ops = {
259 .tx = ath5k_tx, 261 .tx = ath5k_tx,
@@ -274,6 +276,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
274 .bss_info_changed = ath5k_bss_info_changed, 276 .bss_info_changed = ath5k_bss_info_changed,
275 .sw_scan_start = ath5k_sw_scan_start, 277 .sw_scan_start = ath5k_sw_scan_start,
276 .sw_scan_complete = ath5k_sw_scan_complete, 278 .sw_scan_complete = ath5k_sw_scan_complete,
279 .set_coverage_class = ath5k_set_coverage_class,
277}; 280};
278 281
279/* 282/*
@@ -2773,7 +2776,7 @@ static void ath5k_stop(struct ieee80211_hw *hw)
2773} 2776}
2774 2777
2775static int ath5k_add_interface(struct ieee80211_hw *hw, 2778static int ath5k_add_interface(struct ieee80211_hw *hw,
2776 struct ieee80211_if_init_conf *conf) 2779 struct ieee80211_vif *vif)
2777{ 2780{
2778 struct ath5k_softc *sc = hw->priv; 2781 struct ath5k_softc *sc = hw->priv;
2779 int ret; 2782 int ret;
@@ -2784,22 +2787,22 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2784 goto end; 2787 goto end;
2785 } 2788 }
2786 2789
2787 sc->vif = conf->vif; 2790 sc->vif = vif;
2788 2791
2789 switch (conf->type) { 2792 switch (vif->type) {
2790 case NL80211_IFTYPE_AP: 2793 case NL80211_IFTYPE_AP:
2791 case NL80211_IFTYPE_STATION: 2794 case NL80211_IFTYPE_STATION:
2792 case NL80211_IFTYPE_ADHOC: 2795 case NL80211_IFTYPE_ADHOC:
2793 case NL80211_IFTYPE_MESH_POINT: 2796 case NL80211_IFTYPE_MESH_POINT:
2794 case NL80211_IFTYPE_MONITOR: 2797 case NL80211_IFTYPE_MONITOR:
2795 sc->opmode = conf->type; 2798 sc->opmode = vif->type;
2796 break; 2799 break;
2797 default: 2800 default:
2798 ret = -EOPNOTSUPP; 2801 ret = -EOPNOTSUPP;
2799 goto end; 2802 goto end;
2800 } 2803 }
2801 2804
2802 ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); 2805 ath5k_hw_set_lladdr(sc->ah, vif->addr);
2803 ath5k_mode_setup(sc); 2806 ath5k_mode_setup(sc);
2804 2807
2805 ret = 0; 2808 ret = 0;
@@ -2810,13 +2813,13 @@ end:
2810 2813
2811static void 2814static void
2812ath5k_remove_interface(struct ieee80211_hw *hw, 2815ath5k_remove_interface(struct ieee80211_hw *hw,
2813 struct ieee80211_if_init_conf *conf) 2816 struct ieee80211_vif *vif)
2814{ 2817{
2815 struct ath5k_softc *sc = hw->priv; 2818 struct ath5k_softc *sc = hw->priv;
2816 u8 mac[ETH_ALEN] = {}; 2819 u8 mac[ETH_ALEN] = {};
2817 2820
2818 mutex_lock(&sc->lock); 2821 mutex_lock(&sc->lock);
2819 if (sc->vif != conf->vif) 2822 if (sc->vif != vif)
2820 goto end; 2823 goto end;
2821 2824
2822 ath5k_hw_set_lladdr(sc->ah, mac); 2825 ath5k_hw_set_lladdr(sc->ah, mac);
@@ -3262,3 +3265,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
3262 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3265 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3263 AR5K_LED_ASSOC : AR5K_LED_INIT); 3266 AR5K_LED_ASSOC : AR5K_LED_INIT);
3264} 3267}
3268
3269/**
3270 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
3271 *
3272 * @hw: struct ieee80211_hw pointer
3273 * @coverage_class: IEEE 802.11 coverage class number
3274 *
3275 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
3276 * coverage class. The values are persistent, they are restored after device
3277 * reset.
3278 */
3279static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
3280{
3281 struct ath5k_softc *sc = hw->priv;
3282
3283 mutex_lock(&sc->lock);
3284 ath5k_hw_set_coverage_class(sc->ah, coverage_class);
3285 mutex_unlock(&sc->lock);
3286}
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d..aefe84f9c04 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
187{ 187{
188 ATH5K_TRACE(ah->ah_sc); 188 ATH5K_TRACE(ah->ah_sc);
189 189
190 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, 190 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo); 191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
192} 192}
193 193
194/** 194/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) 200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
201{ 201{
202 ATH5K_TRACE(ah->ah_sc); 202 ATH5K_TRACE(ah->ah_sc);
203 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK), 203 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
204 ah->ah_turbo) <= timeout) 204 <= timeout)
205 return -EINVAL; 205 return -EINVAL;
206 206
207 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK, 207 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
208 ath5k_hw_htoclock(timeout, ah->ah_turbo)); 208 ath5k_hw_htoclock(ah, timeout));
209 209
210 return 0; 210 return 0;
211} 211}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah) 218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
219{ 219{
220 ATH5K_TRACE(ah->ah_sc); 220 ATH5K_TRACE(ah->ah_sc);
221 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, 221 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo); 222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
223} 223}
224 224
225/** 225/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) 231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
232{ 232{
233 ATH5K_TRACE(ah->ah_sc); 233 ATH5K_TRACE(ah->ah_sc);
234 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS), 234 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
235 ah->ah_turbo) <= timeout) 235 <= timeout)
236 return -EINVAL; 236 return -EINVAL;
237 237
238 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS, 238 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
239 ath5k_hw_htoclock(timeout, ah->ah_turbo)); 239 ath5k_hw_htoclock(ah, timeout));
240 240
241 return 0; 241 return 0;
242} 242}
243 243
244/** 244/**
245 * ath5k_hw_htoclock - Translate usec to hw clock units
246 *
247 * @ah: The &struct ath5k_hw
248 * @usec: value in microseconds
249 */
250unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
251{
252 return usec * ath5k_hw_get_clockrate(ah);
253}
254
255/**
256 * ath5k_hw_clocktoh - Translate hw clock units to usec
257 * @clock: value in hw clock units
258 */
259unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
260{
261 return clock / ath5k_hw_get_clockrate(ah);
262}
263
264/**
265 * ath5k_hw_get_clockrate - Get the clock rate for current mode
266 *
267 * @ah: The &struct ath5k_hw
268 */
269unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
270{
271 struct ieee80211_channel *channel = ah->ah_current_channel;
272 int clock;
273
274 if (channel->hw_value & CHANNEL_5GHZ)
275 clock = 40; /* 802.11a */
276 else if (channel->hw_value & CHANNEL_CCK)
277 clock = 22; /* 802.11b */
278 else
279 clock = 44; /* 802.11g */
280
281 /* Clock rate in turbo modes is twice the normal rate */
282 if (channel->hw_value & CHANNEL_TURBO)
283 clock *= 2;
284
285 return clock;
286}
287
288/**
289 * ath5k_hw_get_default_slottime - Get the default slot time for current mode
290 *
291 * @ah: The &struct ath5k_hw
292 */
293unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
294{
295 struct ieee80211_channel *channel = ah->ah_current_channel;
296
297 if (channel->hw_value & CHANNEL_TURBO)
298 return 6; /* both turbo modes */
299
300 if (channel->hw_value & CHANNEL_CCK)
301 return 20; /* 802.11b */
302
303 return 9; /* 802.11 a/g */
304}
305
306/**
307 * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
308 *
309 * @ah: The &struct ath5k_hw
310 */
311unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
312{
313 struct ieee80211_channel *channel = ah->ah_current_channel;
314
315 if (channel->hw_value & CHANNEL_TURBO)
316 return 8; /* both turbo modes */
317
318 if (channel->hw_value & CHANNEL_5GHZ)
319 return 16; /* 802.11a */
320
321 return 10; /* 802.11 b/g */
322}
323
324/**
245 * ath5k_hw_set_lladdr - Set station id 325 * ath5k_hw_set_lladdr - Set station id
246 * 326 *
247 * @ah: The &struct ath5k_hw 327 * @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
1050 return 0; 1130 return 0;
1051} 1131}
1052 1132
1133/**
1134 * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
1135 *
1136 * @ah: The &struct ath5k_hw
1137 * @coverage_class: IEEE 802.11 coverage class number
1138 *
1139 * Sets slot time, ACK timeout and CTS timeout for given coverage class.
1140 */
1141void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
1142{
1143 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1144 int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
1145 int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
1146 int cts_timeout = ack_timeout;
1147
1148 ath5k_hw_set_slot_time(ah, slot_time);
1149 ath5k_hw_set_ack_timeout(ah, ack_timeout);
1150 ath5k_hw_set_cts_timeout(ah, cts_timeout);
1151
1152 ah->ah_coverage_class = coverage_class;
1153}
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef20..abe36c0d139 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -520,12 +520,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
520 */ 520 */
521unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah) 521unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
522{ 522{
523 unsigned int slot_time_clock;
524
523 ATH5K_TRACE(ah->ah_sc); 525 ATH5K_TRACE(ah->ah_sc);
526
524 if (ah->ah_version == AR5K_AR5210) 527 if (ah->ah_version == AR5K_AR5210)
525 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah, 528 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
526 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
527 else 529 else
528 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff; 530 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
531
532 return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
529} 533}
530 534
531/* 535/*
@@ -533,15 +537,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
533 */ 537 */
534int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) 538int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
535{ 539{
540 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
541
536 ATH5K_TRACE(ah->ah_sc); 542 ATH5K_TRACE(ah->ah_sc);
537 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX) 543
544 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
538 return -EINVAL; 545 return -EINVAL;
539 546
540 if (ah->ah_version == AR5K_AR5210) 547 if (ah->ah_version == AR5K_AR5210)
541 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time, 548 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
542 ah->ah_turbo), AR5K_SLOT_TIME);
543 else 549 else
544 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT); 550 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
545 551
546 return 0; 552 return 0;
547} 553}
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc7786..6690923fd78 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
60 !(channel->hw_value & CHANNEL_OFDM)); 60 !(channel->hw_value & CHANNEL_OFDM));
61 61
62 /* Get coefficient 62 /* Get coefficient
63 * ALGO: coef = (5 * clock * carrier_freq) / 2) 63 * ALGO: coef = (5 * clock / carrier_freq) / 2
64 * we scale coef by shifting clock value by 24 for 64 * we scale coef by shifting clock value by 24 for
65 * better precision since we use integers */ 65 * better precision since we use integers */
66 /* TODO: Half/quarter rate */ 66 /* TODO: Half/quarter rate */
67 clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO); 67 clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
68
69 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq; 68 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
70 69
71 /* Get exponent 70 /* Get exponent
@@ -1317,6 +1316,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1317 /* Restore antenna mode */ 1316 /* Restore antenna mode */
1318 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); 1317 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
1319 1318
1319 /* Restore slot time and ACK timeouts */
1320 if (ah->ah_coverage_class > 0)
1321 ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
1322
1320 /* 1323 /*
1321 * Configure QCUs/DCUs 1324 * Configure QCUs/DCUs
1322 */ 1325 */
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a..6b50d5eb9ec 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
1ath9k-y += beacon.o \ 1ath9k-y += beacon.o \
2 gpio.o \
3 init.o \
2 main.o \ 4 main.o \
3 recv.o \ 5 recv.o \
4 xmit.o \ 6 xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137a..9e62a569e81 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -121,16 +121,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
121 sc->mem = mem; 121 sc->mem = mem;
122 sc->irq = irq; 122 sc->irq = irq;
123 123
124 ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops); 124 /* Will be cleared in ath9k_start() */
125 sc->sc_flags |= SC_OP_INVALID;
126
127 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
125 if (ret) { 128 if (ret) {
126 dev_err(&pdev->dev, "failed to initialize device\n"); 129 dev_err(&pdev->dev, "request_irq failed\n");
127 goto err_free_hw; 130 goto err_free_hw;
128 } 131 }
129 132
130 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc); 133 ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
131 if (ret) { 134 if (ret) {
132 dev_err(&pdev->dev, "request_irq failed\n"); 135 dev_err(&pdev->dev, "failed to initialize device\n");
133 goto err_detach; 136 goto err_irq;
134 } 137 }
135 138
136 ah = sc->sc_ah; 139 ah = sc->sc_ah;
@@ -143,8 +146,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
143 146
144 return 0; 147 return 0;
145 148
146 err_detach: 149 err_irq:
147 ath_detach(sc); 150 free_irq(irq, sc);
148 err_free_hw: 151 err_free_hw:
149 ieee80211_free_hw(hw); 152 ieee80211_free_hw(hw);
150 platform_set_drvdata(pdev, NULL); 153 platform_set_drvdata(pdev, NULL);
@@ -161,8 +164,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
161 if (hw) { 164 if (hw) {
162 struct ath_wiphy *aphy = hw->priv; 165 struct ath_wiphy *aphy = hw->priv;
163 struct ath_softc *sc = aphy->sc; 166 struct ath_softc *sc = aphy->sc;
167 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
164 168
165 ath_cleanup(sc); 169 ath9k_deinit_device(sc);
170 free_irq(sc->irq, sc);
171 ieee80211_free_hw(sc->hw);
172 ath_bus_cleanup(common);
166 platform_set_drvdata(pdev, NULL); 173 platform_set_drvdata(pdev, NULL);
167 } 174 }
168 175
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1597a42731e..bf3d4c4bfa5 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -341,6 +341,12 @@ int ath_beaconq_config(struct ath_softc *sc);
341#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 341#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
342#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 342#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
343 343
344void ath_ani_calibrate(unsigned long data);
345
346/**********/
347/* BTCOEX */
348/**********/
349
344/* Defines the BT AR_BT_COEX_WGHT used */ 350/* Defines the BT AR_BT_COEX_WGHT used */
345enum ath_stomp_type { 351enum ath_stomp_type {
346 ATH_BTCOEX_NO_STOMP, 352 ATH_BTCOEX_NO_STOMP,
@@ -361,6 +367,10 @@ struct ath_btcoex {
361 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ 367 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
362}; 368};
363 369
370int ath_init_btcoex_timer(struct ath_softc *sc);
371void ath9k_btcoex_timer_resume(struct ath_softc *sc);
372void ath9k_btcoex_timer_pause(struct ath_softc *sc);
373
364/********************/ 374/********************/
365/* LED Control */ 375/* LED Control */
366/********************/ 376/********************/
@@ -385,6 +395,9 @@ struct ath_led {
385 bool registered; 395 bool registered;
386}; 396};
387 397
398void ath_init_leds(struct ath_softc *sc);
399void ath_deinit_leds(struct ath_softc *sc);
400
388/********************/ 401/********************/
389/* Main driver core */ 402/* Main driver core */
390/********************/ 403/********************/
@@ -403,26 +416,28 @@ struct ath_led {
403#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 416#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
404#define ATH_RATE_DUMMY_MARKER 0 417#define ATH_RATE_DUMMY_MARKER 0
405 418
406#define SC_OP_INVALID BIT(0) 419#define SC_OP_INVALID BIT(0)
407#define SC_OP_BEACONS BIT(1) 420#define SC_OP_BEACONS BIT(1)
408#define SC_OP_RXAGGR BIT(2) 421#define SC_OP_RXAGGR BIT(2)
409#define SC_OP_TXAGGR BIT(3) 422#define SC_OP_TXAGGR BIT(3)
410#define SC_OP_FULL_RESET BIT(4) 423#define SC_OP_FULL_RESET BIT(4)
411#define SC_OP_PREAMBLE_SHORT BIT(5) 424#define SC_OP_PREAMBLE_SHORT BIT(5)
412#define SC_OP_PROTECT_ENABLE BIT(6) 425#define SC_OP_PROTECT_ENABLE BIT(6)
413#define SC_OP_RXFLUSH BIT(7) 426#define SC_OP_RXFLUSH BIT(7)
414#define SC_OP_LED_ASSOCIATED BIT(8) 427#define SC_OP_LED_ASSOCIATED BIT(8)
415#define SC_OP_WAIT_FOR_BEACON BIT(12) 428#define SC_OP_LED_ON BIT(9)
416#define SC_OP_LED_ON BIT(13) 429#define SC_OP_SCANNING BIT(10)
417#define SC_OP_SCANNING BIT(14) 430#define SC_OP_TSF_RESET BIT(11)
418#define SC_OP_TSF_RESET BIT(15) 431#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
419#define SC_OP_WAIT_FOR_CAB BIT(16) 432
420#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17) 433/* Powersave flags */
421#define SC_OP_WAIT_FOR_TX_ACK BIT(18) 434#define PS_WAIT_FOR_BEACON BIT(0)
422#define SC_OP_BEACON_SYNC BIT(19) 435#define PS_WAIT_FOR_CAB BIT(1)
423#define SC_OP_BT_PRIORITY_DETECTED BIT(21) 436#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
424#define SC_OP_NULLFUNC_COMPLETED BIT(22) 437#define PS_WAIT_FOR_TX_ACK BIT(3)
425#define SC_OP_PS_ENABLED BIT(23) 438#define PS_BEACON_SYNC BIT(4)
439#define PS_NULLFUNC_COMPLETED BIT(5)
440#define PS_ENABLED BIT(6)
426 441
427struct ath_wiphy; 442struct ath_wiphy;
428struct ath_rate_table; 443struct ath_rate_table;
@@ -453,12 +468,12 @@ struct ath_softc {
453 int irq; 468 int irq;
454 spinlock_t sc_resetlock; 469 spinlock_t sc_resetlock;
455 spinlock_t sc_serial_rw; 470 spinlock_t sc_serial_rw;
456 spinlock_t ani_lock;
457 spinlock_t sc_pm_lock; 471 spinlock_t sc_pm_lock;
458 struct mutex mutex; 472 struct mutex mutex;
459 473
460 u32 intrstatus; 474 u32 intrstatus;
461 u32 sc_flags; /* SC_OP_* */ 475 u32 sc_flags; /* SC_OP_* */
476 u16 ps_flags; /* PS_* */
462 u16 curtxpow; 477 u16 curtxpow;
463 u8 nbcnvifs; 478 u8 nbcnvifs;
464 u16 nvifs; 479 u16 nvifs;
@@ -509,6 +524,7 @@ struct ath_wiphy {
509 int chan_is_ht; 524 int chan_is_ht;
510}; 525};
511 526
527void ath9k_tasklet(unsigned long data);
512int ath_reset(struct ath_softc *sc, bool retry_tx); 528int ath_reset(struct ath_softc *sc, bool retry_tx);
513int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); 529int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
514int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 530int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -525,15 +541,15 @@ static inline void ath_bus_cleanup(struct ath_common *common)
525} 541}
526 542
527extern struct ieee80211_ops ath9k_ops; 543extern struct ieee80211_ops ath9k_ops;
544extern int modparam_nohwcrypt;
528 545
529irqreturn_t ath_isr(int irq, void *dev); 546irqreturn_t ath_isr(int irq, void *dev);
530void ath_cleanup(struct ath_softc *sc); 547int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
531int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
532 const struct ath_bus_ops *bus_ops); 548 const struct ath_bus_ops *bus_ops);
533void ath_detach(struct ath_softc *sc); 549void ath9k_deinit_device(struct ath_softc *sc);
534const char *ath_mac_bb_name(u32 mac_bb_version); 550const char *ath_mac_bb_name(u32 mac_bb_version);
535const char *ath_rf_name(u16 rf_version); 551const char *ath_rf_name(u16 rf_version);
536void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 552void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
537void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 553void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
538 struct ath9k_channel *ichan); 554 struct ath9k_channel *ichan);
539void ath_update_chainmask(struct ath_softc *sc, int is_ht); 555void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -542,6 +558,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
542 558
543void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); 559void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
544void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); 560void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
561bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
545 562
546#ifdef CONFIG_PCI 563#ifdef CONFIG_PCI
547int ath_pci_init(void); 564int ath_pci_init(void);
@@ -583,4 +600,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
583void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); 600void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
584 601
585int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); 602int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
603
604void ath_start_rfkill_poll(struct ath_softc *sc);
605extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
606
586#endif /* ATH9K_H */ 607#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1660ef17aaf..422454fe4ff 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -480,7 +480,8 @@ void ath_beacon_tasklet(unsigned long data)
480 sc->beacon.updateslot = COMMIT; /* commit next beacon */ 480 sc->beacon.updateslot = COMMIT; /* commit next beacon */
481 sc->beacon.slotupdate = slot; 481 sc->beacon.slotupdate = slot;
482 } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) { 482 } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
483 ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime); 483 ah->slottime = sc->beacon.slottime;
484 ath9k_hw_init_global_settings(ah);
484 sc->beacon.updateslot = OK; 485 sc->beacon.updateslot = OK;
485 } 486 }
486 if (bfaddr != 0) { 487 if (bfaddr != 0) {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b66f72dbf7b..9489b6b25b5 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -289,23 +289,49 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
289 if (sc->cur_rate_table == NULL) 289 if (sc->cur_rate_table == NULL)
290 return 0; 290 return 0;
291 291
292 max = 80 + sc->cur_rate_table->rate_cnt * 64; 292 max = 80 + sc->cur_rate_table->rate_cnt * 1024;
293 buf = kmalloc(max + 1, GFP_KERNEL); 293 buf = kmalloc(max + 1, GFP_KERNEL);
294 if (buf == NULL) 294 if (buf == NULL)
295 return 0; 295 return 0;
296 buf[max] = 0; 296 buf[max] = 0;
297 297
298 len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success", 298 len += sprintf(buf, "%6s %6s %6s "
299 "Retries", "XRetries", "PER"); 299 "%10s %10s %10s %10s\n",
300 "HT", "MCS", "Rate",
301 "Success", "Retries", "XRetries", "PER");
300 302
301 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) { 303 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
302 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps; 304 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
303 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i]; 305 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
306 char mcs[5];
307 char htmode[5];
308 int used_mcs = 0, used_htmode = 0;
309
310 if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
311 used_mcs = snprintf(mcs, 5, "%d",
312 sc->cur_rate_table->info[i].ratecode);
313
314 if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
315 used_htmode = snprintf(htmode, 5, "HT40");
316 else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
317 used_htmode = snprintf(htmode, 5, "HT20");
318 else
319 used_htmode = snprintf(htmode, 5, "????");
320 }
321
322 mcs[used_mcs] = '\0';
323 htmode[used_htmode] = '\0';
304 324
305 len += snprintf(buf + len, max - len, 325 len += snprintf(buf + len, max - len,
306 "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000, 326 "%6s %6s %3u.%d: "
307 (ratekbps % 1000) / 100, stats->success, 327 "%10u %10u %10u %10u\n",
308 stats->retries, stats->xretries, 328 htmode,
329 mcs,
330 ratekbps / 1000,
331 (ratekbps % 1000) / 100,
332 stats->success,
333 stats->retries,
334 stats->xretries,
309 stats->per); 335 stats->per);
310 } 336 }
311 337
@@ -554,6 +580,116 @@ static const struct file_operations fops_xmit = {
554 .owner = THIS_MODULE 580 .owner = THIS_MODULE
555}; 581};
556 582
583static ssize_t read_file_recv(struct file *file, char __user *user_buf,
584 size_t count, loff_t *ppos)
585{
586#define PHY_ERR(s, p) \
587 len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
588 sc->debug.stats.rxstats.phy_err_stats[p]);
589
590 struct ath_softc *sc = file->private_data;
591 char *buf;
592 unsigned int len = 0, size = 1152;
593 ssize_t retval = 0;
594
595 buf = kzalloc(size, GFP_KERNEL);
596 if (buf == NULL)
597 return 0;
598
599 len += snprintf(buf + len, size - len,
600 "%18s : %10u\n", "CRC ERR",
601 sc->debug.stats.rxstats.crc_err);
602 len += snprintf(buf + len, size - len,
603 "%18s : %10u\n", "DECRYPT CRC ERR",
604 sc->debug.stats.rxstats.decrypt_crc_err);
605 len += snprintf(buf + len, size - len,
606 "%18s : %10u\n", "PHY ERR",
607 sc->debug.stats.rxstats.phy_err);
608 len += snprintf(buf + len, size - len,
609 "%18s : %10u\n", "MIC ERR",
610 sc->debug.stats.rxstats.mic_err);
611 len += snprintf(buf + len, size - len,
612 "%18s : %10u\n", "PRE-DELIM CRC ERR",
613 sc->debug.stats.rxstats.pre_delim_crc_err);
614 len += snprintf(buf + len, size - len,
615 "%18s : %10u\n", "POST-DELIM CRC ERR",
616 sc->debug.stats.rxstats.post_delim_crc_err);
617 len += snprintf(buf + len, size - len,
618 "%18s : %10u\n", "DECRYPT BUSY ERR",
619 sc->debug.stats.rxstats.decrypt_busy_err);
620
621 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
622 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
623 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
624 PHY_ERR("RATE", ATH9K_PHYERR_RATE);
625 PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
626 PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
627 PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
628 PHY_ERR("TOR", ATH9K_PHYERR_TOR);
629 PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
630 PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
631 PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
632 PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
633 PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
634 PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
635 PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
636 PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
637 PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
638 PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
639 PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
640 PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
641 PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
642 PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
643 PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
644 PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
645 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
646 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
647
648 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
649 kfree(buf);
650
651 return retval;
652
653#undef PHY_ERR
654}
655
656void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
657{
658#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
659#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
660
661 struct ath_desc *ds = bf->bf_desc;
662 u32 phyerr;
663
664 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
665 RX_STAT_INC(crc_err);
666 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
667 RX_STAT_INC(decrypt_crc_err);
668 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
669 RX_STAT_INC(mic_err);
670 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
671 RX_STAT_INC(pre_delim_crc_err);
672 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
673 RX_STAT_INC(post_delim_crc_err);
674 if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
675 RX_STAT_INC(decrypt_busy_err);
676
677 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
678 RX_STAT_INC(phy_err);
679 phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
680 RX_PHY_ERR_INC(phyerr);
681 }
682
683#undef RX_STAT_INC
684#undef RX_PHY_ERR_INC
685}
686
687static const struct file_operations fops_recv = {
688 .read = read_file_recv,
689 .open = ath9k_debugfs_open,
690 .owner = THIS_MODULE
691};
692
557int ath9k_init_debug(struct ath_hw *ah) 693int ath9k_init_debug(struct ath_hw *ah)
558{ 694{
559 struct ath_common *common = ath9k_hw_common(ah); 695 struct ath_common *common = ath9k_hw_common(ah);
@@ -606,6 +742,13 @@ int ath9k_init_debug(struct ath_hw *ah)
606 if (!sc->debug.debugfs_xmit) 742 if (!sc->debug.debugfs_xmit)
607 goto err; 743 goto err;
608 744
745 sc->debug.debugfs_recv = debugfs_create_file("recv",
746 S_IRUSR,
747 sc->debug.debugfs_phy,
748 sc, &fops_recv);
749 if (!sc->debug.debugfs_recv)
750 goto err;
751
609 return 0; 752 return 0;
610err: 753err:
611 ath9k_exit_debug(ah); 754 ath9k_exit_debug(ah);
@@ -617,6 +760,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
617 struct ath_common *common = ath9k_hw_common(ah); 760 struct ath_common *common = ath9k_hw_common(ah);
618 struct ath_softc *sc = (struct ath_softc *) common->priv; 761 struct ath_softc *sc = (struct ath_softc *) common->priv;
619 762
763 debugfs_remove(sc->debug.debugfs_recv);
620 debugfs_remove(sc->debug.debugfs_xmit); 764 debugfs_remove(sc->debug.debugfs_xmit);
621 debugfs_remove(sc->debug.debugfs_wiphy); 765 debugfs_remove(sc->debug.debugfs_wiphy);
622 debugfs_remove(sc->debug.debugfs_rcstat); 766 debugfs_remove(sc->debug.debugfs_rcstat);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee1..86780e68b31 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
116 u32 delim_underrun; 116 u32 delim_underrun;
117}; 117};
118 118
119/**
120 * struct ath_rx_stats - RX Statistics
121 * @crc_err: No. of frames with incorrect CRC value
122 * @decrypt_crc_err: No. of frames whose CRC check failed after
123 decryption process completed
124 * @phy_err: No. of frames whose reception failed because the PHY
125 encountered an error
126 * @mic_err: No. of frames with incorrect TKIP MIC verification failure
127 * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
128 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
129 * @decrypt_busy_err: Decryption interruptions counter
130 * @phy_err_stats: Individual PHY error statistics
131 */
132struct ath_rx_stats {
133 u32 crc_err;
134 u32 decrypt_crc_err;
135 u32 phy_err;
136 u32 mic_err;
137 u32 pre_delim_crc_err;
138 u32 post_delim_crc_err;
139 u32 decrypt_busy_err;
140 u32 phy_err_stats[ATH9K_PHYERR_MAX];
141};
142
119struct ath_stats { 143struct ath_stats {
120 struct ath_interrupt_stats istats; 144 struct ath_interrupt_stats istats;
121 struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; 145 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
122 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; 146 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
147 struct ath_rx_stats rxstats;
123}; 148};
124 149
125struct ath9k_debug { 150struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
130 struct dentry *debugfs_rcstat; 155 struct dentry *debugfs_rcstat;
131 struct dentry *debugfs_wiphy; 156 struct dentry *debugfs_wiphy;
132 struct dentry *debugfs_xmit; 157 struct dentry *debugfs_xmit;
158 struct dentry *debugfs_recv;
133 struct ath_stats stats; 159 struct ath_stats stats;
134}; 160};
135 161
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
142void ath_debug_stat_rc(struct ath_softc *sc, int final_rate); 168void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
143void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
144 struct ath_buf *bf); 170 struct ath_buf *bf);
171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
145void ath_debug_stat_retries(struct ath_softc *sc, int rix, 172void ath_debug_stat_retries(struct ath_softc *sc, int rix,
146 int xretries, int retries, u8 per); 173 int xretries, int retries, u8 per);
147 174
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
181{ 208{
182} 209}
183 210
211static inline void ath_debug_stat_rx(struct ath_softc *sc,
212 struct ath_buf *bf)
213{
214}
215
184static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix, 216static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
185 int xretries, int retries, u8 per) 217 int xretries, int retries, u8 per)
186{ 218{
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 00000000000..e204bd25ff6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,428 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19/********************************/
20/* LED functions */
21/********************************/
22
23static void ath_led_blink_work(struct work_struct *work)
24{
25 struct ath_softc *sc = container_of(work, struct ath_softc,
26 ath_led_blink_work.work);
27
28 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
29 return;
30
31 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
32 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
33 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
34 else
35 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
36 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
37
38 ieee80211_queue_delayed_work(sc->hw,
39 &sc->ath_led_blink_work,
40 (sc->sc_flags & SC_OP_LED_ON) ?
41 msecs_to_jiffies(sc->led_off_duration) :
42 msecs_to_jiffies(sc->led_on_duration));
43
44 sc->led_on_duration = sc->led_on_cnt ?
45 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
46 ATH_LED_ON_DURATION_IDLE;
47 sc->led_off_duration = sc->led_off_cnt ?
48 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
49 ATH_LED_OFF_DURATION_IDLE;
50 sc->led_on_cnt = sc->led_off_cnt = 0;
51 if (sc->sc_flags & SC_OP_LED_ON)
52 sc->sc_flags &= ~SC_OP_LED_ON;
53 else
54 sc->sc_flags |= SC_OP_LED_ON;
55}
56
57static void ath_led_brightness(struct led_classdev *led_cdev,
58 enum led_brightness brightness)
59{
60 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
61 struct ath_softc *sc = led->sc;
62
63 switch (brightness) {
64 case LED_OFF:
65 if (led->led_type == ATH_LED_ASSOC ||
66 led->led_type == ATH_LED_RADIO) {
67 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
68 (led->led_type == ATH_LED_RADIO));
69 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
70 if (led->led_type == ATH_LED_RADIO)
71 sc->sc_flags &= ~SC_OP_LED_ON;
72 } else {
73 sc->led_off_cnt++;
74 }
75 break;
76 case LED_FULL:
77 if (led->led_type == ATH_LED_ASSOC) {
78 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
79 ieee80211_queue_delayed_work(sc->hw,
80 &sc->ath_led_blink_work, 0);
81 } else if (led->led_type == ATH_LED_RADIO) {
82 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
83 sc->sc_flags |= SC_OP_LED_ON;
84 } else {
85 sc->led_on_cnt++;
86 }
87 break;
88 default:
89 break;
90 }
91}
92
93static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
94 char *trigger)
95{
96 int ret;
97
98 led->sc = sc;
99 led->led_cdev.name = led->name;
100 led->led_cdev.default_trigger = trigger;
101 led->led_cdev.brightness_set = ath_led_brightness;
102
103 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
104 if (ret)
105 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
106 "Failed to register led:%s", led->name);
107 else
108 led->registered = 1;
109 return ret;
110}
111
112static void ath_unregister_led(struct ath_led *led)
113{
114 if (led->registered) {
115 led_classdev_unregister(&led->led_cdev);
116 led->registered = 0;
117 }
118}
119
120void ath_deinit_leds(struct ath_softc *sc)
121{
122 ath_unregister_led(&sc->assoc_led);
123 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
124 ath_unregister_led(&sc->tx_led);
125 ath_unregister_led(&sc->rx_led);
126 ath_unregister_led(&sc->radio_led);
127 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
128}
129
130void ath_init_leds(struct ath_softc *sc)
131{
132 char *trigger;
133 int ret;
134
135 if (AR_SREV_9287(sc->sc_ah))
136 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
137 else
138 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
139
140 /* Configure gpio 1 for output */
141 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
142 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
143 /* LED off, active low */
144 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
145
146 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
147
148 trigger = ieee80211_get_radio_led_name(sc->hw);
149 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
150 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
151 ret = ath_register_led(sc, &sc->radio_led, trigger);
152 sc->radio_led.led_type = ATH_LED_RADIO;
153 if (ret)
154 goto fail;
155
156 trigger = ieee80211_get_assoc_led_name(sc->hw);
157 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
158 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
159 ret = ath_register_led(sc, &sc->assoc_led, trigger);
160 sc->assoc_led.led_type = ATH_LED_ASSOC;
161 if (ret)
162 goto fail;
163
164 trigger = ieee80211_get_tx_led_name(sc->hw);
165 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
166 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
167 ret = ath_register_led(sc, &sc->tx_led, trigger);
168 sc->tx_led.led_type = ATH_LED_TX;
169 if (ret)
170 goto fail;
171
172 trigger = ieee80211_get_rx_led_name(sc->hw);
173 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
174 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
175 ret = ath_register_led(sc, &sc->rx_led, trigger);
176 sc->rx_led.led_type = ATH_LED_RX;
177 if (ret)
178 goto fail;
179
180 return;
181
182fail:
183 cancel_delayed_work_sync(&sc->ath_led_blink_work);
184 ath_deinit_leds(sc);
185}
186
187/*******************/
188/* Rfkill */
189/*******************/
190
191static bool ath_is_rfkill_set(struct ath_softc *sc)
192{
193 struct ath_hw *ah = sc->sc_ah;
194
195 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
196 ah->rfkill_polarity;
197}
198
199void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
200{
201 struct ath_wiphy *aphy = hw->priv;
202 struct ath_softc *sc = aphy->sc;
203 bool blocked = !!ath_is_rfkill_set(sc);
204
205 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
206}
207
208void ath_start_rfkill_poll(struct ath_softc *sc)
209{
210 struct ath_hw *ah = sc->sc_ah;
211
212 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
213 wiphy_rfkill_start_polling(sc->hw->wiphy);
214}
215
216/******************/
217/* BTCOEX */
218/******************/
219
220/*
221 * Detects if there is any priority bt traffic
222 */
223static void ath_detect_bt_priority(struct ath_softc *sc)
224{
225 struct ath_btcoex *btcoex = &sc->btcoex;
226 struct ath_hw *ah = sc->sc_ah;
227
228 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
229 btcoex->bt_priority_cnt++;
230
231 if (time_after(jiffies, btcoex->bt_priority_time +
232 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
233 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
234 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
235 "BT priority traffic detected");
236 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
237 } else {
238 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
239 }
240
241 btcoex->bt_priority_cnt = 0;
242 btcoex->bt_priority_time = jiffies;
243 }
244}
245
246/*
247 * Configures appropriate weight based on stomp type.
248 */
249static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
250 enum ath_stomp_type stomp_type)
251{
252 struct ath_hw *ah = sc->sc_ah;
253
254 switch (stomp_type) {
255 case ATH_BTCOEX_STOMP_ALL:
256 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
257 AR_STOMP_ALL_WLAN_WGHT);
258 break;
259 case ATH_BTCOEX_STOMP_LOW:
260 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
261 AR_STOMP_LOW_WLAN_WGHT);
262 break;
263 case ATH_BTCOEX_STOMP_NONE:
264 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
265 AR_STOMP_NONE_WLAN_WGHT);
266 break;
267 default:
268 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
269 "Invalid Stomptype\n");
270 break;
271 }
272
273 ath9k_hw_btcoex_enable(ah);
274}
275
276static void ath9k_gen_timer_start(struct ath_hw *ah,
277 struct ath_gen_timer *timer,
278 u32 timer_next,
279 u32 timer_period)
280{
281 struct ath_common *common = ath9k_hw_common(ah);
282 struct ath_softc *sc = (struct ath_softc *) common->priv;
283
284 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
285
286 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
287 ath9k_hw_set_interrupts(ah, 0);
288 sc->imask |= ATH9K_INT_GENTIMER;
289 ath9k_hw_set_interrupts(ah, sc->imask);
290 }
291}
292
293static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
294{
295 struct ath_common *common = ath9k_hw_common(ah);
296 struct ath_softc *sc = (struct ath_softc *) common->priv;
297 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
298
299 ath9k_hw_gen_timer_stop(ah, timer);
300
301 /* if no timer is enabled, turn off interrupt mask */
302 if (timer_table->timer_mask.val == 0) {
303 ath9k_hw_set_interrupts(ah, 0);
304 sc->imask &= ~ATH9K_INT_GENTIMER;
305 ath9k_hw_set_interrupts(ah, sc->imask);
306 }
307}
308
309/*
310 * This is the master bt coex timer which runs for every
311 * 45ms, bt traffic will be given priority during 55% of this
312 * period while wlan gets remaining 45%
313 */
314static void ath_btcoex_period_timer(unsigned long data)
315{
316 struct ath_softc *sc = (struct ath_softc *) data;
317 struct ath_hw *ah = sc->sc_ah;
318 struct ath_btcoex *btcoex = &sc->btcoex;
319
320 ath_detect_bt_priority(sc);
321
322 spin_lock_bh(&btcoex->btcoex_lock);
323
324 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
325
326 spin_unlock_bh(&btcoex->btcoex_lock);
327
328 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
329 if (btcoex->hw_timer_enabled)
330 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
331
332 ath9k_gen_timer_start(ah,
333 btcoex->no_stomp_timer,
334 (ath9k_hw_gettsf32(ah) +
335 btcoex->btcoex_no_stomp),
336 btcoex->btcoex_no_stomp * 10);
337 btcoex->hw_timer_enabled = true;
338 }
339
340 mod_timer(&btcoex->period_timer, jiffies +
341 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
342}
343
344/*
345 * Generic tsf based hw timer which configures weight
346 * registers to time slice between wlan and bt traffic
347 */
348static void ath_btcoex_no_stomp_timer(void *arg)
349{
350 struct ath_softc *sc = (struct ath_softc *)arg;
351 struct ath_hw *ah = sc->sc_ah;
352 struct ath_btcoex *btcoex = &sc->btcoex;
353
354 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
355 "no stomp timer running \n");
356
357 spin_lock_bh(&btcoex->btcoex_lock);
358
359 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
360 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
361 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
362 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
363
364 spin_unlock_bh(&btcoex->btcoex_lock);
365}
366
367int ath_init_btcoex_timer(struct ath_softc *sc)
368{
369 struct ath_btcoex *btcoex = &sc->btcoex;
370
371 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
372 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
373 btcoex->btcoex_period / 100;
374
375 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
376 (unsigned long) sc);
377
378 spin_lock_init(&btcoex->btcoex_lock);
379
380 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
381 ath_btcoex_no_stomp_timer,
382 ath_btcoex_no_stomp_timer,
383 (void *) sc, AR_FIRST_NDP_TIMER);
384
385 if (!btcoex->no_stomp_timer)
386 return -ENOMEM;
387
388 return 0;
389}
390
391/*
392 * (Re)start btcoex timers
393 */
394void ath9k_btcoex_timer_resume(struct ath_softc *sc)
395{
396 struct ath_btcoex *btcoex = &sc->btcoex;
397 struct ath_hw *ah = sc->sc_ah;
398
399 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
400 "Starting btcoex timers");
401
402 /* make sure duty cycle timer is also stopped when resuming */
403 if (btcoex->hw_timer_enabled)
404 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
405
406 btcoex->bt_priority_cnt = 0;
407 btcoex->bt_priority_time = jiffies;
408 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
409
410 mod_timer(&btcoex->period_timer, jiffies);
411}
412
413
414/*
415 * Pause btcoex timer and bt duty cycle timer
416 */
417void ath9k_btcoex_timer_pause(struct ath_softc *sc)
418{
419 struct ath_btcoex *btcoex = &sc->btcoex;
420 struct ath_hw *ah = sc->sc_ah;
421
422 del_timer_sync(&btcoex->period_timer);
423
424 if (btcoex->hw_timer_enabled)
425 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
426
427 btcoex->hw_timer_enabled = false;
428}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ae371448b5a..1a27f39c1ad 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -52,28 +52,6 @@ module_exit(ath9k_exit);
52/* Helper Functions */ 52/* Helper Functions */
53/********************/ 53/********************/
54 54
55static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
56{
57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
58
59 if (!ah->curchan) /* should really check for CCK instead */
60 return clks / ATH9K_CLOCK_RATE_CCK;
61 if (conf->channel->band == IEEE80211_BAND_2GHZ)
62 return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
63
64 return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
65}
66
67static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
68{
69 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
70
71 if (conf_is_ht40(conf))
72 return ath9k_hw_mac_usec(ah, clks) / 2;
73 else
74 return ath9k_hw_mac_usec(ah, clks);
75}
76
77static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs) 55static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
78{ 56{
79 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -343,30 +321,6 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
343 return true; 321 return true;
344} 322}
345 323
346static const char *ath9k_hw_devname(u16 devid)
347{
348 switch (devid) {
349 case AR5416_DEVID_PCI:
350 return "Atheros 5416";
351 case AR5416_DEVID_PCIE:
352 return "Atheros 5418";
353 case AR9160_DEVID_PCI:
354 return "Atheros 9160";
355 case AR5416_AR9100_DEVID:
356 return "Atheros 9100";
357 case AR9280_DEVID_PCI:
358 case AR9280_DEVID_PCIE:
359 return "Atheros 9280";
360 case AR9285_DEVID_PCIE:
361 return "Atheros 9285";
362 case AR5416_DEVID_AR9287_PCI:
363 case AR5416_DEVID_AR9287_PCIE:
364 return "Atheros 9287";
365 }
366
367 return NULL;
368}
369
370static void ath9k_hw_init_config(struct ath_hw *ah) 324static void ath9k_hw_init_config(struct ath_hw *ah)
371{ 325{
372 int i; 326 int i;
@@ -392,7 +346,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
392 ah->config.spurchans[i][1] = AR_NO_SPUR; 346 ah->config.spurchans[i][1] = AR_NO_SPUR;
393 } 347 }
394 348
395 ah->config.intr_mitigation = true; 349 ah->config.rx_intr_mitigation = true;
396 350
397 /* 351 /*
398 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 352 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -437,8 +391,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
437 ah->beacon_interval = 100; 391 ah->beacon_interval = 100;
438 ah->enable_32kHz_clock = DONT_USE_32KHZ; 392 ah->enable_32kHz_clock = DONT_USE_32KHZ;
439 ah->slottime = (u32) -1; 393 ah->slottime = (u32) -1;
440 ah->acktimeout = (u32) -1;
441 ah->ctstimeout = (u32) -1;
442 ah->globaltxtimeout = (u32) -1; 394 ah->globaltxtimeout = (u32) -1;
443 ah->power_mode = ATH9K_PM_UNDEFINED; 395 ah->power_mode = ATH9K_PM_UNDEFINED;
444} 396}
@@ -1183,7 +1135,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1183 AR_IMR_RXORN | 1135 AR_IMR_RXORN |
1184 AR_IMR_BCNMISC; 1136 AR_IMR_BCNMISC;
1185 1137
1186 if (ah->config.intr_mitigation) 1138 if (ah->config.rx_intr_mitigation)
1187 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 1139 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
1188 else 1140 else
1189 ah->mask_reg |= AR_IMR_RXOK; 1141 ah->mask_reg |= AR_IMR_RXOK;
@@ -1203,34 +1155,25 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1203 } 1155 }
1204} 1156}
1205 1157
1206static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) 1158static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
1207{ 1159{
1208 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { 1160 u32 val = ath9k_hw_mac_to_clks(ah, us);
1209 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1161 val = min(val, (u32) 0xFFFF);
1210 "bad ack timeout %u\n", us); 1162 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
1211 ah->acktimeout = (u32) -1;
1212 return false;
1213 } else {
1214 REG_RMW_FIELD(ah, AR_TIME_OUT,
1215 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
1216 ah->acktimeout = us;
1217 return true;
1218 }
1219} 1163}
1220 1164
1221static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) 1165static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1222{ 1166{
1223 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { 1167 u32 val = ath9k_hw_mac_to_clks(ah, us);
1224 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1168 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
1225 "bad cts timeout %u\n", us); 1169 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
1226 ah->ctstimeout = (u32) -1; 1170}
1227 return false; 1171
1228 } else { 1172static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1229 REG_RMW_FIELD(ah, AR_TIME_OUT, 1173{
1230 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us)); 1174 u32 val = ath9k_hw_mac_to_clks(ah, us);
1231 ah->ctstimeout = us; 1175 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
1232 return true; 1176 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
1233 }
1234} 1177}
1235 1178
1236static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) 1179static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
@@ -1247,31 +1190,37 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1247 } 1190 }
1248} 1191}
1249 1192
1250static void ath9k_hw_init_user_settings(struct ath_hw *ah) 1193void ath9k_hw_init_global_settings(struct ath_hw *ah)
1251{ 1194{
1195 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
1196 int acktimeout;
1197 int slottime;
1198 int sifstime;
1199
1252 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", 1200 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
1253 ah->misc_mode); 1201 ah->misc_mode);
1254 1202
1255 if (ah->misc_mode != 0) 1203 if (ah->misc_mode != 0)
1256 REG_WRITE(ah, AR_PCU_MISC, 1204 REG_WRITE(ah, AR_PCU_MISC,
1257 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode); 1205 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
1258 if (ah->slottime != (u32) -1) 1206
1259 ath9k_hw_setslottime(ah, ah->slottime); 1207 if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
1260 if (ah->acktimeout != (u32) -1) 1208 sifstime = 16;
1261 ath9k_hw_set_ack_timeout(ah, ah->acktimeout); 1209 else
1262 if (ah->ctstimeout != (u32) -1) 1210 sifstime = 10;
1263 ath9k_hw_set_cts_timeout(ah, ah->ctstimeout); 1211
1212 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1213 slottime = ah->slottime + 3 * ah->coverage_class;
1214 acktimeout = slottime + sifstime;
1215 ath9k_hw_setslottime(ah, slottime);
1216 ath9k_hw_set_ack_timeout(ah, acktimeout);
1217 ath9k_hw_set_cts_timeout(ah, acktimeout);
1264 if (ah->globaltxtimeout != (u32) -1) 1218 if (ah->globaltxtimeout != (u32) -1)
1265 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout); 1219 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
1266} 1220}
1221EXPORT_SYMBOL(ath9k_hw_init_global_settings);
1267 1222
1268const char *ath9k_hw_probe(u16 vendorid, u16 devid) 1223void ath9k_hw_deinit(struct ath_hw *ah)
1269{
1270 return vendorid == ATHEROS_VENDOR_ID ?
1271 ath9k_hw_devname(devid) : NULL;
1272}
1273
1274void ath9k_hw_detach(struct ath_hw *ah)
1275{ 1224{
1276 struct ath_common *common = ath9k_hw_common(ah); 1225 struct ath_common *common = ath9k_hw_common(ah);
1277 1226
@@ -1289,7 +1238,7 @@ free_hw:
1289 kfree(ah); 1238 kfree(ah);
1290 ah = NULL; 1239 ah = NULL;
1291} 1240}
1292EXPORT_SYMBOL(ath9k_hw_detach); 1241EXPORT_SYMBOL(ath9k_hw_deinit);
1293 1242
1294/*******/ 1243/*******/
1295/* INI */ 1244/* INI */
@@ -2090,7 +2039,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2090 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2039 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2091 ath9k_enable_rfkill(ah); 2040 ath9k_enable_rfkill(ah);
2092 2041
2093 ath9k_hw_init_user_settings(ah); 2042 ath9k_hw_init_global_settings(ah);
2094 2043
2095 if (AR_SREV_9287_12_OR_LATER(ah)) { 2044 if (AR_SREV_9287_12_OR_LATER(ah)) {
2096 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 2045 REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
@@ -2120,7 +2069,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2120 2069
2121 REG_WRITE(ah, AR_OBS, 8); 2070 REG_WRITE(ah, AR_OBS, 8);
2122 2071
2123 if (ah->config.intr_mitigation) { 2072 if (ah->config.rx_intr_mitigation) {
2124 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 2073 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
2125 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); 2074 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
2126 } 2075 }
@@ -2780,7 +2729,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
2780 2729
2781 *masked = isr & ATH9K_INT_COMMON; 2730 *masked = isr & ATH9K_INT_COMMON;
2782 2731
2783 if (ah->config.intr_mitigation) { 2732 if (ah->config.rx_intr_mitigation) {
2784 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) 2733 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
2785 *masked |= ATH9K_INT_RX; 2734 *masked |= ATH9K_INT_RX;
2786 } 2735 }
@@ -2913,7 +2862,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2913 } 2862 }
2914 if (ints & ATH9K_INT_RX) { 2863 if (ints & ATH9K_INT_RX) {
2915 mask |= AR_IMR_RXERR; 2864 mask |= AR_IMR_RXERR;
2916 if (ah->config.intr_mitigation) 2865 if (ah->config.rx_intr_mitigation)
2917 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 2866 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
2918 else 2867 else
2919 mask |= AR_IMR_RXOK | AR_IMR_RXDESC; 2868 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3687,21 +3636,6 @@ u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
3687} 3636}
3688EXPORT_SYMBOL(ath9k_hw_extend_tsf); 3637EXPORT_SYMBOL(ath9k_hw_extend_tsf);
3689 3638
3690bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
3691{
3692 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
3693 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
3694 "bad slot time %u\n", us);
3695 ah->slottime = (u32) -1;
3696 return false;
3697 } else {
3698 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
3699 ah->slottime = us;
3700 return true;
3701 }
3702}
3703EXPORT_SYMBOL(ath9k_hw_setslottime);
3704
3705void ath9k_hw_set11nmac2040(struct ath_hw *ah) 3639void ath9k_hw_set11nmac2040(struct ath_hw *ah)
3706{ 3640{
3707 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 3641 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e2b0c73a616..ab1f1981d85 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -212,7 +212,7 @@ struct ath9k_ops_config {
212 u32 cck_trig_low; 212 u32 cck_trig_low;
213 u32 enable_ani; 213 u32 enable_ani;
214 int serialize_regmode; 214 int serialize_regmode;
215 bool intr_mitigation; 215 bool rx_intr_mitigation;
216#define SPUR_DISABLE 0 216#define SPUR_DISABLE 0
217#define SPUR_ENABLE_IOCTL 1 217#define SPUR_ENABLE_IOCTL 1
218#define SPUR_ENABLE_EEPROM 2 218#define SPUR_ENABLE_EEPROM 2
@@ -551,10 +551,9 @@ struct ath_hw {
551 u32 *bank6Temp; 551 u32 *bank6Temp;
552 552
553 int16_t txpower_indexoffset; 553 int16_t txpower_indexoffset;
554 int coverage_class;
554 u32 beacon_interval; 555 u32 beacon_interval;
555 u32 slottime; 556 u32 slottime;
556 u32 acktimeout;
557 u32 ctstimeout;
558 u32 globaltxtimeout; 557 u32 globaltxtimeout;
559 558
560 /* ANI */ 559 /* ANI */
@@ -616,7 +615,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
616 615
617/* Initialization, Detach, Reset */ 616/* Initialization, Detach, Reset */
618const char *ath9k_hw_probe(u16 vendorid, u16 devid); 617const char *ath9k_hw_probe(u16 vendorid, u16 devid);
619void ath9k_hw_detach(struct ath_hw *ah); 618void ath9k_hw_deinit(struct ath_hw *ah);
620int ath9k_hw_init(struct ath_hw *ah); 619int ath9k_hw_init(struct ath_hw *ah);
621int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 620int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
622 bool bChannelChange); 621 bool bChannelChange);
@@ -668,7 +667,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
668void ath9k_hw_reset_tsf(struct ath_hw *ah); 667void ath9k_hw_reset_tsf(struct ath_hw *ah);
669void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 668void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
670u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp); 669u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
671bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us); 670void ath9k_hw_init_global_settings(struct ath_hw *ah);
672void ath9k_hw_set11nmac2040(struct ath_hw *ah); 671void ath9k_hw_set11nmac2040(struct ath_hw *ah);
673void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 672void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
674void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 673void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 00000000000..5f78d7a5ff2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,861 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19static char *dev_info = "ath9k";
20
21MODULE_AUTHOR("Atheros Communications");
22MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
23MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
24MODULE_LICENSE("Dual BSD/GPL");
25
26static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
27module_param_named(debug, ath9k_debug, uint, 0);
28MODULE_PARM_DESC(debug, "Debugging mask");
29
30int modparam_nohwcrypt;
31module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
32MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
33
34/* We use the hw_value as an index into our private channel structure */
35
36#define CHAN2G(_freq, _idx) { \
37 .center_freq = (_freq), \
38 .hw_value = (_idx), \
39 .max_power = 20, \
40}
41
42#define CHAN5G(_freq, _idx) { \
43 .band = IEEE80211_BAND_5GHZ, \
44 .center_freq = (_freq), \
45 .hw_value = (_idx), \
46 .max_power = 20, \
47}
48
49/* Some 2 GHz radios are actually tunable on 2312-2732
50 * on 5 MHz steps, we support the channels which we know
51 * we have calibration data for all cards though to make
52 * this static */
53static struct ieee80211_channel ath9k_2ghz_chantable[] = {
54 CHAN2G(2412, 0), /* Channel 1 */
55 CHAN2G(2417, 1), /* Channel 2 */
56 CHAN2G(2422, 2), /* Channel 3 */
57 CHAN2G(2427, 3), /* Channel 4 */
58 CHAN2G(2432, 4), /* Channel 5 */
59 CHAN2G(2437, 5), /* Channel 6 */
60 CHAN2G(2442, 6), /* Channel 7 */
61 CHAN2G(2447, 7), /* Channel 8 */
62 CHAN2G(2452, 8), /* Channel 9 */
63 CHAN2G(2457, 9), /* Channel 10 */
64 CHAN2G(2462, 10), /* Channel 11 */
65 CHAN2G(2467, 11), /* Channel 12 */
66 CHAN2G(2472, 12), /* Channel 13 */
67 CHAN2G(2484, 13), /* Channel 14 */
68};
69
70/* Some 5 GHz radios are actually tunable on XXXX-YYYY
71 * on 5 MHz steps, we support the channels which we know
72 * we have calibration data for all cards though to make
73 * this static */
74static struct ieee80211_channel ath9k_5ghz_chantable[] = {
75 /* _We_ call this UNII 1 */
76 CHAN5G(5180, 14), /* Channel 36 */
77 CHAN5G(5200, 15), /* Channel 40 */
78 CHAN5G(5220, 16), /* Channel 44 */
79 CHAN5G(5240, 17), /* Channel 48 */
80 /* _We_ call this UNII 2 */
81 CHAN5G(5260, 18), /* Channel 52 */
82 CHAN5G(5280, 19), /* Channel 56 */
83 CHAN5G(5300, 20), /* Channel 60 */
84 CHAN5G(5320, 21), /* Channel 64 */
85 /* _We_ call this "Middle band" */
86 CHAN5G(5500, 22), /* Channel 100 */
87 CHAN5G(5520, 23), /* Channel 104 */
88 CHAN5G(5540, 24), /* Channel 108 */
89 CHAN5G(5560, 25), /* Channel 112 */
90 CHAN5G(5580, 26), /* Channel 116 */
91 CHAN5G(5600, 27), /* Channel 120 */
92 CHAN5G(5620, 28), /* Channel 124 */
93 CHAN5G(5640, 29), /* Channel 128 */
94 CHAN5G(5660, 30), /* Channel 132 */
95 CHAN5G(5680, 31), /* Channel 136 */
96 CHAN5G(5700, 32), /* Channel 140 */
97 /* _We_ call this UNII 3 */
98 CHAN5G(5745, 33), /* Channel 149 */
99 CHAN5G(5765, 34), /* Channel 153 */
100 CHAN5G(5785, 35), /* Channel 157 */
101 CHAN5G(5805, 36), /* Channel 161 */
102 CHAN5G(5825, 37), /* Channel 165 */
103};
104
105/* Atheros hardware rate code addition for short premble */
106#define SHPCHECK(__hw_rate, __flags) \
107 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
108
109#define RATE(_bitrate, _hw_rate, _flags) { \
110 .bitrate = (_bitrate), \
111 .flags = (_flags), \
112 .hw_value = (_hw_rate), \
113 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
114}
115
116static struct ieee80211_rate ath9k_legacy_rates[] = {
117 RATE(10, 0x1b, 0),
118 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
119 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
120 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
121 RATE(60, 0x0b, 0),
122 RATE(90, 0x0f, 0),
123 RATE(120, 0x0a, 0),
124 RATE(180, 0x0e, 0),
125 RATE(240, 0x09, 0),
126 RATE(360, 0x0d, 0),
127 RATE(480, 0x08, 0),
128 RATE(540, 0x0c, 0),
129};
130
131static void ath9k_deinit_softc(struct ath_softc *sc);
132
133/*
134 * Read and write, they both share the same lock. We do this to serialize
135 * reads and writes on Atheros 802.11n PCI devices only. This is required
136 * as the FIFO on these devices can only accept sanely 2 requests.
137 */
138
139static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
140{
141 struct ath_hw *ah = (struct ath_hw *) hw_priv;
142 struct ath_common *common = ath9k_hw_common(ah);
143 struct ath_softc *sc = (struct ath_softc *) common->priv;
144
145 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
146 unsigned long flags;
147 spin_lock_irqsave(&sc->sc_serial_rw, flags);
148 iowrite32(val, sc->mem + reg_offset);
149 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
150 } else
151 iowrite32(val, sc->mem + reg_offset);
152}
153
154static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
155{
156 struct ath_hw *ah = (struct ath_hw *) hw_priv;
157 struct ath_common *common = ath9k_hw_common(ah);
158 struct ath_softc *sc = (struct ath_softc *) common->priv;
159 u32 val;
160
161 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
162 unsigned long flags;
163 spin_lock_irqsave(&sc->sc_serial_rw, flags);
164 val = ioread32(sc->mem + reg_offset);
165 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
166 } else
167 val = ioread32(sc->mem + reg_offset);
168 return val;
169}
170
171static const struct ath_ops ath9k_common_ops = {
172 .read = ath9k_ioread32,
173 .write = ath9k_iowrite32,
174};
175
176/**************************/
177/* Initialization */
178/**************************/
179
180static void setup_ht_cap(struct ath_softc *sc,
181 struct ieee80211_sta_ht_cap *ht_info)
182{
183 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
184 u8 tx_streams, rx_streams;
185
186 ht_info->ht_supported = true;
187 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
188 IEEE80211_HT_CAP_SM_PS |
189 IEEE80211_HT_CAP_SGI_40 |
190 IEEE80211_HT_CAP_DSSSCCK40;
191
192 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
193 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
194
195 /* set up supported mcs set */
196 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
197 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
198 1 : 2;
199 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
200 1 : 2;
201
202 if (tx_streams != rx_streams) {
203 ath_print(common, ATH_DBG_CONFIG,
204 "TX streams %d, RX streams: %d\n",
205 tx_streams, rx_streams);
206 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
207 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
208 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
209 }
210
211 ht_info->mcs.rx_mask[0] = 0xff;
212 if (rx_streams >= 2)
213 ht_info->mcs.rx_mask[1] = 0xff;
214
215 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
216}
217
218static int ath9k_reg_notifier(struct wiphy *wiphy,
219 struct regulatory_request *request)
220{
221 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
222 struct ath_wiphy *aphy = hw->priv;
223 struct ath_softc *sc = aphy->sc;
224 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
225
226 return ath_reg_notifier_apply(wiphy, request, reg);
227}
228
229/*
230 * This function will allocate both the DMA descriptor structure, and the
231 * buffers it contains. These are used to contain the descriptors used
232 * by the system.
233*/
234int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
235 struct list_head *head, const char *name,
236 int nbuf, int ndesc)
237{
238#define DS2PHYS(_dd, _ds) \
239 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
240#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
241#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
242 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
243 struct ath_desc *ds;
244 struct ath_buf *bf;
245 int i, bsize, error;
246
247 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
248 name, nbuf, ndesc);
249
250 INIT_LIST_HEAD(head);
251 /* ath_desc must be a multiple of DWORDs */
252 if ((sizeof(struct ath_desc) % 4) != 0) {
253 ath_print(common, ATH_DBG_FATAL,
254 "ath_desc not DWORD aligned\n");
255 BUG_ON((sizeof(struct ath_desc) % 4) != 0);
256 error = -ENOMEM;
257 goto fail;
258 }
259
260 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
261
262 /*
263 * Need additional DMA memory because we can't use
264 * descriptors that cross the 4K page boundary. Assume
265 * one skipped descriptor per 4K page.
266 */
267 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
268 u32 ndesc_skipped =
269 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
270 u32 dma_len;
271
272 while (ndesc_skipped) {
273 dma_len = ndesc_skipped * sizeof(struct ath_desc);
274 dd->dd_desc_len += dma_len;
275
276 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
277 };
278 }
279
280 /* allocate descriptors */
281 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
282 &dd->dd_desc_paddr, GFP_KERNEL);
283 if (dd->dd_desc == NULL) {
284 error = -ENOMEM;
285 goto fail;
286 }
287 ds = dd->dd_desc;
288 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
289 name, ds, (u32) dd->dd_desc_len,
290 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
291
292 /* allocate buffers */
293 bsize = sizeof(struct ath_buf) * nbuf;
294 bf = kzalloc(bsize, GFP_KERNEL);
295 if (bf == NULL) {
296 error = -ENOMEM;
297 goto fail2;
298 }
299 dd->dd_bufptr = bf;
300
301 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
302 bf->bf_desc = ds;
303 bf->bf_daddr = DS2PHYS(dd, ds);
304
305 if (!(sc->sc_ah->caps.hw_caps &
306 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
307 /*
308 * Skip descriptor addresses which can cause 4KB
309 * boundary crossing (addr + length) with a 32 dword
310 * descriptor fetch.
311 */
312 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
313 BUG_ON((caddr_t) bf->bf_desc >=
314 ((caddr_t) dd->dd_desc +
315 dd->dd_desc_len));
316
317 ds += ndesc;
318 bf->bf_desc = ds;
319 bf->bf_daddr = DS2PHYS(dd, ds);
320 }
321 }
322 list_add_tail(&bf->list, head);
323 }
324 return 0;
325fail2:
326 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
327 dd->dd_desc_paddr);
328fail:
329 memset(dd, 0, sizeof(*dd));
330 return error;
331#undef ATH_DESC_4KB_BOUND_CHECK
332#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
333#undef DS2PHYS
334}
335
336static void ath9k_init_crypto(struct ath_softc *sc)
337{
338 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
339 int i = 0;
340
341 /* Get the hardware key cache size. */
342 common->keymax = sc->sc_ah->caps.keycache_size;
343 if (common->keymax > ATH_KEYMAX) {
344 ath_print(common, ATH_DBG_ANY,
345 "Warning, using only %u entries in %u key cache\n",
346 ATH_KEYMAX, common->keymax);
347 common->keymax = ATH_KEYMAX;
348 }
349
350 /*
351 * Reset the key cache since some parts do not
352 * reset the contents on initial power up.
353 */
354 for (i = 0; i < common->keymax; i++)
355 ath9k_hw_keyreset(sc->sc_ah, (u16) i);
356
357 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
358 ATH9K_CIPHER_TKIP, NULL)) {
359 /*
360 * Whether we should enable h/w TKIP MIC.
361 * XXX: if we don't support WME TKIP MIC, then we wouldn't
362 * report WMM capable, so it's always safe to turn on
363 * TKIP MIC in this case.
364 */
365 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
366 }
367
368 /*
369 * Check whether the separate key cache entries
370 * are required to handle both tx+rx MIC keys.
371 * With split mic keys the number of stations is limited
372 * to 27 otherwise 59.
373 */
374 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
375 ATH9K_CIPHER_TKIP, NULL)
376 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
377 ATH9K_CIPHER_MIC, NULL)
378 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
379 0, NULL))
380 common->splitmic = 1;
381
382 /* turn on mcast key search if possible */
383 if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
384 (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
385 1, 1, NULL);
386
387}
388
389static int ath9k_init_btcoex(struct ath_softc *sc)
390{
391 int r, qnum;
392
393 switch (sc->sc_ah->btcoex_hw.scheme) {
394 case ATH_BTCOEX_CFG_NONE:
395 break;
396 case ATH_BTCOEX_CFG_2WIRE:
397 ath9k_hw_btcoex_init_2wire(sc->sc_ah);
398 break;
399 case ATH_BTCOEX_CFG_3WIRE:
400 ath9k_hw_btcoex_init_3wire(sc->sc_ah);
401 r = ath_init_btcoex_timer(sc);
402 if (r)
403 return -1;
404 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
405 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
406 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
407 break;
408 default:
409 WARN_ON(1);
410 break;
411 }
412
413 return 0;
414}
415
416static int ath9k_init_queues(struct ath_softc *sc)
417{
418 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
419 int i = 0;
420
421 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
422 sc->tx.hwq_map[i] = -1;
423
424 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
425 if (sc->beacon.beaconq == -1) {
426 ath_print(common, ATH_DBG_FATAL,
427 "Unable to setup a beacon xmit queue\n");
428 goto err;
429 }
430
431 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
432 if (sc->beacon.cabq == NULL) {
433 ath_print(common, ATH_DBG_FATAL,
434 "Unable to setup CAB xmit queue\n");
435 goto err;
436 }
437
438 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
439 ath_cabq_update(sc);
440
441 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
442 ath_print(common, ATH_DBG_FATAL,
443 "Unable to setup xmit queue for BK traffic\n");
444 goto err;
445 }
446
447 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
448 ath_print(common, ATH_DBG_FATAL,
449 "Unable to setup xmit queue for BE traffic\n");
450 goto err;
451 }
452 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
453 ath_print(common, ATH_DBG_FATAL,
454 "Unable to setup xmit queue for VI traffic\n");
455 goto err;
456 }
457 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
458 ath_print(common, ATH_DBG_FATAL,
459 "Unable to setup xmit queue for VO traffic\n");
460 goto err;
461 }
462
463 return 0;
464
465err:
466 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
467 if (ATH_TXQ_SETUP(sc, i))
468 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
469
470 return -EIO;
471}
472
473static void ath9k_init_channels_rates(struct ath_softc *sc)
474{
475 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
476 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
477 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
478 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
479 ARRAY_SIZE(ath9k_2ghz_chantable);
480 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
481 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
482 ARRAY_SIZE(ath9k_legacy_rates);
483 }
484
485 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
486 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
487 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
488 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
489 ARRAY_SIZE(ath9k_5ghz_chantable);
490 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
491 ath9k_legacy_rates + 4;
492 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
493 ARRAY_SIZE(ath9k_legacy_rates) - 4;
494 }
495}
496
497static void ath9k_init_misc(struct ath_softc *sc)
498{
499 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
500 int i = 0;
501
502 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
503 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
504
505 sc->config.txpowlimit = ATH_TXPOWER_MAX;
506
507 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
508 sc->sc_flags |= SC_OP_TXAGGR;
509 sc->sc_flags |= SC_OP_RXAGGR;
510 }
511
512 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
513 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
514
515 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
516 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
517
518 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
519 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
520
521 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
522
523 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
524 sc->beacon.bslot[i] = NULL;
525 sc->beacon.bslot_aphy[i] = NULL;
526 }
527}
528
529static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
530 const struct ath_bus_ops *bus_ops)
531{
532 struct ath_hw *ah = NULL;
533 struct ath_common *common;
534 int ret = 0, i;
535 int csz = 0;
536
537 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
538 if (!ah)
539 return -ENOMEM;
540
541 ah->hw_version.devid = devid;
542 ah->hw_version.subsysid = subsysid;
543 sc->sc_ah = ah;
544
545 common = ath9k_hw_common(ah);
546 common->ops = &ath9k_common_ops;
547 common->bus_ops = bus_ops;
548 common->ah = ah;
549 common->hw = sc->hw;
550 common->priv = sc;
551 common->debug_mask = ath9k_debug;
552
553 spin_lock_init(&sc->wiphy_lock);
554 spin_lock_init(&sc->sc_resetlock);
555 spin_lock_init(&sc->sc_serial_rw);
556 spin_lock_init(&sc->sc_pm_lock);
557 mutex_init(&sc->mutex);
558 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
559 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
560 (unsigned long)sc);
561
562 /*
563 * Cache line size is used to size and align various
564 * structures used to communicate with the hardware.
565 */
566 ath_read_cachesize(common, &csz);
567 common->cachelsz = csz << 2; /* convert to bytes */
568
569 ret = ath9k_hw_init(ah);
570 if (ret) {
571 ath_print(common, ATH_DBG_FATAL,
572 "Unable to initialize hardware; "
573 "initialization status: %d\n", ret);
574 goto err_hw;
575 }
576
577 ret = ath9k_init_debug(ah);
578 if (ret) {
579 ath_print(common, ATH_DBG_FATAL,
580 "Unable to create debugfs files\n");
581 goto err_debug;
582 }
583
584 ret = ath9k_init_queues(sc);
585 if (ret)
586 goto err_queues;
587
588 ret = ath9k_init_btcoex(sc);
589 if (ret)
590 goto err_btcoex;
591
592 ath9k_init_crypto(sc);
593 ath9k_init_channels_rates(sc);
594 ath9k_init_misc(sc);
595
596 return 0;
597
598err_btcoex:
599 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
600 if (ATH_TXQ_SETUP(sc, i))
601 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
602err_queues:
603 ath9k_exit_debug(ah);
604err_debug:
605 ath9k_hw_deinit(ah);
606err_hw:
607 tasklet_kill(&sc->intr_tq);
608 tasklet_kill(&sc->bcon_tasklet);
609
610 kfree(ah);
611 sc->sc_ah = NULL;
612
613 return ret;
614}
615
616void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
617{
618 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
619
620 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
621 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
622 IEEE80211_HW_SIGNAL_DBM |
623 IEEE80211_HW_AMPDU_AGGREGATION |
624 IEEE80211_HW_SUPPORTS_PS |
625 IEEE80211_HW_PS_NULLFUNC_STACK |
626 IEEE80211_HW_SPECTRUM_MGMT;
627
628 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
629 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
630
631 hw->wiphy->interface_modes =
632 BIT(NL80211_IFTYPE_AP) |
633 BIT(NL80211_IFTYPE_STATION) |
634 BIT(NL80211_IFTYPE_ADHOC) |
635 BIT(NL80211_IFTYPE_MESH_POINT);
636
637 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
638
639 hw->queues = 4;
640 hw->max_rates = 4;
641 hw->channel_change_time = 5000;
642 hw->max_listen_interval = 10;
643 /* Hardware supports 10 but we use 4 */
644 hw->max_rate_tries = 4;
645 hw->sta_data_size = sizeof(struct ath_node);
646 hw->vif_data_size = sizeof(struct ath_vif);
647
648 hw->rate_control_algorithm = "ath9k_rate_control";
649
650 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
651 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
652 &sc->sbands[IEEE80211_BAND_2GHZ];
653 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
654 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
655 &sc->sbands[IEEE80211_BAND_5GHZ];
656
657 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
658 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
659 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
660 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
661 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
662 }
663
664 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
665}
666
667int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
668 const struct ath_bus_ops *bus_ops)
669{
670 struct ieee80211_hw *hw = sc->hw;
671 struct ath_common *common;
672 struct ath_hw *ah;
673 int error = 0;
674 struct ath_regulatory *reg;
675
676 /* Bring up device */
677 error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
678 if (error != 0)
679 goto error_init;
680
681 ah = sc->sc_ah;
682 common = ath9k_hw_common(ah);
683 ath9k_set_hw_capab(sc, hw);
684
685 /* Initialize regulatory */
686 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
687 ath9k_reg_notifier);
688 if (error)
689 goto error_regd;
690
691 reg = &common->regulatory;
692
693 /* Setup TX DMA */
694 error = ath_tx_init(sc, ATH_TXBUF);
695 if (error != 0)
696 goto error_tx;
697
698 /* Setup RX DMA */
699 error = ath_rx_init(sc, ATH_RXBUF);
700 if (error != 0)
701 goto error_rx;
702
703 /* Register with mac80211 */
704 error = ieee80211_register_hw(hw);
705 if (error)
706 goto error_register;
707
708 /* Handle world regulatory */
709 if (!ath_is_world_regd(reg)) {
710 error = regulatory_hint(hw->wiphy, reg->alpha2);
711 if (error)
712 goto error_world;
713 }
714
715 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
716 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
717 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
718
719 ath_init_leds(sc);
720 ath_start_rfkill_poll(sc);
721
722 return 0;
723
724error_world:
725 ieee80211_unregister_hw(hw);
726error_register:
727 ath_rx_cleanup(sc);
728error_rx:
729 ath_tx_cleanup(sc);
730error_tx:
731 /* Nothing */
732error_regd:
733 ath9k_deinit_softc(sc);
734error_init:
735 return error;
736}
737
738/*****************************/
739/* De-Initialization */
740/*****************************/
741
742static void ath9k_deinit_softc(struct ath_softc *sc)
743{
744 int i = 0;
745
746 if ((sc->btcoex.no_stomp_timer) &&
747 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
748 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
749
750 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
751 if (ATH_TXQ_SETUP(sc, i))
752 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
753
754 ath9k_exit_debug(sc->sc_ah);
755 ath9k_hw_deinit(sc->sc_ah);
756
757 tasklet_kill(&sc->intr_tq);
758 tasklet_kill(&sc->bcon_tasklet);
759}
760
761void ath9k_deinit_device(struct ath_softc *sc)
762{
763 struct ieee80211_hw *hw = sc->hw;
764 int i = 0;
765
766 ath9k_ps_wakeup(sc);
767
768 wiphy_rfkill_stop_polling(sc->hw->wiphy);
769 ath_deinit_leds(sc);
770
771 for (i = 0; i < sc->num_sec_wiphy; i++) {
772 struct ath_wiphy *aphy = sc->sec_wiphy[i];
773 if (aphy == NULL)
774 continue;
775 sc->sec_wiphy[i] = NULL;
776 ieee80211_unregister_hw(aphy->hw);
777 ieee80211_free_hw(aphy->hw);
778 }
779 kfree(sc->sec_wiphy);
780
781 ieee80211_unregister_hw(hw);
782 ath_rx_cleanup(sc);
783 ath_tx_cleanup(sc);
784 ath9k_deinit_softc(sc);
785}
786
787void ath_descdma_cleanup(struct ath_softc *sc,
788 struct ath_descdma *dd,
789 struct list_head *head)
790{
791 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
792 dd->dd_desc_paddr);
793
794 INIT_LIST_HEAD(head);
795 kfree(dd->dd_bufptr);
796 memset(dd, 0, sizeof(*dd));
797}
798
799/************************/
800/* Module Hooks */
801/************************/
802
803static int __init ath9k_init(void)
804{
805 int error;
806
807 /* Register rate control algorithm */
808 error = ath_rate_control_register();
809 if (error != 0) {
810 printk(KERN_ERR
811 "ath9k: Unable to register rate control "
812 "algorithm: %d\n",
813 error);
814 goto err_out;
815 }
816
817 error = ath9k_debug_create_root();
818 if (error) {
819 printk(KERN_ERR
820 "ath9k: Unable to create debugfs root: %d\n",
821 error);
822 goto err_rate_unregister;
823 }
824
825 error = ath_pci_init();
826 if (error < 0) {
827 printk(KERN_ERR
828 "ath9k: No PCI devices found, driver not installed.\n");
829 error = -ENODEV;
830 goto err_remove_root;
831 }
832
833 error = ath_ahb_init();
834 if (error < 0) {
835 error = -ENODEV;
836 goto err_pci_exit;
837 }
838
839 return 0;
840
841 err_pci_exit:
842 ath_pci_exit();
843
844 err_remove_root:
845 ath9k_debug_remove_root();
846 err_rate_unregister:
847 ath_rate_control_unregister();
848 err_out:
849 return error;
850}
851module_init(ath9k_init);
852
853static void __exit ath9k_exit(void)
854{
855 ath_ahb_exit();
856 ath_pci_exit();
857 ath9k_debug_remove_root();
858 ath_rate_control_unregister();
859 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
860}
861module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e185479e295..29851e6376a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -167,6 +167,40 @@ struct ath_rx_status {
167#define ATH9K_RXKEYIX_INVALID ((u8)-1) 167#define ATH9K_RXKEYIX_INVALID ((u8)-1)
168#define ATH9K_TXKEYIX_INVALID ((u32)-1) 168#define ATH9K_TXKEYIX_INVALID ((u32)-1)
169 169
170enum ath9k_phyerr {
171 ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
172 ATH9K_PHYERR_TIMING = 1, /* Timing error */
173 ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
174 ATH9K_PHYERR_RATE = 3, /* Illegal rate */
175 ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
176 ATH9K_PHYERR_RADAR = 5, /* Radar detect */
177 ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
178 ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
179
180 ATH9K_PHYERR_OFDM_TIMING = 17,
181 ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
182 ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
183 ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
184 ATH9K_PHYERR_OFDM_POWER_DROP = 21,
185 ATH9K_PHYERR_OFDM_SERVICE = 22,
186 ATH9K_PHYERR_OFDM_RESTART = 23,
187 ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
188
189 ATH9K_PHYERR_CCK_TIMING = 25,
190 ATH9K_PHYERR_CCK_HEADER_CRC = 26,
191 ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
192 ATH9K_PHYERR_CCK_SERVICE = 30,
193 ATH9K_PHYERR_CCK_RESTART = 31,
194 ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
195 ATH9K_PHYERR_CCK_POWER_DROP = 33,
196
197 ATH9K_PHYERR_HT_CRC_ERROR = 34,
198 ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
199 ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
200
201 ATH9K_PHYERR_MAX = 37,
202};
203
170struct ath_desc { 204struct ath_desc {
171 u32 ds_link; 205 u32 ds_link;
172 u32 ds_data; 206 u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 643bea35686..6aaca0026da 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
18#include "ath9k.h" 18#include "ath9k.h"
19#include "btcoex.h" 19#include "btcoex.h"
20 20
21static char *dev_info = "ath9k";
22
23MODULE_AUTHOR("Atheros Communications");
24MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26MODULE_LICENSE("Dual BSD/GPL");
27
28static int modparam_nohwcrypt;
29module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
30MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
31
32static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
33module_param_named(debug, ath9k_debug, uint, 0);
34MODULE_PARM_DESC(debug, "Debugging mask");
35
36/* We use the hw_value as an index into our private channel structure */
37
38#define CHAN2G(_freq, _idx) { \
39 .center_freq = (_freq), \
40 .hw_value = (_idx), \
41 .max_power = 20, \
42}
43
44#define CHAN5G(_freq, _idx) { \
45 .band = IEEE80211_BAND_5GHZ, \
46 .center_freq = (_freq), \
47 .hw_value = (_idx), \
48 .max_power = 20, \
49}
50
51/* Some 2 GHz radios are actually tunable on 2312-2732
52 * on 5 MHz steps, we support the channels which we know
53 * we have calibration data for all cards though to make
54 * this static */
55static struct ieee80211_channel ath9k_2ghz_chantable[] = {
56 CHAN2G(2412, 0), /* Channel 1 */
57 CHAN2G(2417, 1), /* Channel 2 */
58 CHAN2G(2422, 2), /* Channel 3 */
59 CHAN2G(2427, 3), /* Channel 4 */
60 CHAN2G(2432, 4), /* Channel 5 */
61 CHAN2G(2437, 5), /* Channel 6 */
62 CHAN2G(2442, 6), /* Channel 7 */
63 CHAN2G(2447, 7), /* Channel 8 */
64 CHAN2G(2452, 8), /* Channel 9 */
65 CHAN2G(2457, 9), /* Channel 10 */
66 CHAN2G(2462, 10), /* Channel 11 */
67 CHAN2G(2467, 11), /* Channel 12 */
68 CHAN2G(2472, 12), /* Channel 13 */
69 CHAN2G(2484, 13), /* Channel 14 */
70};
71
72/* Some 5 GHz radios are actually tunable on XXXX-YYYY
73 * on 5 MHz steps, we support the channels which we know
74 * we have calibration data for all cards though to make
75 * this static */
76static struct ieee80211_channel ath9k_5ghz_chantable[] = {
77 /* _We_ call this UNII 1 */
78 CHAN5G(5180, 14), /* Channel 36 */
79 CHAN5G(5200, 15), /* Channel 40 */
80 CHAN5G(5220, 16), /* Channel 44 */
81 CHAN5G(5240, 17), /* Channel 48 */
82 /* _We_ call this UNII 2 */
83 CHAN5G(5260, 18), /* Channel 52 */
84 CHAN5G(5280, 19), /* Channel 56 */
85 CHAN5G(5300, 20), /* Channel 60 */
86 CHAN5G(5320, 21), /* Channel 64 */
87 /* _We_ call this "Middle band" */
88 CHAN5G(5500, 22), /* Channel 100 */
89 CHAN5G(5520, 23), /* Channel 104 */
90 CHAN5G(5540, 24), /* Channel 108 */
91 CHAN5G(5560, 25), /* Channel 112 */
92 CHAN5G(5580, 26), /* Channel 116 */
93 CHAN5G(5600, 27), /* Channel 120 */
94 CHAN5G(5620, 28), /* Channel 124 */
95 CHAN5G(5640, 29), /* Channel 128 */
96 CHAN5G(5660, 30), /* Channel 132 */
97 CHAN5G(5680, 31), /* Channel 136 */
98 CHAN5G(5700, 32), /* Channel 140 */
99 /* _We_ call this UNII 3 */
100 CHAN5G(5745, 33), /* Channel 149 */
101 CHAN5G(5765, 34), /* Channel 153 */
102 CHAN5G(5785, 35), /* Channel 157 */
103 CHAN5G(5805, 36), /* Channel 161 */
104 CHAN5G(5825, 37), /* Channel 165 */
105};
106
107/* Atheros hardware rate code addition for short premble */
108#define SHPCHECK(__hw_rate, __flags) \
109 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
110
111#define RATE(_bitrate, _hw_rate, _flags) { \
112 .bitrate = (_bitrate), \
113 .flags = (_flags), \
114 .hw_value = (_hw_rate), \
115 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
116}
117
118static struct ieee80211_rate ath9k_legacy_rates[] = {
119 RATE(10, 0x1b, 0),
120 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
121 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
122 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
123 RATE(60, 0x0b, 0),
124 RATE(90, 0x0f, 0),
125 RATE(120, 0x0a, 0),
126 RATE(180, 0x0e, 0),
127 RATE(240, 0x09, 0),
128 RATE(360, 0x0d, 0),
129 RATE(480, 0x08, 0),
130 RATE(540, 0x0c, 0),
131};
132
133static void ath_cache_conf_rate(struct ath_softc *sc, 21static void ath_cache_conf_rate(struct ath_softc *sc,
134 struct ieee80211_conf *conf) 22 struct ieee80211_conf *conf)
135{ 23{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
221 return channel; 109 return channel;
222} 110}
223 111
224static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) 112bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
225{ 113{
226 unsigned long flags; 114 unsigned long flags;
227 bool ret; 115 bool ret;
@@ -256,10 +144,10 @@ void ath9k_ps_restore(struct ath_softc *sc)
256 goto unlock; 144 goto unlock;
257 145
258 if (sc->ps_enabled && 146 if (sc->ps_enabled &&
259 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 147 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
260 SC_OP_WAIT_FOR_CAB | 148 PS_WAIT_FOR_CAB |
261 SC_OP_WAIT_FOR_PSPOLL_DATA | 149 PS_WAIT_FOR_PSPOLL_DATA |
262 SC_OP_WAIT_FOR_TX_ACK))) 150 PS_WAIT_FOR_TX_ACK)))
263 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP); 151 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
264 152
265 unlock: 153 unlock:
@@ -349,7 +237,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
349 * When the task is complete, it reschedules itself depending on the 237 * When the task is complete, it reschedules itself depending on the
350 * appropriate interval that was calculated. 238 * appropriate interval that was calculated.
351 */ 239 */
352static void ath_ani_calibrate(unsigned long data) 240void ath_ani_calibrate(unsigned long data)
353{ 241{
354 struct ath_softc *sc = (struct ath_softc *)data; 242 struct ath_softc *sc = (struct ath_softc *)data;
355 struct ath_hw *ah = sc->sc_ah; 243 struct ath_hw *ah = sc->sc_ah;
@@ -363,14 +251,6 @@ static void ath_ani_calibrate(unsigned long data)
363 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? 251 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
364 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; 252 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
365 253
366 /*
367 * don't calibrate when we're scanning.
368 * we are most likely not on our home channel.
369 */
370 spin_lock(&sc->ani_lock);
371 if (sc->sc_flags & SC_OP_SCANNING)
372 goto set_timer;
373
374 /* Only calibrate if awake */ 254 /* Only calibrate if awake */
375 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) 255 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
376 goto set_timer; 256 goto set_timer;
@@ -437,7 +317,6 @@ static void ath_ani_calibrate(unsigned long data)
437 ath9k_ps_restore(sc); 317 ath9k_ps_restore(sc);
438 318
439set_timer: 319set_timer:
440 spin_unlock(&sc->ani_lock);
441 /* 320 /*
442 * Set timer interval based on previous results. 321 * Set timer interval based on previous results.
443 * The interval must be the shortest necessary to satisfy ANI, 322 * The interval must be the shortest necessary to satisfy ANI,
@@ -513,7 +392,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
513 ath_tx_node_cleanup(sc, an); 392 ath_tx_node_cleanup(sc, an);
514} 393}
515 394
516static void ath9k_tasklet(unsigned long data) 395void ath9k_tasklet(unsigned long data)
517{ 396{
518 struct ath_softc *sc = (struct ath_softc *)data; 397 struct ath_softc *sc = (struct ath_softc *)data;
519 struct ath_hw *ah = sc->sc_ah; 398 struct ath_hw *ah = sc->sc_ah;
@@ -545,7 +424,7 @@ static void ath9k_tasklet(unsigned long data)
545 */ 424 */
546 ath_print(common, ATH_DBG_PS, 425 ath_print(common, ATH_DBG_PS,
547 "TSFOOR - Sync with next Beacon\n"); 426 "TSFOOR - Sync with next Beacon\n");
548 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC; 427 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
549 } 428 }
550 429
551 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 430 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -646,7 +525,7 @@ irqreturn_t ath_isr(int irq, void *dev)
646 * receive frames */ 525 * receive frames */
647 ath9k_setpower(sc, ATH9K_PM_AWAKE); 526 ath9k_setpower(sc, ATH9K_PM_AWAKE);
648 ath9k_hw_setrxabort(sc->sc_ah, 0); 527 ath9k_hw_setrxabort(sc->sc_ah, 0);
649 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON; 528 sc->ps_flags |= PS_WAIT_FOR_BEACON;
650 } 529 }
651 530
652chip_reset: 531chip_reset:
@@ -933,44 +812,6 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
933 } 812 }
934} 813}
935 814
936static void setup_ht_cap(struct ath_softc *sc,
937 struct ieee80211_sta_ht_cap *ht_info)
938{
939 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
940 u8 tx_streams, rx_streams;
941
942 ht_info->ht_supported = true;
943 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
944 IEEE80211_HT_CAP_SM_PS |
945 IEEE80211_HT_CAP_SGI_40 |
946 IEEE80211_HT_CAP_DSSSCCK40;
947
948 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
949 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
950
951 /* set up supported mcs set */
952 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
953 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
954 1 : 2;
955 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
956 1 : 2;
957
958 if (tx_streams != rx_streams) {
959 ath_print(common, ATH_DBG_CONFIG,
960 "TX streams %d, RX streams: %d\n",
961 tx_streams, rx_streams);
962 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
963 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
964 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
965 }
966
967 ht_info->mcs.rx_mask[0] = 0xff;
968 if (rx_streams >= 2)
969 ht_info->mcs.rx_mask[1] = 0xff;
970
971 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
972}
973
974static void ath9k_bss_assoc_info(struct ath_softc *sc, 815static void ath9k_bss_assoc_info(struct ath_softc *sc,
975 struct ieee80211_vif *vif, 816 struct ieee80211_vif *vif,
976 struct ieee80211_bss_conf *bss_conf) 817 struct ieee80211_bss_conf *bss_conf)
@@ -992,7 +833,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
992 * on the receipt of the first Beacon frame (i.e., 833 * on the receipt of the first Beacon frame (i.e.,
993 * after time sync with the AP). 834 * after time sync with the AP).
994 */ 835 */
995 sc->sc_flags |= SC_OP_BEACON_SYNC; 836 sc->ps_flags |= PS_BEACON_SYNC;
996 837
997 /* Configure the beacon */ 838 /* Configure the beacon */
998 ath_beacon_config(sc, vif); 839 ath_beacon_config(sc, vif);
@@ -1009,174 +850,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
1009 } 850 }
1010} 851}
1011 852
1012/********************************/
1013/* LED functions */
1014/********************************/
1015
1016static void ath_led_blink_work(struct work_struct *work)
1017{
1018 struct ath_softc *sc = container_of(work, struct ath_softc,
1019 ath_led_blink_work.work);
1020
1021 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
1022 return;
1023
1024 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
1025 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
1026 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1027 else
1028 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1029 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
1030
1031 ieee80211_queue_delayed_work(sc->hw,
1032 &sc->ath_led_blink_work,
1033 (sc->sc_flags & SC_OP_LED_ON) ?
1034 msecs_to_jiffies(sc->led_off_duration) :
1035 msecs_to_jiffies(sc->led_on_duration));
1036
1037 sc->led_on_duration = sc->led_on_cnt ?
1038 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
1039 ATH_LED_ON_DURATION_IDLE;
1040 sc->led_off_duration = sc->led_off_cnt ?
1041 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
1042 ATH_LED_OFF_DURATION_IDLE;
1043 sc->led_on_cnt = sc->led_off_cnt = 0;
1044 if (sc->sc_flags & SC_OP_LED_ON)
1045 sc->sc_flags &= ~SC_OP_LED_ON;
1046 else
1047 sc->sc_flags |= SC_OP_LED_ON;
1048}
1049
1050static void ath_led_brightness(struct led_classdev *led_cdev,
1051 enum led_brightness brightness)
1052{
1053 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
1054 struct ath_softc *sc = led->sc;
1055
1056 switch (brightness) {
1057 case LED_OFF:
1058 if (led->led_type == ATH_LED_ASSOC ||
1059 led->led_type == ATH_LED_RADIO) {
1060 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1061 (led->led_type == ATH_LED_RADIO));
1062 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1063 if (led->led_type == ATH_LED_RADIO)
1064 sc->sc_flags &= ~SC_OP_LED_ON;
1065 } else {
1066 sc->led_off_cnt++;
1067 }
1068 break;
1069 case LED_FULL:
1070 if (led->led_type == ATH_LED_ASSOC) {
1071 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
1072 ieee80211_queue_delayed_work(sc->hw,
1073 &sc->ath_led_blink_work, 0);
1074 } else if (led->led_type == ATH_LED_RADIO) {
1075 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1076 sc->sc_flags |= SC_OP_LED_ON;
1077 } else {
1078 sc->led_on_cnt++;
1079 }
1080 break;
1081 default:
1082 break;
1083 }
1084}
1085
1086static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1087 char *trigger)
1088{
1089 int ret;
1090
1091 led->sc = sc;
1092 led->led_cdev.name = led->name;
1093 led->led_cdev.default_trigger = trigger;
1094 led->led_cdev.brightness_set = ath_led_brightness;
1095
1096 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1097 if (ret)
1098 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1099 "Failed to register led:%s", led->name);
1100 else
1101 led->registered = 1;
1102 return ret;
1103}
1104
1105static void ath_unregister_led(struct ath_led *led)
1106{
1107 if (led->registered) {
1108 led_classdev_unregister(&led->led_cdev);
1109 led->registered = 0;
1110 }
1111}
1112
1113static void ath_deinit_leds(struct ath_softc *sc)
1114{
1115 ath_unregister_led(&sc->assoc_led);
1116 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1117 ath_unregister_led(&sc->tx_led);
1118 ath_unregister_led(&sc->rx_led);
1119 ath_unregister_led(&sc->radio_led);
1120 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1121}
1122
1123static void ath_init_leds(struct ath_softc *sc)
1124{
1125 char *trigger;
1126 int ret;
1127
1128 if (AR_SREV_9287(sc->sc_ah))
1129 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
1130 else
1131 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
1132
1133 /* Configure gpio 1 for output */
1134 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
1135 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1136 /* LED off, active low */
1137 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1138
1139 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1140
1141 trigger = ieee80211_get_radio_led_name(sc->hw);
1142 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1143 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1144 ret = ath_register_led(sc, &sc->radio_led, trigger);
1145 sc->radio_led.led_type = ATH_LED_RADIO;
1146 if (ret)
1147 goto fail;
1148
1149 trigger = ieee80211_get_assoc_led_name(sc->hw);
1150 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1151 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1152 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1153 sc->assoc_led.led_type = ATH_LED_ASSOC;
1154 if (ret)
1155 goto fail;
1156
1157 trigger = ieee80211_get_tx_led_name(sc->hw);
1158 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1159 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1160 ret = ath_register_led(sc, &sc->tx_led, trigger);
1161 sc->tx_led.led_type = ATH_LED_TX;
1162 if (ret)
1163 goto fail;
1164
1165 trigger = ieee80211_get_rx_led_name(sc->hw);
1166 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1167 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1168 ret = ath_register_led(sc, &sc->rx_led, trigger);
1169 sc->rx_led.led_type = ATH_LED_RX;
1170 if (ret)
1171 goto fail;
1172
1173 return;
1174
1175fail:
1176 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1177 ath_deinit_leds(sc);
1178}
1179
1180void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) 853void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
1181{ 854{
1182 struct ath_hw *ah = sc->sc_ah; 855 struct ath_hw *ah = sc->sc_ah;
@@ -1261,711 +934,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
1261 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 934 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
1262} 935}
1263 936
1264/*******************/
1265/* Rfkill */
1266/*******************/
1267
1268static bool ath_is_rfkill_set(struct ath_softc *sc)
1269{
1270 struct ath_hw *ah = sc->sc_ah;
1271
1272 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1273 ah->rfkill_polarity;
1274}
1275
1276static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
1277{
1278 struct ath_wiphy *aphy = hw->priv;
1279 struct ath_softc *sc = aphy->sc;
1280 bool blocked = !!ath_is_rfkill_set(sc);
1281
1282 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1283}
1284
1285static void ath_start_rfkill_poll(struct ath_softc *sc)
1286{
1287 struct ath_hw *ah = sc->sc_ah;
1288
1289 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1290 wiphy_rfkill_start_polling(sc->hw->wiphy);
1291}
1292
1293static void ath9k_uninit_hw(struct ath_softc *sc)
1294{
1295 struct ath_hw *ah = sc->sc_ah;
1296
1297 BUG_ON(!ah);
1298
1299 ath9k_exit_debug(ah);
1300 ath9k_hw_detach(ah);
1301 sc->sc_ah = NULL;
1302}
1303
1304static void ath_clean_core(struct ath_softc *sc)
1305{
1306 struct ieee80211_hw *hw = sc->hw;
1307 struct ath_hw *ah = sc->sc_ah;
1308 int i = 0;
1309
1310 ath9k_ps_wakeup(sc);
1311
1312 dev_dbg(sc->dev, "Detach ATH hw\n");
1313
1314 ath_deinit_leds(sc);
1315 wiphy_rfkill_stop_polling(sc->hw->wiphy);
1316
1317 for (i = 0; i < sc->num_sec_wiphy; i++) {
1318 struct ath_wiphy *aphy = sc->sec_wiphy[i];
1319 if (aphy == NULL)
1320 continue;
1321 sc->sec_wiphy[i] = NULL;
1322 ieee80211_unregister_hw(aphy->hw);
1323 ieee80211_free_hw(aphy->hw);
1324 }
1325 ieee80211_unregister_hw(hw);
1326 ath_rx_cleanup(sc);
1327 ath_tx_cleanup(sc);
1328
1329 tasklet_kill(&sc->intr_tq);
1330 tasklet_kill(&sc->bcon_tasklet);
1331
1332 if (!(sc->sc_flags & SC_OP_INVALID))
1333 ath9k_setpower(sc, ATH9K_PM_AWAKE);
1334
1335 /* cleanup tx queues */
1336 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1337 if (ATH_TXQ_SETUP(sc, i))
1338 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1339
1340 if ((sc->btcoex.no_stomp_timer) &&
1341 ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1342 ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
1343}
1344
1345void ath_detach(struct ath_softc *sc)
1346{
1347 ath_clean_core(sc);
1348 ath9k_uninit_hw(sc);
1349}
1350
1351void ath_cleanup(struct ath_softc *sc)
1352{
1353 struct ath_hw *ah = sc->sc_ah;
1354 struct ath_common *common = ath9k_hw_common(ah);
1355
1356 ath_clean_core(sc);
1357 free_irq(sc->irq, sc);
1358 ath_bus_cleanup(common);
1359 kfree(sc->sec_wiphy);
1360 ieee80211_free_hw(sc->hw);
1361
1362 ath9k_uninit_hw(sc);
1363}
1364
1365static int ath9k_reg_notifier(struct wiphy *wiphy,
1366 struct regulatory_request *request)
1367{
1368 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1369 struct ath_wiphy *aphy = hw->priv;
1370 struct ath_softc *sc = aphy->sc;
1371 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
1372
1373 return ath_reg_notifier_apply(wiphy, request, reg);
1374}
1375
1376/*
1377 * Detects if there is any priority bt traffic
1378 */
1379static void ath_detect_bt_priority(struct ath_softc *sc)
1380{
1381 struct ath_btcoex *btcoex = &sc->btcoex;
1382 struct ath_hw *ah = sc->sc_ah;
1383
1384 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
1385 btcoex->bt_priority_cnt++;
1386
1387 if (time_after(jiffies, btcoex->bt_priority_time +
1388 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
1389 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
1390 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
1391 "BT priority traffic detected");
1392 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
1393 } else {
1394 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
1395 }
1396
1397 btcoex->bt_priority_cnt = 0;
1398 btcoex->bt_priority_time = jiffies;
1399 }
1400}
1401
1402/*
1403 * Configures appropriate weight based on stomp type.
1404 */
1405static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
1406 enum ath_stomp_type stomp_type)
1407{
1408 struct ath_hw *ah = sc->sc_ah;
1409
1410 switch (stomp_type) {
1411 case ATH_BTCOEX_STOMP_ALL:
1412 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1413 AR_STOMP_ALL_WLAN_WGHT);
1414 break;
1415 case ATH_BTCOEX_STOMP_LOW:
1416 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1417 AR_STOMP_LOW_WLAN_WGHT);
1418 break;
1419 case ATH_BTCOEX_STOMP_NONE:
1420 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1421 AR_STOMP_NONE_WLAN_WGHT);
1422 break;
1423 default:
1424 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1425 "Invalid Stomptype\n");
1426 break;
1427 }
1428
1429 ath9k_hw_btcoex_enable(ah);
1430}
1431
1432static void ath9k_gen_timer_start(struct ath_hw *ah,
1433 struct ath_gen_timer *timer,
1434 u32 timer_next,
1435 u32 timer_period)
1436{
1437 struct ath_common *common = ath9k_hw_common(ah);
1438 struct ath_softc *sc = (struct ath_softc *) common->priv;
1439
1440 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
1441
1442 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
1443 ath9k_hw_set_interrupts(ah, 0);
1444 sc->imask |= ATH9K_INT_GENTIMER;
1445 ath9k_hw_set_interrupts(ah, sc->imask);
1446 }
1447}
1448
1449static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
1450{
1451 struct ath_common *common = ath9k_hw_common(ah);
1452 struct ath_softc *sc = (struct ath_softc *) common->priv;
1453 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
1454
1455 ath9k_hw_gen_timer_stop(ah, timer);
1456
1457 /* if no timer is enabled, turn off interrupt mask */
1458 if (timer_table->timer_mask.val == 0) {
1459 ath9k_hw_set_interrupts(ah, 0);
1460 sc->imask &= ~ATH9K_INT_GENTIMER;
1461 ath9k_hw_set_interrupts(ah, sc->imask);
1462 }
1463}
1464
1465/*
1466 * This is the master bt coex timer which runs for every
1467 * 45ms, bt traffic will be given priority during 55% of this
1468 * period while wlan gets remaining 45%
1469 */
1470static void ath_btcoex_period_timer(unsigned long data)
1471{
1472 struct ath_softc *sc = (struct ath_softc *) data;
1473 struct ath_hw *ah = sc->sc_ah;
1474 struct ath_btcoex *btcoex = &sc->btcoex;
1475
1476 ath_detect_bt_priority(sc);
1477
1478 spin_lock_bh(&btcoex->btcoex_lock);
1479
1480 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
1481
1482 spin_unlock_bh(&btcoex->btcoex_lock);
1483
1484 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
1485 if (btcoex->hw_timer_enabled)
1486 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
1487
1488 ath9k_gen_timer_start(ah,
1489 btcoex->no_stomp_timer,
1490 (ath9k_hw_gettsf32(ah) +
1491 btcoex->btcoex_no_stomp),
1492 btcoex->btcoex_no_stomp * 10);
1493 btcoex->hw_timer_enabled = true;
1494 }
1495
1496 mod_timer(&btcoex->period_timer, jiffies +
1497 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
1498}
1499
1500/*
1501 * Generic tsf based hw timer which configures weight
1502 * registers to time slice between wlan and bt traffic
1503 */
1504static void ath_btcoex_no_stomp_timer(void *arg)
1505{
1506 struct ath_softc *sc = (struct ath_softc *)arg;
1507 struct ath_hw *ah = sc->sc_ah;
1508 struct ath_btcoex *btcoex = &sc->btcoex;
1509
1510 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1511 "no stomp timer running \n");
1512
1513 spin_lock_bh(&btcoex->btcoex_lock);
1514
1515 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
1516 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
1517 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
1518 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
1519
1520 spin_unlock_bh(&btcoex->btcoex_lock);
1521}
1522
1523static int ath_init_btcoex_timer(struct ath_softc *sc)
1524{
1525 struct ath_btcoex *btcoex = &sc->btcoex;
1526
1527 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
1528 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
1529 btcoex->btcoex_period / 100;
1530
1531 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
1532 (unsigned long) sc);
1533
1534 spin_lock_init(&btcoex->btcoex_lock);
1535
1536 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
1537 ath_btcoex_no_stomp_timer,
1538 ath_btcoex_no_stomp_timer,
1539 (void *) sc, AR_FIRST_NDP_TIMER);
1540
1541 if (!btcoex->no_stomp_timer)
1542 return -ENOMEM;
1543
1544 return 0;
1545}
1546
1547/*
1548 * Read and write, they both share the same lock. We do this to serialize
1549 * reads and writes on Atheros 802.11n PCI devices only. This is required
1550 * as the FIFO on these devices can only accept sanely 2 requests. After
1551 * that the device goes bananas. Serializing the reads/writes prevents this
1552 * from happening.
1553 */
1554
1555static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
1556{
1557 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1558 struct ath_common *common = ath9k_hw_common(ah);
1559 struct ath_softc *sc = (struct ath_softc *) common->priv;
1560
1561 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1562 unsigned long flags;
1563 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1564 iowrite32(val, sc->mem + reg_offset);
1565 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1566 } else
1567 iowrite32(val, sc->mem + reg_offset);
1568}
1569
1570static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
1571{
1572 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1573 struct ath_common *common = ath9k_hw_common(ah);
1574 struct ath_softc *sc = (struct ath_softc *) common->priv;
1575 u32 val;
1576
1577 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1578 unsigned long flags;
1579 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1580 val = ioread32(sc->mem + reg_offset);
1581 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1582 } else
1583 val = ioread32(sc->mem + reg_offset);
1584 return val;
1585}
1586
1587static const struct ath_ops ath9k_common_ops = {
1588 .read = ath9k_ioread32,
1589 .write = ath9k_iowrite32,
1590};
1591
1592/*
1593 * Initialize and fill ath_softc, ath_sofct is the
1594 * "Software Carrier" struct. Historically it has existed
1595 * to allow the separation between hardware specific
1596 * variables (now in ath_hw) and driver specific variables.
1597 */
1598static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
1599 const struct ath_bus_ops *bus_ops)
1600{
1601 struct ath_hw *ah = NULL;
1602 struct ath_common *common;
1603 int r = 0, i;
1604 int csz = 0;
1605 int qnum;
1606
1607 /* XXX: hardware will not be ready until ath_open() being called */
1608 sc->sc_flags |= SC_OP_INVALID;
1609
1610 spin_lock_init(&sc->wiphy_lock);
1611 spin_lock_init(&sc->sc_resetlock);
1612 spin_lock_init(&sc->sc_serial_rw);
1613 spin_lock_init(&sc->ani_lock);
1614 spin_lock_init(&sc->sc_pm_lock);
1615 mutex_init(&sc->mutex);
1616 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1617 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1618 (unsigned long)sc);
1619
1620 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
1621 if (!ah)
1622 return -ENOMEM;
1623
1624 ah->hw_version.devid = devid;
1625 ah->hw_version.subsysid = subsysid;
1626 sc->sc_ah = ah;
1627
1628 common = ath9k_hw_common(ah);
1629 common->ops = &ath9k_common_ops;
1630 common->bus_ops = bus_ops;
1631 common->ah = ah;
1632 common->hw = sc->hw;
1633 common->priv = sc;
1634 common->debug_mask = ath9k_debug;
1635
1636 /*
1637 * Cache line size is used to size and align various
1638 * structures used to communicate with the hardware.
1639 */
1640 ath_read_cachesize(common, &csz);
1641 /* XXX assert csz is non-zero */
1642 common->cachelsz = csz << 2; /* convert to bytes */
1643
1644 r = ath9k_hw_init(ah);
1645 if (r) {
1646 ath_print(common, ATH_DBG_FATAL,
1647 "Unable to initialize hardware; "
1648 "initialization status: %d\n", r);
1649 goto bad_free_hw;
1650 }
1651
1652 if (ath9k_init_debug(ah) < 0) {
1653 ath_print(common, ATH_DBG_FATAL,
1654 "Unable to create debugfs files\n");
1655 goto bad_free_hw;
1656 }
1657
1658 /* Get the hardware key cache size. */
1659 common->keymax = ah->caps.keycache_size;
1660 if (common->keymax > ATH_KEYMAX) {
1661 ath_print(common, ATH_DBG_ANY,
1662 "Warning, using only %u entries in %u key cache\n",
1663 ATH_KEYMAX, common->keymax);
1664 common->keymax = ATH_KEYMAX;
1665 }
1666
1667 /*
1668 * Reset the key cache since some parts do not
1669 * reset the contents on initial power up.
1670 */
1671 for (i = 0; i < common->keymax; i++)
1672 ath9k_hw_keyreset(ah, (u16) i);
1673
1674 /* default to MONITOR mode */
1675 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1676
1677 /*
1678 * Allocate hardware transmit queues: one queue for
1679 * beacon frames and one data queue for each QoS
1680 * priority. Note that the hal handles reseting
1681 * these queues at the needed time.
1682 */
1683 sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
1684 if (sc->beacon.beaconq == -1) {
1685 ath_print(common, ATH_DBG_FATAL,
1686 "Unable to setup a beacon xmit queue\n");
1687 r = -EIO;
1688 goto bad2;
1689 }
1690 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1691 if (sc->beacon.cabq == NULL) {
1692 ath_print(common, ATH_DBG_FATAL,
1693 "Unable to setup CAB xmit queue\n");
1694 r = -EIO;
1695 goto bad2;
1696 }
1697
1698 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1699 ath_cabq_update(sc);
1700
1701 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1702 sc->tx.hwq_map[i] = -1;
1703
1704 /* Setup data queues */
1705 /* NB: ensure BK queue is the lowest priority h/w queue */
1706 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1707 ath_print(common, ATH_DBG_FATAL,
1708 "Unable to setup xmit queue for BK traffic\n");
1709 r = -EIO;
1710 goto bad2;
1711 }
1712
1713 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1714 ath_print(common, ATH_DBG_FATAL,
1715 "Unable to setup xmit queue for BE traffic\n");
1716 r = -EIO;
1717 goto bad2;
1718 }
1719 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1720 ath_print(common, ATH_DBG_FATAL,
1721 "Unable to setup xmit queue for VI traffic\n");
1722 r = -EIO;
1723 goto bad2;
1724 }
1725 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1726 ath_print(common, ATH_DBG_FATAL,
1727 "Unable to setup xmit queue for VO traffic\n");
1728 r = -EIO;
1729 goto bad2;
1730 }
1731
1732 /* Initializes the noise floor to a reasonable default value.
1733 * Later on this will be updated during ANI processing. */
1734
1735 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1736 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1737
1738 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1739 ATH9K_CIPHER_TKIP, NULL)) {
1740 /*
1741 * Whether we should enable h/w TKIP MIC.
1742 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1743 * report WMM capable, so it's always safe to turn on
1744 * TKIP MIC in this case.
1745 */
1746 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1747 0, 1, NULL);
1748 }
1749
1750 /*
1751 * Check whether the separate key cache entries
1752 * are required to handle both tx+rx MIC keys.
1753 * With split mic keys the number of stations is limited
1754 * to 27 otherwise 59.
1755 */
1756 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1757 ATH9K_CIPHER_TKIP, NULL)
1758 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1759 ATH9K_CIPHER_MIC, NULL)
1760 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1761 0, NULL))
1762 common->splitmic = 1;
1763
1764 /* turn on mcast key search if possible */
1765 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1766 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1767 1, NULL);
1768
1769 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1770
1771 /* 11n Capabilities */
1772 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1773 sc->sc_flags |= SC_OP_TXAGGR;
1774 sc->sc_flags |= SC_OP_RXAGGR;
1775 }
1776
1777 common->tx_chainmask = ah->caps.tx_chainmask;
1778 common->rx_chainmask = ah->caps.rx_chainmask;
1779
1780 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1781 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1782
1783 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1784 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
1785
1786 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1787
1788 /* initialize beacon slots */
1789 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
1790 sc->beacon.bslot[i] = NULL;
1791 sc->beacon.bslot_aphy[i] = NULL;
1792 }
1793
1794 /* setup channels and rates */
1795
1796 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
1797 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1798 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1799 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1800 ARRAY_SIZE(ath9k_2ghz_chantable);
1801 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
1802 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
1803 ARRAY_SIZE(ath9k_legacy_rates);
1804 }
1805
1806 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1807 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1808 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1809 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1810 ARRAY_SIZE(ath9k_5ghz_chantable);
1811 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1812 ath9k_legacy_rates + 4;
1813 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
1814 ARRAY_SIZE(ath9k_legacy_rates) - 4;
1815 }
1816
1817 switch (ah->btcoex_hw.scheme) {
1818 case ATH_BTCOEX_CFG_NONE:
1819 break;
1820 case ATH_BTCOEX_CFG_2WIRE:
1821 ath9k_hw_btcoex_init_2wire(ah);
1822 break;
1823 case ATH_BTCOEX_CFG_3WIRE:
1824 ath9k_hw_btcoex_init_3wire(ah);
1825 r = ath_init_btcoex_timer(sc);
1826 if (r)
1827 goto bad2;
1828 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
1829 ath9k_hw_init_btcoex_hw(ah, qnum);
1830 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
1831 break;
1832 default:
1833 WARN_ON(1);
1834 break;
1835 }
1836
1837 return 0;
1838bad2:
1839 /* cleanup tx queues */
1840 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1841 if (ATH_TXQ_SETUP(sc, i))
1842 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1843
1844bad_free_hw:
1845 ath9k_uninit_hw(sc);
1846 return r;
1847}
1848
1849void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1850{
1851 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1852 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1853 IEEE80211_HW_SIGNAL_DBM |
1854 IEEE80211_HW_AMPDU_AGGREGATION |
1855 IEEE80211_HW_SUPPORTS_PS |
1856 IEEE80211_HW_PS_NULLFUNC_STACK |
1857 IEEE80211_HW_SPECTRUM_MGMT;
1858
1859 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1860 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1861
1862 hw->wiphy->interface_modes =
1863 BIT(NL80211_IFTYPE_AP) |
1864 BIT(NL80211_IFTYPE_STATION) |
1865 BIT(NL80211_IFTYPE_ADHOC) |
1866 BIT(NL80211_IFTYPE_MESH_POINT);
1867
1868 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1869
1870 hw->queues = 4;
1871 hw->max_rates = 4;
1872 hw->channel_change_time = 5000;
1873 hw->max_listen_interval = 10;
1874 /* Hardware supports 10 but we use 4 */
1875 hw->max_rate_tries = 4;
1876 hw->sta_data_size = sizeof(struct ath_node);
1877 hw->vif_data_size = sizeof(struct ath_vif);
1878
1879 hw->rate_control_algorithm = "ath9k_rate_control";
1880
1881 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
1882 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1883 &sc->sbands[IEEE80211_BAND_2GHZ];
1884 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1885 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1886 &sc->sbands[IEEE80211_BAND_5GHZ];
1887}
1888
1889/* Device driver core initialization */
1890int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
1891 const struct ath_bus_ops *bus_ops)
1892{
1893 struct ieee80211_hw *hw = sc->hw;
1894 struct ath_common *common;
1895 struct ath_hw *ah;
1896 int error = 0, i;
1897 struct ath_regulatory *reg;
1898
1899 dev_dbg(sc->dev, "Attach ATH hw\n");
1900
1901 error = ath_init_softc(devid, sc, subsysid, bus_ops);
1902 if (error != 0)
1903 return error;
1904
1905 ah = sc->sc_ah;
1906 common = ath9k_hw_common(ah);
1907
1908 /* get mac address from hardware and set in mac80211 */
1909
1910 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
1911
1912 ath_set_hw_capab(sc, hw);
1913
1914 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
1915 ath9k_reg_notifier);
1916 if (error)
1917 return error;
1918
1919 reg = &common->regulatory;
1920
1921 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1922 if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
1923 setup_ht_cap(sc,
1924 &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1925 if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
1926 setup_ht_cap(sc,
1927 &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1928 }
1929
1930 /* initialize tx/rx engine */
1931 error = ath_tx_init(sc, ATH_TXBUF);
1932 if (error != 0)
1933 goto error_attach;
1934
1935 error = ath_rx_init(sc, ATH_RXBUF);
1936 if (error != 0)
1937 goto error_attach;
1938
1939 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1940 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1941 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
1942
1943 error = ieee80211_register_hw(hw);
1944
1945 if (!ath_is_world_regd(reg)) {
1946 error = regulatory_hint(hw->wiphy, reg->alpha2);
1947 if (error)
1948 goto error_attach;
1949 }
1950
1951 /* Initialize LED control */
1952 ath_init_leds(sc);
1953
1954 ath_start_rfkill_poll(sc);
1955
1956 return 0;
1957
1958error_attach:
1959 /* cleanup tx queues */
1960 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1961 if (ATH_TXQ_SETUP(sc, i))
1962 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1963
1964 ath9k_uninit_hw(sc);
1965
1966 return error;
1967}
1968
1969int ath_reset(struct ath_softc *sc, bool retry_tx) 937int ath_reset(struct ath_softc *sc, bool retry_tx)
1970{ 938{
1971 struct ath_hw *ah = sc->sc_ah; 939 struct ath_hw *ah = sc->sc_ah;
@@ -1976,6 +944,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1976 /* Stop ANI */ 944 /* Stop ANI */
1977 del_timer_sync(&common->ani.timer); 945 del_timer_sync(&common->ani.timer);
1978 946
947 ieee80211_stop_queues(hw);
948
1979 ath9k_hw_set_interrupts(ah, 0); 949 ath9k_hw_set_interrupts(ah, 0);
1980 ath_drain_all_txq(sc, retry_tx); 950 ath_drain_all_txq(sc, retry_tx);
1981 ath_stoprecv(sc); 951 ath_stoprecv(sc);
@@ -2017,131 +987,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
2017 } 987 }
2018 } 988 }
2019 989
990 ieee80211_wake_queues(hw);
991
2020 /* Start ANI */ 992 /* Start ANI */
2021 ath_start_ani(common); 993 ath_start_ani(common);
2022 994
2023 return r; 995 return r;
2024} 996}
2025 997
/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 *
 * Allocates one coherent DMA region of nbuf * ndesc descriptors plus an
 * array of nbuf ath_buf entries, links each ath_buf to its descriptor
 * block, and appends every ath_buf to @head. Returns 0 on success or a
 * negative errno; on failure @dd is zeroed and nothing is left allocated.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc)
{
/* Translate a descriptor's CPU address into its bus (DMA) address. */
#define DS2PHYS(_dd, _ds) ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
/* Nonzero when a descriptor at _daddr lies too close to a 4KB boundary. */
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
/* How many descriptors worth of padding a region of _len bytes needs. */
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);
	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		BUG_ON((sizeof(struct ath_desc) % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		/* Each round of padding can itself spill onto further
		 * 4K pages, so iterate until no more skips are needed. */
		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		};
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	/* Link each ath_buf to its descriptor block and record the bus
	 * address, advancing past any block that would straddle a 4KB
	 * boundary on hardware that cannot split such fetches. */
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				/* the padding above guarantees we never
				 * skip past the end of the region */
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
2132
2133void ath_descdma_cleanup(struct ath_softc *sc,
2134 struct ath_descdma *dd,
2135 struct list_head *head)
2136{
2137 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2138 dd->dd_desc_paddr);
2139
2140 INIT_LIST_HEAD(head);
2141 kfree(dd->dd_bufptr);
2142 memset(dd, 0, sizeof(*dd));
2143}
2144
2145int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) 998int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
2146{ 999{
2147 int qnum; 1000 int qnum;
@@ -2220,28 +1073,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
2220/* mac80211 callbacks */ 1073/* mac80211 callbacks */
2221/**********************/ 1074/**********************/
2222 1075
2223/*
2224 * (Re)start btcoex timers
2225 */
2226static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
2227{
2228 struct ath_btcoex *btcoex = &sc->btcoex;
2229 struct ath_hw *ah = sc->sc_ah;
2230
2231 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
2232 "Starting btcoex timers");
2233
2234 /* make sure duty cycle timer is also stopped when resuming */
2235 if (btcoex->hw_timer_enabled)
2236 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
2237
2238 btcoex->bt_priority_cnt = 0;
2239 btcoex->bt_priority_time = jiffies;
2240 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
2241
2242 mod_timer(&btcoex->period_timer, jiffies);
2243}
2244
2245static int ath9k_start(struct ieee80211_hw *hw) 1076static int ath9k_start(struct ieee80211_hw *hw)
2246{ 1077{
2247 struct ath_wiphy *aphy = hw->priv; 1078 struct ath_wiphy *aphy = hw->priv;
@@ -2411,11 +1242,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2411 if (ieee80211_is_pspoll(hdr->frame_control)) { 1242 if (ieee80211_is_pspoll(hdr->frame_control)) {
2412 ath_print(common, ATH_DBG_PS, 1243 ath_print(common, ATH_DBG_PS,
2413 "Sending PS-Poll to pick a buffered frame\n"); 1244 "Sending PS-Poll to pick a buffered frame\n");
2414 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA; 1245 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
2415 } else { 1246 } else {
2416 ath_print(common, ATH_DBG_PS, 1247 ath_print(common, ATH_DBG_PS,
2417 "Wake up to complete TX\n"); 1248 "Wake up to complete TX\n");
2418 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK; 1249 sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
2419 } 1250 }
2420 /* 1251 /*
2421 * The actual restore operation will happen only after 1252 * The actual restore operation will happen only after
@@ -2468,22 +1299,6 @@ exit:
2468 return 0; 1299 return 0;
2469} 1300}
2470 1301
2471/*
2472 * Pause btcoex timer and bt duty cycle timer
2473 */
2474static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
2475{
2476 struct ath_btcoex *btcoex = &sc->btcoex;
2477 struct ath_hw *ah = sc->sc_ah;
2478
2479 del_timer_sync(&btcoex->period_timer);
2480
2481 if (btcoex->hw_timer_enabled)
2482 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
2483
2484 btcoex->hw_timer_enabled = false;
2485}
2486
2487static void ath9k_stop(struct ieee80211_hw *hw) 1302static void ath9k_stop(struct ieee80211_hw *hw)
2488{ 1303{
2489 struct ath_wiphy *aphy = hw->priv; 1304 struct ath_wiphy *aphy = hw->priv;
@@ -2550,12 +1365,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2550} 1365}
2551 1366
2552static int ath9k_add_interface(struct ieee80211_hw *hw, 1367static int ath9k_add_interface(struct ieee80211_hw *hw,
2553 struct ieee80211_if_init_conf *conf) 1368 struct ieee80211_vif *vif)
2554{ 1369{
2555 struct ath_wiphy *aphy = hw->priv; 1370 struct ath_wiphy *aphy = hw->priv;
2556 struct ath_softc *sc = aphy->sc; 1371 struct ath_softc *sc = aphy->sc;
2557 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1372 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2558 struct ath_vif *avp = (void *)conf->vif->drv_priv; 1373 struct ath_vif *avp = (void *)vif->drv_priv;
2559 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 1374 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2560 int ret = 0; 1375 int ret = 0;
2561 1376
@@ -2567,7 +1382,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2567 goto out; 1382 goto out;
2568 } 1383 }
2569 1384
2570 switch (conf->type) { 1385 switch (vif->type) {
2571 case NL80211_IFTYPE_STATION: 1386 case NL80211_IFTYPE_STATION:
2572 ic_opmode = NL80211_IFTYPE_STATION; 1387 ic_opmode = NL80211_IFTYPE_STATION;
2573 break; 1388 break;
@@ -2578,11 +1393,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2578 ret = -ENOBUFS; 1393 ret = -ENOBUFS;
2579 goto out; 1394 goto out;
2580 } 1395 }
2581 ic_opmode = conf->type; 1396 ic_opmode = vif->type;
2582 break; 1397 break;
2583 default: 1398 default:
2584 ath_print(common, ATH_DBG_FATAL, 1399 ath_print(common, ATH_DBG_FATAL,
2585 "Interface type %d not yet supported\n", conf->type); 1400 "Interface type %d not yet supported\n", vif->type);
2586 ret = -EOPNOTSUPP; 1401 ret = -EOPNOTSUPP;
2587 goto out; 1402 goto out;
2588 } 1403 }
@@ -2614,18 +1429,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2614 * Enable MIB interrupts when there are hardware phy counters. 1429 * Enable MIB interrupts when there are hardware phy counters.
2615 * Note we only do this (at the moment) for station mode. 1430 * Note we only do this (at the moment) for station mode.
2616 */ 1431 */
2617 if ((conf->type == NL80211_IFTYPE_STATION) || 1432 if ((vif->type == NL80211_IFTYPE_STATION) ||
2618 (conf->type == NL80211_IFTYPE_ADHOC) || 1433 (vif->type == NL80211_IFTYPE_ADHOC) ||
2619 (conf->type == NL80211_IFTYPE_MESH_POINT)) { 1434 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2620 sc->imask |= ATH9K_INT_MIB; 1435 sc->imask |= ATH9K_INT_MIB;
2621 sc->imask |= ATH9K_INT_TSFOOR; 1436 sc->imask |= ATH9K_INT_TSFOOR;
2622 } 1437 }
2623 1438
2624 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 1439 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2625 1440
2626 if (conf->type == NL80211_IFTYPE_AP || 1441 if (vif->type == NL80211_IFTYPE_AP ||
2627 conf->type == NL80211_IFTYPE_ADHOC || 1442 vif->type == NL80211_IFTYPE_ADHOC ||
2628 conf->type == NL80211_IFTYPE_MONITOR) 1443 vif->type == NL80211_IFTYPE_MONITOR)
2629 ath_start_ani(common); 1444 ath_start_ani(common);
2630 1445
2631out: 1446out:
@@ -2634,12 +1449,12 @@ out:
2634} 1449}
2635 1450
2636static void ath9k_remove_interface(struct ieee80211_hw *hw, 1451static void ath9k_remove_interface(struct ieee80211_hw *hw,
2637 struct ieee80211_if_init_conf *conf) 1452 struct ieee80211_vif *vif)
2638{ 1453{
2639 struct ath_wiphy *aphy = hw->priv; 1454 struct ath_wiphy *aphy = hw->priv;
2640 struct ath_softc *sc = aphy->sc; 1455 struct ath_softc *sc = aphy->sc;
2641 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1456 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2642 struct ath_vif *avp = (void *)conf->vif->drv_priv; 1457 struct ath_vif *avp = (void *)vif->drv_priv;
2643 int i; 1458 int i;
2644 1459
2645 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1460 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -2662,7 +1477,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2662 sc->sc_flags &= ~SC_OP_BEACONS; 1477 sc->sc_flags &= ~SC_OP_BEACONS;
2663 1478
2664 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 1479 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2665 if (sc->beacon.bslot[i] == conf->vif) { 1480 if (sc->beacon.bslot[i] == vif) {
2666 printk(KERN_DEBUG "%s: vif had allocated beacon " 1481 printk(KERN_DEBUG "%s: vif had allocated beacon "
2667 "slot\n", __func__); 1482 "slot\n", __func__);
2668 sc->beacon.bslot[i] = NULL; 1483 sc->beacon.bslot[i] = NULL;
@@ -2727,7 +1542,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2727 */ 1542 */
2728 if (changed & IEEE80211_CONF_CHANGE_PS) { 1543 if (changed & IEEE80211_CONF_CHANGE_PS) {
2729 if (conf->flags & IEEE80211_CONF_PS) { 1544 if (conf->flags & IEEE80211_CONF_PS) {
2730 sc->sc_flags |= SC_OP_PS_ENABLED; 1545 sc->ps_flags |= PS_ENABLED;
2731 if (!(ah->caps.hw_caps & 1546 if (!(ah->caps.hw_caps &
2732 ATH9K_HW_CAP_AUTOSLEEP)) { 1547 ATH9K_HW_CAP_AUTOSLEEP)) {
2733 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { 1548 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -2740,23 +1555,23 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2740 * At this point we know hardware has received an ACK 1555 * At this point we know hardware has received an ACK
2741 * of a previously sent null data frame. 1556 * of a previously sent null data frame.
2742 */ 1557 */
2743 if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) { 1558 if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
2744 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; 1559 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
2745 sc->ps_enabled = true; 1560 sc->ps_enabled = true;
2746 ath9k_hw_setrxabort(sc->sc_ah, 1); 1561 ath9k_hw_setrxabort(sc->sc_ah, 1);
2747 } 1562 }
2748 } else { 1563 } else {
2749 sc->ps_enabled = false; 1564 sc->ps_enabled = false;
2750 sc->sc_flags &= ~(SC_OP_PS_ENABLED | 1565 sc->ps_flags &= ~(PS_ENABLED |
2751 SC_OP_NULLFUNC_COMPLETED); 1566 PS_NULLFUNC_COMPLETED);
2752 ath9k_setpower(sc, ATH9K_PM_AWAKE); 1567 ath9k_setpower(sc, ATH9K_PM_AWAKE);
2753 if (!(ah->caps.hw_caps & 1568 if (!(ah->caps.hw_caps &
2754 ATH9K_HW_CAP_AUTOSLEEP)) { 1569 ATH9K_HW_CAP_AUTOSLEEP)) {
2755 ath9k_hw_setrxabort(sc->sc_ah, 0); 1570 ath9k_hw_setrxabort(sc->sc_ah, 0);
2756 sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | 1571 sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
2757 SC_OP_WAIT_FOR_CAB | 1572 PS_WAIT_FOR_CAB |
2758 SC_OP_WAIT_FOR_PSPOLL_DATA | 1573 PS_WAIT_FOR_PSPOLL_DATA |
2759 SC_OP_WAIT_FOR_TX_ACK); 1574 PS_WAIT_FOR_TX_ACK);
2760 if (sc->imask & ATH9K_INT_TIM_TIMER) { 1575 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2761 sc->imask &= ~ATH9K_INT_TIM_TIMER; 1576 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2762 ath9k_hw_set_interrupts(sc->sc_ah, 1577 ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2766,6 +1581,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2766 } 1581 }
2767 } 1582 }
2768 1583
1584 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1585 if (conf->flags & IEEE80211_CONF_MONITOR) {
1586 ath_print(common, ATH_DBG_CONFIG,
1587 "HW opmode set to Monitor mode\n");
1588 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1589 }
1590 }
1591
2769 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1592 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2770 struct ieee80211_channel *curchan = hw->conf.channel; 1593 struct ieee80211_channel *curchan = hw->conf.channel;
2771 int pos = curchan->hw_value; 1594 int pos = curchan->hw_value;
@@ -2966,6 +1789,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2966 struct ath_hw *ah = sc->sc_ah; 1789 struct ath_hw *ah = sc->sc_ah;
2967 struct ath_common *common = ath9k_hw_common(ah); 1790 struct ath_common *common = ath9k_hw_common(ah);
2968 struct ath_vif *avp = (void *)vif->drv_priv; 1791 struct ath_vif *avp = (void *)vif->drv_priv;
1792 int slottime;
2969 int error; 1793 int error;
2970 1794
2971 mutex_lock(&sc->mutex); 1795 mutex_lock(&sc->mutex);
@@ -3001,6 +1825,25 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
3001 ath_beacon_config(sc, vif); 1825 ath_beacon_config(sc, vif);
3002 } 1826 }
3003 1827
1828 if (changed & BSS_CHANGED_ERP_SLOT) {
1829 if (bss_conf->use_short_slot)
1830 slottime = 9;
1831 else
1832 slottime = 20;
1833 if (vif->type == NL80211_IFTYPE_AP) {
1834 /*
1835 * Defer update, so that connected stations can adjust
1836 * their settings at the same time.
1837 * See beacon.c for more details
1838 */
1839 sc->beacon.slottime = slottime;
1840 sc->beacon.updateslot = UPDATE;
1841 } else {
1842 ah->slottime = slottime;
1843 ath9k_hw_init_global_settings(ah);
1844 }
1845 }
1846
3004 /* Disable transmission of beacons */ 1847 /* Disable transmission of beacons */
3005 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) 1848 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
3006 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1849 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -3133,6 +1976,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
3133{ 1976{
3134 struct ath_wiphy *aphy = hw->priv; 1977 struct ath_wiphy *aphy = hw->priv;
3135 struct ath_softc *sc = aphy->sc; 1978 struct ath_softc *sc = aphy->sc;
1979 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
3136 1980
3137 mutex_lock(&sc->mutex); 1981 mutex_lock(&sc->mutex);
3138 if (ath9k_wiphy_scanning(sc)) { 1982 if (ath9k_wiphy_scanning(sc)) {
@@ -3148,10 +1992,9 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
3148 1992
3149 aphy->state = ATH_WIPHY_SCAN; 1993 aphy->state = ATH_WIPHY_SCAN;
3150 ath9k_wiphy_pause_all_forced(sc, aphy); 1994 ath9k_wiphy_pause_all_forced(sc, aphy);
3151
3152 spin_lock_bh(&sc->ani_lock);
3153 sc->sc_flags |= SC_OP_SCANNING; 1995 sc->sc_flags |= SC_OP_SCANNING;
3154 spin_unlock_bh(&sc->ani_lock); 1996 del_timer_sync(&common->ani.timer);
1997 cancel_delayed_work_sync(&sc->tx_complete_work);
3155 mutex_unlock(&sc->mutex); 1998 mutex_unlock(&sc->mutex);
3156} 1999}
3157 2000
@@ -3159,17 +2002,30 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
3159{ 2002{
3160 struct ath_wiphy *aphy = hw->priv; 2003 struct ath_wiphy *aphy = hw->priv;
3161 struct ath_softc *sc = aphy->sc; 2004 struct ath_softc *sc = aphy->sc;
2005 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
3162 2006
3163 mutex_lock(&sc->mutex); 2007 mutex_lock(&sc->mutex);
3164 spin_lock_bh(&sc->ani_lock);
3165 aphy->state = ATH_WIPHY_ACTIVE; 2008 aphy->state = ATH_WIPHY_ACTIVE;
3166 sc->sc_flags &= ~SC_OP_SCANNING; 2009 sc->sc_flags &= ~SC_OP_SCANNING;
3167 sc->sc_flags |= SC_OP_FULL_RESET; 2010 sc->sc_flags |= SC_OP_FULL_RESET;
3168 spin_unlock_bh(&sc->ani_lock); 2011 ath_start_ani(common);
2012 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
3169 ath_beacon_config(sc, NULL); 2013 ath_beacon_config(sc, NULL);
3170 mutex_unlock(&sc->mutex); 2014 mutex_unlock(&sc->mutex);
3171} 2015}
3172 2016
2017static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2018{
2019 struct ath_wiphy *aphy = hw->priv;
2020 struct ath_softc *sc = aphy->sc;
2021 struct ath_hw *ah = sc->sc_ah;
2022
2023 mutex_lock(&sc->mutex);
2024 ah->coverage_class = coverage_class;
2025 ath9k_hw_init_global_settings(ah);
2026 mutex_unlock(&sc->mutex);
2027}
2028
3173struct ieee80211_ops ath9k_ops = { 2029struct ieee80211_ops ath9k_ops = {
3174 .tx = ath9k_tx, 2030 .tx = ath9k_tx,
3175 .start = ath9k_start, 2031 .start = ath9k_start,
@@ -3189,64 +2045,5 @@ struct ieee80211_ops ath9k_ops = {
3189 .sw_scan_start = ath9k_sw_scan_start, 2045 .sw_scan_start = ath9k_sw_scan_start,
3190 .sw_scan_complete = ath9k_sw_scan_complete, 2046 .sw_scan_complete = ath9k_sw_scan_complete,
3191 .rfkill_poll = ath9k_rfkill_poll_state, 2047 .rfkill_poll = ath9k_rfkill_poll_state,
2048 .set_coverage_class = ath9k_set_coverage_class,
3192}; 2049};
3193
3194static int __init ath9k_init(void)
3195{
3196 int error;
3197
3198 /* Register rate control algorithm */
3199 error = ath_rate_control_register();
3200 if (error != 0) {
3201 printk(KERN_ERR
3202 "ath9k: Unable to register rate control "
3203 "algorithm: %d\n",
3204 error);
3205 goto err_out;
3206 }
3207
3208 error = ath9k_debug_create_root();
3209 if (error) {
3210 printk(KERN_ERR
3211 "ath9k: Unable to create debugfs root: %d\n",
3212 error);
3213 goto err_rate_unregister;
3214 }
3215
3216 error = ath_pci_init();
3217 if (error < 0) {
3218 printk(KERN_ERR
3219 "ath9k: No PCI devices found, driver not installed.\n");
3220 error = -ENODEV;
3221 goto err_remove_root;
3222 }
3223
3224 error = ath_ahb_init();
3225 if (error < 0) {
3226 error = -ENODEV;
3227 goto err_pci_exit;
3228 }
3229
3230 return 0;
3231
3232 err_pci_exit:
3233 ath_pci_exit();
3234
3235 err_remove_root:
3236 ath9k_debug_remove_root();
3237 err_rate_unregister:
3238 ath_rate_control_unregister();
3239 err_out:
3240 return error;
3241}
3242module_init(ath9k_init);
3243
/*
 * Module exit point: tear down the buses, debugfs root and rate control
 * registration in the exact reverse order of ath9k_init().
 */
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
3252module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f7af5ea5475..fe2c3a644a6 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,7 +18,7 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include "ath9k.h" 19#include "ath9k.h"
20 20
21static struct pci_device_id ath_pci_id_table[] __devinitdata = { 21static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
22 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ 22 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
23 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ 23 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
24 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ 24 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
@@ -113,25 +113,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
113 u16 subsysid; 113 u16 subsysid;
114 u32 val; 114 u32 val;
115 int ret = 0; 115 int ret = 0;
116 struct ath_hw *ah;
117 char hw_name[64]; 116 char hw_name[64];
118 117
119 if (pci_enable_device(pdev)) 118 if (pci_enable_device(pdev))
120 return -EIO; 119 return -EIO;
121 120
122 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 121 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
123
124 if (ret) { 122 if (ret) {
125 printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); 123 printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
126 goto bad; 124 goto err_dma;
127 } 125 }
128 126
129 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 127 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
130
131 if (ret) { 128 if (ret) {
132 printk(KERN_ERR "ath9k: 32-bit DMA consistent " 129 printk(KERN_ERR "ath9k: 32-bit DMA consistent "
133 "DMA enable failed\n"); 130 "DMA enable failed\n");
134 goto bad; 131 goto err_dma;
135 } 132 }
136 133
137 /* 134 /*
@@ -171,22 +168,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
171 if (ret) { 168 if (ret) {
172 dev_err(&pdev->dev, "PCI memory region reserve error\n"); 169 dev_err(&pdev->dev, "PCI memory region reserve error\n");
173 ret = -ENODEV; 170 ret = -ENODEV;
174 goto bad; 171 goto err_region;
175 } 172 }
176 173
177 mem = pci_iomap(pdev, 0, 0); 174 mem = pci_iomap(pdev, 0, 0);
178 if (!mem) { 175 if (!mem) {
179 printk(KERN_ERR "PCI memory map error\n") ; 176 printk(KERN_ERR "PCI memory map error\n") ;
180 ret = -EIO; 177 ret = -EIO;
181 goto bad1; 178 goto err_iomap;
182 } 179 }
183 180
184 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + 181 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
185 sizeof(struct ath_softc), &ath9k_ops); 182 sizeof(struct ath_softc), &ath9k_ops);
186 if (!hw) { 183 if (!hw) {
187 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 184 dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
188 ret = -ENOMEM; 185 ret = -ENOMEM;
189 goto bad2; 186 goto err_alloc_hw;
190 } 187 }
191 188
192 SET_IEEE80211_DEV(hw, &pdev->dev); 189 SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +198,25 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
201 sc->dev = &pdev->dev; 198 sc->dev = &pdev->dev;
202 sc->mem = mem; 199 sc->mem = mem;
203 200
204 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid); 201 /* Will be cleared in ath9k_start() */
205 ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops); 202 sc->sc_flags |= SC_OP_INVALID;
206 if (ret) {
207 dev_err(&pdev->dev, "failed to initialize device\n");
208 goto bad3;
209 }
210
211 /* setup interrupt service routine */
212 203
213 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); 204 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
214 if (ret) { 205 if (ret) {
215 dev_err(&pdev->dev, "request_irq failed\n"); 206 dev_err(&pdev->dev, "request_irq failed\n");
216 goto bad4; 207 goto err_irq;
217 } 208 }
218 209
219 sc->irq = pdev->irq; 210 sc->irq = pdev->irq;
220 211
221 ah = sc->sc_ah; 212 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
222 ath9k_hw_name(ah, hw_name, sizeof(hw_name)); 213 ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
214 if (ret) {
215 dev_err(&pdev->dev, "Failed to initialize device\n");
216 goto err_init;
217 }
218
219 ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
223 printk(KERN_INFO 220 printk(KERN_INFO
224 "%s: %s mem=0x%lx, irq=%d\n", 221 "%s: %s mem=0x%lx, irq=%d\n",
225 wiphy_name(hw->wiphy), 222 wiphy_name(hw->wiphy),
@@ -227,15 +224,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
227 (unsigned long)mem, pdev->irq); 224 (unsigned long)mem, pdev->irq);
228 225
229 return 0; 226 return 0;
230bad4: 227
231 ath_detach(sc); 228err_init:
232bad3: 229 free_irq(sc->irq, sc);
230err_irq:
233 ieee80211_free_hw(hw); 231 ieee80211_free_hw(hw);
234bad2: 232err_alloc_hw:
235 pci_iounmap(pdev, mem); 233 pci_iounmap(pdev, mem);
236bad1: 234err_iomap:
237 pci_release_region(pdev, 0); 235 pci_release_region(pdev, 0);
238bad: 236err_region:
237 /* Nothing */
238err_dma:
239 pci_disable_device(pdev); 239 pci_disable_device(pdev);
240 return ret; 240 return ret;
241} 241}
@@ -245,8 +245,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
245 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 245 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
246 struct ath_wiphy *aphy = hw->priv; 246 struct ath_wiphy *aphy = hw->priv;
247 struct ath_softc *sc = aphy->sc; 247 struct ath_softc *sc = aphy->sc;
248 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
248 249
249 ath_cleanup(sc); 250 ath9k_deinit_device(sc);
251 free_irq(sc->irq, sc);
252 ieee80211_free_hw(sc->hw);
253 ath_bus_cleanup(common);
250} 254}
251 255
252#ifdef CONFIG_PM 256#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 9eb96f50699..4f6d6fd442f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -57,6 +57,10 @@ enum {
57 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 57 || (_phy == WLAN_RC_PHY_HT_40_DS) \
58 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 58 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
59 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 59 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
60#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
61 || (_phy == WLAN_RC_PHY_HT_20_DS) \
62 || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
63 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI))
60#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ 64#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
61 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 65 || (_phy == WLAN_RC_PHY_HT_40_DS) \
62 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 66 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae6..40b5d05edcc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
364 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 364 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
365 return; /* not from our current AP */ 365 return; /* not from our current AP */
366 366
367 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON; 367 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
368 368
369 if (sc->sc_flags & SC_OP_BEACON_SYNC) { 369 if (sc->ps_flags & PS_BEACON_SYNC) {
370 sc->sc_flags &= ~SC_OP_BEACON_SYNC; 370 sc->ps_flags &= ~PS_BEACON_SYNC;
371 ath_print(common, ATH_DBG_PS, 371 ath_print(common, ATH_DBG_PS,
372 "Reconfigure Beacon timers based on " 372 "Reconfigure Beacon timers based on "
373 "timestamp from the AP\n"); 373 "timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
384 */ 384 */
385 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " 385 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
386 "buffered broadcast/multicast frame(s)\n"); 386 "buffered broadcast/multicast frame(s)\n");
387 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; 387 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
388 return; 388 return;
389 } 389 }
390 390
391 if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) { 391 if (sc->ps_flags & PS_WAIT_FOR_CAB) {
392 /* 392 /*
393 * This can happen if a broadcast frame is dropped or the AP 393 * This can happen if a broadcast frame is dropped or the AP
394 * fails to send a frame indicating that all CAB frames have 394 * fails to send a frame indicating that all CAB frames have
395 * been delivered. 395 * been delivered.
396 */ 396 */
397 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 397 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
398 ath_print(common, ATH_DBG_PS, 398 ath_print(common, ATH_DBG_PS,
399 "PS wait for CAB frames timed out\n"); 399 "PS wait for CAB frames timed out\n");
400 } 400 }
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
408 hdr = (struct ieee80211_hdr *)skb->data; 408 hdr = (struct ieee80211_hdr *)skb->data;
409 409
410 /* Process Beacon and CAB receive in PS state */ 410 /* Process Beacon and CAB receive in PS state */
411 if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) && 411 if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
412 ieee80211_is_beacon(hdr->frame_control)) 412 ieee80211_is_beacon(hdr->frame_control))
413 ath_rx_ps_beacon(sc, skb); 413 ath_rx_ps_beacon(sc, skb);
414 else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) && 414 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
415 (ieee80211_is_data(hdr->frame_control) || 415 (ieee80211_is_data(hdr->frame_control) ||
416 ieee80211_is_action(hdr->frame_control)) && 416 ieee80211_is_action(hdr->frame_control)) &&
417 is_multicast_ether_addr(hdr->addr1) && 417 is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
420 * No more broadcast/multicast frames to be received at this 420 * No more broadcast/multicast frames to be received at this
421 * point. 421 * point.
422 */ 422 */
423 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 423 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
424 ath_print(common, ATH_DBG_PS, 424 ath_print(common, ATH_DBG_PS,
425 "All PS CAB frames received, back to sleep\n"); 425 "All PS CAB frames received, back to sleep\n");
426 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) && 426 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
427 !is_multicast_ether_addr(hdr->addr1) && 427 !is_multicast_ether_addr(hdr->addr1) &&
428 !ieee80211_has_morefrags(hdr->frame_control)) { 428 !ieee80211_has_morefrags(hdr->frame_control)) {
429 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA; 429 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
430 ath_print(common, ATH_DBG_PS, 430 ath_print(common, ATH_DBG_PS,
431 "Going back to sleep after having received " 431 "Going back to sleep after having received "
432 "PS-Poll data (0x%x)\n", 432 "PS-Poll data (0x%x)\n",
433 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 433 sc->ps_flags & (PS_WAIT_FOR_BEACON |
434 SC_OP_WAIT_FOR_CAB | 434 PS_WAIT_FOR_CAB |
435 SC_OP_WAIT_FOR_PSPOLL_DATA | 435 PS_WAIT_FOR_PSPOLL_DATA |
436 SC_OP_WAIT_FOR_TX_ACK)); 436 PS_WAIT_FOR_TX_ACK));
437 } 437 }
438} 438}
439 439
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
571 hw = ath_get_virt_hw(sc, hdr); 571 hw = ath_get_virt_hw(sc, hdr);
572 rx_stats = &ds->ds_rxstat; 572 rx_stats = &ds->ds_rxstat;
573 573
574 ath_debug_stat_rx(sc, bf);
575
574 /* 576 /*
575 * If we're asked to flush receive queue, directly 577 * If we're asked to flush receive queue, directly
576 * chain it back at the queue without processing it. 578 * chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
631 sc->rx.rxotherant = 0; 633 sc->rx.rxotherant = 0;
632 } 634 }
633 635
634 if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 636 if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
635 SC_OP_WAIT_FOR_CAB | 637 PS_WAIT_FOR_CAB |
636 SC_OP_WAIT_FOR_PSPOLL_DATA))) 638 PS_WAIT_FOR_PSPOLL_DATA)))
637 ath_rx_ps(sc, skb); 639 ath_rx_ps(sc, skb);
638 640
639 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 641 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e..a43fbf84dab 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -152,7 +152,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
152 152
153 SET_IEEE80211_PERM_ADDR(hw, addr); 153 SET_IEEE80211_PERM_ADDR(hw, addr);
154 154
155 ath_set_hw_capab(sc, hw); 155 ath9k_set_hw_capab(sc, hw);
156 156
157 error = ieee80211_register_hw(hw); 157 error = ieee80211_register_hw(hw);
158 158
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0..a821bb687b3 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1648,7 +1648,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1648 /* tag if this is a nullfunc frame to enable PS when AP acks it */ 1648 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1649 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) { 1649 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1650 bf->bf_isnullfunc = true; 1650 bf->bf_isnullfunc = true;
1651 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; 1651 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
1652 } else 1652 } else
1653 bf->bf_isnullfunc = false; 1653 bf->bf_isnullfunc = false;
1654 1654
@@ -1858,15 +1858,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1858 skb_pull(skb, padsize); 1858 skb_pull(skb, padsize);
1859 } 1859 }
1860 1860
1861 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) { 1861 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1862 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK; 1862 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
1863 ath_print(common, ATH_DBG_PS, 1863 ath_print(common, ATH_DBG_PS,
1864 "Going back to sleep after having " 1864 "Going back to sleep after having "
1865 "received TX status (0x%x)\n", 1865 "received TX status (0x%x)\n",
1866 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 1866 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1867 SC_OP_WAIT_FOR_CAB | 1867 PS_WAIT_FOR_CAB |
1868 SC_OP_WAIT_FOR_PSPOLL_DATA | 1868 PS_WAIT_FOR_PSPOLL_DATA |
1869 SC_OP_WAIT_FOR_TX_ACK)); 1869 PS_WAIT_FOR_TX_ACK));
1870 } 1870 }
1871 1871
1872 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1872 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2053,11 +2053,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2053 */ 2053 */
2054 if (bf->bf_isnullfunc && 2054 if (bf->bf_isnullfunc &&
2055 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { 2055 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
2056 if ((sc->sc_flags & SC_OP_PS_ENABLED)) { 2056 if ((sc->ps_flags & PS_ENABLED)) {
2057 sc->ps_enabled = true; 2057 sc->ps_enabled = true;
2058 ath9k_hw_setrxabort(sc->sc_ah, 1); 2058 ath9k_hw_setrxabort(sc->sc_ah, 1);
2059 } else 2059 } else
2060 sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED; 2060 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2061 } 2061 }
2062 2062
2063 /* 2063 /*
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750..9ab1192004c 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); 32MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
33 33
34static struct pci_device_id card_ids[] = { 34static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
35 { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, 35 { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
36 { 0, } 36 { 0, }
37}; 37};
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced..073be566d05 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -3,6 +3,7 @@ config B43
3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA 3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
4 select SSB 4 select SSB
5 select FW_LOADER 5 select FW_LOADER
6 select SSB_BLOCKIO
6 ---help--- 7 ---help---
7 b43 is a driver for the Broadcom 43xx series wireless devices. 8 b43 is a driver for the Broadcom 43xx series wireless devices.
8 9
@@ -78,14 +79,6 @@ config B43_SDIO
78 79
79 If unsure, say N. 80 If unsure, say N.
80 81
81# Data transfers to the device via PIO
82# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
83config B43_PIO
84 bool
85 depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
86 select SSB_BLOCKIO
87 default y
88
89config B43_NPHY 82config B43_NPHY
90 bool "Pre IEEE 802.11n support (BROKEN)" 83 bool "Pre IEEE 802.11n support (BROKEN)"
91 depends on B43 && EXPERIMENTAL && BROKEN 84 depends on B43 && EXPERIMENTAL && BROKEN
@@ -137,12 +130,4 @@ config B43_DEBUG
137 for production use. 130 for production use.
138 Only say Y, if you are debugging a problem in the b43 driver sourcecode. 131 Only say Y, if you are debugging a problem in the b43 driver sourcecode.
139 132
140config B43_FORCE_PIO
141 bool "Force usage of PIO instead of DMA"
142 depends on B43 && B43_DEBUG
143 ---help---
144 This will disable DMA and always enable PIO instead.
145 133
146 Say N!
147 This is only for debugging the PIO engine code. You do
148 _NOT_ want to enable this.
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542d..5e83b6f0a3a 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
12b43-y += lo.o 12b43-y += lo.o
13b43-y += wa.o 13b43-y += wa.o
14b43-y += dma.o 14b43-y += dma.o
15b43-$(CONFIG_B43_PIO) += pio.o 15b43-y += pio.o
16b43-y += rfkill.o 16b43-y += rfkill.o
17b43-$(CONFIG_B43_LEDS) += leds.o 17b43-$(CONFIG_B43_LEDS) += leds.o
18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o 18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf949199..54d6085a887 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -253,6 +253,14 @@ enum {
253#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */ 253#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */
254#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ 254#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
255#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ 255#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
256/* SHM_SHARED tx iq workarounds */
257#define B43_SHM_SH_NPHY_TXIQW0 0x0700
258#define B43_SHM_SH_NPHY_TXIQW1 0x0702
259#define B43_SHM_SH_NPHY_TXIQW2 0x0704
260#define B43_SHM_SH_NPHY_TXIQW3 0x0706
261/* SHM_SHARED tx pwr ctrl */
262#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708
263#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E
256 264
257/* SHM_SCRATCH offsets */ 265/* SHM_SCRATCH offsets */
258#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */ 266#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */
@@ -821,11 +829,9 @@ struct b43_wl {
821 /* The device LEDs. */ 829 /* The device LEDs. */
822 struct b43_leds leds; 830 struct b43_leds leds;
823 831
824#ifdef CONFIG_B43_PIO
825 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */ 832 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
826 u8 pio_scratchspace[110] __attribute__((__aligned__(8))); 833 u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
827 u8 pio_tailspace[4] __attribute__((__aligned__(8))); 834 u8 pio_tailspace[4] __attribute__((__aligned__(8)));
828#endif /* CONFIG_B43_PIO */
829}; 835};
830 836
831static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) 837static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -876,20 +882,9 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
876 882
877static inline bool b43_using_pio_transfers(struct b43_wldev *dev) 883static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
878{ 884{
879#ifdef CONFIG_B43_PIO
880 return dev->__using_pio_transfers; 885 return dev->__using_pio_transfers;
881#else
882 return 0;
883#endif
884} 886}
885 887
886#ifdef CONFIG_B43_FORCE_PIO
887# define B43_FORCE_PIO 1
888#else
889# define B43_FORCE_PIO 0
890#endif
891
892
893/* Message printing */ 888/* Message printing */
894void b43info(struct b43_wl *wl, const char *fmt, ...) 889void b43info(struct b43_wl *wl, const char *fmt, ...)
895 __attribute__ ((format(printf, 2, 3))); 890 __attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 88d1fd02d40..615af22c49f 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
1653 b43_power_saving_ctl_bits(dev, 0); 1653 b43_power_saving_ctl_bits(dev, 0);
1654} 1654}
1655 1655
1656#ifdef CONFIG_B43_PIO
1657static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, 1656static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
1658 u16 mmio_base, bool enable) 1657 u16 mmio_base, bool enable)
1659{ 1658{
@@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
1687 mmio_base = b43_dmacontroller_base(type, engine_index); 1686 mmio_base = b43_dmacontroller_base(type, engine_index);
1688 direct_fifo_rx(dev, type, mmio_base, enable); 1687 direct_fifo_rx(dev, type, mmio_base, enable);
1689} 1688}
1690#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f2..9c5c7c9ad53 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -67,7 +67,12 @@ MODULE_AUTHOR("Gábor Stefanik");
67MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
68 68
69MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID); 69MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
70 70MODULE_FIRMWARE("b43/ucode11.fw");
71MODULE_FIRMWARE("b43/ucode13.fw");
72MODULE_FIRMWARE("b43/ucode14.fw");
73MODULE_FIRMWARE("b43/ucode15.fw");
74MODULE_FIRMWARE("b43/ucode5.fw");
75MODULE_FIRMWARE("b43/ucode9.fw");
71 76
72static int modparam_bad_frames_preempt; 77static int modparam_bad_frames_preempt;
73module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); 78module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -102,6 +107,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
102module_param_named(verbose, b43_modparam_verbose, int, 0644); 107module_param_named(verbose, b43_modparam_verbose, int, 0644);
103MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); 108MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
104 109
110static int modparam_pio;
111module_param_named(pio, modparam_pio, int, 0444);
112MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode");
105 113
106static const struct ssb_device_id b43_ssb_tbl[] = { 114static const struct ssb_device_id b43_ssb_tbl[] = {
107 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), 115 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -110,6 +118,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
110 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), 118 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
111 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), 119 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
112 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11), 120 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
121 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12),
113 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), 122 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
114 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15), 123 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
115 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16), 124 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
@@ -1786,8 +1795,8 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1786 dma_reason[4], dma_reason[5]); 1795 dma_reason[4], dma_reason[5]);
1787 b43err(dev->wl, "This device does not support DMA " 1796 b43err(dev->wl, "This device does not support DMA "
1788 "on your system. Please use PIO instead.\n"); 1797 "on your system. Please use PIO instead.\n");
1789 b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in " 1798 b43err(dev->wl, "Unload the b43 module and reload "
1790 "your kernel configuration.\n"); 1799 "with 'pio=1'\n");
1791 return; 1800 return;
1792 } 1801 }
1793 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { 1802 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -4353,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4353 4362
4354 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || 4363 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
4355 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || 4364 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
4356 B43_FORCE_PIO) { 4365 modparam_pio) {
4357 dev->__using_pio_transfers = 1; 4366 dev->__using_pio_transfers = 1;
4358 err = b43_pio_init(dev); 4367 err = b43_pio_init(dev);
4359 } else { 4368 } else {
@@ -4388,7 +4397,7 @@ err_busdown:
4388} 4397}
4389 4398
4390static int b43_op_add_interface(struct ieee80211_hw *hw, 4399static int b43_op_add_interface(struct ieee80211_hw *hw,
4391 struct ieee80211_if_init_conf *conf) 4400 struct ieee80211_vif *vif)
4392{ 4401{
4393 struct b43_wl *wl = hw_to_b43_wl(hw); 4402 struct b43_wl *wl = hw_to_b43_wl(hw);
4394 struct b43_wldev *dev; 4403 struct b43_wldev *dev;
@@ -4396,24 +4405,24 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4396 4405
4397 /* TODO: allow WDS/AP devices to coexist */ 4406 /* TODO: allow WDS/AP devices to coexist */
4398 4407
4399 if (conf->type != NL80211_IFTYPE_AP && 4408 if (vif->type != NL80211_IFTYPE_AP &&
4400 conf->type != NL80211_IFTYPE_MESH_POINT && 4409 vif->type != NL80211_IFTYPE_MESH_POINT &&
4401 conf->type != NL80211_IFTYPE_STATION && 4410 vif->type != NL80211_IFTYPE_STATION &&
4402 conf->type != NL80211_IFTYPE_WDS && 4411 vif->type != NL80211_IFTYPE_WDS &&
4403 conf->type != NL80211_IFTYPE_ADHOC) 4412 vif->type != NL80211_IFTYPE_ADHOC)
4404 return -EOPNOTSUPP; 4413 return -EOPNOTSUPP;
4405 4414
4406 mutex_lock(&wl->mutex); 4415 mutex_lock(&wl->mutex);
4407 if (wl->operating) 4416 if (wl->operating)
4408 goto out_mutex_unlock; 4417 goto out_mutex_unlock;
4409 4418
4410 b43dbg(wl, "Adding Interface type %d\n", conf->type); 4419 b43dbg(wl, "Adding Interface type %d\n", vif->type);
4411 4420
4412 dev = wl->current_dev; 4421 dev = wl->current_dev;
4413 wl->operating = 1; 4422 wl->operating = 1;
4414 wl->vif = conf->vif; 4423 wl->vif = vif;
4415 wl->if_type = conf->type; 4424 wl->if_type = vif->type;
4416 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 4425 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
4417 4426
4418 b43_adjust_opmode(dev); 4427 b43_adjust_opmode(dev);
4419 b43_set_pretbtt(dev); 4428 b43_set_pretbtt(dev);
@@ -4428,17 +4437,17 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4428} 4437}
4429 4438
4430static void b43_op_remove_interface(struct ieee80211_hw *hw, 4439static void b43_op_remove_interface(struct ieee80211_hw *hw,
4431 struct ieee80211_if_init_conf *conf) 4440 struct ieee80211_vif *vif)
4432{ 4441{
4433 struct b43_wl *wl = hw_to_b43_wl(hw); 4442 struct b43_wl *wl = hw_to_b43_wl(hw);
4434 struct b43_wldev *dev = wl->current_dev; 4443 struct b43_wldev *dev = wl->current_dev;
4435 4444
4436 b43dbg(wl, "Removing Interface type %d\n", conf->type); 4445 b43dbg(wl, "Removing Interface type %d\n", vif->type);
4437 4446
4438 mutex_lock(&wl->mutex); 4447 mutex_lock(&wl->mutex);
4439 4448
4440 B43_WARN_ON(!wl->operating); 4449 B43_WARN_ON(!wl->operating);
4441 B43_WARN_ON(wl->vif != conf->vif); 4450 B43_WARN_ON(wl->vif != vif);
4442 wl->vif = NULL; 4451 wl->vif = NULL;
4443 4452
4444 wl->operating = 0; 4453 wl->operating = 0;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff8..b58d6cf2658 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -80,6 +80,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
80 dev->phy.lp = NULL; 80 dev->phy.lp = NULL;
81} 81}
82 82
83/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
83static void lpphy_read_band_sprom(struct b43_wldev *dev) 84static void lpphy_read_band_sprom(struct b43_wldev *dev)
84{ 85{
85 struct b43_phy_lp *lpphy = dev->phy.lp; 86 struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +102,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
101 maxpwr = bus->sprom.maxpwr_bg; 102 maxpwr = bus->sprom.maxpwr_bg;
102 lpphy->max_tx_pwr_med_band = maxpwr; 103 lpphy->max_tx_pwr_med_band = maxpwr;
103 cckpo = bus->sprom.cck2gpo; 104 cckpo = bus->sprom.cck2gpo;
105 /*
106 * We don't read SPROM's opo as specs say. On rev8 SPROMs
107 * opo == ofdm2gpo and we don't know any SSB with LP-PHY
108 * and SPROM rev below 8.
109 */
110 B43_WARN_ON(bus->sprom.revision < 8);
104 ofdmpo = bus->sprom.ofdm2gpo; 111 ofdmpo = bus->sprom.ofdm2gpo;
105 if (cckpo) { 112 if (cckpo) {
106 for (i = 0; i < 4; i++) { 113 for (i = 0; i < 4; i++) {
@@ -1703,19 +1710,6 @@ static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
1703 .c0 = 0, 1710 .c0 = 0,
1704}; 1711};
1705 1712
1706static u8 lpphy_nbits(s32 val)
1707{
1708 u32 tmp = abs(val);
1709 u8 nbits = 0;
1710
1711 while (tmp != 0) {
1712 nbits++;
1713 tmp >>= 1;
1714 }
1715
1716 return nbits;
1717}
1718
1719static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples) 1713static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
1720{ 1714{
1721 struct lpphy_iq_est iq_est; 1715 struct lpphy_iq_est iq_est;
@@ -1742,8 +1736,8 @@ static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
1742 goto out; 1736 goto out;
1743 } 1737 }
1744 1738
1745 prod_msb = lpphy_nbits(prod); 1739 prod_msb = fls(abs(prod));
1746 q_msb = lpphy_nbits(qpwr); 1740 q_msb = fls(abs(qpwr));
1747 tmp1 = prod_msb - 20; 1741 tmp1 = prod_msb - 20;
1748 1742
1749 if (tmp1 >= 0) { 1743 if (tmp1 >= 0) {
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 992318a7807..4a817e3da16 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -28,7 +28,32 @@
28#include "b43.h" 28#include "b43.h"
29#include "phy_n.h" 29#include "phy_n.h"
30#include "tables_nphy.h" 30#include "tables_nphy.h"
31#include "main.h"
31 32
33struct nphy_txgains {
34 u16 txgm[2];
35 u16 pga[2];
36 u16 pad[2];
37 u16 ipa[2];
38};
39
40struct nphy_iqcal_params {
41 u16 txgm;
42 u16 pga;
43 u16 pad;
44 u16 ipa;
45 u16 cal_gain;
46 u16 ncorr[5];
47};
48
49struct nphy_iq_est {
50 s32 iq0_prod;
51 u32 i0_pwr;
52 u32 q0_pwr;
53 s32 iq1_prod;
54 u32 i1_pwr;
55 u32 q1_pwr;
56};
32 57
33void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 58void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
34{//TODO 59{//TODO
@@ -197,44 +222,16 @@ void b43_nphy_radio_turn_off(struct b43_wldev *dev)
197 ~B43_NPHY_RFCTL_CMD_EN); 222 ~B43_NPHY_RFCTL_CMD_EN);
198} 223}
199 224
200#define ntab_upload(dev, offset, data) do { \ 225/*
201 unsigned int i; \ 226 * Upload the N-PHY tables.
202 for (i = 0; i < (offset##_SIZE); i++) \ 227 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
203 b43_ntab_write(dev, (offset) + i, (data)[i]); \ 228 */
204 } while (0)
205
206/* Upload the N-PHY tables. */
207static void b43_nphy_tables_init(struct b43_wldev *dev) 229static void b43_nphy_tables_init(struct b43_wldev *dev)
208{ 230{
209 /* Static tables */ 231 if (dev->phy.rev < 3)
210 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct); 232 b43_nphy_rev0_1_2_tables_init(dev);
211 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup); 233 else
212 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap); 234 b43_nphy_rev3plus_tables_init(dev);
213 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
214 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
215 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
216 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
217 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
218 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
219 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
220 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
221 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
222 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
223 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
224
225 /* Volatile tables */
226 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
227 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
228 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
229 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
230 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
231 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
232 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
233 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
234 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
235 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
236 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
237 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
238} 235}
239 236
240static void b43_nphy_workarounds(struct b43_wldev *dev) 237static void b43_nphy_workarounds(struct b43_wldev *dev)
@@ -341,18 +338,386 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
341 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); 338 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
342} 339}
343 340
341/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
342static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
343{
344 struct b43_phy_n *nphy = dev->phy.n;
345 enum ieee80211_band band;
346 u16 tmp;
347
348 if (!enable) {
349 nphy->rfctrl_intc1_save = b43_phy_read(dev,
350 B43_NPHY_RFCTL_INTC1);
351 nphy->rfctrl_intc2_save = b43_phy_read(dev,
352 B43_NPHY_RFCTL_INTC2);
353 band = b43_current_band(dev->wl);
354 if (dev->phy.rev >= 3) {
355 if (band == IEEE80211_BAND_5GHZ)
356 tmp = 0x600;
357 else
358 tmp = 0x480;
359 } else {
360 if (band == IEEE80211_BAND_5GHZ)
361 tmp = 0x180;
362 else
363 tmp = 0x120;
364 }
365 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
366 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
367 } else {
368 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
369 nphy->rfctrl_intc1_save);
370 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
371 nphy->rfctrl_intc2_save);
372 }
373}
374
375/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
376static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
377{
378 struct b43_phy_n *nphy = dev->phy.n;
379 u16 tmp;
380 enum ieee80211_band band = b43_current_band(dev->wl);
381 bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
382 (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
383
384 if (dev->phy.rev >= 3) {
385 if (ipa) {
386 tmp = 4;
387 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
388 (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
389 }
390
391 tmp = 1;
392 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
393 (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
394 }
395}
396
397/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
398static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
399{
400 u32 tmslow;
401
402 if (dev->phy.type != B43_PHYTYPE_N)
403 return;
404
405 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
406 if (force)
407 tmslow |= SSB_TMSLOW_FGC;
408 else
409 tmslow &= ~SSB_TMSLOW_FGC;
410 ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
411}
412
413/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
344static void b43_nphy_reset_cca(struct b43_wldev *dev) 414static void b43_nphy_reset_cca(struct b43_wldev *dev)
345{ 415{
346 u16 bbcfg; 416 u16 bbcfg;
347 417
348 ssb_write32(dev->dev, SSB_TMSLOW, 418 b43_nphy_bmac_clock_fgc(dev, 1);
349 ssb_read32(dev->dev, SSB_TMSLOW) | SSB_TMSLOW_FGC);
350 bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); 419 bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
351 b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTCCA); 420 b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
352 b43_phy_write(dev, B43_NPHY_BBCFG, 421 udelay(1);
353 bbcfg & ~B43_NPHY_BBCFG_RSTCCA); 422 b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
354 ssb_write32(dev->dev, SSB_TMSLOW, 423 b43_nphy_bmac_clock_fgc(dev, 0);
355 ssb_read32(dev->dev, SSB_TMSLOW) & ~SSB_TMSLOW_FGC); 424 /* TODO: N PHY Force RF Seq with argument 2 */
425}
426
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
/*
 * Run the hardware RX I/Q estimator and read back per-core accumulated
 * I-power, Q-power and I*Q products into @est.
 * @samps: number of samples the estimator accumulates
 * @time:  value written into the IQEST wait-time field
 * @wait:  sets (true) or clears (false) the estimator MODE bit
 * Polls up to 1000 times with 10us spacing for completion; on timeout
 * @est is zero-filled.
 */
static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
				u16 samps, u8 time, bool wait)
{
	int i;
	u16 tmp;

	b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
	b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
	if (wait)
		b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
	else
		b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);

	/* Kick off the estimate; hardware clears START when done. */
	b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);

	for (i = 1000; i; i--) {
		tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
		if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
			/* Each 32-bit accumulator is a HI/LO register pair. */
			est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
			est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
			est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);

			est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
			est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
			est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
			return;
		}
		udelay(10);
	}
	/* Timed out: report all-zero estimates. */
	memset(est, 0, sizeof(*est));
}
465
466/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
467static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
468 struct b43_phy_n_iq_comp *pcomp)
469{
470 if (write) {
471 b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
472 b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
473 b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
474 b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
475 } else {
476 pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
477 pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
478 pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
479 pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
480 }
481}
482
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
/*
 * Derive RX I/Q compensation coefficients from a hardware I/Q estimate
 * and program them. @mask selects the cores to update (bit 0 = core 0,
 * bit 1 = core 1). On any numeric degenerate case the previously
 * programmed coefficients are restored unchanged.
 */
static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
{
	int i;
	s32 iq;
	u32 ii;
	u32 qq;
	int iq_nbits, qq_nbits;
	int arsh, brsh;
	u16 tmp, a, b;

	struct nphy_iq_est est;
	struct b43_phy_n_iq_comp old;
	struct b43_phy_n_iq_comp new = { };
	bool error = false;

	if (mask == 0)
		return;

	/* Save the current coefficients, zero them for an uncompensated
	 * measurement, then start the working copy from the saved values. */
	b43_nphy_rx_iq_coeffs(dev, false, &old);
	b43_nphy_rx_iq_coeffs(dev, true, &new);
	b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
	new = old;

	for (i = 0; i < 2; i++) {
		/* Pick the estimate for the core selected by the mask. */
		if (i == 0 && (mask & 1)) {
			iq = est.iq0_prod;
			ii = est.i0_pwr;
			qq = est.q0_pwr;
		} else if (i == 1 && (mask & 2)) {
			iq = est.iq1_prod;
			ii = est.i1_pwr;
			qq = est.q1_pwr;
		} else {
			B43_WARN_ON(1);
			continue;
		}

		/* Too little signal power to compute anything meaningful. */
		if (ii + qq < 2) {
			error = true;
			break;
		}

		iq_nbits = fls(abs(iq));
		qq_nbits = fls(qq);

		/* Fixed-point normalization: shift so the quotient fits,
		 * then a = -(iq / ii) scaled. */
		arsh = iq_nbits - 20;
		if (arsh >= 0) {
			a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
			tmp = ii >> arsh;
		} else {
			a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
			tmp = ii << -arsh;
		}
		if (tmp == 0) {
			error = true;
			break;
		}
		a /= tmp;

		brsh = qq_nbits - 11;
		if (brsh >= 0) {
			b = (qq << (31 - qq_nbits));
			tmp = ii >> brsh;
		} else {
			b = (qq << (31 - qq_nbits));
			tmp = ii << -brsh;
		}
		if (tmp == 0) {
			error = true;
			break;
		}
		/* b = sqrt(qq/ii - a^2) in Q10 fixed point. */
		b = int_sqrt(b / tmp - a * a) - (1 << 10);

		/* Coefficient ordering is swapped on pre-rev3 PHYs. */
		if (i == 0 && (mask & 0x1)) {
			if (dev->phy.rev >= 3) {
				new.a0 = a & 0x3FF;
				new.b0 = b & 0x3FF;
			} else {
				new.a0 = b & 0x3FF;
				new.b0 = a & 0x3FF;
			}
		} else if (i == 1 && (mask & 0x2)) {
			if (dev->phy.rev >= 3) {
				new.a1 = a & 0x3FF;
				new.b1 = b & 0x3FF;
			} else {
				new.a1 = b & 0x3FF;
				new.b1 = a & 0x3FF;
			}
		}
	}

	if (error)
		new = old;

	b43_nphy_rx_iq_coeffs(dev, true, &new);
}
581
582/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
583static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
584{
585 u16 array[4];
586 int i;
587
588 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
589 for (i = 0; i < 4; i++)
590 array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
591
592 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
593 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
594 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
595 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
596}
597
598/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
599static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
600{
601 b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
602 b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
603}
604
605/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
606static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
607{
608 clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
609 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
610}
611
612/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
613static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
614{
615 u16 tmp;
616
617 if (dev->dev->id.revision == 16)
618 b43_mac_suspend(dev);
619
620 tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
621 tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
622 B43_NPHY_CLASSCTL_WAITEDEN);
623 tmp &= ~mask;
624 tmp |= (val & mask);
625 b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
626
627 if (dev->dev->id.revision == 16)
628 b43_mac_enable(dev);
629
630 return tmp;
631}
632
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
/*
 * Force the PHY to stay in carrier search ("deaf" mode) when @enable,
 * undo it otherwise. Nesting is tracked via nphy->deaf_count: only the
 * outermost enable saves classifier/clip state, and only the matching
 * final disable restores it.
 */
static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = phy->n;

	if (enable) {
		/* Max thresholds effectively disable clip detection. */
		u16 clip[] = { 0xFFFF, 0xFFFF };
		if (nphy->deaf_count++ == 0) {
			nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
			b43_nphy_classifier(dev, 0x7, 0);
			b43_nphy_read_clip_detection(dev, nphy->clip_state);
			b43_nphy_write_clip_detection(dev, clip);
		}
		b43_nphy_reset_cca(dev);
	} else {
		if (--nphy->deaf_count == 0) {
			b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
			b43_nphy_write_clip_detection(dev, nphy->clip_state);
		}
	}
}
655
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
/*
 * Load TX power control coefficients into PHY tables 26 and 27.
 * NOTE(review): buffer[] is meant to be filled from N-PHY table 15 (see
 * the TODO below) but is currently consumed uninitialized — confirm the
 * table-read helper is wired up before relying on this function.
 */
static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	int i, j;
	u32 tmp;
	u32 cur_real, cur_imag, real_part, imag_part;

	u16 buffer[7];

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, true);

	/* TODO: Read an N PHY Table with ID 15, length 7, offset 80,
	   width 16, and data pointer buffer */

	/* Fill 128 entries of tables 26/27 at offset 320 with the packed
	 * 10+10 bit coefficient pair for each core. */
	for (i = 0; i < 2; i++) {
		tmp = ((buffer[i * 2] & 0x3FF) << 10) |
			(buffer[i * 2 + 1] & 0x3FF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
				(((i + 26) << 10) | 320));
		for (j = 0; j < 128; j++) {
			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
					((tmp >> 16) & 0xFFFF));
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					(tmp & 0xFFFF));
		}
	}

	/* Fill 128 entries at offset 448; pre-rev3 scales per entry by
	 * loscale[], rev3+ writes a constant value. */
	for (i = 0; i < 2; i++) {
		tmp = buffer[5 + i];
		real_part = (tmp >> 8) & 0xFF;
		imag_part = (tmp & 0xFF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
				(((i + 26) << 10) | 448));

		if (dev->phy.rev >= 3) {
			cur_real = real_part;
			cur_imag = imag_part;
			tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
		}

		for (j = 0; j < 128; j++) {
			if (dev->phy.rev < 3) {
				cur_real = (real_part * loscale[j] + 128) >> 8;
				cur_imag = (imag_part * loscale[j] + 128) >> 8;
				tmp = ((cur_real & 0xFF) << 8) |
					(cur_imag & 0xFF);
			}
			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
					((tmp >> 16) & 0xFFFF));
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					(tmp & 0xFFFF));
		}
	}

	if (dev->phy.rev >= 3) {
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, false);
}
357 722
358enum b43_nphy_rf_sequence { 723enum b43_nphy_rf_sequence {
@@ -411,81 +776,1339 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
411 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); 776 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
412} 777}
413 778
414/* RSSI Calibration */ 779/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
415static void b43_nphy_rssi_cal(struct b43_wldev *dev, u8 type) 780static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
781 s8 offset, u8 core, u8 rail, u8 type)
416{ 782{
417 //TODO 783 u16 tmp;
784 bool core1or5 = (core == 1) || (core == 5);
785 bool core2or5 = (core == 2) || (core == 5);
786
787 offset = clamp_val(offset, -32, 31);
788 tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
789
790 if (core1or5 && (rail == 0) && (type == 2))
791 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
792 if (core1or5 && (rail == 1) && (type == 2))
793 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
794 if (core2or5 && (rail == 0) && (type == 2))
795 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
796 if (core2or5 && (rail == 1) && (type == 2))
797 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
798 if (core1or5 && (rail == 0) && (type == 0))
799 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
800 if (core1or5 && (rail == 1) && (type == 0))
801 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
802 if (core2or5 && (rail == 0) && (type == 0))
803 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
804 if (core2or5 && (rail == 1) && (type == 0))
805 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
806 if (core1or5 && (rail == 0) && (type == 1))
807 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
808 if (core1or5 && (rail == 1) && (type == 1))
809 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
810 if (core2or5 && (rail == 0) && (type == 1))
811 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
812 if (core2or5 && (rail == 1) && (type == 1))
813 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
814 if (core1or5 && (rail == 0) && (type == 6))
815 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
816 if (core1or5 && (rail == 1) && (type == 6))
817 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
818 if (core2or5 && (rail == 0) && (type == 6))
819 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
820 if (core2or5 && (rail == 1) && (type == 6))
821 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
822 if (core1or5 && (rail == 0) && (type == 3))
823 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
824 if (core1or5 && (rail == 1) && (type == 3))
825 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
826 if (core2or5 && (rail == 0) && (type == 3))
827 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
828 if (core2or5 && (rail == 1) && (type == 3))
829 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
830 if (core1or5 && (type == 4))
831 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
832 if (core2or5 && (type == 4))
833 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
834 if (core1or5 && (type == 5))
835 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
836 if (core2or5 && (type == 5))
837 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
838}
839
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
/*
 * Select which RSSI signal type feeds the AFE on both cores.
 * @code: 0 releases the override path; non-zero forces it.
 * @type: RSSI measurement type; the mapping of type to mux value below
 *        follows the vendor specification. Rev 3+ is still TODO.
 */
static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
	u16 val;

	if (dev->phy.rev >= 3) {
		/* TODO */
	} else {
		/* Map the RSSI type onto the 2-bit AFE mux selector. */
		if (type < 3)
			val = 0;
		else if (type == 6)
			val = 1;
		else if (type == 3)
			val = 2;
		else
			val = 3;

		/* Replicate the selector into bits 12-13 and 14-15. */
		val = (val << 12) | (val << 14);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);

		if (type < 3) {
			b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
					(type + 1) << 4);
			b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
					(type + 1) << 4);
		}

		/* TODO use some definitions */
		if (code == 0) {
			/* Release the AFE override. */
			b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
			if (type < 3) {
				b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
						0xFEC7, 0);
				b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
						0xEFDC, 0);
				b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
						0xFFFE, 0);
				udelay(20);
				b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
						0xFFFE, 0);
			}
		} else {
			/* Force the override with the requested code. */
			b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
					0x3000);
			if (type < 3) {
				b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
						0xFEC7, 0x0180);
				b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
						0xEFDC, (code << 1 | 0x1021));
				b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
						0xFFFE, 0x0001);
				udelay(20);
				b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
						0xFFFE, 0);
			}
		}
	}
}
899
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
/*
 * Program the 2055 radio RSSI VCM fields for both cores from @buf.
 * NOTE(review): for type != 2 the core-0 write uses buf[0] while the
 * core-1 write uses buf[2 * i + 1] (== buf[3]); this asymmetry follows
 * the code as-is — verify against the buffer layout used by callers.
 */
static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
{
	int i;
	/* i == 0 addresses core 1 registers, i == 1 addresses core 2. */
	for (i = 0; i < 2; i++) {
		if (type == 2) {
			if (i == 0) {
				b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
						  0xFC, buf[0]);
				b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
						  0xFC, buf[1]);
			} else {
				b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM,
						  0xFC, buf[2 * i]);
				b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
						  0xFC, buf[2 * i + 1]);
			}
		} else {
			if (i == 0)
				b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
						  0xF3, buf[0] << 2);
			else
				b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
						  0xF3, buf[2 * i + 1] << 2);
		}
	}
}
927
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
/*
 * Sample the four RSSI rails @nsamp times and accumulate the
 * sign-extended 6-bit readings into buf[0..3]. Returns the low byte of
 * each of the four sums packed into one 32-bit word.
 * Registers touched for the measurement are saved and restored:
 * save_regs_phy[0..7] only on rev >= 3, save_regs_phy[8] (GPIO_SEL)
 * only on rev < 2.
 */
static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
				u8 nsamp)
{
	int i;
	int out;
	u16 save_regs_phy[9];
	u16 s[2];

	if (dev->phy.rev >= 3) {
		save_regs_phy[0] = b43_phy_read(dev,
						B43_NPHY_RFCTL_LUT_TRSW_UP1);
		save_regs_phy[1] = b43_phy_read(dev,
						B43_NPHY_RFCTL_LUT_TRSW_UP2);
		save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
		save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
		save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
		save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
		save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
	}

	b43_nphy_rssi_select(dev, 5, type);

	/* rev < 2 reads the samples through the GPIO output registers. */
	if (dev->phy.rev < 2) {
		save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
		b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5);
	}

	for (i = 0; i < 4; i++)
		buf[i] = 0;

	for (i = 0; i < nsamp; i++) {
		if (dev->phy.rev < 2) {
			s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT);
			s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT);
		} else {
			s[0] = b43_phy_read(dev, B43_NPHY_RSSI1);
			s[1] = b43_phy_read(dev, B43_NPHY_RSSI2);
		}

		/* Sign-extend each 6-bit field before accumulating. */
		buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2;
		buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
		buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2;
		buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
	}
	out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
		(buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);

	if (dev->phy.rev < 2)
		b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);

	if (dev->phy.rev >= 3) {
		b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
				save_regs_phy[0]);
		b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
				save_regs_phy[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
		b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
		b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
	}

	return out;
}
995
996/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
997static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
998{
999 int i, j;
1000 u8 state[4];
1001 u8 code, val;
1002 u16 class, override;
1003 u8 regs_save_radio[2];
1004 u16 regs_save_phy[2];
1005 s8 offset[4];
1006
1007 u16 clip_state[2];
1008 u16 clip_off[2] = { 0xFFFF, 0xFFFF };
1009 s32 results_min[4] = { };
1010 u8 vcm_final[4] = { };
1011 s32 results[4][4] = { };
1012 s32 miniq[4][2] = { };
1013
1014 if (type == 2) {
1015 code = 0;
1016 val = 6;
1017 } else if (type < 2) {
1018 code = 25;
1019 val = 4;
1020 } else {
1021 B43_WARN_ON(1);
1022 return;
1023 }
1024
1025 class = b43_nphy_classifier(dev, 0, 0);
1026 b43_nphy_classifier(dev, 7, 4);
1027 b43_nphy_read_clip_detection(dev, clip_state);
1028 b43_nphy_write_clip_detection(dev, clip_off);
1029
1030 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
1031 override = 0x140;
1032 else
1033 override = 0x110;
1034
1035 regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
1036 regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX);
1037 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
1038 b43_radio_write16(dev, B2055_C1_PD_RXTX, val);
1039
1040 regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
1041 regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX);
1042 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
1043 b43_radio_write16(dev, B2055_C2_PD_RXTX, val);
1044
1045 state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07;
1046 state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07;
1047 b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
1048 b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
1049 state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07;
1050 state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07;
1051
1052 b43_nphy_rssi_select(dev, 5, type);
1053 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type);
1054 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type);
1055
1056 for (i = 0; i < 4; i++) {
1057 u8 tmp[4];
1058 for (j = 0; j < 4; j++)
1059 tmp[j] = i;
1060 if (type != 1)
1061 b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
1062 b43_nphy_poll_rssi(dev, type, results[i], 8);
1063 if (type < 2)
1064 for (j = 0; j < 2; j++)
1065 miniq[i][j] = min(results[i][2 * j],
1066 results[i][2 * j + 1]);
1067 }
1068
1069 for (i = 0; i < 4; i++) {
1070 s32 mind = 40;
1071 u8 minvcm = 0;
1072 s32 minpoll = 249;
1073 s32 curr;
1074 for (j = 0; j < 4; j++) {
1075 if (type == 2)
1076 curr = abs(results[j][i]);
1077 else
1078 curr = abs(miniq[j][i / 2] - code * 8);
1079
1080 if (curr < mind) {
1081 mind = curr;
1082 minvcm = j;
1083 }
1084
1085 if (results[j][i] < minpoll)
1086 minpoll = results[j][i];
1087 }
1088 results_min[i] = minpoll;
1089 vcm_final[i] = minvcm;
1090 }
1091
1092 if (type != 1)
1093 b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
1094
1095 for (i = 0; i < 4; i++) {
1096 offset[i] = (code * 8) - results[vcm_final[i]][i];
1097
1098 if (offset[i] < 0)
1099 offset[i] = -((abs(offset[i]) + 4) / 8);
1100 else
1101 offset[i] = (offset[i] + 4) / 8;
1102
1103 if (results_min[i] == 248)
1104 offset[i] = code - 32;
1105
1106 if (i % 2 == 0)
1107 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
1108 type);
1109 else
1110 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
1111 type);
1112 }
1113
1114 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
1115 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[1]);
1116
1117 switch (state[2]) {
1118 case 1:
1119 b43_nphy_rssi_select(dev, 1, 2);
1120 break;
1121 case 4:
1122 b43_nphy_rssi_select(dev, 1, 0);
1123 break;
1124 case 2:
1125 b43_nphy_rssi_select(dev, 1, 1);
1126 break;
1127 default:
1128 b43_nphy_rssi_select(dev, 1, 1);
1129 break;
1130 }
1131
1132 switch (state[3]) {
1133 case 1:
1134 b43_nphy_rssi_select(dev, 2, 2);
1135 break;
1136 case 4:
1137 b43_nphy_rssi_select(dev, 2, 0);
1138 break;
1139 default:
1140 b43_nphy_rssi_select(dev, 2, 1);
1141 break;
1142 }
1143
1144 b43_nphy_rssi_select(dev, 0, type);
1145
1146 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
1147 b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
1148 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
1149 b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
1150
1151 b43_nphy_classifier(dev, 7, class);
1152 b43_nphy_write_clip_detection(dev, clip_state);
1153}
1154
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
/* RSSI calibration for PHY revisions >= 3 — not implemented yet. */
static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
{
	/* TODO */
}
1160
1161/*
1162 * RSSI Calibration
1163 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
1164 */
1165static void b43_nphy_rssi_cal(struct b43_wldev *dev)
1166{
1167 if (dev->phy.rev >= 3) {
1168 b43_nphy_rev3_rssi_cal(dev);
1169 } else {
1170 b43_nphy_rev2_rssi_cal(dev, 2);
1171 b43_nphy_rev2_rssi_cal(dev, 0);
1172 b43_nphy_rev2_rssi_cal(dev, 1);
1173 }
1174}
1175
/*
 * Restore RSSI Calibration
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
 */
/*
 * Reprogram the cached RSSI calibration for the current band. Returns
 * early (doing nothing) if no calibration was cached for this band.
 */
static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 *rssical_radio_regs = NULL;
	u16 *rssical_phy_regs = NULL;

	/* Pick the per-band cache. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if (!nphy->rssical_chanspec_2G)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
	} else {
		if (!nphy->rssical_chanspec_5G)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
	}

	/* TODO use some definitions */
	b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
	b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);

	/* Restore the Z/X/Y scale-offset registers for both cores. */
	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
}
1218
1219/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
1220static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
1221{
1222 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1223 if (dev->phy.rev >= 6) {
1224 /* TODO If the chip is 47162
1225 return txpwrctrl_tx_gain_ipa_rev5 */
1226 return txpwrctrl_tx_gain_ipa_rev6;
1227 } else if (dev->phy.rev >= 5) {
1228 return txpwrctrl_tx_gain_ipa_rev5;
1229 } else {
1230 return txpwrctrl_tx_gain_ipa;
1231 }
1232 } else {
1233 return txpwrctrl_tx_gain_ipa_5g;
1234 }
1235}
1236
1237/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
1238static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
1239{
1240 struct b43_phy_n *nphy = dev->phy.n;
1241 u16 *save = nphy->tx_rx_cal_radio_saveregs;
1242
1243 if (dev->phy.rev >= 3) {
1244 /* TODO */
1245 } else {
1246 save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1);
1247 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
1248
1249 save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2);
1250 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
1251
1252 save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1);
1253 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
1254
1255 save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2);
1256 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
1257
1258 save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX);
1259 save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX);
1260
1261 if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
1262 B43_NPHY_BANDCTL_5GHZ)) {
1263 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04);
1264 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04);
1265 } else {
1266 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20);
1267 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20);
1268 }
1269
1270 if (dev->phy.rev < 2) {
1271 b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20);
1272 b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20);
1273 } else {
1274 b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20);
1275 b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20);
1276 }
1277 }
1278}
1279
1280/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
1281static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
1282 struct nphy_txgains target,
1283 struct nphy_iqcal_params *params)
1284{
1285 int i, j, indx;
1286 u16 gain;
1287
1288 if (dev->phy.rev >= 3) {
1289 params->txgm = target.txgm[core];
1290 params->pga = target.pga[core];
1291 params->pad = target.pad[core];
1292 params->ipa = target.ipa[core];
1293 params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
1294 (params->pad << 4) | (params->ipa);
1295 for (j = 0; j < 5; j++)
1296 params->ncorr[j] = 0x79;
1297 } else {
1298 gain = (target.pad[core]) | (target.pga[core] << 4) |
1299 (target.txgm[core] << 8);
1300
1301 indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
1302 1 : 0;
1303 for (i = 0; i < 9; i++)
1304 if (tbl_iqcal_gainparams[indx][i][0] == gain)
1305 break;
1306 i = min(i, 8);
1307
1308 params->txgm = tbl_iqcal_gainparams[indx][i][1];
1309 params->pga = tbl_iqcal_gainparams[indx][i][2];
1310 params->pad = tbl_iqcal_gainparams[indx][i][3];
1311 params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
1312 (params->pad << 2);
1313 for (j = 0; j < 4; j++)
1314 params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
1315 }
1316}
1317
1318/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
1319static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
1320{
1321 struct b43_phy_n *nphy = dev->phy.n;
1322 int i;
1323 u16 scale, entry;
1324
1325 u16 tmp = nphy->txcal_bbmult;
1326 if (core == 0)
1327 tmp >>= 8;
1328 tmp &= 0xff;
1329
1330 for (i = 0; i < 18; i++) {
1331 scale = (ladder_lo[i].percent * tmp) / 100;
1332 entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
1333 /* TODO: Write an N PHY Table with ID 15, length 1,
1334 offset i, width 16, and data entry */
1335
1336 scale = (ladder_iq[i].percent * tmp) / 100;
1337 entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
1338 /* TODO: Write an N PHY Table with ID 15, length 1,
1339 offset i + 32, width 16, and data entry */
1340 }
1341}
1342
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
/*
 * Return the current TX gain settings for both cores, either from the
 * PHY gain table (TX power control off) or from the power-control
 * status registers plus the revision/band specific gain table.
 * NOTE(review): with txpwrctrl == 0, curr_gain[] is meant to be filled
 * by the TODO table read below but is currently used uninitialized —
 * confirm the table-read helper is wired up.
 */
static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 curr_gain[2];
	struct nphy_txgains target;
	const u32 *table = NULL;

	if (nphy->txpwrctrl == 0) {
		int i;

		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, true);
		/* TODO: Read an N PHY Table with ID 7, length 2,
		   offset 0x110, width 16, and curr_gain */
		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, false);

		/* Unpack the gain fields; the layout differs per rev. */
		for (i = 0; i < 2; ++i) {
			if (dev->phy.rev >= 3) {
				target.ipa[i] = curr_gain[i] & 0x000F;
				target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
				target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
				target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
			} else {
				target.ipa[i] = curr_gain[i] & 0x0003;
				target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
				target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
				target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
			}
		}
	} else {
		int i;
		u16 index[2];
		/* Current table index reported by the TX power control. */
		index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
		index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;

		for (i = 0; i < 2; ++i) {
			if (dev->phy.rev >= 3) {
				enum ieee80211_band band =
					b43_current_band(dev->wl);

				/* Choose internal-PA or band/rev table. */
				if ((nphy->ipa2g_on &&
				     band == IEEE80211_BAND_2GHZ) ||
				    (nphy->ipa5g_on &&
				     band == IEEE80211_BAND_5GHZ)) {
					table = b43_nphy_get_ipa_gain_table(dev);
				} else {
					if (band == IEEE80211_BAND_5GHZ) {
						if (dev->phy.rev == 3)
							table = b43_ntab_tx_gain_rev3_5ghz;
						else if (dev->phy.rev == 4)
							table = b43_ntab_tx_gain_rev4_5ghz;
						else
							table = b43_ntab_tx_gain_rev5plus_5ghz;
					} else {
						table = b43_ntab_tx_gain_rev3plus_2ghz;
					}
				}

				target.ipa[i] = (table[index[i]] >> 16) & 0xF;
				target.pad[i] = (table[index[i]] >> 20) & 0xF;
				target.pga[i] = (table[index[i]] >> 24) & 0xF;
				target.txgm[i] = (table[index[i]] >> 28) & 0xF;
			} else {
				table = b43_ntab_tx_gain_rev0_1_2;

				target.ipa[i] = (table[index[i]] >> 16) & 0x3;
				target.pad[i] = (table[index[i]] >> 18) & 0x3;
				target.pga[i] = (table[index[i]] >> 20) & 0x7;
				target.txgm[i] = (table[index[i]] >> 23) & 0x7;
			}
		}
	}

	return target;
}
1425
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
/* Restore previously cached TX/RX calibration state for the current band. */
static void b43_nphy_restore_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 coef[4];
	u16 *loft = NULL;	/* points at entries [5..] of the cached coeffs */
	u16 *table = NULL;	/* cached TX cal coefficients for this band */

	int i;
	u16 *txcal_radio_regs = NULL;
	struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;

	/* Pick the per-band calibration cache; nothing to restore if this
	 * band was never calibrated (chanspec still 0). */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if (nphy->iqcal_chanspec_2G == 0)
			return;
		table = nphy->cal_cache.txcal_coeffs_2G;
		loft = &nphy->cal_cache.txcal_coeffs_2G[5];
	} else {
		if (nphy->iqcal_chanspec_5G == 0)
			return;
		table = nphy->cal_cache.txcal_coeffs_5G;
		loft = &nphy->cal_cache.txcal_coeffs_5G[5];
	}

	/* TODO: Write an N PHY table with ID 15, length 4, offset 80,
		width 16, and data from table */

	/* NOTE(review): coef[] is never filled in before this loop, so for
	 * rev >= 3 the cache is refreshed from indeterminate values. It is
	 * presumably meant to receive data from one of the unimplemented
	 * table reads above — confirm when those TODOs are implemented. */
	for (i = 0; i < 4; i++) {
		if (dev->phy.rev >= 3)
			table[i] = coef[i];
		else
			coef[i] = 0;
	}

	/* TODO: Write an N PHY table with ID 15, length 4, offset 88,
		width 16, and data from coef */
	/* TODO: Write an N PHY table with ID 15, length 2, offset 85,
		width 16 and data from loft */
	/* TODO: Write an N PHY table with ID 15, length 2, offset 93,
		width 16 and data from loft */

	if (dev->phy.rev < 2)
		b43_nphy_tx_iq_workaround(dev);

	/* Select the per-band caches for the radio registers and the
	 * RX I/Q compensation coefficients. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
	} else {
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
	}

	/* Restore the cached TX cal radio registers. The raw register
	 * numbers differ between PHY revisions. */
	/* TODO use some definitions */
	if (dev->phy.rev >= 3) {
		b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
		b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
		b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
		b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
		b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
		b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
		b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
		b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
	} else {
		b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
		b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
		b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
		b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
	}
	/* Restore the cached RX I/Q compensation coefficients (the 'true'
	 * argument presumably selects write mode — see callee). */
	b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
}
1497
1498/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
1499static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
1500 struct nphy_txgains target,
1501 bool full, bool mphase)
1502{
1503 struct b43_phy_n *nphy = dev->phy.n;
1504 int i;
1505 int error = 0;
1506 int freq;
1507 bool avoid = false;
1508 u8 length;
1509 u16 tmp, core, type, count, max, numb, last, cmd;
1510 const u16 *table;
1511 bool phy6or5x;
1512
1513 u16 buffer[11];
1514 u16 diq_start = 0;
1515 u16 save[2];
1516 u16 gain[2];
1517 struct nphy_iqcal_params params[2];
1518 bool updated[2] = { };
1519
1520 b43_nphy_stay_in_carrier_search(dev, true);
1521
1522 if (dev->phy.rev >= 4) {
1523 avoid = nphy->hang_avoid;
1524 nphy->hang_avoid = 0;
1525 }
1526
1527 /* TODO: Read an N PHY Table with ID 7, length 2, offset 0x110,
1528 width 16, and data pointer save */
1529
1530 for (i = 0; i < 2; i++) {
1531 b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
1532 gain[i] = params[i].cal_gain;
1533 }
1534 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1535 width 16, and data pointer gain */
1536
1537 b43_nphy_tx_cal_radio_setup(dev);
1538 /* TODO: Call N PHY TX Cal PHY Setup */
1539
1540 phy6or5x = dev->phy.rev >= 6 ||
1541 (dev->phy.rev == 5 && nphy->ipa2g_on &&
1542 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
1543 if (phy6or5x) {
1544 /* TODO */
1545 }
1546
1547 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
1548
1549 if (1 /* FIXME: the band width is 20 MHz */)
1550 freq = 2500;
1551 else
1552 freq = 5000;
1553
1554 if (nphy->mphase_cal_phase_id > 2)
1555 ;/* TODO: Call N PHY Run Samples with (band width * 8),
1556 0xFFFF, 0, 1, 0 as arguments */
1557 else
1558 ;/* TODO: Call N PHY TX Tone with freq, 250, 1, 0 as arguments
1559 and save result as error */
1560
1561 if (error == 0) {
1562 if (nphy->mphase_cal_phase_id > 2) {
1563 table = nphy->mphase_txcal_bestcoeffs;
1564 length = 11;
1565 if (dev->phy.rev < 3)
1566 length -= 2;
1567 } else {
1568 if (!full && nphy->txiqlocal_coeffsvalid) {
1569 table = nphy->txiqlocal_bestc;
1570 length = 11;
1571 if (dev->phy.rev < 3)
1572 length -= 2;
1573 } else {
1574 full = true;
1575 if (dev->phy.rev >= 3) {
1576 table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
1577 length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
1578 } else {
1579 table = tbl_tx_iqlo_cal_startcoefs;
1580 length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
1581 }
1582 }
1583 }
1584
1585 /* TODO: Write an N PHY Table with ID 15, length from above,
1586 offset 64, width 16, and the data pointer from above */
1587
1588 if (full) {
1589 if (dev->phy.rev >= 3)
1590 max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
1591 else
1592 max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
1593 } else {
1594 if (dev->phy.rev >= 3)
1595 max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
1596 else
1597 max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
1598 }
1599
1600 if (mphase) {
1601 count = nphy->mphase_txcal_cmdidx;
1602 numb = min(max,
1603 (u16)(count + nphy->mphase_txcal_numcmds));
1604 } else {
1605 count = 0;
1606 numb = max;
1607 }
1608
1609 for (; count < numb; count++) {
1610 if (full) {
1611 if (dev->phy.rev >= 3)
1612 cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
1613 else
1614 cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
1615 } else {
1616 if (dev->phy.rev >= 3)
1617 cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
1618 else
1619 cmd = tbl_tx_iqlo_cal_cmds_recal[count];
1620 }
1621
1622 core = (cmd & 0x3000) >> 12;
1623 type = (cmd & 0x0F00) >> 8;
1624
1625 if (phy6or5x && updated[core] == 0) {
1626 b43_nphy_update_tx_cal_ladder(dev, core);
1627 updated[core] = 1;
1628 }
1629
1630 tmp = (params[core].ncorr[type] << 8) | 0x66;
1631 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);
1632
1633 if (type == 1 || type == 3 || type == 4) {
1634 /* TODO: Read an N PHY Table with ID 15,
1635 length 1, offset 69 + core,
1636 width 16, and data pointer buffer */
1637 diq_start = buffer[0];
1638 buffer[0] = 0;
1639 /* TODO: Write an N PHY Table with ID 15,
1640 length 1, offset 69 + core, width 16,
1641 and data of 0 */
1642 }
1643
1644 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
1645 for (i = 0; i < 2000; i++) {
1646 tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
1647 if (tmp & 0xC000)
1648 break;
1649 udelay(10);
1650 }
1651
1652 /* TODO: Read an N PHY Table with ID 15,
1653 length table_length, offset 96, width 16,
1654 and data pointer buffer */
1655 /* TODO: Write an N PHY Table with ID 15,
1656 length table_length, offset 64, width 16,
1657 and data pointer buffer */
1658
1659 if (type == 1 || type == 3 || type == 4)
1660 buffer[0] = diq_start;
1661 }
1662
1663 if (mphase)
1664 nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
1665
1666 last = (dev->phy.rev < 3) ? 6 : 7;
1667
1668 if (!mphase || nphy->mphase_cal_phase_id == last) {
1669 /* TODO: Write an N PHY Table with ID 15, length 4,
1670 offset 96, width 16, and data pointer buffer */
1671 /* TODO: Read an N PHY Table with ID 15, length 4,
1672 offset 80, width 16, and data pointer buffer */
1673 if (dev->phy.rev < 3) {
1674 buffer[0] = 0;
1675 buffer[1] = 0;
1676 buffer[2] = 0;
1677 buffer[3] = 0;
1678 }
1679 /* TODO: Write an N PHY Table with ID 15, length 4,
1680 offset 88, width 16, and data pointer buffer */
1681 /* TODO: Read an N PHY Table with ID 15, length 2,
1682 offset 101, width 16, and data pointer buffer*/
1683 /* TODO: Write an N PHY Table with ID 15, length 2,
1684 offset 85, width 16, and data pointer buffer */
1685 /* TODO: Write an N PHY Table with ID 15, length 2,
1686 offset 93, width 16, and data pointer buffer */
1687 length = 11;
1688 if (dev->phy.rev < 3)
1689 length -= 2;
1690 /* TODO: Read an N PHY Table with ID 15, length length,
1691 offset 96, width 16, and data pointer
1692 nphy->txiqlocal_bestc */
1693 nphy->txiqlocal_coeffsvalid = true;
1694 /* TODO: Set nphy->txiqlocal_chanspec to
1695 the current channel */
1696 } else {
1697 length = 11;
1698 if (dev->phy.rev < 3)
1699 length -= 2;
1700 /* TODO: Read an N PHY Table with ID 5, length length,
1701 offset 96, width 16, and data pointer
1702 nphy->mphase_txcal_bestcoeffs */
1703 }
1704
1705 /* TODO: Call N PHY Stop Playback */
1706 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
1707 }
1708
1709 /* TODO: Call N PHY TX Cal PHY Cleanup */
1710 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1711 width 16, and data from save */
1712
1713 if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
1714 b43_nphy_tx_iq_workaround(dev);
1715
1716 if (dev->phy.rev >= 4)
1717 nphy->hang_avoid = avoid;
1718
1719 b43_nphy_stay_in_carrier_search(dev, false);
1720
1721 return error;
418} 1722}
419 1723
1724/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
1725static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
1726 struct nphy_txgains target, u8 type, bool debug)
1727{
1728 struct b43_phy_n *nphy = dev->phy.n;
1729 int i, j, index;
1730 u8 rfctl[2];
1731 u8 afectl_core;
1732 u16 tmp[6];
1733 u16 cur_hpf1, cur_hpf2, cur_lna;
1734 u32 real, imag;
1735 enum ieee80211_band band;
1736
1737 u8 use;
1738 u16 cur_hpf;
1739 u16 lna[3] = { 3, 3, 1 };
1740 u16 hpf1[3] = { 7, 2, 0 };
1741 u16 hpf2[3] = { 2, 0, 0 };
1742 u32 power[3];
1743 u16 gain_save[2];
1744 u16 cal_gain[2];
1745 struct nphy_iqcal_params cal_params[2];
1746 struct nphy_iq_est est;
1747 int ret = 0;
1748 bool playtone = true;
1749 int desired = 13;
1750
1751 b43_nphy_stay_in_carrier_search(dev, 1);
1752
1753 if (dev->phy.rev < 2)
1754 ;/* TODO: Call N PHY Reapply TX Cal Coeffs */
1755 /* TODO: Read an N PHY Table with ID 7, length 2, offset 0x110,
1756 width 16, and data gain_save */
1757 for (i = 0; i < 2; i++) {
1758 b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
1759 cal_gain[i] = cal_params[i].cal_gain;
1760 }
1761 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1762 width 16, and data from cal_gain */
1763
1764 for (i = 0; i < 2; i++) {
1765 if (i == 0) {
1766 rfctl[0] = B43_NPHY_RFCTL_INTC1;
1767 rfctl[1] = B43_NPHY_RFCTL_INTC2;
1768 afectl_core = B43_NPHY_AFECTL_C1;
1769 } else {
1770 rfctl[0] = B43_NPHY_RFCTL_INTC2;
1771 rfctl[1] = B43_NPHY_RFCTL_INTC1;
1772 afectl_core = B43_NPHY_AFECTL_C2;
1773 }
1774
1775 tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
1776 tmp[2] = b43_phy_read(dev, afectl_core);
1777 tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
1778 tmp[4] = b43_phy_read(dev, rfctl[0]);
1779 tmp[5] = b43_phy_read(dev, rfctl[1]);
1780
1781 b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
1782 (u16)~B43_NPHY_RFSEQCA_RXDIS,
1783 ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
1784 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
1785 (1 - i));
1786 b43_phy_set(dev, afectl_core, 0x0006);
1787 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);
1788
1789 band = b43_current_band(dev->wl);
1790
1791 if (nphy->rxcalparams & 0xFF000000) {
1792 if (band == IEEE80211_BAND_5GHZ)
1793 b43_phy_write(dev, rfctl[0], 0x140);
1794 else
1795 b43_phy_write(dev, rfctl[0], 0x110);
1796 } else {
1797 if (band == IEEE80211_BAND_5GHZ)
1798 b43_phy_write(dev, rfctl[0], 0x180);
1799 else
1800 b43_phy_write(dev, rfctl[0], 0x120);
1801 }
1802
1803 if (band == IEEE80211_BAND_5GHZ)
1804 b43_phy_write(dev, rfctl[1], 0x148);
1805 else
1806 b43_phy_write(dev, rfctl[1], 0x114);
1807
1808 if (nphy->rxcalparams & 0x10000) {
1809 b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
1810 (i + 1));
1811 b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
1812 (2 - i));
1813 }
1814
1815 for (j = 0; i < 4; j++) {
1816 if (j < 3) {
1817 cur_lna = lna[j];
1818 cur_hpf1 = hpf1[j];
1819 cur_hpf2 = hpf2[j];
1820 } else {
1821 if (power[1] > 10000) {
1822 use = 1;
1823 cur_hpf = cur_hpf1;
1824 index = 2;
1825 } else {
1826 if (power[0] > 10000) {
1827 use = 1;
1828 cur_hpf = cur_hpf1;
1829 index = 1;
1830 } else {
1831 index = 0;
1832 use = 2;
1833 cur_hpf = cur_hpf2;
1834 }
1835 }
1836 cur_lna = lna[index];
1837 cur_hpf1 = hpf1[index];
1838 cur_hpf2 = hpf2[index];
1839 cur_hpf += desired - hweight32(power[index]);
1840 cur_hpf = clamp_val(cur_hpf, 0, 10);
1841 if (use == 1)
1842 cur_hpf1 = cur_hpf;
1843 else
1844 cur_hpf2 = cur_hpf;
1845 }
1846
1847 tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
1848 (cur_lna << 2));
1849 /* TODO:Call N PHY RF Ctrl Override with 0x400, tmp[0],
1850 3, 0 as arguments */
1851 /* TODO: Call N PHY Force RF Seq with 2 as argument */
1852 /* TODO: Call N PHT Stop Playback */
1853
1854 if (playtone) {
1855 /* TODO: Call N PHY TX Tone with 4000,
1856 (nphy_rxcalparams & 0xffff), 0, 0
1857 as arguments and save result as ret */
1858 playtone = false;
1859 } else {
1860 /* TODO: Call N PHY Run Samples with 160,
1861 0xFFFF, 0, 0, 0 as arguments */
1862 }
1863
1864 if (ret == 0) {
1865 if (j < 3) {
1866 b43_nphy_rx_iq_est(dev, &est, 1024, 32,
1867 false);
1868 if (i == 0) {
1869 real = est.i0_pwr;
1870 imag = est.q0_pwr;
1871 } else {
1872 real = est.i1_pwr;
1873 imag = est.q1_pwr;
1874 }
1875 power[i] = ((real + imag) / 1024) + 1;
1876 } else {
1877 b43_nphy_calc_rx_iq_comp(dev, 1 << i);
1878 }
1879 /* TODO: Call N PHY Stop Playback */
1880 }
1881
1882 if (ret != 0)
1883 break;
1884 }
1885
1886 b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
1887 b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
1888 b43_phy_write(dev, rfctl[1], tmp[5]);
1889 b43_phy_write(dev, rfctl[0], tmp[4]);
1890 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
1891 b43_phy_write(dev, afectl_core, tmp[2]);
1892 b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);
1893
1894 if (ret != 0)
1895 break;
1896 }
1897
1898 /* TODO: Call N PHY RF Ctrl Override with 0x400, 0, 3, 1 as arguments*/
1899 /* TODO: Call N PHY Force RF Seq with 2 as argument */
1900 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1901 width 16, and data from gain_save */
1902
1903 b43_nphy_stay_in_carrier_search(dev, 0);
1904
1905 return ret;
1906}
1907
static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
				struct nphy_txgains target, u8 type, bool debug)
{
	/* RX I/Q calibration for PHY revisions >= 3 is not implemented
	 * yet; report failure so callers can react accordingly. */
	return -1;
}
1913
1914/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
1915static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
1916 struct nphy_txgains target, u8 type, bool debug)
1917{
1918 if (dev->phy.rev >= 3)
1919 return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
1920 else
1921 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
1922}
1923
1924/*
1925 * Init N-PHY
1926 * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
1927 */
420int b43_phy_initn(struct b43_wldev *dev) 1928int b43_phy_initn(struct b43_wldev *dev)
421{ 1929{
1930 struct ssb_bus *bus = dev->dev->bus;
422 struct b43_phy *phy = &dev->phy; 1931 struct b43_phy *phy = &dev->phy;
1932 struct b43_phy_n *nphy = phy->n;
1933 u8 tx_pwr_state;
1934 struct nphy_txgains target;
423 u16 tmp; 1935 u16 tmp;
1936 enum ieee80211_band tmp2;
1937 bool do_rssi_cal;
1938
1939 u16 clip[2];
1940 bool do_cal = false;
424 1941
425 //TODO: Spectral management 1942 if ((dev->phy.rev >= 3) &&
1943 (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
1944 (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
1945 chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
1946 }
1947 nphy->deaf_count = 0;
426 b43_nphy_tables_init(dev); 1948 b43_nphy_tables_init(dev);
1949 nphy->crsminpwr_adjusted = false;
1950 nphy->noisevars_adjusted = false;
427 1951
428 /* Clear all overrides */ 1952 /* Clear all overrides */
429 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); 1953 if (dev->phy.rev >= 3) {
1954 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
1955 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
1956 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
1957 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
1958 } else {
1959 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
1960 }
430 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0); 1961 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0);
431 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0); 1962 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0);
432 b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0); 1963 if (dev->phy.rev < 6) {
433 b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0); 1964 b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
1965 b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
1966 }
434 b43_phy_mask(dev, B43_NPHY_RFSEQMODE, 1967 b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
435 ~(B43_NPHY_RFSEQMODE_CAOVER | 1968 ~(B43_NPHY_RFSEQMODE_CAOVER |
436 B43_NPHY_RFSEQMODE_TROVER)); 1969 B43_NPHY_RFSEQMODE_TROVER));
1970 if (dev->phy.rev >= 3)
1971 b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0);
437 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0); 1972 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0);
438 1973
439 tmp = (phy->rev < 2) ? 64 : 59; 1974 if (dev->phy.rev <= 2) {
440 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, 1975 tmp = (dev->phy.rev == 2) ? 0x3B : 0x40;
441 ~B43_NPHY_BPHY_CTL3_SCALE, 1976 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
442 tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT); 1977 ~B43_NPHY_BPHY_CTL3_SCALE,
443 1978 tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
1979 }
444 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20); 1980 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
445 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20); 1981 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
446 1982
447 b43_phy_write(dev, B43_NPHY_TXREALFD, 184); 1983 if (bus->sprom.boardflags2_lo & 0x100 ||
448 b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 200); 1984 (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
449 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 80); 1985 bus->boardinfo.type == 0x8B))
450 b43_phy_write(dev, B43_NPHY_C2_BCLIPBKOFF, 511); 1986 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
1987 else
1988 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
1989 b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8);
1990 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
1991 b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
451 1992
452 //TODO MIMO-Config 1993 /* TODO MIMO-Config */
453 //TODO Update TX/RX chain 1994 /* TODO Update TX/RX chain */
454 1995
455 if (phy->rev < 2) { 1996 if (phy->rev < 2) {
456 b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8); 1997 b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8);
457 b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4); 1998 b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
458 } 1999 }
2000
2001 tmp2 = b43_current_band(dev->wl);
2002 if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
2003 (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
2004 b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
2005 b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
2006 nphy->papd_epsilon_offset[0] << 7);
2007 b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1);
2008 b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F,
2009 nphy->papd_epsilon_offset[1] << 7);
2010 /* TODO N PHY IPA Set TX Dig Filters */
2011 } else if (phy->rev >= 5) {
2012 /* TODO N PHY Ext PA Set TX Dig Filters */
2013 }
2014
459 b43_nphy_workarounds(dev); 2015 b43_nphy_workarounds(dev);
460 b43_nphy_reset_cca(dev);
461 2016
462 ssb_write32(dev->dev, SSB_TMSLOW, 2017 /* Reset CCA, in init code it differs a little from standard way */
463 ssb_read32(dev->dev, SSB_TMSLOW) | B43_TMSLOW_MACPHYCLKEN); 2018 b43_nphy_bmac_clock_fgc(dev, 1);
2019 tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
2020 b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
2021 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
2022 b43_nphy_bmac_clock_fgc(dev, 0);
2023
2024 /* TODO N PHY MAC PHY Clock Set with argument 1 */
2025
2026 b43_nphy_pa_override(dev, false);
464 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); 2027 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
465 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); 2028 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
2029 b43_nphy_pa_override(dev, true);
2030
2031 b43_nphy_classifier(dev, 0, 0);
2032 b43_nphy_read_clip_detection(dev, clip);
2033 tx_pwr_state = nphy->txpwrctrl;
2034 /* TODO N PHY TX power control with argument 0
2035 (turning off power control) */
2036 /* TODO Fix the TX Power Settings */
2037 /* TODO N PHY TX Power Control Idle TSSI */
2038 /* TODO N PHY TX Power Control Setup */
2039
2040 if (phy->rev >= 3) {
2041 /* TODO */
2042 } else {
2043 /* TODO Write an N PHY table with ID 26, length 128, offset 192, width 32, and the data from Rev 2 TX Power Control Table */
2044 /* TODO Write an N PHY table with ID 27, length 128, offset 192, width 32, and the data from Rev 2 TX Power Control Table */
2045 }
2046
2047 if (nphy->phyrxchain != 3)
2048 ;/* TODO N PHY RX Core Set State with phyrxchain as argument */
2049 if (nphy->mphase_cal_phase_id > 0)
2050 ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
2051
2052 do_rssi_cal = false;
2053 if (phy->rev >= 3) {
2054 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
2055 do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
2056 else
2057 do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
2058
2059 if (do_rssi_cal)
2060 b43_nphy_rssi_cal(dev);
2061 else
2062 b43_nphy_restore_rssi_cal(dev);
2063 } else {
2064 b43_nphy_rssi_cal(dev);
2065 }
2066
2067 if (!((nphy->measure_hold & 0x6) != 0)) {
2068 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
2069 do_cal = (nphy->iqcal_chanspec_2G == 0);
2070 else
2071 do_cal = (nphy->iqcal_chanspec_5G == 0);
2072
2073 if (nphy->mute)
2074 do_cal = false;
2075
2076 if (do_cal) {
2077 target = b43_nphy_get_tx_gains(dev);
2078
2079 if (nphy->antsel_type == 2)
2080 ;/*TODO NPHY Superswitch Init with argument 1*/
2081 if (nphy->perical != 2) {
2082 b43_nphy_rssi_cal(dev);
2083 if (phy->rev >= 3) {
2084 nphy->cal_orig_pwr_idx[0] =
2085 nphy->txpwrindex[0].index_internal;
2086 nphy->cal_orig_pwr_idx[1] =
2087 nphy->txpwrindex[1].index_internal;
2088 /* TODO N PHY Pre Calibrate TX Gain */
2089 target = b43_nphy_get_tx_gains(dev);
2090 }
2091 }
2092 }
2093 }
2094
2095 if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
2096 if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
2097 ;/* Call N PHY Save Cal */
2098 else if (nphy->mphase_cal_phase_id == 0)
2099 ;/* N PHY Periodic Calibration with argument 3 */
2100 } else {
2101 b43_nphy_restore_cal(dev);
2102 }
466 2103
467 b43_phy_read(dev, B43_NPHY_CLASSCTL); /* dummy read */ 2104 b43_nphy_tx_pwr_ctrl_coef_setup(dev);
468 //TODO read core1/2 clip1 thres regs 2105 /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
469 2106 b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
470 if (1 /* FIXME Band is 2.4GHz */) 2107 b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
471 b43_nphy_bphy_init(dev); 2108 if (phy->rev >= 3 && phy->rev <= 6)
472 //TODO disable TX power control 2109 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
473 //TODO Fix the TX power settings 2110 b43_nphy_tx_lp_fbw(dev);
474 //TODO Init periodic calibration with reason 3 2111 /* TODO N PHY Spur Workaround */
475 b43_nphy_rssi_cal(dev, 2);
476 b43_nphy_rssi_cal(dev, 0);
477 b43_nphy_rssi_cal(dev, 1);
478 //TODO get TX gain
479 //TODO init superswitch
480 //TODO calibrate LO
481 //TODO idle TSSI TX pctl
482 //TODO TX power control power setup
483 //TODO table writes
484 //TODO TX power control coefficients
485 //TODO enable TX power control
486 //TODO control antenna selection
487 //TODO init radar detection
488 //TODO reset channel if changed
489 2112
490 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n"); 2113 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
491 return 0; 2114 return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 1749aef4147..4572866756f 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -231,6 +231,7 @@
231#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */ 231#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */
232#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */ 232#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */
233#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */ 233#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */
234#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */
234#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */ 235#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */
235#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */ 236#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */
236#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0 237#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0
@@ -705,6 +706,10 @@
705#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */ 706#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */
706#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */ 707#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
707#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0 708#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
709#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
710#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
708 713
709 714
710 715
@@ -919,8 +924,88 @@
919 924
920struct b43_wldev; 925struct b43_wldev;
921 926
927struct b43_phy_n_iq_comp {
928 s16 a0;
929 s16 b0;
930 s16 a1;
931 s16 b1;
932};
933
934struct b43_phy_n_rssical_cache {
935 u16 rssical_radio_regs_2G[2];
936 u16 rssical_phy_regs_2G[12];
937
938 u16 rssical_radio_regs_5G[2];
939 u16 rssical_phy_regs_5G[12];
940};
941
942struct b43_phy_n_cal_cache {
943 u16 txcal_radio_regs_2G[8];
944 u16 txcal_coeffs_2G[8];
945 struct b43_phy_n_iq_comp rxcal_coeffs_2G;
946
947 u16 txcal_radio_regs_5G[8];
948 u16 txcal_coeffs_5G[8];
949 struct b43_phy_n_iq_comp rxcal_coeffs_5G;
950};
951
952struct b43_phy_n_txpwrindex {
953 s8 index;
954 s8 index_internal;
955 s8 index_internal_save;
956 u16 AfectrlOverride;
957 u16 AfeCtrlDacGain;
958 u16 rad_gain;
959 u8 bbmult;
960 u16 iqcomp_a;
961 u16 iqcomp_b;
962 u16 locomp;
963};
964
922struct b43_phy_n { 965struct b43_phy_n {
923 //TODO lots of missing stuff 966 u8 antsel_type;
967 u8 cal_orig_pwr_idx[2];
968 u8 measure_hold;
969 u8 phyrxchain;
970 u8 perical;
971 u32 deaf_count;
972 u32 rxcalparams;
973 bool hang_avoid;
974 bool mute;
975 u16 papd_epsilon_offset[2];
976
977 u8 mphase_cal_phase_id;
978 u16 mphase_txcal_cmdidx;
979 u16 mphase_txcal_numcmds;
980 u16 mphase_txcal_bestcoeffs[11];
981
982 u8 txpwrctrl;
983 u16 txcal_bbmult;
984 u16 txiqlocal_bestc[11];
985 bool txiqlocal_coeffsvalid;
986 struct b43_phy_n_txpwrindex txpwrindex[2];
987
988 u16 tx_rx_cal_phy_saveregs[11];
989 u16 tx_rx_cal_radio_saveregs[22];
990
991 u16 rfctrl_intc1_save;
992 u16 rfctrl_intc2_save;
993
994 u16 classifier_state;
995 u16 clip_state[2];
996
997 bool ipa2g_on;
998 u8 iqcal_chanspec_2G;
999 u8 rssical_chanspec_2G;
1000
1001 bool ipa5g_on;
1002 u8 iqcal_chanspec_5G;
1003 u8 rssical_chanspec_5G;
1004
1005 struct b43_phy_n_rssical_cache rssical_cache;
1006 struct b43_phy_n_cal_cache cal_cache;
1007 bool crsminpwr_adjusted;
1008 bool noisevars_adjusted;
924}; 1009};
925 1010
926 1011
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9dda..7b3c42f93a1 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
55#define B43_PIO_MAX_NR_TXPACKETS 32 55#define B43_PIO_MAX_NR_TXPACKETS 32
56 56
57 57
58#ifdef CONFIG_B43_PIO
59
60struct b43_pio_txpacket { 58struct b43_pio_txpacket {
61 /* Pointer to the TX queue we belong to. */ 59 /* Pointer to the TX queue we belong to. */
62 struct b43_pio_txqueue *queue; 60 struct b43_pio_txqueue *queue;
@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
169void b43_pio_tx_suspend(struct b43_wldev *dev); 167void b43_pio_tx_suspend(struct b43_wldev *dev);
170void b43_pio_tx_resume(struct b43_wldev *dev); 168void b43_pio_tx_resume(struct b43_wldev *dev);
171 169
172
173#else /* CONFIG_B43_PIO */
174
175
176static inline int b43_pio_init(struct b43_wldev *dev)
177{
178 return 0;
179}
180static inline void b43_pio_free(struct b43_wldev *dev)
181{
182}
183static inline void b43_pio_stop(struct b43_wldev *dev)
184{
185}
186static inline int b43_pio_tx(struct b43_wldev *dev,
187 struct sk_buff *skb)
188{
189 return 0;
190}
191static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
192 const struct b43_txstatus *status)
193{
194}
195static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
196 struct ieee80211_tx_queue_stats *stats)
197{
198}
199static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
200{
201}
202static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
203{
204}
205static inline void b43_pio_tx_resume(struct b43_wldev *dev)
206{
207}
208
209#endif /* CONFIG_B43_PIO */
210#endif /* B43_PIO_H_ */ 170#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 4e233631554..7dff853ab96 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1336,7 +1336,7 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
1336} 1336}
1337 1337
1338 1338
1339const u8 b43_ntab_adjustpower0[] = { 1339static const u8 b43_ntab_adjustpower0[] = {
1340 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 1340 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
1341 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 1341 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
1342 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 1342 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1355,7 +1355,7 @@ const u8 b43_ntab_adjustpower0[] = {
1355 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 1355 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
1356}; 1356};
1357 1357
1358const u8 b43_ntab_adjustpower1[] = { 1358static const u8 b43_ntab_adjustpower1[] = {
1359 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 1359 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
1360 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 1360 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
1361 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 1361 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1374,11 +1374,11 @@ const u8 b43_ntab_adjustpower1[] = {
1374 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 1374 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
1375}; 1375};
1376 1376
1377const u16 b43_ntab_bdi[] = { 1377static const u16 b43_ntab_bdi[] = {
1378 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2, 1378 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2,
1379}; 1379};
1380 1380
1381const u32 b43_ntab_channelest[] = { 1381static const u32 b43_ntab_channelest[] = {
1382 0x44444444, 0x44444444, 0x44444444, 0x44444444, 1382 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1383 0x44444444, 0x44444444, 0x44444444, 0x44444444, 1383 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1384 0x10101010, 0x10101010, 0x10101010, 0x10101010, 1384 0x10101010, 0x10101010, 0x10101010, 0x10101010,
@@ -1405,7 +1405,7 @@ const u32 b43_ntab_channelest[] = {
1405 0x10101010, 0x10101010, 0x10101010, 0x10101010, 1405 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1406}; 1406};
1407 1407
1408const u8 b43_ntab_estimatepowerlt0[] = { 1408static const u8 b43_ntab_estimatepowerlt0[] = {
1409 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 1409 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
1410 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 1410 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
1411 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 1411 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1416,7 +1416,7 @@ const u8 b43_ntab_estimatepowerlt0[] = {
1416 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 1416 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
1417}; 1417};
1418 1418
1419const u8 b43_ntab_estimatepowerlt1[] = { 1419static const u8 b43_ntab_estimatepowerlt1[] = {
1420 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 1420 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
1421 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 1421 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
1422 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 1422 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1427,14 +1427,14 @@ const u8 b43_ntab_estimatepowerlt1[] = {
1427 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 1427 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
1428}; 1428};
1429 1429
1430const u8 b43_ntab_framelookup[] = { 1430static const u8 b43_ntab_framelookup[] = {
1431 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16, 1431 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
1432 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E, 1432 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E,
1433 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A, 1433 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A,
1434 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A, 1434 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A,
1435}; 1435};
1436 1436
1437const u32 b43_ntab_framestruct[] = { 1437static const u32 b43_ntab_framestruct[] = {
1438 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, 1438 0x08004A04, 0x00100000, 0x01000A05, 0x00100020,
1439 0x09804506, 0x00100030, 0x09804507, 0x00100030, 1439 0x09804506, 0x00100030, 0x09804507, 0x00100030,
1440 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1645,7 +1645,7 @@ const u32 b43_ntab_framestruct[] = {
1645 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1646}; 1646};
1647 1647
1648const u32 b43_ntab_gainctl0[] = { 1648static const u32 b43_ntab_gainctl0[] = {
1649 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 1649 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
1650 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 1650 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
1651 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 1651 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1680,7 +1680,7 @@ const u32 b43_ntab_gainctl0[] = {
1680 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 1680 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
1681}; 1681};
1682 1682
1683const u32 b43_ntab_gainctl1[] = { 1683static const u32 b43_ntab_gainctl1[] = {
1684 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 1684 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
1685 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 1685 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
1686 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 1686 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1715,12 +1715,12 @@ const u32 b43_ntab_gainctl1[] = {
1715 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 1715 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
1716}; 1716};
1717 1717
1718const u32 b43_ntab_intlevel[] = { 1718static const u32 b43_ntab_intlevel[] = {
1719 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46, 1719 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46,
1720 0x00C1188D, 0x080024D2, 0x00000070, 1720 0x00C1188D, 0x080024D2, 0x00000070,
1721}; 1721};
1722 1722
1723const u32 b43_ntab_iqlt0[] = { 1723static const u32 b43_ntab_iqlt0[] = {
1724 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1724 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1725 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1725 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1726 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1726 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1755,7 +1755,7 @@ const u32 b43_ntab_iqlt0[] = {
1755 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1755 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1756}; 1756};
1757 1757
1758const u32 b43_ntab_iqlt1[] = { 1758static const u32 b43_ntab_iqlt1[] = {
1759 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1759 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1760 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1760 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1761 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1761 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1790,7 +1790,7 @@ const u32 b43_ntab_iqlt1[] = {
1790 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1790 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1791}; 1791};
1792 1792
1793const u16 b43_ntab_loftlt0[] = { 1793static const u16 b43_ntab_loftlt0[] = {
1794 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1794 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
1795 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 1795 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
1796 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1796 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1815,7 +1815,7 @@ const u16 b43_ntab_loftlt0[] = {
1815 0x0002, 0x0103, 1815 0x0002, 0x0103,
1816}; 1816};
1817 1817
1818const u16 b43_ntab_loftlt1[] = { 1818static const u16 b43_ntab_loftlt1[] = {
1819 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1819 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
1820 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 1820 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
1821 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1821 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1840,7 +1840,7 @@ const u16 b43_ntab_loftlt1[] = {
1840 0x0002, 0x0103, 1840 0x0002, 0x0103,
1841}; 1841};
1842 1842
1843const u8 b43_ntab_mcs[] = { 1843static const u8 b43_ntab_mcs[] = {
1844 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C, 1844 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C,
1845 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C, 1845 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C,
1846 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C, 1846 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C,
@@ -1859,7 +1859,7 @@ const u8 b43_ntab_mcs[] = {
1859 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1859 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1860}; 1860};
1861 1861
1862const u32 b43_ntab_noisevar10[] = { 1862static const u32 b43_ntab_noisevar10[] = {
1863 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1863 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1864 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1864 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1865 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1865 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1926,7 +1926,7 @@ const u32 b43_ntab_noisevar10[] = {
1926 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1926 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1927}; 1927};
1928 1928
1929const u32 b43_ntab_noisevar11[] = { 1929static const u32 b43_ntab_noisevar11[] = {
1930 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1930 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1931 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1931 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1932 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1932 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1993,7 +1993,7 @@ const u32 b43_ntab_noisevar11[] = {
1993 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1993 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1994}; 1994};
1995 1995
1996const u16 b43_ntab_pilot[] = { 1996static const u16 b43_ntab_pilot[] = {
1997 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 1997 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08,
1998 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 1998 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5,
1999 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82, 1999 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82,
@@ -2011,12 +2011,12 @@ const u16 b43_ntab_pilot[] = {
2011 0xF0A0, 0xF028, 0xFFFF, 0xFFFF, 2011 0xF0A0, 0xF028, 0xFFFF, 0xFFFF,
2012}; 2012};
2013 2013
2014const u32 b43_ntab_pilotlt[] = { 2014static const u32 b43_ntab_pilotlt[] = {
2015 0x76540123, 0x62407351, 0x76543201, 0x76540213, 2015 0x76540123, 0x62407351, 0x76543201, 0x76540213,
2016 0x76540123, 0x76430521, 2016 0x76540123, 0x76430521,
2017}; 2017};
2018 2018
2019const u32 b43_ntab_tdi20a0[] = { 2019static const u32 b43_ntab_tdi20a0[] = {
2020 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0, 2020 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0,
2021 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D, 2021 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D,
2022 0x00020301, 0x00030504, 0x00040708, 0x0005090B, 2022 0x00020301, 0x00030504, 0x00040708, 0x0005090B,
@@ -2033,7 +2033,7 @@ const u32 b43_ntab_tdi20a0[] = {
2033 0x00000000, 0x00000000, 0x00000000, 2033 0x00000000, 0x00000000, 0x00000000,
2034}; 2034};
2035 2035
2036const u32 b43_ntab_tdi20a1[] = { 2036static const u32 b43_ntab_tdi20a1[] = {
2037 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630, 2037 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630,
2038 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D, 2038 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D,
2039 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B, 2039 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B,
@@ -2050,7 +2050,7 @@ const u32 b43_ntab_tdi20a1[] = {
2050 0x00000000, 0x00000000, 0x00000000, 2050 0x00000000, 0x00000000, 0x00000000,
2051}; 2051};
2052 2052
2053const u32 b43_ntab_tdi40a0[] = { 2053static const u32 b43_ntab_tdi40a0[] = {
2054 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2, 2054 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2,
2055 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C, 2055 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C,
2056 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2, 2056 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2,
@@ -2081,7 +2081,7 @@ const u32 b43_ntab_tdi40a0[] = {
2081 0x00000000, 0x00000000, 2081 0x00000000, 0x00000000,
2082}; 2082};
2083 2083
2084const u32 b43_ntab_tdi40a1[] = { 2084static const u32 b43_ntab_tdi40a1[] = {
2085 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD, 2085 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD,
2086 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07, 2086 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07,
2087 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D, 2087 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D,
@@ -2112,7 +2112,7 @@ const u32 b43_ntab_tdi40a1[] = {
2112 0x00000000, 0x00000000, 2112 0x00000000, 0x00000000,
2113}; 2113};
2114 2114
2115const u32 b43_ntab_tdtrn[] = { 2115static const u32 b43_ntab_tdtrn[] = {
2116 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6, 2116 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6,
2117 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68, 2117 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68,
2118 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52, 2118 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52,
@@ -2291,7 +2291,7 @@ const u32 b43_ntab_tdtrn[] = {
2291 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE, 2291 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE,
2292}; 2292};
2293 2293
2294const u32 b43_ntab_tmap[] = { 2294static const u32 b43_ntab_tmap[] = {
2295 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 2295 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888,
2296 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 2296 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8,
2297 0xF1111110, 0x11111111, 0x11F11111, 0x00000111, 2297 0xF1111110, 0x11111111, 0x11F11111, 0x00000111,
@@ -2406,6 +2406,483 @@ const u32 b43_ntab_tmap[] = {
2406 0x00000000, 0x00000000, 0x00000000, 0x00000000, 2406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2407}; 2407};
2408 2408
2409const u32 b43_ntab_tx_gain_rev0_1_2[] = {
2410 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
2411 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
2412 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
2413 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44,
2414 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844,
2415 0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644,
2416 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444,
2417 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44,
2418 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844,
2419 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44,
2420 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944,
2421 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744,
2422 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544,
2423 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44,
2424 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844,
2425 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44,
2426 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944,
2427 0x03902942, 0x03902844, 0x03902842, 0x03902744,
2428 0x03902742, 0x03902644, 0x03902642, 0x03902544,
2429 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44,
2430 0x03802a42, 0x03802944, 0x03802942, 0x03802844,
2431 0x03802842, 0x03802744, 0x03802742, 0x03802644,
2432 0x03802642, 0x03802544, 0x03802542, 0x03802444,
2433 0x03802442, 0x03802344, 0x03802342, 0x03802244,
2434 0x03802242, 0x03802144, 0x03802142, 0x03802044,
2435 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44,
2436 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44,
2437 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44,
2438 0x03801a42, 0x03801944, 0x03801942, 0x03801844,
2439 0x03801842, 0x03801744, 0x03801742, 0x03801644,
2440 0x03801642, 0x03801544, 0x03801542, 0x03801444,
2441 0x03801442, 0x03801344, 0x03801342, 0x00002b00,
2442};
2443
2444const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
2445 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
2446 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
2447 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
2448 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037,
2449 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e,
2450 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037,
2451 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e,
2452 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037,
2453 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e,
2454 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037,
2455 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e,
2456 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037,
2457 0x19410044, 0x19410042, 0x19410040, 0x1941003e,
2458 0x1941003c, 0x1941003b, 0x19410039, 0x19410037,
2459 0x18410044, 0x18410042, 0x18410040, 0x1841003e,
2460 0x1841003c, 0x1841003b, 0x18410039, 0x18410037,
2461 0x17410044, 0x17410042, 0x17410040, 0x1741003e,
2462 0x1741003c, 0x1741003b, 0x17410039, 0x17410037,
2463 0x16410044, 0x16410042, 0x16410040, 0x1641003e,
2464 0x1641003c, 0x1641003b, 0x16410039, 0x16410037,
2465 0x15410044, 0x15410042, 0x15410040, 0x1541003e,
2466 0x1541003c, 0x1541003b, 0x15410039, 0x15410037,
2467 0x14410044, 0x14410042, 0x14410040, 0x1441003e,
2468 0x1441003c, 0x1441003b, 0x14410039, 0x14410037,
2469 0x13410044, 0x13410042, 0x13410040, 0x1341003e,
2470 0x1341003c, 0x1341003b, 0x13410039, 0x13410037,
2471 0x12410044, 0x12410042, 0x12410040, 0x1241003e,
2472 0x1241003c, 0x1241003b, 0x12410039, 0x12410037,
2473 0x11410044, 0x11410042, 0x11410040, 0x1141003e,
2474 0x1141003c, 0x1141003b, 0x11410039, 0x11410037,
2475 0x10410044, 0x10410042, 0x10410040, 0x1041003e,
2476 0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
2477};
2478
2479const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
2480 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
2481 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
2482 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
2483 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037,
2484 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e,
2485 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037,
2486 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e,
2487 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037,
2488 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e,
2489 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037,
2490 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e,
2491 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037,
2492 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e,
2493 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037,
2494 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e,
2495 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037,
2496 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e,
2497 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037,
2498 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e,
2499 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037,
2500 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e,
2501 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037,
2502 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e,
2503 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037,
2504 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e,
2505 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037,
2506 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e,
2507 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037,
2508 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e,
2509 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037,
2510 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e,
2511 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
2512};
2513
2514const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
2515 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
2516 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
2517 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
2518 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037,
2519 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e,
2520 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037,
2521 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e,
2522 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037,
2523 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e,
2524 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037,
2525 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e,
2526 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037,
2527 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e,
2528 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037,
2529 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e,
2530 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037,
2531 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e,
2532 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037,
2533 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e,
2534 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037,
2535 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e,
2536 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037,
2537 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e,
2538 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038,
2539 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e,
2540 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037,
2541 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e,
2542 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037,
2543 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e,
2544 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037,
2545 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c,
2546 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
2547};
2548
2549const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
2550 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
2551 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
2552 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
2553 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a,
2554 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e,
2555 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a,
2556 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e,
2557 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037,
2558 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040,
2559 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a,
2560 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c,
2561 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038,
2562 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b,
2563 0x09620039, 0x09620037, 0x09620035, 0x09620033,
2564 0x08620044, 0x08620042, 0x08620040, 0x0862003e,
2565 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039,
2566 0x07620043, 0x07620042, 0x07620040, 0x0762003f,
2567 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039,
2568 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b,
2569 0x06620039, 0x06620037, 0x06620035, 0x06620033,
2570 0x05620046, 0x05620044, 0x05620042, 0x05620040,
2571 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039,
2572 0x04620044, 0x04620042, 0x04620040, 0x0462003e,
2573 0x0462003c, 0x0462003b, 0x04620039, 0x04620038,
2574 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039,
2575 0x03620038, 0x03620037, 0x03620035, 0x03620033,
2576 0x0262004c, 0x0262004a, 0x02620048, 0x02620047,
2577 0x02620046, 0x02620044, 0x02620043, 0x02620042,
2578 0x0162004a, 0x01620048, 0x01620046, 0x01620044,
2579 0x01620043, 0x01620042, 0x01620041, 0x01620040,
2580 0x00620042, 0x00620040, 0x0062003e, 0x0062003c,
2581 0x0062003b, 0x00620039, 0x00620037, 0x00620035,
2582};
2583
2584const u32 txpwrctrl_tx_gain_ipa[] = {
2585 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
2586 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
2587 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
2588 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025,
2589 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029,
2590 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025,
2591 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029,
2592 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025,
2593 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029,
2594 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025,
2595 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029,
2596 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025,
2597 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029,
2598 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025,
2599 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029,
2600 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025,
2601 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029,
2602 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025,
2603 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029,
2604 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025,
2605 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029,
2606 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025,
2607 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029,
2608 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025,
2609 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029,
2610 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025,
2611 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029,
2612 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025,
2613 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029,
2614 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025,
2615 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029,
2616 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
2617};
2618
2619const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
2620 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
2621 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
2622 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
2623 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025,
2624 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029,
2625 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025,
2626 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029,
2627 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025,
2628 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029,
2629 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025,
2630 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029,
2631 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025,
2632 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029,
2633 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025,
2634 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029,
2635 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025,
2636 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029,
2637 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025,
2638 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029,
2639 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025,
2640 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029,
2641 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025,
2642 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029,
2643 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025,
2644 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029,
2645 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025,
2646 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029,
2647 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025,
2648 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029,
2649 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025,
2650 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029,
2651 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
2652};
2653
2654const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
2655 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
2656 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
2657 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
2658 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025,
2659 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029,
2660 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025,
2661 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029,
2662 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025,
2663 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029,
2664 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025,
2665 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029,
2666 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025,
2667 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029,
2668 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025,
2669 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029,
2670 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025,
2671 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029,
2672 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025,
2673 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029,
2674 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025,
2675 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029,
2676 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025,
2677 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029,
2678 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025,
2679 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029,
2680 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025,
2681 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029,
2682 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025,
2683 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029,
2684 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025,
2685 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029,
2686 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
2687};
2688
2689const u32 txpwrctrl_tx_gain_ipa_5g[] = {
2690 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
2691 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
2692 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
2693 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022,
2694 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025,
2695 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027,
2696 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023,
2697 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027,
2698 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022,
2699 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025,
2700 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021,
2701 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026,
2702 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022,
2703 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026,
2704 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022,
2705 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026,
2706 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022,
2707 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026,
2708 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022,
2709 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026,
2710 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021,
2711 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026,
2712 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029,
2713 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024,
2714 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027,
2715 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023,
2716 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026,
2717 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022,
2718 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025,
2719 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027,
2720 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022,
2721 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
2722};
2723
2724const u16 tbl_iqcal_gainparams[2][9][8] = {
2725 {
2726 { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
2727 { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 },
2728 { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 },
2729 { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 },
2730 { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 },
2731 { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 },
2732 { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 },
2733 { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 },
2734 { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 }
2735 },
2736 {
2737 { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
2738 { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
2739 { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 },
2740 { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 },
2741 { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 },
2742 { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 },
2743 { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 },
2744 { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 },
2745 { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 }
2746 }
2747};
2748
2749const struct nphy_txiqcal_ladder ladder_lo[] = {
2750 { 3, 0 },
2751 { 4, 0 },
2752 { 6, 0 },
2753 { 9, 0 },
2754 { 13, 0 },
2755 { 18, 0 },
2756 { 25, 0 },
2757 { 25, 1 },
2758 { 25, 2 },
2759 { 25, 3 },
2760 { 25, 4 },
2761 { 25, 5 },
2762 { 25, 6 },
2763 { 25, 7 },
2764 { 35, 7 },
2765 { 50, 7 },
2766 { 71, 7 },
2767 { 100, 7 }
2768};
2769
2770const struct nphy_txiqcal_ladder ladder_iq[] = {
2771 { 3, 0 },
2772 { 4, 0 },
2773 { 6, 0 },
2774 { 9, 0 },
2775 { 13, 0 },
2776 { 18, 0 },
2777 { 25, 0 },
2778 { 35, 0 },
2779 { 50, 0 },
2780 { 71, 0 },
2781 { 100, 0 },
2782 { 100, 1 },
2783 { 100, 2 },
2784 { 100, 3 },
2785 { 100, 4 },
2786 { 100, 5 },
2787 { 100, 6 },
2788 { 100, 7 }
2789};
2790
2791const u16 loscale[] = {
2792 256, 256, 271, 271,
2793 287, 256, 256, 271,
2794 271, 287, 287, 304,
2795 304, 256, 256, 271,
2796 271, 287, 287, 304,
2797 304, 322, 322, 341,
2798 341, 362, 362, 383,
2799 383, 256, 256, 271,
2800 271, 287, 287, 304,
2801 304, 322, 322, 256,
2802 256, 271, 271, 287,
2803 287, 304, 304, 322,
2804 322, 341, 341, 362,
2805 362, 256, 256, 271,
2806 271, 287, 287, 304,
2807 304, 322, 322, 256,
2808 256, 271, 271, 287,
2809 287, 304, 304, 322,
2810 322, 341, 341, 362,
2811 362, 256, 256, 271,
2812 271, 287, 287, 304,
2813 304, 322, 322, 341,
2814 341, 362, 362, 383,
2815 383, 406, 406, 430,
2816 430, 455, 455, 482,
2817 482, 511, 511, 541,
2818 541, 573, 573, 607,
2819 607, 643, 643, 681,
2820 681, 722, 722, 764,
2821 764, 810, 810, 858,
2822 858, 908, 908, 962,
2823 962, 1019, 1019, 256
2824};
2825
2826const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
2827 0x0200, 0x0300, 0x0400, 0x0700,
2828 0x0900, 0x0c00, 0x1200, 0x1201,
2829 0x1202, 0x1203, 0x1204, 0x1205,
2830 0x1206, 0x1207, 0x1907, 0x2307,
2831 0x3207, 0x4707
2832};
2833
2834const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
2835 0x0300, 0x0500, 0x0700, 0x0900,
2836 0x0d00, 0x1100, 0x1900, 0x1901,
2837 0x1902, 0x1903, 0x1904, 0x1905,
2838 0x1906, 0x1907, 0x2407, 0x3207,
2839 0x4607, 0x6407
2840};
2841
2842const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
2843 0x0100, 0x0200, 0x0400, 0x0700,
2844 0x0900, 0x0c00, 0x1200, 0x1900,
2845 0x2300, 0x3200, 0x4700, 0x4701,
2846 0x4702, 0x4703, 0x4704, 0x4705,
2847 0x4706, 0x4707
2848};
2849
2850const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
2851 0x0200, 0x0300, 0x0600, 0x0900,
2852 0x0d00, 0x1100, 0x1900, 0x2400,
2853 0x3200, 0x4600, 0x6400, 0x6401,
2854 0x6402, 0x6403, 0x6404, 0x6405,
2855 0x6406, 0x6407
2856};
2857
2858const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { };
2859
2860const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { };
2861
2862const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
2863 0x8423, 0x8323, 0x8073, 0x8256,
2864 0x8045, 0x8223, 0x9423, 0x9323,
2865 0x9073, 0x9256, 0x9045, 0x9223
2866};
2867
2868const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
2869 0x8101, 0x8253, 0x8053, 0x8234,
2870 0x8034, 0x9101, 0x9253, 0x9053,
2871 0x9234, 0x9034
2872};
2873
2874const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
2875 0x8123, 0x8264, 0x8086, 0x8245,
2876 0x8056, 0x9123, 0x9264, 0x9086,
2877 0x9245, 0x9056
2878};
2879
2880const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
2881 0x8434, 0x8334, 0x8084, 0x8267,
2882 0x8056, 0x8234, 0x9434, 0x9334,
2883 0x9084, 0x9267, 0x9056, 0x9234
2884};
2885
2409static inline void assert_ntab_array_sizes(void) 2886static inline void assert_ntab_array_sizes(void)
2410{ 2887{
2411#undef check 2888#undef check
@@ -2474,3 +2951,51 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
2474 /* Some compiletime assertions... */ 2951 /* Some compiletime assertions... */
2475 assert_ntab_array_sizes(); 2952 assert_ntab_array_sizes();
2476} 2953}
2954
2955#define ntab_upload(dev, offset, data) do { \
2956 unsigned int i; \
2957 for (i = 0; i < (offset##_SIZE); i++) \
2958 b43_ntab_write(dev, (offset) + i, (data)[i]); \
2959 } while (0)
2960
2961void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
2962{
2963 /* Static tables */
2964 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
2965 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
2966 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
2967 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
2968 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
2969 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
2970 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
2971 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
2972 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
2973 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
2974 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
2975 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
2976 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
2977 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
2978
2979 /* Volatile tables */
2980 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
2981 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
2982 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
2983 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
2984 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
2985 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
2986 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
2987 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
2988 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
2989 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
2990 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
2991 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
2992}
2993
2994void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
2995{
2996 /* Static tables */
2997 /* TODO */
2998
2999 /* Volatile tables */
3000 /* TODO */
3001}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4d498b053ec..51636d02f8b 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -46,6 +46,11 @@ struct b43_nphy_channeltab_entry {
46 46
47struct b43_wldev; 47struct b43_wldev;
48 48
49struct nphy_txiqcal_ladder {
50 u8 percent;
51 u8 g_env;
52};
53
49/* Upload the default register value table. 54/* Upload the default register value table.
50 * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz 55 * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
51 * table is uploaded. If "ignore_uploadflag" is true, we upload any value 56 * table is uploaded. If "ignore_uploadflag" is true, we upload any value
@@ -126,34 +131,46 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
126#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ 131#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
127#define B43_NTAB_C1_LOFEEDTH_SIZE 128 132#define B43_NTAB_C1_LOFEEDTH_SIZE 128
128 133
129void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value); 134#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
135#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
136#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
137#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18
138#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11
139#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9
140#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12
141#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10
142#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10
143#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12
130 144
131extern const u8 b43_ntab_adjustpower0[]; 145void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
132extern const u8 b43_ntab_adjustpower1[];
133extern const u16 b43_ntab_bdi[];
134extern const u32 b43_ntab_channelest[];
135extern const u8 b43_ntab_estimatepowerlt0[];
136extern const u8 b43_ntab_estimatepowerlt1[];
137extern const u8 b43_ntab_framelookup[];
138extern const u32 b43_ntab_framestruct[];
139extern const u32 b43_ntab_gainctl0[];
140extern const u32 b43_ntab_gainctl1[];
141extern const u32 b43_ntab_intlevel[];
142extern const u32 b43_ntab_iqlt0[];
143extern const u32 b43_ntab_iqlt1[];
144extern const u16 b43_ntab_loftlt0[];
145extern const u16 b43_ntab_loftlt1[];
146extern const u8 b43_ntab_mcs[];
147extern const u32 b43_ntab_noisevar10[];
148extern const u32 b43_ntab_noisevar11[];
149extern const u16 b43_ntab_pilot[];
150extern const u32 b43_ntab_pilotlt[];
151extern const u32 b43_ntab_tdi20a0[];
152extern const u32 b43_ntab_tdi20a1[];
153extern const u32 b43_ntab_tdi40a0[];
154extern const u32 b43_ntab_tdi40a1[];
155extern const u32 b43_ntab_tdtrn[];
156extern const u32 b43_ntab_tmap[];
157 146
147void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
148void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
149
150extern const u32 b43_ntab_tx_gain_rev0_1_2[];
151extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
152extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
153extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
154extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
155
156extern const u32 txpwrctrl_tx_gain_ipa[];
157extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
158extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
159extern const u32 txpwrctrl_tx_gain_ipa_5g[];
160extern const u16 tbl_iqcal_gainparams[2][9][8];
161extern const struct nphy_txiqcal_ladder ladder_lo[];
162extern const struct nphy_txiqcal_ladder ladder_iq[];
163extern const u16 loscale[];
164
165extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[];
166extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[];
167extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[];
168extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[];
169extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[];
170extern const u16 tbl_tx_iqlo_cal_startcoefs[];
171extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[];
172extern const u16 tbl_tx_iqlo_cal_cmds_recal[];
173extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[];
174extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[];
158 175
159#endif /* B43_TABLES_NPHY_H_ */ 176#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4a905b6a886..874a64a6c61 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -61,6 +61,8 @@ MODULE_AUTHOR("Michael Buesch");
61MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
62 62
63MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID); 63MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
64MODULE_FIRMWARE("b43legacy/ucode2.fw");
65MODULE_FIRMWARE("b43legacy/ucode4.fw");
64 66
65#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) 67#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
66static int modparam_pio; 68static int modparam_pio;
@@ -3361,7 +3363,7 @@ err_kfree_lo_control:
3361} 3363}
3362 3364
3363static int b43legacy_op_add_interface(struct ieee80211_hw *hw, 3365static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3364 struct ieee80211_if_init_conf *conf) 3366 struct ieee80211_vif *vif)
3365{ 3367{
3366 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3368 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3367 struct b43legacy_wldev *dev; 3369 struct b43legacy_wldev *dev;
@@ -3370,23 +3372,23 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3370 3372
3371 /* TODO: allow WDS/AP devices to coexist */ 3373 /* TODO: allow WDS/AP devices to coexist */
3372 3374
3373 if (conf->type != NL80211_IFTYPE_AP && 3375 if (vif->type != NL80211_IFTYPE_AP &&
3374 conf->type != NL80211_IFTYPE_STATION && 3376 vif->type != NL80211_IFTYPE_STATION &&
3375 conf->type != NL80211_IFTYPE_WDS && 3377 vif->type != NL80211_IFTYPE_WDS &&
3376 conf->type != NL80211_IFTYPE_ADHOC) 3378 vif->type != NL80211_IFTYPE_ADHOC)
3377 return -EOPNOTSUPP; 3379 return -EOPNOTSUPP;
3378 3380
3379 mutex_lock(&wl->mutex); 3381 mutex_lock(&wl->mutex);
3380 if (wl->operating) 3382 if (wl->operating)
3381 goto out_mutex_unlock; 3383 goto out_mutex_unlock;
3382 3384
3383 b43legacydbg(wl, "Adding Interface type %d\n", conf->type); 3385 b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
3384 3386
3385 dev = wl->current_dev; 3387 dev = wl->current_dev;
3386 wl->operating = 1; 3388 wl->operating = 1;
3387 wl->vif = conf->vif; 3389 wl->vif = vif;
3388 wl->if_type = conf->type; 3390 wl->if_type = vif->type;
3389 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 3391 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
3390 3392
3391 spin_lock_irqsave(&wl->irq_lock, flags); 3393 spin_lock_irqsave(&wl->irq_lock, flags);
3392 b43legacy_adjust_opmode(dev); 3394 b43legacy_adjust_opmode(dev);
@@ -3403,18 +3405,18 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3403} 3405}
3404 3406
3405static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, 3407static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
3406 struct ieee80211_if_init_conf *conf) 3408 struct ieee80211_vif *vif)
3407{ 3409{
3408 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3410 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3409 struct b43legacy_wldev *dev = wl->current_dev; 3411 struct b43legacy_wldev *dev = wl->current_dev;
3410 unsigned long flags; 3412 unsigned long flags;
3411 3413
3412 b43legacydbg(wl, "Removing Interface type %d\n", conf->type); 3414 b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
3413 3415
3414 mutex_lock(&wl->mutex); 3416 mutex_lock(&wl->mutex);
3415 3417
3416 B43legacy_WARN_ON(!wl->operating); 3418 B43legacy_WARN_ON(!wl->operating);
3417 B43legacy_WARN_ON(wl->vif != conf->vif); 3419 B43legacy_WARN_ON(wl->vif != vif);
3418 wl->vif = NULL; 3420 wl->vif = NULL;
3419 3421
3420 wl->operating = 0; 3422 wl->operating = 0;
@@ -3960,7 +3962,7 @@ static struct ssb_driver b43legacy_ssb_driver = {
3960 3962
3961static void b43legacy_print_driverinfo(void) 3963static void b43legacy_print_driverinfo(void)
3962{ 3964{
3963 const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "", 3965 const char *feat_pci = "", *feat_leds = "",
3964 *feat_pio = "", *feat_dma = ""; 3966 *feat_pio = "", *feat_dma = "";
3965 3967
3966#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT 3968#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
@@ -3969,9 +3971,6 @@ static void b43legacy_print_driverinfo(void)
3969#ifdef CONFIG_B43LEGACY_LEDS 3971#ifdef CONFIG_B43LEGACY_LEDS
3970 feat_leds = "L"; 3972 feat_leds = "L";
3971#endif 3973#endif
3972#ifdef CONFIG_B43LEGACY_RFKILL
3973 feat_rfkill = "R";
3974#endif
3975#ifdef CONFIG_B43LEGACY_PIO 3974#ifdef CONFIG_B43LEGACY_PIO
3976 feat_pio = "I"; 3975 feat_pio = "I";
3977#endif 3976#endif
@@ -3979,9 +3978,9 @@ static void b43legacy_print_driverinfo(void)
3979 feat_dma = "D"; 3978 feat_dma = "D";
3980#endif 3979#endif
3981 printk(KERN_INFO "Broadcom 43xx-legacy driver loaded " 3980 printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
3982 "[ Features: %s%s%s%s%s, Firmware-ID: " 3981 "[ Features: %s%s%s%s, Firmware-ID: "
3983 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n", 3982 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
3984 feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma); 3983 feat_pci, feat_leds, feat_pio, feat_dma);
3985} 3984}
3986 3985
3987static int __init b43legacy_init(void) 3986static int __init b43legacy_init(void)
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c88218..d7073281942 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2618,6 +2618,15 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
2618 int events = 0; 2618 int events = 0;
2619 u16 ev; 2619 u16 ev;
2620 2620
2621 /* Detect early interrupt before driver is fully configued */
2622 if (!dev->base_addr) {
2623 if (net_ratelimit()) {
2624 printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
2625 dev->name);
2626 }
2627 return IRQ_HANDLED;
2628 }
2629
2621 iface = netdev_priv(dev); 2630 iface = netdev_priv(dev);
2622 local = iface->local; 2631 local = iface->local;
2623 2632
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f..4d97ae37499 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -39,7 +39,7 @@ struct hostap_pci_priv {
39/* FIX: do we need mb/wmb/rmb with memory operations? */ 39/* FIX: do we need mb/wmb/rmb with memory operations? */
40 40
41 41
42static struct pci_device_id prism2_pci_id_table[] __devinitdata = { 42static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
43 /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */ 43 /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
44 { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID }, 44 { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
45 /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */ 45 /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a4..fc04ccdc5be 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -60,7 +60,7 @@ struct hostap_plx_priv {
60 60
61#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID } 61#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
62 62
63static struct pci_device_id prism2_plx_id_table[] __devinitdata = { 63static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
64 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), 64 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
65 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), 65 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
66 PLXDEV(0x126c, 0x8030, "Nortel emobility"), 66 PLXDEV(0x126c, 0x8030, "Nortel emobility"),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f8..9b72c45a774 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
6585 6585
6586#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x } 6586#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
6587 6587
6588static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = { 6588static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
6589 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */ 6589 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
6590 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */ 6590 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
6591 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */ 6591 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bed..63c2a7ade5f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11524,7 +11524,7 @@ out:
11524} 11524}
11525 11525
11526/* PCI driver stuff */ 11526/* PCI driver stuff */
11527static struct pci_device_id card_ids[] = { 11527static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11528 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, 11528 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11529 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, 11529 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11530 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, 11530 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8414178bcff..0db1fda94a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -105,6 +105,7 @@ static struct iwl_lib_ops iwl1000_lib = {
105 .load_ucode = iwl5000_load_ucode, 105 .load_ucode = iwl5000_load_ucode,
106 .dump_nic_event_log = iwl_dump_nic_event_log, 106 .dump_nic_event_log = iwl_dump_nic_event_log,
107 .dump_nic_error_log = iwl_dump_nic_error_log, 107 .dump_nic_error_log = iwl_dump_nic_error_log,
108 .dump_csr = iwl_dump_csr,
108 .init_alive_start = iwl5000_init_alive_start, 109 .init_alive_start = iwl5000_init_alive_start,
109 .alive_notify = iwl5000_alive_notify, 110 .alive_notify = iwl5000_alive_notify,
110 .send_tx_power = iwl5000_send_tx_power, 111 .send_tx_power = iwl5000_send_tx_power,
@@ -140,7 +141,7 @@ static struct iwl_lib_ops iwl1000_lib = {
140 }, 141 },
141}; 142};
142 143
143static struct iwl_ops iwl1000_ops = { 144static const struct iwl_ops iwl1000_ops = {
144 .ucode = &iwl5000_ucode, 145 .ucode = &iwl5000_ucode,
145 .lib = &iwl1000_lib, 146 .lib = &iwl1000_lib,
146 .hcmd = &iwl5000_hcmd, 147 .hcmd = &iwl5000_hcmd,
@@ -173,7 +174,6 @@ struct iwl_cfg iwl1000_bgn_cfg = {
173 .use_rts_for_ht = true, /* use rts/cts protection */ 174 .use_rts_for_ht = true, /* use rts/cts protection */
174 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 175 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
175 .support_ct_kill_exit = true, 176 .support_ct_kill_exit = true,
176 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
177}; 177};
178 178
179struct iwl_cfg iwl1000_bg_cfg = { 179struct iwl_cfg iwl1000_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 234891d8cc1..6cde661ce0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2804,7 +2804,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2804 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2804 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2805}; 2805};
2806 2806
2807static struct iwl_ops iwl3945_ops = { 2807static const struct iwl_ops iwl3945_ops = {
2808 .ucode = &iwl3945_ucode, 2808 .ucode = &iwl3945_ucode,
2809 .lib = &iwl3945_lib, 2809 .lib = &iwl3945_lib,
2810 .hcmd = &iwl3945_hcmd, 2810 .hcmd = &iwl3945_hcmd,
@@ -2849,7 +2849,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2849 .broken_powersave = true, 2849 .broken_powersave = true,
2850}; 2850};
2851 2851
2852struct pci_device_id iwl3945_hw_card_ids[] = { 2852DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
2853 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, 2853 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
2854 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, 2854 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
2855 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, 2855 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 531fa125f5a..bc532ff4f88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -37,7 +37,7 @@
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39/* Hardware specific file defines the PCI IDs table for that hardware module */ 39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern struct pci_device_id iwl3945_hw_card_ids[]; 40extern const struct pci_device_id iwl3945_hw_card_ids[];
41 41
42#include "iwl-csr.h" 42#include "iwl-csr.h"
43#include "iwl-prph.h" 43#include "iwl-prph.h"
@@ -226,7 +226,8 @@ extern void iwl3945_rx_replenish(void *data);
226extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 226extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
227extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 227extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
228 struct ieee80211_hdr *hdr,int left); 228 struct ieee80211_hdr *hdr,int left);
229extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log); 229extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
230 char **buf, bool display);
230extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 231extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
231 232
232/* 233/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9b4b8b5c757..6a004abb597 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2208,7 +2208,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2208 }, 2208 },
2209}; 2209};
2210 2210
2211static struct iwl_ops iwl4965_ops = { 2211static const struct iwl_ops iwl4965_ops = {
2212 .ucode = &iwl4965_ucode, 2212 .ucode = &iwl4965_ucode,
2213 .lib = &iwl4965_lib, 2213 .lib = &iwl4965_lib,
2214 .hcmd = &iwl4965_hcmd, 2214 .hcmd = &iwl4965_hcmd,
@@ -2239,7 +2239,6 @@ struct iwl_cfg iwl4965_agn_cfg = {
2239 .broken_powersave = true, 2239 .broken_powersave = true,
2240 .led_compensation = 61, 2240 .led_compensation = 61,
2241 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2241 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2242 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
2243}; 2242};
2244 2243
2245/* Module firmware */ 2244/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de45f308b74..c6120f0b8f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -781,7 +781,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
781 781
782 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 782 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
783 783
784 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP) 784 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
785 scd_bc_tbl[txq_id]. 785 scd_bc_tbl[txq_id].
786 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 786 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
787} 787}
@@ -800,12 +800,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
800 if (txq_id != IWL_CMD_QUEUE_NUM) 800 if (txq_id != IWL_CMD_QUEUE_NUM)
801 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; 801 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
802 802
803 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 803 bc_ent = cpu_to_le16(1 | (sta_id << 12));
804 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 804 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
805 805
806 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP) 806 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
807 scd_bc_tbl[txq_id]. 807 scd_bc_tbl[txq_id].
808 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 808 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
809} 809}
810 810
811static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, 811static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1466,6 +1466,7 @@ struct iwl_lib_ops iwl5000_lib = {
1466 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1466 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1467 .dump_nic_event_log = iwl_dump_nic_event_log, 1467 .dump_nic_event_log = iwl_dump_nic_event_log,
1468 .dump_nic_error_log = iwl_dump_nic_error_log, 1468 .dump_nic_error_log = iwl_dump_nic_error_log,
1469 .dump_csr = iwl_dump_csr,
1469 .load_ucode = iwl5000_load_ucode, 1470 .load_ucode = iwl5000_load_ucode,
1470 .init_alive_start = iwl5000_init_alive_start, 1471 .init_alive_start = iwl5000_init_alive_start,
1471 .alive_notify = iwl5000_alive_notify, 1472 .alive_notify = iwl5000_alive_notify,
@@ -1518,6 +1519,7 @@ static struct iwl_lib_ops iwl5150_lib = {
1518 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1519 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1519 .dump_nic_event_log = iwl_dump_nic_event_log, 1520 .dump_nic_event_log = iwl_dump_nic_event_log,
1520 .dump_nic_error_log = iwl_dump_nic_error_log, 1521 .dump_nic_error_log = iwl_dump_nic_error_log,
1522 .dump_csr = iwl_dump_csr,
1521 .load_ucode = iwl5000_load_ucode, 1523 .load_ucode = iwl5000_load_ucode,
1522 .init_alive_start = iwl5000_init_alive_start, 1524 .init_alive_start = iwl5000_init_alive_start,
1523 .alive_notify = iwl5000_alive_notify, 1525 .alive_notify = iwl5000_alive_notify,
@@ -1555,7 +1557,7 @@ static struct iwl_lib_ops iwl5150_lib = {
1555 }, 1557 },
1556}; 1558};
1557 1559
1558static struct iwl_ops iwl5000_ops = { 1560static const struct iwl_ops iwl5000_ops = {
1559 .ucode = &iwl5000_ucode, 1561 .ucode = &iwl5000_ucode,
1560 .lib = &iwl5000_lib, 1562 .lib = &iwl5000_lib,
1561 .hcmd = &iwl5000_hcmd, 1563 .hcmd = &iwl5000_hcmd,
@@ -1563,7 +1565,7 @@ static struct iwl_ops iwl5000_ops = {
1563 .led = &iwlagn_led_ops, 1565 .led = &iwlagn_led_ops,
1564}; 1566};
1565 1567
1566static struct iwl_ops iwl5150_ops = { 1568static const struct iwl_ops iwl5150_ops = {
1567 .ucode = &iwl5000_ucode, 1569 .ucode = &iwl5000_ucode,
1568 .lib = &iwl5150_lib, 1570 .lib = &iwl5150_lib,
1569 .hcmd = &iwl5000_hcmd, 1571 .hcmd = &iwl5000_hcmd,
@@ -1600,7 +1602,6 @@ struct iwl_cfg iwl5300_agn_cfg = {
1600 .led_compensation = 51, 1602 .led_compensation = 51,
1601 .use_rts_for_ht = true, /* use rts/cts protection */ 1603 .use_rts_for_ht = true, /* use rts/cts protection */
1602 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1604 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1603 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1604}; 1605};
1605 1606
1606struct iwl_cfg iwl5100_bgn_cfg = { 1607struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1671,7 +1672,6 @@ struct iwl_cfg iwl5100_agn_cfg = {
1671 .led_compensation = 51, 1672 .led_compensation = 51,
1672 .use_rts_for_ht = true, /* use rts/cts protection */ 1673 .use_rts_for_ht = true, /* use rts/cts protection */
1673 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1674 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1674 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1675}; 1675};
1676 1676
1677struct iwl_cfg iwl5350_agn_cfg = { 1677struct iwl_cfg iwl5350_agn_cfg = {
@@ -1696,7 +1696,6 @@ struct iwl_cfg iwl5350_agn_cfg = {
1696 .led_compensation = 51, 1696 .led_compensation = 51,
1697 .use_rts_for_ht = true, /* use rts/cts protection */ 1697 .use_rts_for_ht = true, /* use rts/cts protection */
1698 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1698 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1699 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1700}; 1699};
1701 1700
1702struct iwl_cfg iwl5150_agn_cfg = { 1701struct iwl_cfg iwl5150_agn_cfg = {
@@ -1721,7 +1720,6 @@ struct iwl_cfg iwl5150_agn_cfg = {
1721 .led_compensation = 51, 1720 .led_compensation = 51,
1722 .use_rts_for_ht = true, /* use rts/cts protection */ 1721 .use_rts_for_ht = true, /* use rts/cts protection */
1723 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1722 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1724 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1725}; 1723};
1726 1724
1727struct iwl_cfg iwl5150_abg_cfg = { 1725struct iwl_cfg iwl5150_abg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 74e57104927..a5a0ed4817a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -215,6 +215,7 @@ static struct iwl_lib_ops iwl6000_lib = {
215 .load_ucode = iwl5000_load_ucode, 215 .load_ucode = iwl5000_load_ucode,
216 .dump_nic_event_log = iwl_dump_nic_event_log, 216 .dump_nic_event_log = iwl_dump_nic_event_log,
217 .dump_nic_error_log = iwl_dump_nic_error_log, 217 .dump_nic_error_log = iwl_dump_nic_error_log,
218 .dump_csr = iwl_dump_csr,
218 .init_alive_start = iwl5000_init_alive_start, 219 .init_alive_start = iwl5000_init_alive_start,
219 .alive_notify = iwl5000_alive_notify, 220 .alive_notify = iwl5000_alive_notify,
220 .send_tx_power = iwl5000_send_tx_power, 221 .send_tx_power = iwl5000_send_tx_power,
@@ -252,7 +253,7 @@ static struct iwl_lib_ops iwl6000_lib = {
252 }, 253 },
253}; 254};
254 255
255static struct iwl_ops iwl6000_ops = { 256static const struct iwl_ops iwl6000_ops = {
256 .ucode = &iwl5000_ucode, 257 .ucode = &iwl5000_ucode,
257 .lib = &iwl6000_lib, 258 .lib = &iwl6000_lib,
258 .hcmd = &iwl5000_hcmd, 259 .hcmd = &iwl5000_hcmd,
@@ -267,7 +268,7 @@ static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
267 .calc_rssi = iwl5000_calc_rssi, 268 .calc_rssi = iwl5000_calc_rssi,
268}; 269};
269 270
270static struct iwl_ops iwl6050_ops = { 271static const struct iwl_ops iwl6050_ops = {
271 .ucode = &iwl5000_ucode, 272 .ucode = &iwl5000_ucode,
272 .lib = &iwl6000_lib, 273 .lib = &iwl6000_lib,
273 .hcmd = &iwl5000_hcmd, 274 .hcmd = &iwl5000_hcmd,
@@ -306,7 +307,6 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
306 .supports_idle = true, 307 .supports_idle = true,
307 .adv_thermal_throttle = true, 308 .adv_thermal_throttle = true,
308 .support_ct_kill_exit = true, 309 .support_ct_kill_exit = true,
309 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
310}; 310};
311 311
312struct iwl_cfg iwl6000i_2abg_cfg = { 312struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -395,7 +395,6 @@ struct iwl_cfg iwl6050_2agn_cfg = {
395 .supports_idle = true, 395 .supports_idle = true,
396 .adv_thermal_throttle = true, 396 .adv_thermal_throttle = true,
397 .support_ct_kill_exit = true, 397 .support_ct_kill_exit = true,
398 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
399}; 398};
400 399
401struct iwl_cfg iwl6050_2abg_cfg = { 400struct iwl_cfg iwl6050_2abg_cfg = {
@@ -455,7 +454,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
455 .supports_idle = true, 454 .supports_idle = true,
456 .adv_thermal_throttle = true, 455 .adv_thermal_throttle = true,
457 .support_ct_kill_exit = true, 456 .support_ct_kill_exit = true,
458 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
459}; 457};
460 458
461MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 459MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 1c9866daf81..344e99de4ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -657,6 +657,131 @@ static void iwl_bg_statistics_periodic(unsigned long data)
657 iwl_send_statistics_request(priv, CMD_ASYNC, false); 657 iwl_send_statistics_request(priv, CMD_ASYNC, false);
658} 658}
659 659
660
661static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
662 u32 start_idx, u32 num_events,
663 u32 mode)
664{
665 u32 i;
666 u32 ptr; /* SRAM byte address of log data */
667 u32 ev, time, data; /* event log data */
668 unsigned long reg_flags;
669
670 if (mode == 0)
671 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
672 else
673 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
674
675 /* Make sure device is powered up for SRAM reads */
676 spin_lock_irqsave(&priv->reg_lock, reg_flags);
677 if (iwl_grab_nic_access(priv)) {
678 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
679 return;
680 }
681
682 /* Set starting address; reads will auto-increment */
683 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
684 rmb();
685
686 /*
687 * "time" is actually "data" for mode 0 (no timestamp).
688 * place event id # at far right for easier visual parsing.
689 */
690 for (i = 0; i < num_events; i++) {
691 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
692 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
693 if (mode == 0) {
694 trace_iwlwifi_dev_ucode_cont_event(priv,
695 0, time, ev);
696 } else {
697 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
698 trace_iwlwifi_dev_ucode_cont_event(priv,
699 time, data, ev);
700 }
701 }
702 /* Allow device to power down */
703 iwl_release_nic_access(priv);
704 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
705}
706
707void iwl_continuous_event_trace(struct iwl_priv *priv)
708{
709 u32 capacity; /* event log capacity in # entries */
710 u32 base; /* SRAM byte address of event log header */
711 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
712 u32 num_wraps; /* # times uCode wrapped to top of log */
713 u32 next_entry; /* index of next entry to be written by uCode */
714
715 if (priv->ucode_type == UCODE_INIT)
716 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
717 else
718 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
719 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
720 capacity = iwl_read_targ_mem(priv, base);
721 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
722 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
723 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
724 } else
725 return;
726
727 if (num_wraps == priv->event_log.num_wraps) {
728 iwl_print_cont_event_trace(priv,
729 base, priv->event_log.next_entry,
730 next_entry - priv->event_log.next_entry,
731 mode);
732 priv->event_log.non_wraps_count++;
733 } else {
734 if ((num_wraps - priv->event_log.num_wraps) > 1)
735 priv->event_log.wraps_more_count++;
736 else
737 priv->event_log.wraps_once_count++;
738 trace_iwlwifi_dev_ucode_wrap_event(priv,
739 num_wraps - priv->event_log.num_wraps,
740 next_entry, priv->event_log.next_entry);
741 if (next_entry < priv->event_log.next_entry) {
742 iwl_print_cont_event_trace(priv, base,
743 priv->event_log.next_entry,
744 capacity - priv->event_log.next_entry,
745 mode);
746
747 iwl_print_cont_event_trace(priv, base, 0,
748 next_entry, mode);
749 } else {
750 iwl_print_cont_event_trace(priv, base,
751 next_entry, capacity - next_entry,
752 mode);
753
754 iwl_print_cont_event_trace(priv, base, 0,
755 next_entry, mode);
756 }
757 }
758 priv->event_log.num_wraps = num_wraps;
759 priv->event_log.next_entry = next_entry;
760}
761
762/**
763 * iwl_bg_ucode_trace - Timer callback to log ucode event
764 *
765 * The timer is continually set to execute every
766 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
767 * this function is to perform continuous uCode event logging operation
768 * if enabled
769 */
770static void iwl_bg_ucode_trace(unsigned long data)
771{
772 struct iwl_priv *priv = (struct iwl_priv *)data;
773
774 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
775 return;
776
777 if (priv->event_log.ucode_trace) {
778 iwl_continuous_event_trace(priv);
779 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
780 mod_timer(&priv->ucode_trace,
781 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
782 }
783}
784
660static void iwl_rx_beacon_notif(struct iwl_priv *priv, 785static void iwl_rx_beacon_notif(struct iwl_priv *priv,
661 struct iwl_rx_mem_buffer *rxb) 786 struct iwl_rx_mem_buffer *rxb)
662{ 787{
@@ -689,12 +814,14 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
689 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 814 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
690 unsigned long status = priv->status; 815 unsigned long status = priv->status;
691 816
692 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n", 817 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
693 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 818 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
694 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 819 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
820 (flags & CT_CARD_DISABLED) ?
821 "Reached" : "Not reached");
695 822
696 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 823 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
697 RF_CARD_DISABLED)) { 824 CT_CARD_DISABLED)) {
698 825
699 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 826 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
700 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 827 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
@@ -708,10 +835,10 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
708 iwl_write_direct32(priv, HBUS_TARG_MBX_C, 835 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
709 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 836 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
710 } 837 }
711 if (flags & RF_CARD_DISABLED) 838 if (flags & CT_CARD_DISABLED)
712 iwl_tt_enter_ct_kill(priv); 839 iwl_tt_enter_ct_kill(priv);
713 } 840 }
714 if (!(flags & RF_CARD_DISABLED)) 841 if (!(flags & CT_CARD_DISABLED))
715 iwl_tt_exit_ct_kill(priv); 842 iwl_tt_exit_ct_kill(priv);
716 843
717 if (flags & HW_CARD_DISABLED) 844 if (flags & HW_CARD_DISABLED)
@@ -1705,8 +1832,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1705 * iwl_print_event_log - Dump error event log to syslog 1832 * iwl_print_event_log - Dump error event log to syslog
1706 * 1833 *
1707 */ 1834 */
1708static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, 1835static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1709 u32 num_events, u32 mode) 1836 u32 num_events, u32 mode,
1837 int pos, char **buf, size_t bufsz)
1710{ 1838{
1711 u32 i; 1839 u32 i;
1712 u32 base; /* SRAM byte address of event log header */ 1840 u32 base; /* SRAM byte address of event log header */
@@ -1716,7 +1844,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1716 unsigned long reg_flags; 1844 unsigned long reg_flags;
1717 1845
1718 if (num_events == 0) 1846 if (num_events == 0)
1719 return; 1847 return pos;
1720 if (priv->ucode_type == UCODE_INIT) 1848 if (priv->ucode_type == UCODE_INIT)
1721 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1849 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1722 else 1850 else
@@ -1744,27 +1872,44 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1744 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1872 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1745 if (mode == 0) { 1873 if (mode == 0) {
1746 /* data, ev */ 1874 /* data, ev */
1747 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev); 1875 if (bufsz) {
1748 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev); 1876 pos += scnprintf(*buf + pos, bufsz - pos,
1877 "EVT_LOG:0x%08x:%04u\n",
1878 time, ev);
1879 } else {
1880 trace_iwlwifi_dev_ucode_event(priv, 0,
1881 time, ev);
1882 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1883 time, ev);
1884 }
1749 } else { 1885 } else {
1750 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1886 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1751 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", 1887 if (bufsz) {
1888 pos += scnprintf(*buf + pos, bufsz - pos,
1889 "EVT_LOGT:%010u:0x%08x:%04u\n",
1890 time, data, ev);
1891 } else {
1892 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1752 time, data, ev); 1893 time, data, ev);
1753 trace_iwlwifi_dev_ucode_event(priv, time, data, ev); 1894 trace_iwlwifi_dev_ucode_event(priv, time,
1895 data, ev);
1896 }
1754 } 1897 }
1755 } 1898 }
1756 1899
1757 /* Allow device to power down */ 1900 /* Allow device to power down */
1758 iwl_release_nic_access(priv); 1901 iwl_release_nic_access(priv);
1759 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 1902 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1903 return pos;
1760} 1904}
1761 1905
1762/** 1906/**
1763 * iwl_print_last_event_logs - Dump the newest # of event log to syslog 1907 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1764 */ 1908 */
1765static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, 1909static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1766 u32 num_wraps, u32 next_entry, 1910 u32 num_wraps, u32 next_entry,
1767 u32 size, u32 mode) 1911 u32 size, u32 mode,
1912 int pos, char **buf, size_t bufsz)
1768{ 1913{
1769 /* 1914 /*
1770 * display the newest DEFAULT_LOG_ENTRIES entries 1915 * display the newest DEFAULT_LOG_ENTRIES entries
@@ -1772,21 +1917,26 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1772 */ 1917 */
1773 if (num_wraps) { 1918 if (num_wraps) {
1774 if (next_entry < size) { 1919 if (next_entry < size) {
1775 iwl_print_event_log(priv, 1920 pos = iwl_print_event_log(priv,
1776 capacity - (size - next_entry), 1921 capacity - (size - next_entry),
1777 size - next_entry, mode); 1922 size - next_entry, mode,
1778 iwl_print_event_log(priv, 0, 1923 pos, buf, bufsz);
1779 next_entry, mode); 1924 pos = iwl_print_event_log(priv, 0,
1925 next_entry, mode,
1926 pos, buf, bufsz);
1780 } else 1927 } else
1781 iwl_print_event_log(priv, next_entry - size, 1928 pos = iwl_print_event_log(priv, next_entry - size,
1782 size, mode); 1929 size, mode, pos, buf, bufsz);
1783 } else { 1930 } else {
1784 if (next_entry < size) 1931 if (next_entry < size) {
1785 iwl_print_event_log(priv, 0, next_entry, mode); 1932 pos = iwl_print_event_log(priv, 0, next_entry,
1786 else 1933 mode, pos, buf, bufsz);
1787 iwl_print_event_log(priv, next_entry - size, 1934 } else {
1788 size, mode); 1935 pos = iwl_print_event_log(priv, next_entry - size,
1936 size, mode, pos, buf, bufsz);
1937 }
1789 } 1938 }
1939 return pos;
1790} 1940}
1791 1941
1792/* For sanity check only. Actual size is determined by uCode, typ. 512 */ 1942/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1794,7 +1944,8 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1794 1944
1795#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) 1945#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1796 1946
1797void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log) 1947int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1948 char **buf, bool display)
1798{ 1949{
1799 u32 base; /* SRAM byte address of event log header */ 1950 u32 base; /* SRAM byte address of event log header */
1800 u32 capacity; /* event log capacity in # entries */ 1951 u32 capacity; /* event log capacity in # entries */
@@ -1802,6 +1953,8 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1802 u32 num_wraps; /* # times uCode wrapped to top of log */ 1953 u32 num_wraps; /* # times uCode wrapped to top of log */
1803 u32 next_entry; /* index of next entry to be written by uCode */ 1954 u32 next_entry; /* index of next entry to be written by uCode */
1804 u32 size; /* # entries that we'll print */ 1955 u32 size; /* # entries that we'll print */
1956 int pos = 0;
1957 size_t bufsz = 0;
1805 1958
1806 if (priv->ucode_type == UCODE_INIT) 1959 if (priv->ucode_type == UCODE_INIT)
1807 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1960 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
@@ -1812,7 +1965,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1812 IWL_ERR(priv, 1965 IWL_ERR(priv,
1813 "Invalid event log pointer 0x%08X for %s uCode\n", 1966 "Invalid event log pointer 0x%08X for %s uCode\n",
1814 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); 1967 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1815 return; 1968 return pos;
1816 } 1969 }
1817 1970
1818 /* event log header */ 1971 /* event log header */
@@ -1838,7 +1991,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1838 /* bail out if nothing in log */ 1991 /* bail out if nothing in log */
1839 if (size == 0) { 1992 if (size == 0) {
1840 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1993 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1841 return; 1994 return pos;
1842 } 1995 }
1843 1996
1844#ifdef CONFIG_IWLWIFI_DEBUG 1997#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1853,6 +2006,15 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1853 size); 2006 size);
1854 2007
1855#ifdef CONFIG_IWLWIFI_DEBUG 2008#ifdef CONFIG_IWLWIFI_DEBUG
2009 if (display) {
2010 if (full_log)
2011 bufsz = capacity * 48;
2012 else
2013 bufsz = size * 48;
2014 *buf = kmalloc(bufsz, GFP_KERNEL);
2015 if (!*buf)
2016 return pos;
2017 }
1856 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 2018 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1857 /* 2019 /*
1858 * if uCode has wrapped back to top of log, 2020 * if uCode has wrapped back to top of log,
@@ -1860,17 +2022,22 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1860 * i.e the next one that uCode would fill. 2022 * i.e the next one that uCode would fill.
1861 */ 2023 */
1862 if (num_wraps) 2024 if (num_wraps)
1863 iwl_print_event_log(priv, next_entry, 2025 pos = iwl_print_event_log(priv, next_entry,
1864 capacity - next_entry, mode); 2026 capacity - next_entry, mode,
2027 pos, buf, bufsz);
1865 /* (then/else) start at top of log */ 2028 /* (then/else) start at top of log */
1866 iwl_print_event_log(priv, 0, next_entry, mode); 2029 pos = iwl_print_event_log(priv, 0,
2030 next_entry, mode, pos, buf, bufsz);
1867 } else 2031 } else
1868 iwl_print_last_event_logs(priv, capacity, num_wraps, 2032 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1869 next_entry, size, mode); 2033 next_entry, size, mode,
2034 pos, buf, bufsz);
1870#else 2035#else
1871 iwl_print_last_event_logs(priv, capacity, num_wraps, 2036 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1872 next_entry, size, mode); 2037 next_entry, size, mode,
2038 pos, buf, bufsz);
1873#endif 2039#endif
2040 return pos;
1874} 2041}
1875 2042
1876/** 2043/**
@@ -2456,6 +2623,10 @@ static int iwl_setup_mac(struct iwl_priv *priv)
2456 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 2623 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2457 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 2624 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2458 2625
2626 if (priv->cfg->sku & IWL_SKU_N)
2627 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2628 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2629
2459 hw->sta_data_size = sizeof(struct iwl_station_priv); 2630 hw->sta_data_size = sizeof(struct iwl_station_priv);
2460 hw->wiphy->interface_modes = 2631 hw->wiphy->interface_modes =
2461 BIT(NL80211_IFTYPE_STATION) | 2632 BIT(NL80211_IFTYPE_STATION) |
@@ -2784,6 +2955,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2784 return 0; 2955 return 0;
2785 else 2956 else
2786 return ret; 2957 return ret;
2958 case IEEE80211_AMPDU_TX_OPERATIONAL:
2959 /* do nothing */
2960 return -EOPNOTSUPP;
2787 default: 2961 default:
2788 IWL_DEBUG_HT(priv, "unknown\n"); 2962 IWL_DEBUG_HT(priv, "unknown\n");
2789 return -EINVAL; 2963 return -EINVAL;
@@ -3126,6 +3300,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3126 priv->statistics_periodic.data = (unsigned long)priv; 3300 priv->statistics_periodic.data = (unsigned long)priv;
3127 priv->statistics_periodic.function = iwl_bg_statistics_periodic; 3301 priv->statistics_periodic.function = iwl_bg_statistics_periodic;
3128 3302
3303 init_timer(&priv->ucode_trace);
3304 priv->ucode_trace.data = (unsigned long)priv;
3305 priv->ucode_trace.function = iwl_bg_ucode_trace;
3306
3129 if (!priv->cfg->use_isr_legacy) 3307 if (!priv->cfg->use_isr_legacy)
3130 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3308 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3131 iwl_irq_tasklet, (unsigned long)priv); 3309 iwl_irq_tasklet, (unsigned long)priv);
@@ -3144,6 +3322,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3144 cancel_delayed_work(&priv->alive_start); 3322 cancel_delayed_work(&priv->alive_start);
3145 cancel_work_sync(&priv->beacon_update); 3323 cancel_work_sync(&priv->beacon_update);
3146 del_timer_sync(&priv->statistics_periodic); 3324 del_timer_sync(&priv->statistics_periodic);
3325 del_timer_sync(&priv->ucode_trace);
3147} 3326}
3148 3327
3149static void iwl_init_hw_rates(struct iwl_priv *priv, 3328static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3188,6 +3367,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
3188 priv->band = IEEE80211_BAND_2GHZ; 3367 priv->band = IEEE80211_BAND_2GHZ;
3189 3368
3190 priv->iw_mode = NL80211_IFTYPE_STATION; 3369 priv->iw_mode = NL80211_IFTYPE_STATION;
3370 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3191 3371
3192 /* Choose which receivers/antennas to use */ 3372 /* Choose which receivers/antennas to use */
3193 if (priv->cfg->ops->hcmd->set_rxon_chain) 3373 if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3589,7 +3769,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3589 *****************************************************************************/ 3769 *****************************************************************************/
3590 3770
3591/* Hardware specific file defines the PCI IDs table for that hardware module */ 3771/* Hardware specific file defines the PCI IDs table for that hardware module */
3592static struct pci_device_id iwl_hw_card_ids[] = { 3772static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3593#ifdef CONFIG_IWL4965 3773#ifdef CONFIG_IWL4965
3594 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 3774 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
3595 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 3775 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 95a57b36a7e..dc61906290e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -414,7 +414,6 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
414/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 414/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
415static int iwl_sensitivity_write(struct iwl_priv *priv) 415static int iwl_sensitivity_write(struct iwl_priv *priv)
416{ 416{
417 int ret = 0;
418 struct iwl_sensitivity_cmd cmd ; 417 struct iwl_sensitivity_cmd cmd ;
419 struct iwl_sensitivity_data *data = NULL; 418 struct iwl_sensitivity_data *data = NULL;
420 struct iwl_host_cmd cmd_out = { 419 struct iwl_host_cmd cmd_out = {
@@ -477,11 +476,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
477 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 476 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
478 sizeof(u16)*HD_TABLE_SIZE); 477 sizeof(u16)*HD_TABLE_SIZE);
479 478
480 ret = iwl_send_cmd(priv, &cmd_out); 479 return iwl_send_cmd(priv, &cmd_out);
481 if (ret)
482 IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
483
484 return ret;
485} 480}
486 481
487void iwl_init_sensitivity(struct iwl_priv *priv) 482void iwl_init_sensitivity(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9150753192..3320cce3d57 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -120,7 +120,6 @@ enum {
120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67, 120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
121 121
122 /* 802.11h related */ 122 /* 802.11h related */
123 RADAR_NOTIFICATION = 0x70, /* not used */
124 REPLY_QUIET_CMD = 0x71, /* not used */ 123 REPLY_QUIET_CMD = 0x71, /* not used */
125 REPLY_CHANNEL_SWITCH = 0x72, 124 REPLY_CHANNEL_SWITCH = 0x72,
126 CHANNEL_SWITCH_NOTIFICATION = 0x73, 125 CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2510,7 +2509,7 @@ struct iwl_card_state_notif {
2510 2509
2511#define HW_CARD_DISABLED 0x01 2510#define HW_CARD_DISABLED 0x01
2512#define SW_CARD_DISABLED 0x02 2511#define SW_CARD_DISABLED 0x02
2513#define RF_CARD_DISABLED 0x04 2512#define CT_CARD_DISABLED 0x04
2514#define RXON_CARD_DISABLED 0x10 2513#define RXON_CARD_DISABLED 0x10
2515 2514
2516struct iwl_ct_kill_config { 2515struct iwl_ct_kill_config {
@@ -2984,7 +2983,7 @@ struct statistics_rx_ht_phy {
2984 __le32 agg_crc32_good; 2983 __le32 agg_crc32_good;
2985 __le32 agg_mpdu_cnt; 2984 __le32 agg_mpdu_cnt;
2986 __le32 agg_cnt; 2985 __le32 agg_cnt;
2987 __le32 reserved2; 2986 __le32 unsupport_mcs;
2988} __attribute__ ((packed)); 2987} __attribute__ ((packed));
2989 2988
2990#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 2989#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3086,8 @@ struct statistics_div {
3087} __attribute__ ((packed)); 3086} __attribute__ ((packed));
3088 3087
3089struct statistics_general { 3088struct statistics_general {
3090 __le32 temperature; 3089 __le32 temperature; /* radio temperature */
3091 __le32 temperature_m; 3090 __le32 temperature_m; /* for 5000 and up, this is radio voltage */
3092 struct statistics_dbg dbg; 3091 struct statistics_dbg dbg;
3093 __le32 sleep_time; 3092 __le32 sleep_time;
3094 __le32 slots_out; 3093 __le32 slots_out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5461f105bd2..5b56307a381 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -450,8 +450,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
450 if (priv->cfg->ht_greenfield_support) 450 if (priv->cfg->ht_greenfield_support)
451 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; 451 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
452 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 452 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
453 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
454 (priv->cfg->sm_ps_mode << 2));
455 max_bit_rate = MAX_BIT_RATE_20_MHZ; 453 max_bit_rate = MAX_BIT_RATE_20_MHZ;
456 if (priv->hw_params.ht40_channel & BIT(band)) { 454 if (priv->hw_params.ht40_channel & BIT(band)) {
457 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 455 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -636,7 +634,7 @@ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
636 634
637static bool is_single_rx_stream(struct iwl_priv *priv) 635static bool is_single_rx_stream(struct iwl_priv *priv)
638{ 636{
639 return !priv->current_ht_config.is_ht || 637 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
640 priv->current_ht_config.single_chain_sufficient; 638 priv->current_ht_config.single_chain_sufficient;
641} 639}
642 640
@@ -1003,28 +1001,18 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
1003 */ 1001 */
1004static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) 1002static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1005{ 1003{
1006 int idle_cnt = active_cnt; 1004 /* # Rx chains when idling, depending on SMPS mode */
1007 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 1005 switch (priv->current_ht_config.smps) {
1008 1006 case IEEE80211_SMPS_STATIC:
1009 /* # Rx chains when idling and maybe trying to save power */ 1007 case IEEE80211_SMPS_DYNAMIC:
1010 switch (priv->cfg->sm_ps_mode) { 1008 return IWL_NUM_IDLE_CHAINS_SINGLE;
1011 case WLAN_HT_CAP_SM_PS_STATIC: 1009 case IEEE80211_SMPS_OFF:
1012 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE; 1010 return active_cnt;
1013 break;
1014 case WLAN_HT_CAP_SM_PS_DYNAMIC:
1015 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
1016 IWL_NUM_IDLE_CHAINS_SINGLE;
1017 break;
1018 case WLAN_HT_CAP_SM_PS_DISABLED:
1019 break;
1020 case WLAN_HT_CAP_SM_PS_INVALID:
1021 default: 1011 default:
1022 IWL_ERR(priv, "invalid sm_ps mode %u\n", 1012 WARN(1, "invalid SMPS mode %d",
1023 priv->cfg->sm_ps_mode); 1013 priv->current_ht_config.smps);
1024 WARN_ON(1); 1014 return active_cnt;
1025 break;
1026 } 1015 }
1027 return idle_cnt;
1028} 1016}
1029 1017
1030/* up to 4 chains */ 1018/* up to 4 chains */
@@ -1363,7 +1351,9 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1363 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1351 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1364 1352
1365 priv->cfg->ops->lib->dump_nic_error_log(priv); 1353 priv->cfg->ops->lib->dump_nic_error_log(priv);
1366 priv->cfg->ops->lib->dump_nic_event_log(priv, false); 1354 if (priv->cfg->ops->lib->dump_csr)
1355 priv->cfg->ops->lib->dump_csr(priv);
1356 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
1367#ifdef CONFIG_IWLWIFI_DEBUG 1357#ifdef CONFIG_IWLWIFI_DEBUG
1368 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) 1358 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
1369 iwl_print_rx_config_cmd(priv); 1359 iwl_print_rx_config_cmd(priv);
@@ -2599,12 +2589,12 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
2599EXPORT_SYMBOL(iwl_set_mode); 2589EXPORT_SYMBOL(iwl_set_mode);
2600 2590
2601int iwl_mac_add_interface(struct ieee80211_hw *hw, 2591int iwl_mac_add_interface(struct ieee80211_hw *hw,
2602 struct ieee80211_if_init_conf *conf) 2592 struct ieee80211_vif *vif)
2603{ 2593{
2604 struct iwl_priv *priv = hw->priv; 2594 struct iwl_priv *priv = hw->priv;
2605 unsigned long flags; 2595 unsigned long flags;
2606 2596
2607 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type); 2597 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
2608 2598
2609 if (priv->vif) { 2599 if (priv->vif) {
2610 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n"); 2600 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
@@ -2612,19 +2602,19 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2612 } 2602 }
2613 2603
2614 spin_lock_irqsave(&priv->lock, flags); 2604 spin_lock_irqsave(&priv->lock, flags);
2615 priv->vif = conf->vif; 2605 priv->vif = vif;
2616 priv->iw_mode = conf->type; 2606 priv->iw_mode = vif->type;
2617 2607
2618 spin_unlock_irqrestore(&priv->lock, flags); 2608 spin_unlock_irqrestore(&priv->lock, flags);
2619 2609
2620 mutex_lock(&priv->mutex); 2610 mutex_lock(&priv->mutex);
2621 2611
2622 if (conf->mac_addr) { 2612 if (vif->addr) {
2623 IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr); 2613 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2624 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 2614 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
2625 } 2615 }
2626 2616
2627 if (iwl_set_mode(priv, conf->type) == -EAGAIN) 2617 if (iwl_set_mode(priv, vif->type) == -EAGAIN)
2628 /* we are not ready, will run again when ready */ 2618 /* we are not ready, will run again when ready */
2629 set_bit(STATUS_MODE_PENDING, &priv->status); 2619 set_bit(STATUS_MODE_PENDING, &priv->status);
2630 2620
@@ -2636,7 +2626,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2636EXPORT_SYMBOL(iwl_mac_add_interface); 2626EXPORT_SYMBOL(iwl_mac_add_interface);
2637 2627
2638void iwl_mac_remove_interface(struct ieee80211_hw *hw, 2628void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2639 struct ieee80211_if_init_conf *conf) 2629 struct ieee80211_vif *vif)
2640{ 2630{
2641 struct iwl_priv *priv = hw->priv; 2631 struct iwl_priv *priv = hw->priv;
2642 2632
@@ -2649,7 +2639,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2649 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2639 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2650 iwlcore_commit_rxon(priv); 2640 iwlcore_commit_rxon(priv);
2651 } 2641 }
2652 if (priv->vif == conf->vif) { 2642 if (priv->vif == vif) {
2653 priv->vif = NULL; 2643 priv->vif = NULL;
2654 memset(priv->bssid, 0, ETH_ALEN); 2644 memset(priv->bssid, 0, ETH_ALEN);
2655 } 2645 }
@@ -2689,6 +2679,21 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2689 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 2679 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2690 } 2680 }
2691 2681
2682 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2683 IEEE80211_CONF_CHANGE_CHANNEL)) {
2684 /* mac80211 uses static for non-HT which is what we want */
2685 priv->current_ht_config.smps = conf->smps_mode;
2686
2687 /*
2688 * Recalculate chain counts.
2689 *
2690 * If monitor mode is enabled then mac80211 will
2691 * set up the SM PS mode to OFF if an HT channel is
2692 * configured.
2693 */
2694 if (priv->cfg->ops->hcmd->set_rxon_chain)
2695 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2696 }
2692 2697
2693 /* during scanning mac80211 will delay channel setting until 2698 /* during scanning mac80211 will delay channel setting until
2694 * scan finish with changed = 0 2699 * scan finish with changed = 0
@@ -2785,10 +2790,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2785 iwl_set_tx_power(priv, conf->power_level, false); 2790 iwl_set_tx_power(priv, conf->power_level, false);
2786 } 2791 }
2787 2792
2788 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2789 if (priv->cfg->ops->hcmd->set_rxon_chain)
2790 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2791
2792 if (!iwl_is_ready(priv)) { 2793 if (!iwl_is_ready(priv)) {
2793 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2794 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2794 goto out; 2795 goto out;
@@ -3196,6 +3197,77 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
3196EXPORT_SYMBOL(iwl_update_stats); 3197EXPORT_SYMBOL(iwl_update_stats);
3197#endif 3198#endif
3198 3199
3200const static char *get_csr_string(int cmd)
3201{
3202 switch (cmd) {
3203 IWL_CMD(CSR_HW_IF_CONFIG_REG);
3204 IWL_CMD(CSR_INT_COALESCING);
3205 IWL_CMD(CSR_INT);
3206 IWL_CMD(CSR_INT_MASK);
3207 IWL_CMD(CSR_FH_INT_STATUS);
3208 IWL_CMD(CSR_GPIO_IN);
3209 IWL_CMD(CSR_RESET);
3210 IWL_CMD(CSR_GP_CNTRL);
3211 IWL_CMD(CSR_HW_REV);
3212 IWL_CMD(CSR_EEPROM_REG);
3213 IWL_CMD(CSR_EEPROM_GP);
3214 IWL_CMD(CSR_OTP_GP_REG);
3215 IWL_CMD(CSR_GIO_REG);
3216 IWL_CMD(CSR_GP_UCODE_REG);
3217 IWL_CMD(CSR_GP_DRIVER_REG);
3218 IWL_CMD(CSR_UCODE_DRV_GP1);
3219 IWL_CMD(CSR_UCODE_DRV_GP2);
3220 IWL_CMD(CSR_LED_REG);
3221 IWL_CMD(CSR_DRAM_INT_TBL_REG);
3222 IWL_CMD(CSR_GIO_CHICKEN_BITS);
3223 IWL_CMD(CSR_ANA_PLL_CFG);
3224 IWL_CMD(CSR_HW_REV_WA_REG);
3225 IWL_CMD(CSR_DBG_HPET_MEM_REG);
3226 default:
3227 return "UNKNOWN";
3228
3229 }
3230}
3231
3232void iwl_dump_csr(struct iwl_priv *priv)
3233{
3234 int i;
3235 u32 csr_tbl[] = {
3236 CSR_HW_IF_CONFIG_REG,
3237 CSR_INT_COALESCING,
3238 CSR_INT,
3239 CSR_INT_MASK,
3240 CSR_FH_INT_STATUS,
3241 CSR_GPIO_IN,
3242 CSR_RESET,
3243 CSR_GP_CNTRL,
3244 CSR_HW_REV,
3245 CSR_EEPROM_REG,
3246 CSR_EEPROM_GP,
3247 CSR_OTP_GP_REG,
3248 CSR_GIO_REG,
3249 CSR_GP_UCODE_REG,
3250 CSR_GP_DRIVER_REG,
3251 CSR_UCODE_DRV_GP1,
3252 CSR_UCODE_DRV_GP2,
3253 CSR_LED_REG,
3254 CSR_DRAM_INT_TBL_REG,
3255 CSR_GIO_CHICKEN_BITS,
3256 CSR_ANA_PLL_CFG,
3257 CSR_HW_REV_WA_REG,
3258 CSR_DBG_HPET_MEM_REG
3259 };
3260 IWL_ERR(priv, "CSR values:\n");
3261 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
3262 "CSR_INT_PERIODIC_REG)\n");
3263 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
3264 IWL_ERR(priv, " %25s: 0X%08x\n",
3265 get_csr_string(csr_tbl[i]),
3266 iwl_read32(priv, csr_tbl[i]));
3267 }
3268}
3269EXPORT_SYMBOL(iwl_dump_csr);
3270
3199#ifdef CONFIG_PM 3271#ifdef CONFIG_PM
3200 3272
3201int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) 3273int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e745..8deb83bfe18 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,8 +63,6 @@
63#ifndef __iwl_core_h__ 63#ifndef __iwl_core_h__
64#define __iwl_core_h__ 64#define __iwl_core_h__
65 65
66#include <generated/utsrelease.h>
67
68/************************ 66/************************
69 * forward declarations * 67 * forward declarations *
70 ************************/ 68 ************************/
@@ -72,7 +70,7 @@ struct iwl_host_cmd;
72struct iwl_cmd; 70struct iwl_cmd;
73 71
74 72
75#define IWLWIFI_VERSION UTS_RELEASE "-k" 73#define IWLWIFI_VERSION "in-tree:"
76#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation" 74#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
77#define DRV_AUTHOR "<ilw@linux.intel.com>" 75#define DRV_AUTHOR "<ilw@linux.intel.com>"
78 76
@@ -169,8 +167,10 @@ struct iwl_lib_ops {
169 int (*is_valid_rtc_data_addr)(u32 addr); 167 int (*is_valid_rtc_data_addr)(u32 addr);
170 /* 1st ucode load */ 168 /* 1st ucode load */
171 int (*load_ucode)(struct iwl_priv *priv); 169 int (*load_ucode)(struct iwl_priv *priv);
172 void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log); 170 int (*dump_nic_event_log)(struct iwl_priv *priv,
171 bool full_log, char **buf, bool display);
173 void (*dump_nic_error_log)(struct iwl_priv *priv); 172 void (*dump_nic_error_log)(struct iwl_priv *priv);
173 void (*dump_csr)(struct iwl_priv *priv);
174 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel); 174 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
175 /* power management */ 175 /* power management */
176 struct iwl_apm_ops apm_ops; 176 struct iwl_apm_ops apm_ops;
@@ -230,7 +230,6 @@ struct iwl_mod_params {
230 * @chain_noise_num_beacons: number of beacons used to compute chain noise 230 * @chain_noise_num_beacons: number of beacons used to compute chain noise
231 * @adv_thermal_throttle: support advance thermal throttle 231 * @adv_thermal_throttle: support advance thermal throttle
232 * @support_ct_kill_exit: support ct kill exit condition 232 * @support_ct_kill_exit: support ct kill exit condition
233 * @sm_ps_mode: spatial multiplexing power save mode
234 * @support_wimax_coexist: support wimax/wifi co-exist 233 * @support_wimax_coexist: support wimax/wifi co-exist
235 * 234 *
236 * We enable the driver to be backward compatible wrt API version. The 235 * We enable the driver to be backward compatible wrt API version. The
@@ -287,7 +286,6 @@ struct iwl_cfg {
287 const bool supports_idle; 286 const bool supports_idle;
288 bool adv_thermal_throttle; 287 bool adv_thermal_throttle;
289 bool support_ct_kill_exit; 288 bool support_ct_kill_exit;
290 u8 sm_ps_mode;
291 const bool support_wimax_coexist; 289 const bool support_wimax_coexist;
292}; 290};
293 291
@@ -332,9 +330,9 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
332int iwl_commit_rxon(struct iwl_priv *priv); 330int iwl_commit_rxon(struct iwl_priv *priv);
333int iwl_set_mode(struct iwl_priv *priv, int mode); 331int iwl_set_mode(struct iwl_priv *priv, int mode);
334int iwl_mac_add_interface(struct ieee80211_hw *hw, 332int iwl_mac_add_interface(struct ieee80211_hw *hw,
335 struct ieee80211_if_init_conf *conf); 333 struct ieee80211_vif *vif);
336void iwl_mac_remove_interface(struct ieee80211_hw *hw, 334void iwl_mac_remove_interface(struct ieee80211_hw *hw,
337 struct ieee80211_if_init_conf *conf); 335 struct ieee80211_vif *vif);
338int iwl_mac_config(struct ieee80211_hw *hw, u32 changed); 336int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
339void iwl_config_ap(struct iwl_priv *priv); 337void iwl_config_ap(struct iwl_priv *priv);
340int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, 338int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
@@ -581,7 +579,9 @@ int iwl_pci_resume(struct pci_dev *pdev);
581* Error Handling Debugging 579* Error Handling Debugging
582******************************************************/ 580******************************************************/
583void iwl_dump_nic_error_log(struct iwl_priv *priv); 581void iwl_dump_nic_error_log(struct iwl_priv *priv);
584void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log); 582int iwl_dump_nic_event_log(struct iwl_priv *priv,
583 bool full_log, char **buf, bool display);
584void iwl_dump_csr(struct iwl_priv *priv);
585#ifdef CONFIG_IWLWIFI_DEBUG 585#ifdef CONFIG_IWLWIFI_DEBUG
586void iwl_print_rx_config_cmd(struct iwl_priv *priv); 586void iwl_print_rx_config_cmd(struct iwl_priv *priv);
587#else 587#else
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d61293ab67c..58e0462cafa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -109,6 +109,8 @@ struct iwl_debugfs {
109 struct dentry *file_power_save_status; 109 struct dentry *file_power_save_status;
110 struct dentry *file_clear_ucode_statistics; 110 struct dentry *file_clear_ucode_statistics;
111 struct dentry *file_clear_traffic_statistics; 111 struct dentry *file_clear_traffic_statistics;
112 struct dentry *file_csr;
113 struct dentry *file_ucode_tracing;
112 } dbgfs_debug_files; 114 } dbgfs_debug_files;
113 u32 sram_offset; 115 u32 sram_offset;
114 u32 sram_len; 116 u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 21e0f6699da..4a2ac9311ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -125,7 +125,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
125 char __user *user_buf, 125 char __user *user_buf,
126 size_t count, loff_t *ppos) { 126 size_t count, loff_t *ppos) {
127 127
128 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 128 struct iwl_priv *priv = file->private_data;
129 char *buf; 129 char *buf;
130 int pos = 0; 130 int pos = 0;
131 131
@@ -184,7 +184,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
184 char __user *user_buf, 184 char __user *user_buf,
185 size_t count, loff_t *ppos) { 185 size_t count, loff_t *ppos) {
186 186
187 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 187 struct iwl_priv *priv = file->private_data;
188 char *buf; 188 char *buf;
189 int pos = 0; 189 int pos = 0;
190 int cnt; 190 int cnt;
@@ -232,7 +232,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
232 ssize_t ret; 232 ssize_t ret;
233 int i; 233 int i;
234 int pos = 0; 234 int pos = 0;
235 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 235 struct iwl_priv *priv = file->private_data;
236 size_t bufsz; 236 size_t bufsz;
237 237
238 /* default is to dump the entire data segment */ 238 /* default is to dump the entire data segment */
@@ -306,7 +306,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
306static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, 306static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
307 size_t count, loff_t *ppos) 307 size_t count, loff_t *ppos)
308{ 308{
309 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 309 struct iwl_priv *priv = file->private_data;
310 struct iwl_station_entry *station; 310 struct iwl_station_entry *station;
311 int max_sta = priv->hw_params.max_stations; 311 int max_sta = priv->hw_params.max_stations;
312 char *buf; 312 char *buf;
@@ -376,7 +376,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
376 loff_t *ppos) 376 loff_t *ppos)
377{ 377{
378 ssize_t ret; 378 ssize_t ret;
379 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 379 struct iwl_priv *priv = file->private_data;
380 int pos = 0, ofs = 0, buf_size = 0; 380 int pos = 0, ofs = 0, buf_size = 0;
381 const u8 *ptr; 381 const u8 *ptr;
382 char *buf; 382 char *buf;
@@ -420,6 +420,23 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
420 return ret; 420 return ret;
421} 421}
422 422
423static ssize_t iwl_dbgfs_log_event_read(struct file *file,
424 char __user *user_buf,
425 size_t count, loff_t *ppos)
426{
427 struct iwl_priv *priv = file->private_data;
428 char *buf;
429 int pos = 0;
430 ssize_t ret = -ENOMEM;
431
432 pos = priv->cfg->ops->lib->dump_nic_event_log(priv, true, &buf, true);
433 if (pos && buf) {
434 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
435 kfree(buf);
436 }
437 return ret;
438}
439
423static ssize_t iwl_dbgfs_log_event_write(struct file *file, 440static ssize_t iwl_dbgfs_log_event_write(struct file *file,
424 const char __user *user_buf, 441 const char __user *user_buf,
425 size_t count, loff_t *ppos) 442 size_t count, loff_t *ppos)
@@ -436,7 +453,8 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
436 if (sscanf(buf, "%d", &event_log_flag) != 1) 453 if (sscanf(buf, "%d", &event_log_flag) != 1)
437 return -EFAULT; 454 return -EFAULT;
438 if (event_log_flag == 1) 455 if (event_log_flag == 1)
439 priv->cfg->ops->lib->dump_nic_event_log(priv, true); 456 priv->cfg->ops->lib->dump_nic_event_log(priv, true,
457 NULL, false);
440 458
441 return count; 459 return count;
442} 460}
@@ -446,7 +464,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
446static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, 464static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
447 size_t count, loff_t *ppos) 465 size_t count, loff_t *ppos)
448{ 466{
449 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 467 struct iwl_priv *priv = file->private_data;
450 struct ieee80211_channel *channels = NULL; 468 struct ieee80211_channel *channels = NULL;
451 const struct ieee80211_supported_band *supp_band = NULL; 469 const struct ieee80211_supported_band *supp_band = NULL;
452 int pos = 0, i, bufsz = PAGE_SIZE; 470 int pos = 0, i, bufsz = PAGE_SIZE;
@@ -519,7 +537,7 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
519 char __user *user_buf, 537 char __user *user_buf,
520 size_t count, loff_t *ppos) { 538 size_t count, loff_t *ppos) {
521 539
522 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 540 struct iwl_priv *priv = file->private_data;
523 char buf[512]; 541 char buf[512];
524 int pos = 0; 542 int pos = 0;
525 const size_t bufsz = sizeof(buf); 543 const size_t bufsz = sizeof(buf);
@@ -567,7 +585,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
567 char __user *user_buf, 585 char __user *user_buf,
568 size_t count, loff_t *ppos) { 586 size_t count, loff_t *ppos) {
569 587
570 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 588 struct iwl_priv *priv = file->private_data;
571 int pos = 0; 589 int pos = 0;
572 int cnt = 0; 590 int cnt = 0;
573 char *buf; 591 char *buf;
@@ -654,7 +672,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
654static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, 672static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
655 size_t count, loff_t *ppos) 673 size_t count, loff_t *ppos)
656{ 674{
657 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 675 struct iwl_priv *priv = file->private_data;
658 int pos = 0, i; 676 int pos = 0, i;
659 char buf[256]; 677 char buf[256];
660 const size_t bufsz = sizeof(buf); 678 const size_t bufsz = sizeof(buf);
@@ -677,7 +695,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
677static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, 695static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
678 size_t count, loff_t *ppos) 696 size_t count, loff_t *ppos)
679{ 697{
680 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 698 struct iwl_priv *priv = file->private_data;
681 int pos = 0; 699 int pos = 0;
682 char buf[256]; 700 char buf[256];
683 const size_t bufsz = sizeof(buf); 701 const size_t bufsz = sizeof(buf);
@@ -703,7 +721,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
703 char __user *user_buf, 721 char __user *user_buf,
704 size_t count, loff_t *ppos) 722 size_t count, loff_t *ppos)
705{ 723{
706 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 724 struct iwl_priv *priv = file->private_data;
707 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 725 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
708 struct iwl_tt_restriction *restriction; 726 struct iwl_tt_restriction *restriction;
709 char buf[100]; 727 char buf[100];
@@ -763,7 +781,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
763 char __user *user_buf, 781 char __user *user_buf,
764 size_t count, loff_t *ppos) 782 size_t count, loff_t *ppos)
765{ 783{
766 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 784 struct iwl_priv *priv = file->private_data;
767 char buf[100]; 785 char buf[100];
768 int pos = 0; 786 int pos = 0;
769 const size_t bufsz = sizeof(buf); 787 const size_t bufsz = sizeof(buf);
@@ -820,7 +838,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
820 char __user *user_buf, 838 char __user *user_buf,
821 size_t count, loff_t *ppos) 839 size_t count, loff_t *ppos)
822{ 840{
823 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 841 struct iwl_priv *priv = file->private_data;
824 char buf[10]; 842 char buf[10];
825 int pos, value; 843 int pos, value;
826 const size_t bufsz = sizeof(buf); 844 const size_t bufsz = sizeof(buf);
@@ -838,7 +856,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
838 char __user *user_buf, 856 char __user *user_buf,
839 size_t count, loff_t *ppos) 857 size_t count, loff_t *ppos)
840{ 858{
841 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 859 struct iwl_priv *priv = file->private_data;
842 char buf[200]; 860 char buf[200];
843 int pos = 0, i; 861 int pos = 0, i;
844 const size_t bufsz = sizeof(buf); 862 const size_t bufsz = sizeof(buf);
@@ -859,7 +877,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
859} 877}
860 878
861DEBUGFS_READ_WRITE_FILE_OPS(sram); 879DEBUGFS_READ_WRITE_FILE_OPS(sram);
862DEBUGFS_WRITE_FILE_OPS(log_event); 880DEBUGFS_READ_WRITE_FILE_OPS(log_event);
863DEBUGFS_READ_FILE_OPS(nvm); 881DEBUGFS_READ_FILE_OPS(nvm);
864DEBUGFS_READ_FILE_OPS(stations); 882DEBUGFS_READ_FILE_OPS(stations);
865DEBUGFS_READ_FILE_OPS(channels); 883DEBUGFS_READ_FILE_OPS(channels);
@@ -976,7 +994,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
976 char __user *user_buf, 994 char __user *user_buf,
977 size_t count, loff_t *ppos) { 995 size_t count, loff_t *ppos) {
978 996
979 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 997 struct iwl_priv *priv = file->private_data;
980 struct iwl_tx_queue *txq; 998 struct iwl_tx_queue *txq;
981 struct iwl_queue *q; 999 struct iwl_queue *q;
982 char *buf; 1000 char *buf;
@@ -1022,7 +1040,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1022 char __user *user_buf, 1040 char __user *user_buf,
1023 size_t count, loff_t *ppos) { 1041 size_t count, loff_t *ppos) {
1024 1042
1025 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1043 struct iwl_priv *priv = file->private_data;
1026 struct iwl_rx_queue *rxq = &priv->rxq; 1044 struct iwl_rx_queue *rxq = &priv->rxq;
1027 char buf[256]; 1045 char buf[256];
1028 int pos = 0; 1046 int pos = 0;
@@ -1068,7 +1086,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1068 char __user *user_buf, 1086 char __user *user_buf,
1069 size_t count, loff_t *ppos) 1087 size_t count, loff_t *ppos)
1070{ 1088{
1071 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1089 struct iwl_priv *priv = file->private_data;
1072 int pos = 0; 1090 int pos = 0;
1073 char *buf; 1091 char *buf;
1074 int bufsz = sizeof(struct statistics_rx_phy) * 20 + 1092 int bufsz = sizeof(struct statistics_rx_phy) * 20 +
@@ -1369,6 +1387,9 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1369 accum_ht->agg_mpdu_cnt); 1387 accum_ht->agg_mpdu_cnt);
1370 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n", 1388 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
1371 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt); 1389 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
1390 pos += scnprintf(buf + pos, bufsz - pos, "unsupport_mcs:\t\t%u\t\t\t%u\n",
1391 le32_to_cpu(ht->unsupport_mcs),
1392 accum_ht->unsupport_mcs);
1372 1393
1373 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1394 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1374 kfree(buf); 1395 kfree(buf);
@@ -1379,7 +1400,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1379 char __user *user_buf, 1400 char __user *user_buf,
1380 size_t count, loff_t *ppos) 1401 size_t count, loff_t *ppos)
1381{ 1402{
1382 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1403 struct iwl_priv *priv = file->private_data;
1383 int pos = 0; 1404 int pos = 0;
1384 char *buf; 1405 char *buf;
1385 int bufsz = (sizeof(struct statistics_tx) * 24) + 250; 1406 int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
@@ -1521,7 +1542,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1521 char __user *user_buf, 1542 char __user *user_buf,
1522 size_t count, loff_t *ppos) 1543 size_t count, loff_t *ppos)
1523{ 1544{
1524 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1545 struct iwl_priv *priv = file->private_data;
1525 int pos = 0; 1546 int pos = 0;
1526 char *buf; 1547 char *buf;
1527 int bufsz = sizeof(struct statistics_general) * 4 + 250; 1548 int bufsz = sizeof(struct statistics_general) * 4 + 250;
@@ -1612,7 +1633,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
1612 char __user *user_buf, 1633 char __user *user_buf,
1613 size_t count, loff_t *ppos) { 1634 size_t count, loff_t *ppos) {
1614 1635
1615 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1636 struct iwl_priv *priv = file->private_data;
1616 int pos = 0; 1637 int pos = 0;
1617 int cnt = 0; 1638 int cnt = 0;
1618 char *buf; 1639 char *buf;
@@ -1693,7 +1714,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
1693 char __user *user_buf, 1714 char __user *user_buf,
1694 size_t count, loff_t *ppos) { 1715 size_t count, loff_t *ppos) {
1695 1716
1696 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1717 struct iwl_priv *priv = file->private_data;
1697 int pos = 0; 1718 int pos = 0;
1698 int cnt = 0; 1719 int cnt = 0;
1699 char *buf; 1720 char *buf;
@@ -1751,7 +1772,7 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1751 char __user *user_buf, 1772 char __user *user_buf,
1752 size_t count, loff_t *ppos) { 1773 size_t count, loff_t *ppos) {
1753 1774
1754 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1775 struct iwl_priv *priv = file->private_data;
1755 char buf[128]; 1776 char buf[128];
1756 int pos = 0; 1777 int pos = 0;
1757 ssize_t ret; 1778 ssize_t ret;
@@ -1802,7 +1823,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1802 char __user *user_buf, 1823 char __user *user_buf,
1803 size_t count, loff_t *ppos) 1824 size_t count, loff_t *ppos)
1804{ 1825{
1805 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1826 struct iwl_priv *priv = file->private_data;
1806 char buf[60]; 1827 char buf[60];
1807 int pos = 0; 1828 int pos = 0;
1808 const size_t bufsz = sizeof(buf); 1829 const size_t bufsz = sizeof(buf);
@@ -1845,6 +1866,80 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
1845 return count; 1866 return count;
1846} 1867}
1847 1868
1869static ssize_t iwl_dbgfs_csr_write(struct file *file,
1870 const char __user *user_buf,
1871 size_t count, loff_t *ppos)
1872{
1873 struct iwl_priv *priv = file->private_data;
1874 char buf[8];
1875 int buf_size;
1876 int csr;
1877
1878 memset(buf, 0, sizeof(buf));
1879 buf_size = min(count, sizeof(buf) - 1);
1880 if (copy_from_user(buf, user_buf, buf_size))
1881 return -EFAULT;
1882 if (sscanf(buf, "%d", &csr) != 1)
1883 return -EFAULT;
1884
1885 if (priv->cfg->ops->lib->dump_csr)
1886 priv->cfg->ops->lib->dump_csr(priv);
1887
1888 return count;
1889}
1890
1891static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
1892 char __user *user_buf,
1893 size_t count, loff_t *ppos) {
1894
1895 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1896 int pos = 0;
1897 char buf[128];
1898 const size_t bufsz = sizeof(buf);
1899 ssize_t ret;
1900
1901 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
1902 priv->event_log.ucode_trace ? "On" : "Off");
1903 pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
1904 priv->event_log.non_wraps_count);
1905 pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
1906 priv->event_log.wraps_once_count);
1907 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
1908 priv->event_log.wraps_more_count);
1909
1910 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1911 return ret;
1912}
1913
1914static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
1915 const char __user *user_buf,
1916 size_t count, loff_t *ppos)
1917{
1918 struct iwl_priv *priv = file->private_data;
1919 char buf[8];
1920 int buf_size;
1921 int trace;
1922
1923 memset(buf, 0, sizeof(buf));
1924 buf_size = min(count, sizeof(buf) - 1);
1925 if (copy_from_user(buf, user_buf, buf_size))
1926 return -EFAULT;
1927 if (sscanf(buf, "%d", &trace) != 1)
1928 return -EFAULT;
1929
1930 if (trace) {
1931 priv->event_log.ucode_trace = true;
1932 /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
1933 mod_timer(&priv->ucode_trace,
1934 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
1935 } else {
1936 priv->event_log.ucode_trace = false;
1937 del_timer_sync(&priv->ucode_trace);
1938 }
1939
1940 return count;
1941}
1942
1848DEBUGFS_READ_FILE_OPS(rx_statistics); 1943DEBUGFS_READ_FILE_OPS(rx_statistics);
1849DEBUGFS_READ_FILE_OPS(tx_statistics); 1944DEBUGFS_READ_FILE_OPS(tx_statistics);
1850DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1945DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1859,6 +1954,8 @@ DEBUGFS_READ_FILE_OPS(tx_power);
1859DEBUGFS_READ_FILE_OPS(power_save_status); 1954DEBUGFS_READ_FILE_OPS(power_save_status);
1860DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); 1955DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1861DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); 1956DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1957DEBUGFS_WRITE_FILE_OPS(csr);
1958DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
1862 1959
1863/* 1960/*
1864 * Create the debugfs files and directories 1961 * Create the debugfs files and directories
@@ -1889,7 +1986,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1889 DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv); 1986 DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
1890 DEBUGFS_ADD_FILE(nvm, data, S_IRUSR); 1987 DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
1891 DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR); 1988 DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
1892 DEBUGFS_ADD_FILE(log_event, data, S_IWUSR); 1989 DEBUGFS_ADD_FILE(log_event, data, S_IWUSR | S_IRUSR);
1893 DEBUGFS_ADD_FILE(stations, data, S_IRUSR); 1990 DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
1894 DEBUGFS_ADD_FILE(channels, data, S_IRUSR); 1991 DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
1895 DEBUGFS_ADD_FILE(status, data, S_IRUSR); 1992 DEBUGFS_ADD_FILE(status, data, S_IRUSR);
@@ -1909,12 +2006,14 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1909 DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR); 2006 DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
1910 DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR); 2007 DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
1911 DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR); 2008 DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
2009 DEBUGFS_ADD_FILE(csr, debug, S_IWUSR);
1912 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 2010 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1913 DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR); 2011 DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
1914 DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR); 2012 DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
1915 DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR); 2013 DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
1916 DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR); 2014 DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
1917 DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR); 2015 DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
2016 DEBUGFS_ADD_FILE(ucode_tracing, debug, S_IWUSR | S_IRUSR);
1918 } 2017 }
1919 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 2018 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
1920 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 2019 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
@@ -1966,6 +2065,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1966 file_clear_ucode_statistics); 2065 file_clear_ucode_statistics);
1967 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 2066 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1968 file_clear_traffic_statistics); 2067 file_clear_traffic_statistics);
2068 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_csr);
1969 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 2069 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1970 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 2070 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1971 file_ucode_rx_stats); 2071 file_ucode_rx_stats);
@@ -1977,6 +2077,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1977 file_sensitivity); 2077 file_sensitivity);
1978 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 2078 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1979 file_chain_noise); 2079 file_chain_noise);
2080 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
2081 file_ucode_tracing);
1980 } 2082 }
1981 DEBUGFS_REMOVE(priv->dbgfs->dir_debug); 2083 DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
1982 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity); 2084 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 3822cf53e36..70f0e79c8e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -512,6 +512,7 @@ struct iwl_ht_config {
512 bool is_ht; 512 bool is_ht;
513 bool is_40mhz; 513 bool is_40mhz;
514 bool single_chain_sufficient; 514 bool single_chain_sufficient;
515 enum ieee80211_smps_mode smps; /* current smps mode */
515 /* BSS related data */ 516 /* BSS related data */
516 u8 extension_chan_offset; 517 u8 extension_chan_offset;
517 u8 ht_protection; 518 u8 ht_protection;
@@ -984,6 +985,32 @@ struct iwl_switch_rxon {
984 __le16 channel; 985 __le16 channel;
985}; 986};
986 987
988/*
989 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
990 * to perform continuous uCode event logging operation if enabled
991 */
992#define UCODE_TRACE_PERIOD (100)
993
994/*
995 * iwl_event_log: current uCode event log position
996 *
997 * @ucode_trace: enable/disable ucode continuous trace timer
998 * @num_wraps: how many times the event buffer wraps
999 * @next_entry: the entry just before the next one that uCode would fill
1000 * @non_wraps_count: counter for no wrap detected when dump ucode events
1001 * @wraps_once_count: counter for wrap once detected when dump ucode events
1002 * @wraps_more_count: counter for wrap more than once detected
1003 * when dump ucode events
1004 */
1005struct iwl_event_log {
1006 bool ucode_trace;
1007 u32 num_wraps;
1008 u32 next_entry;
1009 int non_wraps_count;
1010 int wraps_once_count;
1011 int wraps_more_count;
1012};
1013
987struct iwl_priv { 1014struct iwl_priv {
988 1015
989 /* ieee device used by generic ieee processing code */ 1016 /* ieee device used by generic ieee processing code */
@@ -1261,6 +1288,7 @@ struct iwl_priv {
1261 u32 disable_tx_power_cal; 1288 u32 disable_tx_power_cal;
1262 struct work_struct run_time_calib_work; 1289 struct work_struct run_time_calib_work;
1263 struct timer_list statistics_periodic; 1290 struct timer_list statistics_periodic;
1291 struct timer_list ucode_trace;
1264 bool hw_ready; 1292 bool hw_ready;
1265 /*For 3945*/ 1293 /*For 3945*/
1266#define IWL_DEFAULT_TX_POWER 0x0F 1294#define IWL_DEFAULT_TX_POWER 0x0F
@@ -1268,6 +1296,8 @@ struct iwl_priv {
1268 struct iwl3945_notif_statistics statistics_39; 1296 struct iwl3945_notif_statistics statistics_39;
1269 1297
1270 u32 sta_supp_rates; 1298 u32 sta_supp_rates;
1299
1300 struct iwl_event_log event_log;
1271}; /*iwl_priv */ 1301}; /*iwl_priv */
1272 1302
1273static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1303static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 83cc4e500a9..36580d8d8b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,4 +37,6 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); 37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); 38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
40#endif 42#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index d9c7363b1bb..ff4d012ce26 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -91,6 +91,50 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
91); 91);
92 92
93#undef TRACE_SYSTEM 93#undef TRACE_SYSTEM
94#define TRACE_SYSTEM iwlwifi_ucode
95
96TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
97 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
98 TP_ARGS(priv, time, data, ev),
99 TP_STRUCT__entry(
100 PRIV_ENTRY
101
102 __field(u32, time)
103 __field(u32, data)
104 __field(u32, ev)
105 ),
106 TP_fast_assign(
107 PRIV_ASSIGN;
108 __entry->time = time;
109 __entry->data = data;
110 __entry->ev = ev;
111 ),
112 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
113 __entry->priv, __entry->time, __entry->data, __entry->ev)
114);
115
116TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
117 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
118 TP_ARGS(priv, wraps, n_entry, p_entry),
119 TP_STRUCT__entry(
120 PRIV_ENTRY
121
122 __field(u32, wraps)
123 __field(u32, n_entry)
124 __field(u32, p_entry)
125 ),
126 TP_fast_assign(
127 PRIV_ASSIGN;
128 __entry->wraps = wraps;
129 __entry->n_entry = n_entry;
130 __entry->p_entry = p_entry;
131 ),
132 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
133 __entry->priv, __entry->wraps, __entry->n_entry,
134 __entry->p_entry)
135);
136
137#undef TRACE_SYSTEM
94#define TRACE_SYSTEM iwlwifi 138#define TRACE_SYSTEM iwlwifi
95 139
96TRACE_EVENT(iwlwifi_dev_hcmd, 140TRACE_EVENT(iwlwifi_dev_hcmd,
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 30e9ea6d54e..87d684efe11 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
58 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 58 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
59 IWL_CMD(COEX_MEDIUM_NOTIFICATION); 59 IWL_CMD(COEX_MEDIUM_NOTIFICATION);
60 IWL_CMD(COEX_EVENT_CMD); 60 IWL_CMD(COEX_EVENT_CMD);
61 IWL_CMD(RADAR_NOTIFICATION);
62 IWL_CMD(REPLY_QUIET_CMD); 61 IWL_CMD(REPLY_QUIET_CMD);
63 IWL_CMD(REPLY_CHANNEL_SWITCH); 62 IWL_CMD(REPLY_CHANNEL_SWITCH);
64 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); 63 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f8e4e4b18d0..10b0aa8024c 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1518,8 +1518,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1518 * iwl3945_print_event_log - Dump error event log to syslog 1518 * iwl3945_print_event_log - Dump error event log to syslog
1519 * 1519 *
1520 */ 1520 */
1521static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, 1521static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1522 u32 num_events, u32 mode) 1522 u32 num_events, u32 mode,
1523 int pos, char **buf, size_t bufsz)
1523{ 1524{
1524 u32 i; 1525 u32 i;
1525 u32 base; /* SRAM byte address of event log header */ 1526 u32 base; /* SRAM byte address of event log header */
@@ -1529,7 +1530,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1529 unsigned long reg_flags; 1530 unsigned long reg_flags;
1530 1531
1531 if (num_events == 0) 1532 if (num_events == 0)
1532 return; 1533 return pos;
1533 1534
1534 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1535 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1535 1536
@@ -1555,26 +1556,43 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1555 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1556 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1556 if (mode == 0) { 1557 if (mode == 0) {
1557 /* data, ev */ 1558 /* data, ev */
1558 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1559 if (bufsz) {
1559 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev); 1560 pos += scnprintf(*buf + pos, bufsz - pos,
1561 "0x%08x:%04u\n",
1562 time, ev);
1563 } else {
1564 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1565 trace_iwlwifi_dev_ucode_event(priv, 0,
1566 time, ev);
1567 }
1560 } else { 1568 } else {
1561 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1569 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1562 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev); 1570 if (bufsz) {
1563 trace_iwlwifi_dev_ucode_event(priv, time, data, ev); 1571 pos += scnprintf(*buf + pos, bufsz - pos,
1572 "%010u:0x%08x:%04u\n",
1573 time, data, ev);
1574 } else {
1575 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1576 time, data, ev);
1577 trace_iwlwifi_dev_ucode_event(priv, time,
1578 data, ev);
1579 }
1564 } 1580 }
1565 } 1581 }
1566 1582
1567 /* Allow device to power down */ 1583 /* Allow device to power down */
1568 iwl_release_nic_access(priv); 1584 iwl_release_nic_access(priv);
1569 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 1585 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1586 return pos;
1570} 1587}
1571 1588
1572/** 1589/**
1573 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog 1590 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
1574 */ 1591 */
1575static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, 1592static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1576 u32 num_wraps, u32 next_entry, 1593 u32 num_wraps, u32 next_entry,
1577 u32 size, u32 mode) 1594 u32 size, u32 mode,
1595 int pos, char **buf, size_t bufsz)
1578{ 1596{
1579 /* 1597 /*
1580 * display the newest DEFAULT_LOG_ENTRIES entries 1598 * display the newest DEFAULT_LOG_ENTRIES entries
@@ -1582,21 +1600,28 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1582 */ 1600 */
1583 if (num_wraps) { 1601 if (num_wraps) {
1584 if (next_entry < size) { 1602 if (next_entry < size) {
1585 iwl3945_print_event_log(priv, 1603 pos = iwl3945_print_event_log(priv,
1586 capacity - (size - next_entry), 1604 capacity - (size - next_entry),
1587 size - next_entry, mode); 1605 size - next_entry, mode,
1588 iwl3945_print_event_log(priv, 0, 1606 pos, buf, bufsz);
1589 next_entry, mode); 1607 pos = iwl3945_print_event_log(priv, 0,
1608 next_entry, mode,
1609 pos, buf, bufsz);
1590 } else 1610 } else
1591 iwl3945_print_event_log(priv, next_entry - size, 1611 pos = iwl3945_print_event_log(priv, next_entry - size,
1592 size, mode); 1612 size, mode,
1613 pos, buf, bufsz);
1593 } else { 1614 } else {
1594 if (next_entry < size) 1615 if (next_entry < size)
1595 iwl3945_print_event_log(priv, 0, next_entry, mode); 1616 pos = iwl3945_print_event_log(priv, 0,
1617 next_entry, mode,
1618 pos, buf, bufsz);
1596 else 1619 else
1597 iwl3945_print_event_log(priv, next_entry - size, 1620 pos = iwl3945_print_event_log(priv, next_entry - size,
1598 size, mode); 1621 size, mode,
1622 pos, buf, bufsz);
1599 } 1623 }
1624 return pos;
1600} 1625}
1601 1626
1602/* For sanity check only. Actual size is determined by uCode, typ. 512 */ 1627/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1604,7 +1629,8 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1604 1629
1605#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) 1630#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1606 1631
1607void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log) 1632int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1633 char **buf, bool display)
1608{ 1634{
1609 u32 base; /* SRAM byte address of event log header */ 1635 u32 base; /* SRAM byte address of event log header */
1610 u32 capacity; /* event log capacity in # entries */ 1636 u32 capacity; /* event log capacity in # entries */
@@ -1612,11 +1638,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1612 u32 num_wraps; /* # times uCode wrapped to top of log */ 1638 u32 num_wraps; /* # times uCode wrapped to top of log */
1613 u32 next_entry; /* index of next entry to be written by uCode */ 1639 u32 next_entry; /* index of next entry to be written by uCode */
1614 u32 size; /* # entries that we'll print */ 1640 u32 size; /* # entries that we'll print */
1641 int pos = 0;
1642 size_t bufsz = 0;
1615 1643
1616 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1644 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1617 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 1645 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1618 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); 1646 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1619 return; 1647 return pos;
1620 } 1648 }
1621 1649
1622 /* event log header */ 1650 /* event log header */
@@ -1642,7 +1670,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1642 /* bail out if nothing in log */ 1670 /* bail out if nothing in log */
1643 if (size == 0) { 1671 if (size == 0) {
1644 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1672 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1645 return; 1673 return pos;
1646 } 1674 }
1647 1675
1648#ifdef CONFIG_IWLWIFI_DEBUG 1676#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1658,25 +1686,38 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1658 size); 1686 size);
1659 1687
1660#ifdef CONFIG_IWLWIFI_DEBUG 1688#ifdef CONFIG_IWLWIFI_DEBUG
1689 if (display) {
1690 if (full_log)
1691 bufsz = capacity * 48;
1692 else
1693 bufsz = size * 48;
1694 *buf = kmalloc(bufsz, GFP_KERNEL);
1695 if (!*buf)
1696 return pos;
1697 }
1661 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 1698 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1662 /* if uCode has wrapped back to top of log, 1699 /* if uCode has wrapped back to top of log,
1663 * start at the oldest entry, 1700 * start at the oldest entry,
1664 * i.e the next one that uCode would fill. 1701 * i.e the next one that uCode would fill.
1665 */ 1702 */
1666 if (num_wraps) 1703 if (num_wraps)
1667 iwl3945_print_event_log(priv, next_entry, 1704 pos = iwl3945_print_event_log(priv, next_entry,
1668 capacity - next_entry, mode); 1705 capacity - next_entry, mode,
1706 pos, buf, bufsz);
1669 1707
1670 /* (then/else) start at top of log */ 1708 /* (then/else) start at top of log */
1671 iwl3945_print_event_log(priv, 0, next_entry, mode); 1709 pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
1710 pos, buf, bufsz);
1672 } else 1711 } else
1673 iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1712 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1674 next_entry, size, mode); 1713 next_entry, size, mode,
1714 pos, buf, bufsz);
1675#else 1715#else
1676 iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1716 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1677 next_entry, size, mode); 1717 next_entry, size, mode,
1718 pos, buf, bufsz);
1678#endif 1719#endif
1679 1720 return pos;
1680} 1721}
1681 1722
1682static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1723static void iwl3945_irq_tasklet(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 842811142be..79ffa3b98d7 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
268 268
269 struct sk_buff_head rx_list; 269 struct sk_buff_head rx_list;
270 struct list_head rx_tickets; 270 struct list_head rx_tickets;
271 struct list_head rx_packets[IWM_RX_ID_HASH + 1]; 271 struct list_head rx_packets[IWM_RX_ID_HASH];
272 struct workqueue_struct *rx_wq; 272 struct workqueue_struct *rx_wq;
273 struct work_struct rx_worker; 273 struct work_struct rx_worker;
274 274
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 6d6ed748517..d32adeab68a 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -868,36 +868,35 @@ static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
868 struct iwm_umac_notif_mgt_frame *mgt_frame = 868 struct iwm_umac_notif_mgt_frame *mgt_frame =
869 (struct iwm_umac_notif_mgt_frame *)buf; 869 (struct iwm_umac_notif_mgt_frame *)buf;
870 struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame; 870 struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
871 u8 *ie;
872 871
873 IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame, 872 IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
874 le16_to_cpu(mgt_frame->len)); 873 le16_to_cpu(mgt_frame->len));
875 874
876 if (ieee80211_is_assoc_req(mgt->frame_control)) { 875 if (ieee80211_is_assoc_req(mgt->frame_control)) {
877 ie = mgt->u.assoc_req.variable;; 876 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
878 iwm->req_ie_len = 877 - offsetof(struct ieee80211_mgmt,
879 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 878 u.assoc_req.variable);
880 kfree(iwm->req_ie); 879 kfree(iwm->req_ie);
881 iwm->req_ie = kmemdup(mgt->u.assoc_req.variable, 880 iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
882 iwm->req_ie_len, GFP_KERNEL); 881 iwm->req_ie_len, GFP_KERNEL);
883 } else if (ieee80211_is_reassoc_req(mgt->frame_control)) { 882 } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
884 ie = mgt->u.reassoc_req.variable;; 883 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
885 iwm->req_ie_len = 884 - offsetof(struct ieee80211_mgmt,
886 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 885 u.reassoc_req.variable);
887 kfree(iwm->req_ie); 886 kfree(iwm->req_ie);
888 iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable, 887 iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
889 iwm->req_ie_len, GFP_KERNEL); 888 iwm->req_ie_len, GFP_KERNEL);
890 } else if (ieee80211_is_assoc_resp(mgt->frame_control)) { 889 } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
891 ie = mgt->u.assoc_resp.variable;; 890 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
892 iwm->resp_ie_len = 891 - offsetof(struct ieee80211_mgmt,
893 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 892 u.assoc_resp.variable);
894 kfree(iwm->resp_ie); 893 kfree(iwm->resp_ie);
895 iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable, 894 iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
896 iwm->resp_ie_len, GFP_KERNEL); 895 iwm->resp_ie_len, GFP_KERNEL);
897 } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) { 896 } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
898 ie = mgt->u.reassoc_resp.variable;; 897 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
899 iwm->resp_ie_len = 898 - offsetof(struct ieee80211_mgmt,
900 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 899 u.reassoc_resp.variable);
901 kfree(iwm->resp_ie); 900 kfree(iwm->resp_ie);
902 iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable, 901 iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
903 iwm->resp_ie_len, GFP_KERNEL); 902 iwm->resp_ie_len, GFP_KERNEL);
@@ -1534,6 +1533,33 @@ static void classify8023(struct sk_buff *skb)
1534 } 1533 }
1535} 1534}
1536 1535
1536static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
1537{
1538 struct wireless_dev *wdev = iwm_to_wdev(iwm);
1539 struct net_device *ndev = iwm_to_ndev(iwm);
1540 struct sk_buff_head list;
1541 struct sk_buff *frame;
1542
1543 IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
1544
1545 __skb_queue_head_init(&list);
1546 ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);
1547
1548 while ((frame = __skb_dequeue(&list))) {
1549 ndev->stats.rx_packets++;
1550 ndev->stats.rx_bytes += frame->len;
1551
1552 frame->protocol = eth_type_trans(frame, ndev);
1553 frame->ip_summed = CHECKSUM_NONE;
1554 memset(frame->cb, 0, sizeof(frame->cb));
1555
1556 if (netif_rx_ni(frame) == NET_RX_DROP) {
1557 IWM_ERR(iwm, "Packet dropped\n");
1558 ndev->stats.rx_dropped++;
1559 }
1560 }
1561}
1562
1537static void iwm_rx_process_packet(struct iwm_priv *iwm, 1563static void iwm_rx_process_packet(struct iwm_priv *iwm,
1538 struct iwm_rx_packet *packet, 1564 struct iwm_rx_packet *packet,
1539 struct iwm_rx_ticket_node *ticket_node) 1565 struct iwm_rx_ticket_node *ticket_node)
@@ -1548,25 +1574,34 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
1548 switch (le16_to_cpu(ticket_node->ticket->action)) { 1574 switch (le16_to_cpu(ticket_node->ticket->action)) {
1549 case IWM_RX_TICKET_RELEASE: 1575 case IWM_RX_TICKET_RELEASE:
1550 IWM_DBG_RX(iwm, DBG, "RELEASE packet\n"); 1576 IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
1551 classify8023(skb); 1577
1552 iwm_rx_adjust_packet(iwm, packet, ticket_node); 1578 iwm_rx_adjust_packet(iwm, packet, ticket_node);
1579 skb->dev = iwm_to_ndev(iwm);
1580 classify8023(skb);
1581
1582 if (le16_to_cpu(ticket_node->ticket->flags) &
1583 IWM_RX_TICKET_AMSDU_MSK) {
1584 iwm_rx_process_amsdu(iwm, skb);
1585 break;
1586 }
1587
1553 ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype); 1588 ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
1554 if (ret < 0) { 1589 if (ret < 0) {
1555 IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - " 1590 IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
1556 "%d\n", ret); 1591 "%d\n", ret);
1592 kfree_skb(packet->skb);
1557 break; 1593 break;
1558 } 1594 }
1559 1595
1560 IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len); 1596 IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
1561 1597
1562 skb->dev = iwm_to_ndev(iwm); 1598 ndev->stats.rx_packets++;
1599 ndev->stats.rx_bytes += skb->len;
1600
1563 skb->protocol = eth_type_trans(skb, ndev); 1601 skb->protocol = eth_type_trans(skb, ndev);
1564 skb->ip_summed = CHECKSUM_NONE; 1602 skb->ip_summed = CHECKSUM_NONE;
1565 memset(skb->cb, 0, sizeof(skb->cb)); 1603 memset(skb->cb, 0, sizeof(skb->cb));
1566 1604
1567 ndev->stats.rx_packets++;
1568 ndev->stats.rx_bytes += skb->len;
1569
1570 if (netif_rx_ni(skb) == NET_RX_DROP) { 1605 if (netif_rx_ni(skb) == NET_RX_DROP) {
1571 IWM_ERR(iwm, "Packet dropped\n"); 1606 IWM_ERR(iwm, "Packet dropped\n");
1572 ndev->stats.rx_dropped++; 1607 ndev->stats.rx_dropped++;
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
index 30aa9d48d67..0485c995757 100644
--- a/drivers/net/wireless/libertas/Kconfig
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -37,3 +37,9 @@ config LIBERTAS_DEBUG
37 depends on LIBERTAS 37 depends on LIBERTAS
38 ---help--- 38 ---help---
39 Debugging support. 39 Debugging support.
40
41config LIBERTAS_MESH
42 bool "Enable mesh support"
43 depends on LIBERTAS
44 help
45 This enables Libertas' MESH support, used by e.g. the OLPC people.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index b188cd97a05..45e870e3311 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -5,11 +5,11 @@ libertas-y += cmdresp.o
5libertas-y += debugfs.o 5libertas-y += debugfs.o
6libertas-y += ethtool.o 6libertas-y += ethtool.o
7libertas-y += main.o 7libertas-y += main.o
8libertas-y += mesh.o
9libertas-y += rx.o 8libertas-y += rx.o
10libertas-y += scan.o 9libertas-y += scan.o
11libertas-y += tx.o 10libertas-y += tx.o
12libertas-y += wext.o 11libertas-y += wext.o
12libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
13 13
14usb8xxx-objs += if_usb.o 14usb8xxx-objs += if_usb.o
15libertas_cs-objs += if_cs.o 15libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 751067369ba..5e650f35841 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -390,10 +390,8 @@ int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto); 390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto); 391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd); 392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
393 if (!ret && cmd_action == CMD_ACT_GET) { 393 if (!ret && cmd_action == CMD_ACT_GET)
394 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
395 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto); 394 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
396 }
397 395
398 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 396 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
399 return ret; 397 return ret;
@@ -807,8 +805,7 @@ static int lbs_try_associate(struct lbs_private *priv,
807 } 805 }
808 806
809 /* Use short preamble only when both the BSS and firmware support it */ 807 /* Use short preamble only when both the BSS and firmware support it */
810 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 808 if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
811 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
812 preamble = RADIO_PREAMBLE_SHORT; 809 preamble = RADIO_PREAMBLE_SHORT;
813 810
814 ret = lbs_set_radio(priv, preamble, 1); 811 ret = lbs_set_radio(priv, preamble, 1);
@@ -939,8 +936,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
939 } 936 }
940 937
941 /* Use short preamble only when both the BSS and firmware support it */ 938 /* Use short preamble only when both the BSS and firmware support it */
942 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 939 if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
943 (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
944 lbs_deb_join("AdhocJoin: Short preamble\n"); 940 lbs_deb_join("AdhocJoin: Short preamble\n");
945 preamble = RADIO_PREAMBLE_SHORT; 941 preamble = RADIO_PREAMBLE_SHORT;
946 } 942 }
@@ -1049,7 +1045,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
1049 struct assoc_request *assoc_req) 1045 struct assoc_request *assoc_req)
1050{ 1046{
1051 struct cmd_ds_802_11_ad_hoc_start cmd; 1047 struct cmd_ds_802_11_ad_hoc_start cmd;
1052 u8 preamble = RADIO_PREAMBLE_LONG; 1048 u8 preamble = RADIO_PREAMBLE_SHORT;
1053 size_t ratesize = 0; 1049 size_t ratesize = 0;
1054 u16 tmpcap = 0; 1050 u16 tmpcap = 0;
1055 int ret = 0; 1051 int ret = 0;
@@ -1057,11 +1053,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
1057 1053
1058 lbs_deb_enter(LBS_DEB_ASSOC); 1054 lbs_deb_enter(LBS_DEB_ASSOC);
1059 1055
1060 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
1061 lbs_deb_join("ADHOC_START: Will use short preamble\n");
1062 preamble = RADIO_PREAMBLE_SHORT;
1063 }
1064
1065 ret = lbs_set_radio(priv, preamble, 1); 1056 ret = lbs_set_radio(priv, preamble, 1);
1066 if (ret) 1057 if (ret)
1067 goto out; 1058 goto out;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 42611bea76a..82371ef3952 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -143,19 +143,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
143 lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n", 143 lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
144 cmd.hwifversion, cmd.version); 144 cmd.hwifversion, cmd.version);
145 145
146 /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
147 /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
148 /* 5.110.22 have mesh command with 0xa3 command id */
149 /* 10.0.0.p0 FW brings in mesh config command with different id */
150 /* Check FW version MSB and initialize mesh_fw_ver */
151 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
152 priv->mesh_fw_ver = MESH_FW_OLD;
153 else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
154 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
155 priv->mesh_fw_ver = MESH_FW_NEW;
156 else
157 priv->mesh_fw_ver = MESH_NONE;
158
159 /* Clamp region code to 8-bit since FW spec indicates that it should 146 /* Clamp region code to 8-bit since FW spec indicates that it should
160 * only ever be 8-bit, even though the field size is 16-bit. Some firmware 147 * only ever be 8-bit, even though the field size is 16-bit. Some firmware
161 * returns non-zero high 8 bits here. 148 * returns non-zero high 8 bits here.
@@ -855,9 +842,6 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
855 if (priv->fwrelease < 0x09000000) { 842 if (priv->fwrelease < 0x09000000) {
856 switch (preamble) { 843 switch (preamble) {
857 case RADIO_PREAMBLE_SHORT: 844 case RADIO_PREAMBLE_SHORT:
858 if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
859 goto out;
860 /* Fall through */
861 case RADIO_PREAMBLE_AUTO: 845 case RADIO_PREAMBLE_AUTO:
862 case RADIO_PREAMBLE_LONG: 846 case RADIO_PREAMBLE_LONG:
863 cmd.control = cpu_to_le16(preamble); 847 cmd.control = cpu_to_le16(preamble);
@@ -1011,6 +995,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1011 ret = 0; 995 ret = 0;
1012 break; 996 break;
1013 997
998#ifdef CONFIG_LIBERTAS_MESH
999
1014 case CMD_BT_ACCESS: 1000 case CMD_BT_ACCESS:
1015 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); 1001 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
1016 break; 1002 break;
@@ -1019,6 +1005,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1019 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); 1005 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1020 break; 1006 break;
1021 1007
1008#endif
1009
1022 case CMD_802_11_BEACON_CTRL: 1010 case CMD_802_11_BEACON_CTRL:
1023 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1011 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1024 break; 1012 break;
@@ -1317,7 +1305,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
1317 if ((priv->psmode != LBS802_11POWERMODECAM) && 1305 if ((priv->psmode != LBS802_11POWERMODECAM) &&
1318 (priv->psstate == PS_STATE_FULL_POWER) && 1306 (priv->psstate == PS_STATE_FULL_POWER) &&
1319 ((priv->connect_status == LBS_CONNECTED) || 1307 ((priv->connect_status == LBS_CONNECTED) ||
1320 (priv->mesh_connect_status == LBS_CONNECTED))) { 1308 lbs_mesh_connected(priv))) {
1321 if (priv->secinfo.WPAenabled || 1309 if (priv->secinfo.WPAenabled ||
1322 priv->secinfo.WPA2enabled) { 1310 priv->secinfo.WPA2enabled) {
1323 /* check for valid WPA group keys */ 1311 /* check for valid WPA group keys */
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 2862748aef7..cb4138a55fd 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -110,18 +110,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); 110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
111 111
112 112
113/* Mesh related */
114
115int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
116 struct cmd_ds_mesh_access *cmd);
117
118int lbs_mesh_config_send(struct lbs_private *priv,
119 struct cmd_ds_mesh_config *cmd,
120 uint16_t action, uint16_t type);
121
122int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
123
124
125/* Commands only used in wext.c, assoc. and scan.c */ 113/* Commands only used in wext.c, assoc. and scan.c */
126 114
127int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 115int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 21d57690c20..0334a58820e 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -485,20 +485,8 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
485 break; 485 break;
486 486
487 case MACREG_INT_CODE_MESH_AUTO_STARTED: 487 case MACREG_INT_CODE_MESH_AUTO_STARTED:
488 /* Ignore spurious autostart events if autostart is disabled */ 488 /* Ignore spurious autostart events */
489 if (!priv->mesh_autostart_enabled) { 489 lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
490 lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
491 break;
492 }
493 lbs_pr_info("EVENT: MESH_AUTO_STARTED\n");
494 priv->mesh_connect_status = LBS_CONNECTED;
495 if (priv->mesh_open) {
496 netif_carrier_on(priv->mesh_dev);
497 if (!priv->tx_pending_len)
498 netif_wake_queue(priv->mesh_dev);
499 }
500 priv->mode = IW_MODE_ADHOC;
501 schedule_work(&priv->sync_channel);
502 break; 490 break;
503 491
504 default: 492 default:
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 6b6ea9f7bf5..ea3f10ef4e0 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -397,13 +397,6 @@ enum KEY_INFO_WPA {
397 KEY_INFO_WPA_ENABLED = 0x04 397 KEY_INFO_WPA_ENABLED = 0x04
398}; 398};
399 399
400/** mesh_fw_ver */
401enum _mesh_fw_ver {
402 MESH_NONE = 0, /* MESH is not supported */
403 MESH_FW_OLD, /* MESH is supported in FW V5 */
404 MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
405};
406
407/* Default values for fwt commands. */ 400/* Default values for fwt commands. */
408#define FWT_DEFAULT_METRIC 0 401#define FWT_DEFAULT_METRIC 0
409#define FWT_DEFAULT_DIR 1 402#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 05bb298dfae..c348aff8f30 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -39,15 +39,14 @@ struct lbs_private {
39 39
40 /* Mesh */ 40 /* Mesh */
41 struct net_device *mesh_dev; /* Virtual device */ 41 struct net_device *mesh_dev; /* Virtual device */
42#ifdef CONFIG_LIBERTAS_MESH
42 u32 mesh_connect_status; 43 u32 mesh_connect_status;
43 struct lbs_mesh_stats mstats; 44 struct lbs_mesh_stats mstats;
44 int mesh_open; 45 int mesh_open;
45 int mesh_fw_ver;
46 int mesh_autostart_enabled;
47 uint16_t mesh_tlv; 46 uint16_t mesh_tlv;
48 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1]; 47 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
49 u8 mesh_ssid_len; 48 u8 mesh_ssid_len;
50 struct work_struct sync_channel; 49#endif
51 50
52 /* Monitor mode */ 51 /* Monitor mode */
53 struct net_device *rtap_net_dev; 52 struct net_device *rtap_net_dev;
@@ -176,9 +175,7 @@ struct lbs_private {
176 struct bss_descriptor *networks; 175 struct bss_descriptor *networks;
177 struct assoc_request * pending_assoc_req; 176 struct assoc_request * pending_assoc_req;
178 struct assoc_request * in_progress_assoc_req; 177 struct assoc_request * in_progress_assoc_req;
179 u16 capability;
180 uint16_t enablehwauto; 178 uint16_t enablehwauto;
181 uint16_t ratebitmap;
182 179
183 /* ADHOC */ 180 /* ADHOC */
184 u16 beacon_period; 181 u16 beacon_period;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 63d020374c2..3804a58d7f4 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -114,9 +114,11 @@ const struct ethtool_ops lbs_ethtool_ops = {
114 .get_drvinfo = lbs_ethtool_get_drvinfo, 114 .get_drvinfo = lbs_ethtool_get_drvinfo,
115 .get_eeprom = lbs_ethtool_get_eeprom, 115 .get_eeprom = lbs_ethtool_get_eeprom,
116 .get_eeprom_len = lbs_ethtool_get_eeprom_len, 116 .get_eeprom_len = lbs_ethtool_get_eeprom_len,
117#ifdef CONFIG_LIBERTAS_MESH
117 .get_sset_count = lbs_mesh_ethtool_get_sset_count, 118 .get_sset_count = lbs_mesh_ethtool_get_sset_count,
118 .get_ethtool_stats = lbs_mesh_ethtool_get_stats, 119 .get_ethtool_stats = lbs_mesh_ethtool_get_stats,
119 .get_strings = lbs_mesh_ethtool_get_strings, 120 .get_strings = lbs_mesh_ethtool_get_strings,
121#endif
120 .get_wol = lbs_ethtool_get_wol, 122 .get_wol = lbs_ethtool_get_wol,
121 .set_wol = lbs_ethtool_set_wol, 123 .set_wol = lbs_ethtool_set_wol,
122}; 124};
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index c2975c8e2f2..60bde1233a3 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -123,7 +123,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
123 if (priv->monitormode == monitor_mode) 123 if (priv->monitormode == monitor_mode)
124 return strlen(buf); 124 return strlen(buf);
125 if (!priv->monitormode) { 125 if (!priv->monitormode) {
126 if (priv->infra_open || priv->mesh_open) 126 if (priv->infra_open || lbs_mesh_open(priv))
127 return -EBUSY; 127 return -EBUSY;
128 if (priv->mode == IW_MODE_INFRA) 128 if (priv->mode == IW_MODE_INFRA)
129 lbs_cmd_80211_deauthenticate(priv, 129 lbs_cmd_80211_deauthenticate(priv,
@@ -622,7 +622,7 @@ static int lbs_thread(void *data)
622 if (priv->connect_status == LBS_CONNECTED) 622 if (priv->connect_status == LBS_CONNECTED)
623 netif_wake_queue(priv->dev); 623 netif_wake_queue(priv->dev);
624 if (priv->mesh_dev && 624 if (priv->mesh_dev &&
625 priv->mesh_connect_status == LBS_CONNECTED) 625 lbs_mesh_connected(priv))
626 netif_wake_queue(priv->mesh_dev); 626 netif_wake_queue(priv->mesh_dev);
627 } 627 }
628 } 628 }
@@ -809,18 +809,6 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
809 return 0; 809 return 0;
810} 810}
811 811
812static void lbs_sync_channel_worker(struct work_struct *work)
813{
814 struct lbs_private *priv = container_of(work, struct lbs_private,
815 sync_channel);
816
817 lbs_deb_enter(LBS_DEB_MAIN);
818 if (lbs_update_channel(priv))
819 lbs_pr_info("Channel synchronization failed.");
820 lbs_deb_leave(LBS_DEB_MAIN);
821}
822
823
824static int lbs_init_adapter(struct lbs_private *priv) 812static int lbs_init_adapter(struct lbs_private *priv)
825{ 813{
826 size_t bufsize; 814 size_t bufsize;
@@ -848,14 +836,12 @@ static int lbs_init_adapter(struct lbs_private *priv)
848 memset(priv->current_addr, 0xff, ETH_ALEN); 836 memset(priv->current_addr, 0xff, ETH_ALEN);
849 837
850 priv->connect_status = LBS_DISCONNECTED; 838 priv->connect_status = LBS_DISCONNECTED;
851 priv->mesh_connect_status = LBS_DISCONNECTED;
852 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 839 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
853 priv->mode = IW_MODE_INFRA; 840 priv->mode = IW_MODE_INFRA;
854 priv->channel = DEFAULT_AD_HOC_CHANNEL; 841 priv->channel = DEFAULT_AD_HOC_CHANNEL;
855 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 842 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
856 priv->radio_on = 1; 843 priv->radio_on = 1;
857 priv->enablehwauto = 1; 844 priv->enablehwauto = 1;
858 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
859 priv->psmode = LBS802_11POWERMODECAM; 845 priv->psmode = LBS802_11POWERMODECAM;
860 priv->psstate = PS_STATE_FULL_POWER; 846 priv->psstate = PS_STATE_FULL_POWER;
861 priv->is_deep_sleep = 0; 847 priv->is_deep_sleep = 0;
@@ -998,11 +984,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
998 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker); 984 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
999 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); 985 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
1000 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); 986 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
1001 INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
1002
1003 priv->mesh_open = 0;
1004 sprintf(priv->mesh_ssid, "mesh");
1005 priv->mesh_ssid_len = 4;
1006 987
1007 priv->wol_criteria = 0xffffffff; 988 priv->wol_criteria = 0xffffffff;
1008 priv->wol_gpio = 0xff; 989 priv->wol_gpio = 0xff;
@@ -1076,6 +1057,17 @@ void lbs_remove_card(struct lbs_private *priv)
1076EXPORT_SYMBOL_GPL(lbs_remove_card); 1057EXPORT_SYMBOL_GPL(lbs_remove_card);
1077 1058
1078 1059
1060static int lbs_rtap_supported(struct lbs_private *priv)
1061{
1062 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
1063 return 1;
1064
1065 /* newer firmware use a capability mask */
1066 return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
1067 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK));
1068}
1069
1070
1079int lbs_start_card(struct lbs_private *priv) 1071int lbs_start_card(struct lbs_private *priv)
1080{ 1072{
1081 struct net_device *dev = priv->dev; 1073 struct net_device *dev = priv->dev;
@@ -1095,12 +1087,14 @@ int lbs_start_card(struct lbs_private *priv)
1095 1087
1096 lbs_update_channel(priv); 1088 lbs_update_channel(priv);
1097 1089
1090 lbs_init_mesh(priv);
1091
1098 /* 1092 /*
1099 * While rtap isn't related to mesh, only mesh-enabled 1093 * While rtap isn't related to mesh, only mesh-enabled
1100 * firmware implements the rtap functionality via 1094 * firmware implements the rtap functionality via
1101 * CMD_802_11_MONITOR_MODE. 1095 * CMD_802_11_MONITOR_MODE.
1102 */ 1096 */
1103 if (lbs_init_mesh(priv)) { 1097 if (lbs_rtap_supported(priv)) {
1104 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap)) 1098 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1105 lbs_pr_err("cannot register lbs_rtap attribute\n"); 1099 lbs_pr_err("cannot register lbs_rtap attribute\n");
1106 } 1100 }
@@ -1134,7 +1128,9 @@ void lbs_stop_card(struct lbs_private *priv)
1134 netif_carrier_off(dev); 1128 netif_carrier_off(dev);
1135 1129
1136 lbs_debugfs_remove_one(priv); 1130 lbs_debugfs_remove_one(priv);
1137 if (lbs_deinit_mesh(priv)) 1131 lbs_deinit_mesh(priv);
1132
1133 if (lbs_rtap_supported(priv))
1138 device_remove_file(&dev->dev, &dev_attr_lbs_rtap); 1134 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1139 1135
1140 /* Delete the timeout of the currently processing command */ 1136 /* Delete the timeout of the currently processing command */
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 92b7a357a5e..e385af1f458 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,4 +1,3 @@
1#include <linux/moduleparam.h>
2#include <linux/delay.h> 1#include <linux/delay.h>
3#include <linux/etherdevice.h> 2#include <linux/etherdevice.h>
4#include <linux/netdevice.h> 3#include <linux/netdevice.h>
@@ -197,7 +196,14 @@ int lbs_init_mesh(struct lbs_private *priv)
197 196
198 lbs_deb_enter(LBS_DEB_MESH); 197 lbs_deb_enter(LBS_DEB_MESH);
199 198
200 if (priv->mesh_fw_ver == MESH_FW_OLD) { 199 priv->mesh_connect_status = LBS_DISCONNECTED;
200
201 /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
202 /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
203 /* 5.110.22 have mesh command with 0xa3 command id */
204 /* 10.0.0.p0 FW brings in mesh config command with different id */
205 /* Check FW version MSB and initialize mesh_fw_ver */
206 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
201 /* Enable mesh, if supported, and work out which TLV it uses. 207 /* Enable mesh, if supported, and work out which TLV it uses.
202 0x100 + 291 is an unofficial value used in 5.110.20.pXX 208 0x100 + 291 is an unofficial value used in 5.110.20.pXX
203 0x100 + 37 is the official value used in 5.110.21.pXX 209 0x100 + 37 is the official value used in 5.110.21.pXX
@@ -219,7 +225,9 @@ int lbs_init_mesh(struct lbs_private *priv)
219 priv->channel)) 225 priv->channel))
220 priv->mesh_tlv = 0; 226 priv->mesh_tlv = 0;
221 } 227 }
222 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 228 } else
229 if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
230 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
223 /* 10.0.0.pXX new firmwares should succeed with TLV 231 /* 10.0.0.pXX new firmwares should succeed with TLV
224 * 0x100+37; Do not invoke command with old TLV. 232 * 0x100+37; Do not invoke command with old TLV.
225 */ 233 */
@@ -228,7 +236,12 @@ int lbs_init_mesh(struct lbs_private *priv)
228 priv->channel)) 236 priv->channel))
229 priv->mesh_tlv = 0; 237 priv->mesh_tlv = 0;
230 } 238 }
239
240
231 if (priv->mesh_tlv) { 241 if (priv->mesh_tlv) {
242 sprintf(priv->mesh_ssid, "mesh");
243 priv->mesh_ssid_len = 4;
244
232 lbs_add_mesh(priv); 245 lbs_add_mesh(priv);
233 246
234 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh)) 247 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
@@ -416,10 +429,10 @@ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
416 struct net_device *dev, struct rxpd *rxpd) 429 struct net_device *dev, struct rxpd *rxpd)
417{ 430{
418 if (priv->mesh_dev) { 431 if (priv->mesh_dev) {
419 if (priv->mesh_fw_ver == MESH_FW_OLD) { 432 if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
420 if (rxpd->rx_control & RxPD_MESH_FRAME) 433 if (rxpd->rx_control & RxPD_MESH_FRAME)
421 dev = priv->mesh_dev; 434 dev = priv->mesh_dev;
422 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 435 } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
423 if (rxpd->u.bss.bss_num == MESH_IFACE_ID) 436 if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
424 dev = priv->mesh_dev; 437 dev = priv->mesh_dev;
425 } 438 }
@@ -432,9 +445,9 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
432 struct net_device *dev, struct txpd *txpd) 445 struct net_device *dev, struct txpd *txpd)
433{ 446{
434 if (dev == priv->mesh_dev) { 447 if (dev == priv->mesh_dev) {
435 if (priv->mesh_fw_ver == MESH_FW_OLD) 448 if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
436 txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME); 449 txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
437 else if (priv->mesh_fw_ver == MESH_FW_NEW) 450 else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
438 txpd->u.bss.bss_num = MESH_IFACE_ID; 451 txpd->u.bss.bss_num = MESH_IFACE_ID;
439 } 452 }
440} 453}
@@ -538,7 +551,7 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
538 * Command id is 0xac for v10 FW along with mesh interface 551 * Command id is 0xac for v10 FW along with mesh interface
539 * id in bits 14-13-12. 552 * id in bits 14-13-12.
540 */ 553 */
541 if (priv->mesh_fw_ver == MESH_FW_NEW) 554 if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
542 command = CMD_MESH_CONFIG | 555 command = CMD_MESH_CONFIG |
543 (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET); 556 (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
544 557
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index fea9b5d005f..e2573303a32 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,6 +9,8 @@
9#include <net/lib80211.h> 9#include <net/lib80211.h>
10 10
11 11
12#ifdef CONFIG_LIBERTAS_MESH
13
12/* Mesh statistics */ 14/* Mesh statistics */
13struct lbs_mesh_stats { 15struct lbs_mesh_stats {
14 u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */ 16 u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
@@ -46,11 +48,20 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
46/* Command handling */ 48/* Command handling */
47 49
48struct cmd_ds_command; 50struct cmd_ds_command;
51struct cmd_ds_mesh_access;
52struct cmd_ds_mesh_config;
49 53
50int lbs_cmd_bt_access(struct cmd_ds_command *cmd, 54int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
51 u16 cmd_action, void *pdata_buf); 55 u16 cmd_action, void *pdata_buf);
52int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, 56int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
53 u16 cmd_action, void *pdata_buf); 57 u16 cmd_action, void *pdata_buf);
58int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
59 struct cmd_ds_mesh_access *cmd);
60int lbs_mesh_config_send(struct lbs_private *priv,
61 struct cmd_ds_mesh_config *cmd,
62 uint16_t action, uint16_t type);
63int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
64
54 65
55 66
56/* Persistent configuration */ 67/* Persistent configuration */
@@ -75,4 +86,25 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
75 uint32_t stringset, uint8_t *s); 86 uint32_t stringset, uint8_t *s);
76 87
77 88
89/* Accessors */
90
91#define lbs_mesh_open(priv) (priv->mesh_open)
92#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
93
94#else
95
96#define lbs_init_mesh(priv)
97#define lbs_deinit_mesh(priv)
98#define lbs_add_mesh(priv)
99#define lbs_remove_mesh(priv)
100#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
101#define lbs_mesh_set_txpd(priv, dev, txpd)
102#define lbs_mesh_config(priv, enable, chan)
103#define lbs_mesh_open(priv) (0)
104#define lbs_mesh_connected(priv) (0)
105
106#endif
107
108
109
78#endif 110#endif
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index b0b1c784150..220361e69cd 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -635,7 +635,7 @@ out:
635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) 635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
636 netif_wake_queue(priv->dev); 636 netif_wake_queue(priv->dev);
637 637
638 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) && 638 if (priv->mesh_dev && lbs_mesh_connected(priv) &&
639 !priv->tx_pending_len) 639 !priv->tx_pending_len)
640 netif_wake_queue(priv->mesh_dev); 640 netif_wake_queue(priv->mesh_dev);
641 641
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 315d1ce286c..52d244ea3d9 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -198,7 +198,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
198 if (priv->connect_status == LBS_CONNECTED) 198 if (priv->connect_status == LBS_CONNECTED)
199 netif_wake_queue(priv->dev); 199 netif_wake_queue(priv->dev);
200 200
201 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) 201 if (priv->mesh_dev && lbs_mesh_connected(priv))
202 netif_wake_queue(priv->mesh_dev); 202 netif_wake_queue(priv->mesh_dev);
203} 203}
204EXPORT_SYMBOL_GPL(lbs_send_tx_feedback); 204EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 4b1aab593a8..71f88a08e09 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -192,7 +192,7 @@ static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
192 lbs_deb_enter(LBS_DEB_WEXT); 192 lbs_deb_enter(LBS_DEB_WEXT);
193 193
194 if ((priv->connect_status != LBS_CONNECTED) && 194 if ((priv->connect_status != LBS_CONNECTED) &&
195 (priv->mesh_connect_status != LBS_CONNECTED)) 195 !lbs_mesh_connected(priv))
196 memcpy(rates, lbs_bg_rates, MAX_RATES); 196 memcpy(rates, lbs_bg_rates, MAX_RATES);
197 else 197 else
198 memcpy(rates, priv->curbssparams.rates, MAX_RATES); 198 memcpy(rates, priv->curbssparams.rates, MAX_RATES);
@@ -298,6 +298,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
298 return 0; 298 return 0;
299} 299}
300 300
301#ifdef CONFIG_LIBERTAS_MESH
301static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, 302static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
302 struct iw_point *dwrq, char *extra) 303 struct iw_point *dwrq, char *extra)
303{ 304{
@@ -307,7 +308,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
307 308
308 /* Use nickname to indicate that mesh is on */ 309 /* Use nickname to indicate that mesh is on */
309 310
310 if (priv->mesh_connect_status == LBS_CONNECTED) { 311 if (lbs_mesh_connected(priv)) {
311 strncpy(extra, "Mesh", 12); 312 strncpy(extra, "Mesh", 12);
312 extra[12] = '\0'; 313 extra[12] = '\0';
313 dwrq->length = strlen(extra); 314 dwrq->length = strlen(extra);
@@ -321,6 +322,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
321 lbs_deb_leave(LBS_DEB_WEXT); 322 lbs_deb_leave(LBS_DEB_WEXT);
322 return 0; 323 return 0;
323} 324}
325#endif
324 326
325static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info, 327static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
326 struct iw_param *vwrq, char *extra) 328 struct iw_param *vwrq, char *extra)
@@ -422,6 +424,7 @@ static int lbs_get_mode(struct net_device *dev,
422 return 0; 424 return 0;
423} 425}
424 426
427#ifdef CONFIG_LIBERTAS_MESH
425static int mesh_wlan_get_mode(struct net_device *dev, 428static int mesh_wlan_get_mode(struct net_device *dev,
426 struct iw_request_info *info, u32 * uwrq, 429 struct iw_request_info *info, u32 * uwrq,
427 char *extra) 430 char *extra)
@@ -433,6 +436,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
433 lbs_deb_leave(LBS_DEB_WEXT); 436 lbs_deb_leave(LBS_DEB_WEXT);
434 return 0; 437 return 0;
435} 438}
439#endif
436 440
437static int lbs_get_txpow(struct net_device *dev, 441static int lbs_get_txpow(struct net_device *dev,
438 struct iw_request_info *info, 442 struct iw_request_info *info,
@@ -863,7 +867,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
863 867
864 /* If we're not associated, all quality values are meaningless */ 868 /* If we're not associated, all quality values are meaningless */
865 if ((priv->connect_status != LBS_CONNECTED) && 869 if ((priv->connect_status != LBS_CONNECTED) &&
866 (priv->mesh_connect_status != LBS_CONNECTED)) 870 !lbs_mesh_connected(priv))
867 goto out; 871 goto out;
868 872
869 /* Quality by RSSI */ 873 /* Quality by RSSI */
@@ -1010,6 +1014,7 @@ out:
1010 return ret; 1014 return ret;
1011} 1015}
1012 1016
1017#ifdef CONFIG_LIBERTAS_MESH
1013static int lbs_mesh_set_freq(struct net_device *dev, 1018static int lbs_mesh_set_freq(struct net_device *dev,
1014 struct iw_request_info *info, 1019 struct iw_request_info *info,
1015 struct iw_freq *fwrq, char *extra) 1020 struct iw_freq *fwrq, char *extra)
@@ -1061,6 +1066,7 @@ out:
1061 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 1066 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1062 return ret; 1067 return ret;
1063} 1068}
1069#endif
1064 1070
1065static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info, 1071static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1066 struct iw_param *vwrq, char *extra) 1072 struct iw_param *vwrq, char *extra)
@@ -2108,6 +2114,7 @@ out:
2108 return ret; 2114 return ret;
2109} 2115}
2110 2116
2117#ifdef CONFIG_LIBERTAS_MESH
2111static int lbs_mesh_get_essid(struct net_device *dev, 2118static int lbs_mesh_get_essid(struct net_device *dev,
2112 struct iw_request_info *info, 2119 struct iw_request_info *info,
2113 struct iw_point *dwrq, char *extra) 2120 struct iw_point *dwrq, char *extra)
@@ -2161,6 +2168,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2161 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2168 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2162 return ret; 2169 return ret;
2163} 2170}
2171#endif
2164 2172
2165/** 2173/**
2166 * @brief Connect to the AP or Ad-hoc Network with specific bssid 2174 * @brief Connect to the AP or Ad-hoc Network with specific bssid
@@ -2267,7 +2275,13 @@ static const iw_handler lbs_handler[] = {
2267 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ 2275 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
2268 (iw_handler) NULL, /* SIOCSIWPMKSA */ 2276 (iw_handler) NULL, /* SIOCSIWPMKSA */
2269}; 2277};
2278struct iw_handler_def lbs_handler_def = {
2279 .num_standard = ARRAY_SIZE(lbs_handler),
2280 .standard = (iw_handler *) lbs_handler,
2281 .get_wireless_stats = lbs_get_wireless_stats,
2282};
2270 2283
2284#ifdef CONFIG_LIBERTAS_MESH
2271static const iw_handler mesh_wlan_handler[] = { 2285static const iw_handler mesh_wlan_handler[] = {
2272 (iw_handler) NULL, /* SIOCSIWCOMMIT */ 2286 (iw_handler) NULL, /* SIOCSIWCOMMIT */
2273 (iw_handler) lbs_get_name, /* SIOCGIWNAME */ 2287 (iw_handler) lbs_get_name, /* SIOCGIWNAME */
@@ -2325,14 +2339,10 @@ static const iw_handler mesh_wlan_handler[] = {
2325 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ 2339 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
2326 (iw_handler) NULL, /* SIOCSIWPMKSA */ 2340 (iw_handler) NULL, /* SIOCSIWPMKSA */
2327}; 2341};
2328struct iw_handler_def lbs_handler_def = {
2329 .num_standard = ARRAY_SIZE(lbs_handler),
2330 .standard = (iw_handler *) lbs_handler,
2331 .get_wireless_stats = lbs_get_wireless_stats,
2332};
2333 2342
2334struct iw_handler_def mesh_handler_def = { 2343struct iw_handler_def mesh_handler_def = {
2335 .num_standard = ARRAY_SIZE(mesh_wlan_handler), 2344 .num_standard = ARRAY_SIZE(mesh_wlan_handler),
2336 .standard = (iw_handler *) mesh_wlan_handler, 2345 .standard = (iw_handler *) mesh_wlan_handler,
2337 .get_wireless_stats = lbs_get_wireless_stats, 2346 .get_wireless_stats = lbs_get_wireless_stats,
2338}; 2347};
2348#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 26a1abd5bb0..ba3eb0101d5 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -318,14 +318,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
318} 318}
319 319
320static int lbtf_op_add_interface(struct ieee80211_hw *hw, 320static int lbtf_op_add_interface(struct ieee80211_hw *hw,
321 struct ieee80211_if_init_conf *conf) 321 struct ieee80211_vif *vif)
322{ 322{
323 struct lbtf_private *priv = hw->priv; 323 struct lbtf_private *priv = hw->priv;
324 if (priv->vif != NULL) 324 if (priv->vif != NULL)
325 return -EOPNOTSUPP; 325 return -EOPNOTSUPP;
326 326
327 priv->vif = conf->vif; 327 priv->vif = vif;
328 switch (conf->type) { 328 switch (vif->type) {
329 case NL80211_IFTYPE_MESH_POINT: 329 case NL80211_IFTYPE_MESH_POINT:
330 case NL80211_IFTYPE_AP: 330 case NL80211_IFTYPE_AP:
331 lbtf_set_mode(priv, LBTF_AP_MODE); 331 lbtf_set_mode(priv, LBTF_AP_MODE);
@@ -337,12 +337,12 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
337 priv->vif = NULL; 337 priv->vif = NULL;
338 return -EOPNOTSUPP; 338 return -EOPNOTSUPP;
339 } 339 }
340 lbtf_set_mac_address(priv, (u8 *) conf->mac_addr); 340 lbtf_set_mac_address(priv, (u8 *) vif->addr);
341 return 0; 341 return 0;
342} 342}
343 343
344static void lbtf_op_remove_interface(struct ieee80211_hw *hw, 344static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
345 struct ieee80211_if_init_conf *conf) 345 struct ieee80211_vif *vif)
346{ 346{
347 struct lbtf_private *priv = hw->priv; 347 struct lbtf_private *priv = hw->priv;
348 348
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 88e41176e7f..84df3fcf37b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -436,6 +436,38 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
436} 436}
437 437
438 438
439struct mac80211_hwsim_addr_match_data {
440 bool ret;
441 const u8 *addr;
442};
443
444static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
445 struct ieee80211_vif *vif)
446{
447 struct mac80211_hwsim_addr_match_data *md = data;
448 if (memcmp(mac, md->addr, ETH_ALEN) == 0)
449 md->ret = true;
450}
451
452
453static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
454 const u8 *addr)
455{
456 struct mac80211_hwsim_addr_match_data md;
457
458 if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
459 return true;
460
461 md.ret = false;
462 md.addr = addr;
463 ieee80211_iterate_active_interfaces_atomic(data->hw,
464 mac80211_hwsim_addr_iter,
465 &md);
466
467 return md.ret;
468}
469
470
439static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, 471static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
440 struct sk_buff *skb) 472 struct sk_buff *skb)
441{ 473{
@@ -488,8 +520,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
488 if (nskb == NULL) 520 if (nskb == NULL)
489 continue; 521 continue;
490 522
491 if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr, 523 if (mac80211_hwsim_addr_match(data2, hdr->addr1))
492 ETH_ALEN) == 0)
493 ack = true; 524 ack = true;
494 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); 525 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
495 ieee80211_rx_irqsafe(data2->hw, nskb); 526 ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -553,24 +584,24 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
553 584
554 585
555static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, 586static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
556 struct ieee80211_if_init_conf *conf) 587 struct ieee80211_vif *vif)
557{ 588{
558 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 589 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
559 wiphy_name(hw->wiphy), __func__, conf->type, 590 wiphy_name(hw->wiphy), __func__, vif->type,
560 conf->mac_addr); 591 vif->addr);
561 hwsim_set_magic(conf->vif); 592 hwsim_set_magic(vif);
562 return 0; 593 return 0;
563} 594}
564 595
565 596
566static void mac80211_hwsim_remove_interface( 597static void mac80211_hwsim_remove_interface(
567 struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) 598 struct ieee80211_hw *hw, struct ieee80211_vif *vif)
568{ 599{
569 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 600 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
570 wiphy_name(hw->wiphy), __func__, conf->type, 601 wiphy_name(hw->wiphy), __func__, vif->type,
571 conf->mac_addr); 602 vif->addr);
572 hwsim_check_magic(conf->vif); 603 hwsim_check_magic(vif);
573 hwsim_clear_magic(conf->vif); 604 hwsim_clear_magic(vif);
574} 605}
575 606
576 607
@@ -618,12 +649,26 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
618{ 649{
619 struct mac80211_hwsim_data *data = hw->priv; 650 struct mac80211_hwsim_data *data = hw->priv;
620 struct ieee80211_conf *conf = &hw->conf; 651 struct ieee80211_conf *conf = &hw->conf;
621 652 static const char *chantypes[4] = {
622 printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n", 653 [NL80211_CHAN_NO_HT] = "noht",
654 [NL80211_CHAN_HT20] = "ht20",
655 [NL80211_CHAN_HT40MINUS] = "ht40-",
656 [NL80211_CHAN_HT40PLUS] = "ht40+",
657 };
658 static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
659 [IEEE80211_SMPS_AUTOMATIC] = "auto",
660 [IEEE80211_SMPS_OFF] = "off",
661 [IEEE80211_SMPS_STATIC] = "static",
662 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
663 };
664
665 printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
623 wiphy_name(hw->wiphy), __func__, 666 wiphy_name(hw->wiphy), __func__,
624 conf->channel->center_freq, 667 conf->channel->center_freq,
668 chantypes[conf->channel_type],
625 !!(conf->flags & IEEE80211_CONF_IDLE), 669 !!(conf->flags & IEEE80211_CONF_IDLE),
626 !!(conf->flags & IEEE80211_CONF_PS)); 670 !!(conf->flags & IEEE80211_CONF_PS),
671 smps_modes[conf->smps_mode]);
627 672
628 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 673 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
629 674
@@ -827,6 +872,41 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
827} 872}
828#endif 873#endif
829 874
875static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
876 struct ieee80211_vif *vif,
877 enum ieee80211_ampdu_mlme_action action,
878 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
879{
880 switch (action) {
881 case IEEE80211_AMPDU_TX_START:
882 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
883 break;
884 case IEEE80211_AMPDU_TX_STOP:
885 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
886 break;
887 case IEEE80211_AMPDU_TX_OPERATIONAL:
888 break;
889 case IEEE80211_AMPDU_RX_START:
890 case IEEE80211_AMPDU_RX_STOP:
891 break;
892 default:
893 return -EOPNOTSUPP;
894 }
895
896 return 0;
897}
898
899static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
900{
901 /*
902 * In this special case, there's nothing we need to
903 * do because hwsim does transmission synchronously.
904 * In the future, when it does transmissions via
905 * userspace, we may need to do something.
906 */
907}
908
909
830static const struct ieee80211_ops mac80211_hwsim_ops = 910static const struct ieee80211_ops mac80211_hwsim_ops =
831{ 911{
832 .tx = mac80211_hwsim_tx, 912 .tx = mac80211_hwsim_tx,
@@ -841,6 +921,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
841 .set_tim = mac80211_hwsim_set_tim, 921 .set_tim = mac80211_hwsim_set_tim,
842 .conf_tx = mac80211_hwsim_conf_tx, 922 .conf_tx = mac80211_hwsim_conf_tx,
843 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) 923 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
924 .ampdu_action = mac80211_hwsim_ampdu_action,
925 .flush = mac80211_hwsim_flush,
844}; 926};
845 927
846 928
@@ -1082,7 +1164,9 @@ static int __init init_mac80211_hwsim(void)
1082 BIT(NL80211_IFTYPE_MESH_POINT); 1164 BIT(NL80211_IFTYPE_MESH_POINT);
1083 1165
1084 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1166 hw->flags = IEEE80211_HW_MFP_CAPABLE |
1085 IEEE80211_HW_SIGNAL_DBM; 1167 IEEE80211_HW_SIGNAL_DBM |
1168 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1169 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
1086 1170
1087 /* ask mac80211 to reserve space for magic */ 1171 /* ask mac80211 to reserve space for magic */
1088 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 1172 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59f92105b0c..68546ca0ba3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
2 * drivers/net/wireless/mwl8k.c 2 * drivers/net/wireless/mwl8k.c
3 * Driver for Marvell TOPDOG 802.11 Wireless cards 3 * Driver for Marvell TOPDOG 802.11 Wireless cards
4 * 4 *
5 * Copyright (C) 2008-2009 Marvell Semiconductor Inc. 5 * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -26,7 +26,7 @@
26 26
27#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" 27#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
28#define MWL8K_NAME KBUILD_MODNAME 28#define MWL8K_NAME KBUILD_MODNAME
29#define MWL8K_VERSION "0.10" 29#define MWL8K_VERSION "0.12"
30 30
31/* Register definitions */ 31/* Register definitions */
32#define MWL8K_HIU_GEN_PTR 0x00000c10 32#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -92,8 +92,7 @@ struct mwl8k_device_info {
92 char *part_name; 92 char *part_name;
93 char *helper_image; 93 char *helper_image;
94 char *fw_image; 94 char *fw_image;
95 struct rxd_ops *rxd_ops; 95 struct rxd_ops *ap_rxd_ops;
96 u16 modes;
97}; 96};
98 97
99struct mwl8k_rx_queue { 98struct mwl8k_rx_queue {
@@ -126,28 +125,30 @@ struct mwl8k_tx_queue {
126 struct sk_buff **skb; 125 struct sk_buff **skb;
127}; 126};
128 127
129/* Pointers to the firmware data and meta information about it. */ 128struct mwl8k_priv {
130struct mwl8k_firmware { 129 struct ieee80211_hw *hw;
131 /* Boot helper code */ 130 struct pci_dev *pdev;
132 struct firmware *helper;
133 131
134 /* Microcode */ 132 struct mwl8k_device_info *device_info;
135 struct firmware *ucode;
136};
137 133
138struct mwl8k_priv {
139 void __iomem *sram; 134 void __iomem *sram;
140 void __iomem *regs; 135 void __iomem *regs;
141 struct ieee80211_hw *hw;
142 136
143 struct pci_dev *pdev; 137 /* firmware */
138 struct firmware *fw_helper;
139 struct firmware *fw_ucode;
144 140
145 struct mwl8k_device_info *device_info; 141 /* hardware/firmware parameters */
146 bool ap_fw; 142 bool ap_fw;
147 struct rxd_ops *rxd_ops; 143 struct rxd_ops *rxd_ops;
148 144 struct ieee80211_supported_band band_24;
149 /* firmware files and meta data */ 145 struct ieee80211_channel channels_24[14];
150 struct mwl8k_firmware fw; 146 struct ieee80211_rate rates_24[14];
147 struct ieee80211_supported_band band_50;
148 struct ieee80211_channel channels_50[4];
149 struct ieee80211_rate rates_50[9];
150 u32 ap_macids_supported;
151 u32 sta_macids_supported;
151 152
152 /* firmware access */ 153 /* firmware access */
153 struct mutex fw_mutex; 154 struct mutex fw_mutex;
@@ -161,9 +162,9 @@ struct mwl8k_priv {
161 /* TX quiesce completion, protected by fw_mutex and tx_lock */ 162 /* TX quiesce completion, protected by fw_mutex and tx_lock */
162 struct completion *tx_wait; 163 struct completion *tx_wait;
163 164
164 struct ieee80211_vif *vif; 165 /* List of interfaces. */
165 166 u32 macids_used;
166 struct ieee80211_channel *current_channel; 167 struct list_head vif_list;
167 168
168 /* power management status cookie from firmware */ 169 /* power management status cookie from firmware */
169 u32 *cookie; 170 u32 *cookie;
@@ -182,16 +183,15 @@ struct mwl8k_priv {
182 struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES]; 183 struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
183 struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES]; 184 struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
184 185
185 /* PHY parameters */
186 struct ieee80211_supported_band band;
187 struct ieee80211_channel channels[14];
188 struct ieee80211_rate rates[14];
189
190 bool radio_on; 186 bool radio_on;
191 bool radio_short_preamble; 187 bool radio_short_preamble;
192 bool sniffer_enabled; 188 bool sniffer_enabled;
193 bool wmm_enabled; 189 bool wmm_enabled;
194 190
191 struct work_struct sta_notify_worker;
192 spinlock_t sta_notify_list_lock;
193 struct list_head sta_notify_list;
194
195 /* XXX need to convert this to handle multiple interfaces */ 195 /* XXX need to convert this to handle multiple interfaces */
196 bool capture_beacon; 196 bool capture_beacon;
197 u8 capture_bssid[ETH_ALEN]; 197 u8 capture_bssid[ETH_ALEN];
@@ -205,32 +205,33 @@ struct mwl8k_priv {
205 */ 205 */
206 struct work_struct finalize_join_worker; 206 struct work_struct finalize_join_worker;
207 207
208 /* Tasklet to reclaim TX descriptors and buffers after tx */ 208 /* Tasklet to perform TX reclaim. */
209 struct tasklet_struct tx_reclaim_task; 209 struct tasklet_struct poll_tx_task;
210
211 /* Tasklet to perform RX. */
212 struct tasklet_struct poll_rx_task;
210}; 213};
211 214
212/* Per interface specific private data */ 215/* Per interface specific private data */
213struct mwl8k_vif { 216struct mwl8k_vif {
214 /* backpointer to parent config block */ 217 struct list_head list;
215 struct mwl8k_priv *priv; 218 struct ieee80211_vif *vif;
216
217 /* BSS config of AP or IBSS from mac80211*/
218 struct ieee80211_bss_conf bss_info;
219
220 /* BSSID of AP or IBSS */
221 u8 bssid[ETH_ALEN];
222 u8 mac_addr[ETH_ALEN];
223 219
224 /* Index into station database.Returned by update_sta_db call */ 220 /* Firmware macid for this vif. */
225 u8 peer_id; 221 int macid;
226 222
227 /* Non AMPDU sequence number assigned by driver */ 223 /* Non AMPDU sequence number assigned by driver. */
228 u16 seqno; 224 u16 seqno;
229}; 225};
230
231#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) 226#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
232 227
233static const struct ieee80211_channel mwl8k_channels[] = { 228struct mwl8k_sta {
229 /* Index into station database. Returned by UPDATE_STADB. */
230 u8 peer_id;
231};
232#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
233
234static const struct ieee80211_channel mwl8k_channels_24[] = {
234 { .center_freq = 2412, .hw_value = 1, }, 235 { .center_freq = 2412, .hw_value = 1, },
235 { .center_freq = 2417, .hw_value = 2, }, 236 { .center_freq = 2417, .hw_value = 2, },
236 { .center_freq = 2422, .hw_value = 3, }, 237 { .center_freq = 2422, .hw_value = 3, },
@@ -242,9 +243,12 @@ static const struct ieee80211_channel mwl8k_channels[] = {
242 { .center_freq = 2452, .hw_value = 9, }, 243 { .center_freq = 2452, .hw_value = 9, },
243 { .center_freq = 2457, .hw_value = 10, }, 244 { .center_freq = 2457, .hw_value = 10, },
244 { .center_freq = 2462, .hw_value = 11, }, 245 { .center_freq = 2462, .hw_value = 11, },
246 { .center_freq = 2467, .hw_value = 12, },
247 { .center_freq = 2472, .hw_value = 13, },
248 { .center_freq = 2484, .hw_value = 14, },
245}; 249};
246 250
247static const struct ieee80211_rate mwl8k_rates[] = { 251static const struct ieee80211_rate mwl8k_rates_24[] = {
248 { .bitrate = 10, .hw_value = 2, }, 252 { .bitrate = 10, .hw_value = 2, },
249 { .bitrate = 20, .hw_value = 4, }, 253 { .bitrate = 20, .hw_value = 4, },
250 { .bitrate = 55, .hw_value = 11, }, 254 { .bitrate = 55, .hw_value = 11, },
@@ -261,8 +265,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
261 { .bitrate = 720, .hw_value = 144, }, 265 { .bitrate = 720, .hw_value = 144, },
262}; 266};
263 267
264static const u8 mwl8k_rateids[12] = { 268static const struct ieee80211_channel mwl8k_channels_50[] = {
265 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 269 { .center_freq = 5180, .hw_value = 36, },
270 { .center_freq = 5200, .hw_value = 40, },
271 { .center_freq = 5220, .hw_value = 44, },
272 { .center_freq = 5240, .hw_value = 48, },
273};
274
275static const struct ieee80211_rate mwl8k_rates_50[] = {
276 { .bitrate = 60, .hw_value = 12, },
277 { .bitrate = 90, .hw_value = 18, },
278 { .bitrate = 120, .hw_value = 24, },
279 { .bitrate = 180, .hw_value = 36, },
280 { .bitrate = 240, .hw_value = 48, },
281 { .bitrate = 360, .hw_value = 72, },
282 { .bitrate = 480, .hw_value = 96, },
283 { .bitrate = 540, .hw_value = 108, },
284 { .bitrate = 720, .hw_value = 144, },
266}; 285};
267 286
268/* Set or get info from Firmware */ 287/* Set or get info from Firmware */
@@ -278,6 +297,7 @@ static const u8 mwl8k_rateids[12] = {
278#define MWL8K_CMD_RADIO_CONTROL 0x001c 297#define MWL8K_CMD_RADIO_CONTROL 0x001c
279#define MWL8K_CMD_RF_TX_POWER 0x001e 298#define MWL8K_CMD_RF_TX_POWER 0x001e
280#define MWL8K_CMD_RF_ANTENNA 0x0020 299#define MWL8K_CMD_RF_ANTENNA 0x0020
300#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
281#define MWL8K_CMD_SET_PRE_SCAN 0x0107 301#define MWL8K_CMD_SET_PRE_SCAN 0x0107
282#define MWL8K_CMD_SET_POST_SCAN 0x0108 302#define MWL8K_CMD_SET_POST_SCAN 0x0108
283#define MWL8K_CMD_SET_RF_CHANNEL 0x010a 303#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -291,8 +311,10 @@ static const u8 mwl8k_rateids[12] = {
291#define MWL8K_CMD_MIMO_CONFIG 0x0125 311#define MWL8K_CMD_MIMO_CONFIG 0x0125
292#define MWL8K_CMD_USE_FIXED_RATE 0x0126 312#define MWL8K_CMD_USE_FIXED_RATE 0x0126
293#define MWL8K_CMD_ENABLE_SNIFFER 0x0150 313#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
294#define MWL8K_CMD_SET_MAC_ADDR 0x0202 314#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
295#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 315#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
316#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
317#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
296#define MWL8K_CMD_UPDATE_STADB 0x1123 318#define MWL8K_CMD_UPDATE_STADB 0x1123
297 319
298static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize) 320static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -310,6 +332,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
310 MWL8K_CMDNAME(RADIO_CONTROL); 332 MWL8K_CMDNAME(RADIO_CONTROL);
311 MWL8K_CMDNAME(RF_TX_POWER); 333 MWL8K_CMDNAME(RF_TX_POWER);
312 MWL8K_CMDNAME(RF_ANTENNA); 334 MWL8K_CMDNAME(RF_ANTENNA);
335 MWL8K_CMDNAME(SET_BEACON);
313 MWL8K_CMDNAME(SET_PRE_SCAN); 336 MWL8K_CMDNAME(SET_PRE_SCAN);
314 MWL8K_CMDNAME(SET_POST_SCAN); 337 MWL8K_CMDNAME(SET_POST_SCAN);
315 MWL8K_CMDNAME(SET_RF_CHANNEL); 338 MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -325,6 +348,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
325 MWL8K_CMDNAME(ENABLE_SNIFFER); 348 MWL8K_CMDNAME(ENABLE_SNIFFER);
326 MWL8K_CMDNAME(SET_MAC_ADDR); 349 MWL8K_CMDNAME(SET_MAC_ADDR);
327 MWL8K_CMDNAME(SET_RATEADAPT_MODE); 350 MWL8K_CMDNAME(SET_RATEADAPT_MODE);
351 MWL8K_CMDNAME(BSS_START);
352 MWL8K_CMDNAME(SET_NEW_STN);
328 MWL8K_CMDNAME(UPDATE_STADB); 353 MWL8K_CMDNAME(UPDATE_STADB);
329 default: 354 default:
330 snprintf(buf, bufsize, "0x%x", cmd); 355 snprintf(buf, bufsize, "0x%x", cmd);
@@ -355,8 +380,8 @@ static void mwl8k_release_fw(struct firmware **fw)
355 380
356static void mwl8k_release_firmware(struct mwl8k_priv *priv) 381static void mwl8k_release_firmware(struct mwl8k_priv *priv)
357{ 382{
358 mwl8k_release_fw(&priv->fw.ucode); 383 mwl8k_release_fw(&priv->fw_ucode);
359 mwl8k_release_fw(&priv->fw.helper); 384 mwl8k_release_fw(&priv->fw_helper);
360} 385}
361 386
362/* Request fw image */ 387/* Request fw image */
@@ -377,7 +402,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
377 int rc; 402 int rc;
378 403
379 if (di->helper_image != NULL) { 404 if (di->helper_image != NULL) {
380 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper); 405 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
381 if (rc) { 406 if (rc) {
382 printk(KERN_ERR "%s: Error requesting helper " 407 printk(KERN_ERR "%s: Error requesting helper "
383 "firmware file %s\n", pci_name(priv->pdev), 408 "firmware file %s\n", pci_name(priv->pdev),
@@ -386,24 +411,22 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
386 } 411 }
387 } 412 }
388 413
389 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode); 414 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
390 if (rc) { 415 if (rc) {
391 printk(KERN_ERR "%s: Error requesting firmware file %s\n", 416 printk(KERN_ERR "%s: Error requesting firmware file %s\n",
392 pci_name(priv->pdev), di->fw_image); 417 pci_name(priv->pdev), di->fw_image);
393 mwl8k_release_fw(&priv->fw.helper); 418 mwl8k_release_fw(&priv->fw_helper);
394 return rc; 419 return rc;
395 } 420 }
396 421
397 return 0; 422 return 0;
398} 423}
399 424
400MODULE_FIRMWARE("mwl8k/helper_8687.fw");
401MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
402
403struct mwl8k_cmd_pkt { 425struct mwl8k_cmd_pkt {
404 __le16 code; 426 __le16 code;
405 __le16 length; 427 __le16 length;
406 __le16 seq_num; 428 __u8 seq_num;
429 __u8 macid;
407 __le16 result; 430 __le16 result;
408 char payload[0]; 431 char payload[0];
409} __attribute__((packed)); 432} __attribute__((packed));
@@ -461,6 +484,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
461 484
462 cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD); 485 cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
463 cmd->seq_num = 0; 486 cmd->seq_num = 0;
487 cmd->macid = 0;
464 cmd->result = 0; 488 cmd->result = 0;
465 489
466 done = 0; 490 done = 0;
@@ -551,13 +575,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
551static int mwl8k_load_firmware(struct ieee80211_hw *hw) 575static int mwl8k_load_firmware(struct ieee80211_hw *hw)
552{ 576{
553 struct mwl8k_priv *priv = hw->priv; 577 struct mwl8k_priv *priv = hw->priv;
554 struct firmware *fw = priv->fw.ucode; 578 struct firmware *fw = priv->fw_ucode;
555 struct mwl8k_device_info *di = priv->device_info;
556 int rc; 579 int rc;
557 int loops; 580 int loops;
558 581
559 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { 582 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
560 struct firmware *helper = priv->fw.helper; 583 struct firmware *helper = priv->fw_helper;
561 584
562 if (helper == NULL) { 585 if (helper == NULL) {
563 printk(KERN_ERR "%s: helper image needed but none " 586 printk(KERN_ERR "%s: helper image needed but none "
@@ -584,10 +607,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
584 return rc; 607 return rc;
585 } 608 }
586 609
587 if (di->modes & BIT(NL80211_IFTYPE_AP)) 610 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
588 iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
589 else
590 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
591 611
592 loops = 500000; 612 loops = 500000;
593 do { 613 do {
@@ -610,91 +630,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
610} 630}
611 631
612 632
613/*
614 * Defines shared between transmission and reception.
615 */
616/* HT control fields for firmware */
617struct ewc_ht_info {
618 __le16 control1;
619 __le16 control2;
620 __le16 control3;
621} __attribute__((packed));
622
623/* Firmware Station database operations */
624#define MWL8K_STA_DB_ADD_ENTRY 0
625#define MWL8K_STA_DB_MODIFY_ENTRY 1
626#define MWL8K_STA_DB_DEL_ENTRY 2
627#define MWL8K_STA_DB_FLUSH 3
628
629/* Peer Entry flags - used to define the type of the peer node */
630#define MWL8K_PEER_TYPE_ACCESSPOINT 2
631
632struct peer_capability_info {
633 /* Peer type - AP vs. STA. */
634 __u8 peer_type;
635
636 /* Basic 802.11 capabilities from assoc resp. */
637 __le16 basic_caps;
638
639 /* Set if peer supports 802.11n high throughput (HT). */
640 __u8 ht_support;
641
642 /* Valid if HT is supported. */
643 __le16 ht_caps;
644 __u8 extended_ht_caps;
645 struct ewc_ht_info ewc_info;
646
647 /* Legacy rate table. Intersection of our rates and peer rates. */
648 __u8 legacy_rates[12];
649
650 /* HT rate table. Intersection of our rates and peer rates. */
651 __u8 ht_rates[16];
652 __u8 pad[16];
653
654 /* If set, interoperability mode, no proprietary extensions. */
655 __u8 interop;
656 __u8 pad2;
657 __u8 station_id;
658 __le16 amsdu_enabled;
659} __attribute__((packed));
660
661/* Inline functions to manipulate QoS field in data descriptor. */
662static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
663{
664 u16 val_mask = 1 << 4;
665
666 /* End of Service Period Bit 4 */
667 return qos | val_mask;
668}
669
670static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
671{
672 u16 val_mask = 0x3;
673 u8 shift = 5;
674 u16 qos_mask = ~(val_mask << shift);
675
676 /* Ack Policy Bit 5-6 */
677 return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
678}
679
680static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
681{
682 u16 val_mask = 1 << 7;
683
684 /* AMSDU present Bit 7 */
685 return qos | val_mask;
686}
687
688static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
689{
690 u16 val_mask = 0xff;
691 u8 shift = 8;
692 u16 qos_mask = ~(val_mask << shift);
693
694 /* Queue Length Bits 8-15 */
695 return (qos & qos_mask) | ((len & val_mask) << shift);
696}
697
698/* DMA header used by firmware and hardware. */ 633/* DMA header used by firmware and hardware. */
699struct mwl8k_dma_data { 634struct mwl8k_dma_data {
700 __le16 fwlen; 635 __le16 fwlen;
@@ -761,9 +696,9 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
761 696
762 697
763/* 698/*
764 * Packet reception for 88w8366. 699 * Packet reception for 88w8366 AP firmware.
765 */ 700 */
766struct mwl8k_rxd_8366 { 701struct mwl8k_rxd_8366_ap {
767 __le16 pkt_len; 702 __le16 pkt_len;
768 __u8 sq2; 703 __u8 sq2;
769 __u8 rate; 704 __u8 rate;
@@ -781,23 +716,23 @@ struct mwl8k_rxd_8366 {
781 __u8 rx_ctrl; 716 __u8 rx_ctrl;
782} __attribute__((packed)); 717} __attribute__((packed));
783 718
784#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80 719#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
785#define MWL8K_8366_RATE_INFO_40MHZ 0x40 720#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
786#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f) 721#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
787 722
788#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80 723#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
789 724
790static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr) 725static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
791{ 726{
792 struct mwl8k_rxd_8366 *rxd = _rxd; 727 struct mwl8k_rxd_8366_ap *rxd = _rxd;
793 728
794 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 729 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
795 rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST; 730 rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
796} 731}
797 732
798static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len) 733static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
799{ 734{
800 struct mwl8k_rxd_8366 *rxd = _rxd; 735 struct mwl8k_rxd_8366_ap *rxd = _rxd;
801 736
802 rxd->pkt_len = cpu_to_le16(len); 737 rxd->pkt_len = cpu_to_le16(len);
803 rxd->pkt_phys_addr = cpu_to_le32(addr); 738 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -806,12 +741,12 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
806} 741}
807 742
808static int 743static int
809mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status, 744mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
810 __le16 *qos) 745 __le16 *qos)
811{ 746{
812 struct mwl8k_rxd_8366 *rxd = _rxd; 747 struct mwl8k_rxd_8366_ap *rxd = _rxd;
813 748
814 if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST)) 749 if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
815 return -1; 750 return -1;
816 rmb(); 751 rmb();
817 752
@@ -820,23 +755,29 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
820 status->signal = -rxd->rssi; 755 status->signal = -rxd->rssi;
821 status->noise = -rxd->noise_floor; 756 status->noise = -rxd->noise_floor;
822 757
823 if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) { 758 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
824 status->flag |= RX_FLAG_HT; 759 status->flag |= RX_FLAG_HT;
825 if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ) 760 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
826 status->flag |= RX_FLAG_40MHZ; 761 status->flag |= RX_FLAG_40MHZ;
827 status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate); 762 status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
828 } else { 763 } else {
829 int i; 764 int i;
830 765
831 for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) { 766 for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
832 if (mwl8k_rates[i].hw_value == rxd->rate) { 767 if (mwl8k_rates_24[i].hw_value == rxd->rate) {
833 status->rate_idx = i; 768 status->rate_idx = i;
834 break; 769 break;
835 } 770 }
836 } 771 }
837 } 772 }
838 773
839 status->band = IEEE80211_BAND_2GHZ; 774 if (rxd->channel > 14) {
775 status->band = IEEE80211_BAND_5GHZ;
776 if (!(status->flag & RX_FLAG_HT))
777 status->rate_idx -= 5;
778 } else {
779 status->band = IEEE80211_BAND_2GHZ;
780 }
840 status->freq = ieee80211_channel_to_frequency(rxd->channel); 781 status->freq = ieee80211_channel_to_frequency(rxd->channel);
841 782
842 *qos = rxd->qos_control; 783 *qos = rxd->qos_control;
@@ -844,17 +785,17 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
844 return le16_to_cpu(rxd->pkt_len); 785 return le16_to_cpu(rxd->pkt_len);
845} 786}
846 787
847static struct rxd_ops rxd_8366_ops = { 788static struct rxd_ops rxd_8366_ap_ops = {
848 .rxd_size = sizeof(struct mwl8k_rxd_8366), 789 .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
849 .rxd_init = mwl8k_rxd_8366_init, 790 .rxd_init = mwl8k_rxd_8366_ap_init,
850 .rxd_refill = mwl8k_rxd_8366_refill, 791 .rxd_refill = mwl8k_rxd_8366_ap_refill,
851 .rxd_process = mwl8k_rxd_8366_process, 792 .rxd_process = mwl8k_rxd_8366_ap_process,
852}; 793};
853 794
854/* 795/*
855 * Packet reception for 88w8687. 796 * Packet reception for STA firmware.
856 */ 797 */
857struct mwl8k_rxd_8687 { 798struct mwl8k_rxd_sta {
858 __le16 pkt_len; 799 __le16 pkt_len;
859 __u8 link_quality; 800 __u8 link_quality;
860 __u8 noise_level; 801 __u8 noise_level;
@@ -871,26 +812,26 @@ struct mwl8k_rxd_8687 {
871 __u8 pad2[2]; 812 __u8 pad2[2];
872} __attribute__((packed)); 813} __attribute__((packed));
873 814
874#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000 815#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
875#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) 816#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
876#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f) 817#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
877#define MWL8K_8687_RATE_INFO_40MHZ 0x0004 818#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
878#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002 819#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
879#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001 820#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
880 821
881#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02 822#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
882 823
883static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr) 824static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
884{ 825{
885 struct mwl8k_rxd_8687 *rxd = _rxd; 826 struct mwl8k_rxd_sta *rxd = _rxd;
886 827
887 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 828 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
888 rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST; 829 rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
889} 830}
890 831
891static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len) 832static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
892{ 833{
893 struct mwl8k_rxd_8687 *rxd = _rxd; 834 struct mwl8k_rxd_sta *rxd = _rxd;
894 835
895 rxd->pkt_len = cpu_to_le16(len); 836 rxd->pkt_len = cpu_to_le16(len);
896 rxd->pkt_phys_addr = cpu_to_le32(addr); 837 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -899,13 +840,13 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
899} 840}
900 841
901static int 842static int
902mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status, 843mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
903 __le16 *qos) 844 __le16 *qos)
904{ 845{
905 struct mwl8k_rxd_8687 *rxd = _rxd; 846 struct mwl8k_rxd_sta *rxd = _rxd;
906 u16 rate_info; 847 u16 rate_info;
907 848
908 if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST)) 849 if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
909 return -1; 850 return -1;
910 rmb(); 851 rmb();
911 852
@@ -915,19 +856,25 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
915 856
916 status->signal = -rxd->rssi; 857 status->signal = -rxd->rssi;
917 status->noise = -rxd->noise_level; 858 status->noise = -rxd->noise_level;
918 status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info); 859 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
919 status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info); 860 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
920 861
921 if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE) 862 if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
922 status->flag |= RX_FLAG_SHORTPRE; 863 status->flag |= RX_FLAG_SHORTPRE;
923 if (rate_info & MWL8K_8687_RATE_INFO_40MHZ) 864 if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
924 status->flag |= RX_FLAG_40MHZ; 865 status->flag |= RX_FLAG_40MHZ;
925 if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI) 866 if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
926 status->flag |= RX_FLAG_SHORT_GI; 867 status->flag |= RX_FLAG_SHORT_GI;
927 if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT) 868 if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
928 status->flag |= RX_FLAG_HT; 869 status->flag |= RX_FLAG_HT;
929 870
930 status->band = IEEE80211_BAND_2GHZ; 871 if (rxd->channel > 14) {
872 status->band = IEEE80211_BAND_5GHZ;
873 if (!(status->flag & RX_FLAG_HT))
874 status->rate_idx -= 5;
875 } else {
876 status->band = IEEE80211_BAND_2GHZ;
877 }
931 status->freq = ieee80211_channel_to_frequency(rxd->channel); 878 status->freq = ieee80211_channel_to_frequency(rxd->channel);
932 879
933 *qos = rxd->qos_control; 880 *qos = rxd->qos_control;
@@ -935,11 +882,11 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
935 return le16_to_cpu(rxd->pkt_len); 882 return le16_to_cpu(rxd->pkt_len);
936} 883}
937 884
938static struct rxd_ops rxd_8687_ops = { 885static struct rxd_ops rxd_sta_ops = {
939 .rxd_size = sizeof(struct mwl8k_rxd_8687), 886 .rxd_size = sizeof(struct mwl8k_rxd_sta),
940 .rxd_init = mwl8k_rxd_8687_init, 887 .rxd_init = mwl8k_rxd_sta_init,
941 .rxd_refill = mwl8k_rxd_8687_refill, 888 .rxd_refill = mwl8k_rxd_sta_refill,
942 .rxd_process = mwl8k_rxd_8687_process, 889 .rxd_process = mwl8k_rxd_sta_process,
943}; 890};
944 891
945 892
@@ -1153,16 +1100,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1153 * Packet transmission. 1100 * Packet transmission.
1154 */ 1101 */
1155 1102
1156/* Transmit packet ACK policy */
1157#define MWL8K_TXD_ACK_POLICY_NORMAL 0
1158#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
1159
1160#define MWL8K_TXD_STATUS_OK 0x00000001 1103#define MWL8K_TXD_STATUS_OK 0x00000001
1161#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 1104#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
1162#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 1105#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
1163#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008 1106#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
1164#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000 1107#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
1165 1108
1109#define MWL8K_QOS_QLEN_UNSPEC 0xff00
1110#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
1111#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
1112#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
1113#define MWL8K_QOS_EOSP 0x0010
1114
1166struct mwl8k_tx_desc { 1115struct mwl8k_tx_desc {
1167 __le32 status; 1116 __le32 status;
1168 __u8 data_rate; 1117 __u8 data_rate;
@@ -1272,7 +1221,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
1272/* 1221/*
1273 * Must be called with priv->fw_mutex held and tx queues stopped. 1222 * Must be called with priv->fw_mutex held and tx queues stopped.
1274 */ 1223 */
1275#define MWL8K_TX_WAIT_TIMEOUT_MS 1000 1224#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
1276 1225
1277static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) 1226static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1278{ 1227{
@@ -1316,8 +1265,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1316 } 1265 }
1317 1266
1318 if (priv->pending_tx_pkts < oldcount) { 1267 if (priv->pending_tx_pkts < oldcount) {
1319 printk(KERN_NOTICE "%s: timeout waiting for tx " 1268 printk(KERN_NOTICE "%s: waiting for tx rings "
1320 "rings to drain (%d -> %d pkts), retrying\n", 1269 "to drain (%d -> %d pkts)\n",
1321 wiphy_name(hw->wiphy), oldcount, 1270 wiphy_name(hw->wiphy), oldcount,
1322 priv->pending_tx_pkts); 1271 priv->pending_tx_pkts);
1323 retry = 1; 1272 retry = 1;
@@ -1342,13 +1291,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1342 MWL8K_TXD_STATUS_OK_RETRY | \ 1291 MWL8K_TXD_STATUS_OK_RETRY | \
1343 MWL8K_TXD_STATUS_OK_MORE_RETRY)) 1292 MWL8K_TXD_STATUS_OK_MORE_RETRY))
1344 1293
1345static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force) 1294static int
1295mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1346{ 1296{
1347 struct mwl8k_priv *priv = hw->priv; 1297 struct mwl8k_priv *priv = hw->priv;
1348 struct mwl8k_tx_queue *txq = priv->txq + index; 1298 struct mwl8k_tx_queue *txq = priv->txq + index;
1349 int wake = 0; 1299 int processed;
1350 1300
1351 while (txq->stats.len > 0) { 1301 processed = 0;
1302 while (txq->stats.len > 0 && limit--) {
1352 int tx; 1303 int tx;
1353 struct mwl8k_tx_desc *tx_desc; 1304 struct mwl8k_tx_desc *tx_desc;
1354 unsigned long addr; 1305 unsigned long addr;
@@ -1395,11 +1346,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1395 1346
1396 ieee80211_tx_status_irqsafe(hw, skb); 1347 ieee80211_tx_status_irqsafe(hw, skb);
1397 1348
1398 wake = 1; 1349 processed++;
1399 } 1350 }
1400 1351
1401 if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex)) 1352 if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
1402 ieee80211_wake_queue(hw, index); 1353 ieee80211_wake_queue(hw, index);
1354
1355 return processed;
1403} 1356}
1404 1357
1405/* must be called only when the card's transmit is completely halted */ 1358/* must be called only when the card's transmit is completely halted */
@@ -1408,7 +1361,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1408 struct mwl8k_priv *priv = hw->priv; 1361 struct mwl8k_priv *priv = hw->priv;
1409 struct mwl8k_tx_queue *txq = priv->txq + index; 1362 struct mwl8k_tx_queue *txq = priv->txq + index;
1410 1363
1411 mwl8k_txq_reclaim(hw, index, 1); 1364 mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
1412 1365
1413 kfree(txq->skb); 1366 kfree(txq->skb);
1414 txq->skb = NULL; 1367 txq->skb = NULL;
@@ -1446,11 +1399,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1446 mwl8k_vif = MWL8K_VIF(tx_info->control.vif); 1399 mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
1447 1400
1448 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1401 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1449 u16 seqno = mwl8k_vif->seqno;
1450
1451 wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1402 wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1452 wh->seq_ctrl |= cpu_to_le16(seqno << 4); 1403 wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
1453 mwl8k_vif->seqno = seqno++ % 4096; 1404 mwl8k_vif->seqno += 0x10;
1454 } 1405 }
1455 1406
1456 /* Setup firmware control bit fields for each frame type. */ 1407 /* Setup firmware control bit fields for each frame type. */
@@ -1459,24 +1410,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1459 if (ieee80211_is_mgmt(wh->frame_control) || 1410 if (ieee80211_is_mgmt(wh->frame_control) ||
1460 ieee80211_is_ctl(wh->frame_control)) { 1411 ieee80211_is_ctl(wh->frame_control)) {
1461 txdatarate = 0; 1412 txdatarate = 0;
1462 qos = mwl8k_qos_setbit_eosp(qos); 1413 qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
1463 /* Set Queue size to unspecified */
1464 qos = mwl8k_qos_setbit_qlen(qos, 0xff);
1465 } else if (ieee80211_is_data(wh->frame_control)) { 1414 } else if (ieee80211_is_data(wh->frame_control)) {
1466 txdatarate = 1; 1415 txdatarate = 1;
1467 if (is_multicast_ether_addr(wh->addr1)) 1416 if (is_multicast_ether_addr(wh->addr1))
1468 txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX; 1417 txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
1469 1418
1470 /* Send pkt in an aggregate if AMPDU frame. */ 1419 qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
1471 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 1420 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
1472 qos = mwl8k_qos_setbit_ack(qos, 1421 qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
1473 MWL8K_TXD_ACK_POLICY_BLOCKACK);
1474 else 1422 else
1475 qos = mwl8k_qos_setbit_ack(qos, 1423 qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
1476 MWL8K_TXD_ACK_POLICY_NORMAL);
1477
1478 if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
1479 qos = mwl8k_qos_setbit_amsdu(qos);
1480 } 1424 }
1481 1425
1482 dma = pci_map_single(priv->pdev, skb->data, 1426 dma = pci_map_single(priv->pdev, skb->data,
@@ -1503,7 +1447,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1503 tx->pkt_phys_addr = cpu_to_le32(dma); 1447 tx->pkt_phys_addr = cpu_to_le32(dma);
1504 tx->pkt_len = cpu_to_le16(skb->len); 1448 tx->pkt_len = cpu_to_le16(skb->len);
1505 tx->rate_info = 0; 1449 tx->rate_info = 0;
1506 tx->peer_id = mwl8k_vif->peer_id; 1450 if (!priv->ap_fw && tx_info->control.sta != NULL)
1451 tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
1452 else
1453 tx->peer_id = 0;
1507 wmb(); 1454 wmb();
1508 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); 1455 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
1509 1456
@@ -1656,6 +1603,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1656 return rc; 1603 return rc;
1657} 1604}
1658 1605
1606static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
1607 struct ieee80211_vif *vif,
1608 struct mwl8k_cmd_pkt *cmd)
1609{
1610 if (vif != NULL)
1611 cmd->macid = MWL8K_VIF(vif)->macid;
1612 return mwl8k_post_cmd(hw, cmd);
1613}
1614
1615/*
1616 * Setup code shared between STA and AP firmware images.
1617 */
1618static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
1619{
1620 struct mwl8k_priv *priv = hw->priv;
1621
1622 BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
1623 memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
1624
1625 BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
1626 memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
1627
1628 priv->band_24.band = IEEE80211_BAND_2GHZ;
1629 priv->band_24.channels = priv->channels_24;
1630 priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
1631 priv->band_24.bitrates = priv->rates_24;
1632 priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
1633
1634 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
1635}
1636
1637static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
1638{
1639 struct mwl8k_priv *priv = hw->priv;
1640
1641 BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
1642 memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
1643
1644 BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
1645 memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
1646
1647 priv->band_50.band = IEEE80211_BAND_5GHZ;
1648 priv->band_50.channels = priv->channels_50;
1649 priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
1650 priv->band_50.bitrates = priv->rates_50;
1651 priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
1652
1653 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
1654}
1655
1659/* 1656/*
1660 * CMD_GET_HW_SPEC (STA version). 1657 * CMD_GET_HW_SPEC (STA version).
1661 */ 1658 */
@@ -1678,6 +1675,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
1678 __le32 total_rxd; 1675 __le32 total_rxd;
1679} __attribute__((packed)); 1676} __attribute__((packed));
1680 1677
1678#define MWL8K_CAP_MAX_AMSDU 0x20000000
1679#define MWL8K_CAP_GREENFIELD 0x08000000
1680#define MWL8K_CAP_AMPDU 0x04000000
1681#define MWL8K_CAP_RX_STBC 0x01000000
1682#define MWL8K_CAP_TX_STBC 0x00800000
1683#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
1684#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
1685#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
1686#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
1687#define MWL8K_CAP_DELAY_BA 0x00003000
1688#define MWL8K_CAP_MIMO 0x00000200
1689#define MWL8K_CAP_40MHZ 0x00000100
1690#define MWL8K_CAP_BAND_MASK 0x00000007
1691#define MWL8K_CAP_5GHZ 0x00000004
1692#define MWL8K_CAP_2GHZ4 0x00000001
1693
1694static void
1695mwl8k_set_ht_caps(struct ieee80211_hw *hw,
1696 struct ieee80211_supported_band *band, u32 cap)
1697{
1698 int rx_streams;
1699 int tx_streams;
1700
1701 band->ht_cap.ht_supported = 1;
1702
1703 if (cap & MWL8K_CAP_MAX_AMSDU)
1704 band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
1705 if (cap & MWL8K_CAP_GREENFIELD)
1706 band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
1707 if (cap & MWL8K_CAP_AMPDU) {
1708 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1709 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
1710 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
1711 }
1712 if (cap & MWL8K_CAP_RX_STBC)
1713 band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
1714 if (cap & MWL8K_CAP_TX_STBC)
1715 band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
1716 if (cap & MWL8K_CAP_SHORTGI_40MHZ)
1717 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
1718 if (cap & MWL8K_CAP_SHORTGI_20MHZ)
1719 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
1720 if (cap & MWL8K_CAP_DELAY_BA)
1721 band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
1722 if (cap & MWL8K_CAP_40MHZ)
1723 band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1724
1725 rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
1726 tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
1727
1728 band->ht_cap.mcs.rx_mask[0] = 0xff;
1729 if (rx_streams >= 2)
1730 band->ht_cap.mcs.rx_mask[1] = 0xff;
1731 if (rx_streams >= 3)
1732 band->ht_cap.mcs.rx_mask[2] = 0xff;
1733 band->ht_cap.mcs.rx_mask[4] = 0x01;
1734 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1735
1736 if (rx_streams != tx_streams) {
1737 band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
1738 band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
1739 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1740 }
1741}
1742
1743static void
1744mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
1745{
1746 struct mwl8k_priv *priv = hw->priv;
1747
1748 if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
1749 mwl8k_setup_2ghz_band(hw);
1750 if (caps & MWL8K_CAP_MIMO)
1751 mwl8k_set_ht_caps(hw, &priv->band_24, caps);
1752 }
1753
1754 if (caps & MWL8K_CAP_5GHZ) {
1755 mwl8k_setup_5ghz_band(hw);
1756 if (caps & MWL8K_CAP_MIMO)
1757 mwl8k_set_ht_caps(hw, &priv->band_50, caps);
1758 }
1759}
1760
1681static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) 1761static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1682{ 1762{
1683 struct mwl8k_priv *priv = hw->priv; 1763 struct mwl8k_priv *priv = hw->priv;
@@ -1708,6 +1788,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1708 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1788 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1709 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1789 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1710 priv->hw_rev = cmd->hw_rev; 1790 priv->hw_rev = cmd->hw_rev;
1791 mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
1792 priv->ap_macids_supported = 0x00000000;
1793 priv->sta_macids_supported = 0x00000001;
1711 } 1794 }
1712 1795
1713 kfree(cmd); 1796 kfree(cmd);
@@ -1761,6 +1844,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1761 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1844 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1762 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1845 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1763 priv->hw_rev = cmd->hw_rev; 1846 priv->hw_rev = cmd->hw_rev;
1847 mwl8k_setup_2ghz_band(hw);
1848 priv->ap_macids_supported = 0x000000ff;
1849 priv->sta_macids_supported = 0x00000000;
1764 1850
1765 off = le32_to_cpu(cmd->wcbbase0) & 0xffff; 1851 off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
1766 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off); 1852 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1806,7 +1892,9 @@ struct mwl8k_cmd_set_hw_spec {
1806 __le32 total_rxd; 1892 __le32 total_rxd;
1807} __attribute__((packed)); 1893} __attribute__((packed));
1808 1894
1809#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 1895#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
1896#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
1897#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
1810 1898
1811static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw) 1899static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1812{ 1900{
@@ -1827,7 +1915,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1827 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 1915 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1828 for (i = 0; i < MWL8K_TX_QUEUES; i++) 1916 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1829 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); 1917 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1830 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT); 1918 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
1919 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
1920 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
1831 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 1921 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1832 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 1922 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
1833 1923
@@ -1897,9 +1987,9 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1897} 1987}
1898 1988
1899/* 1989/*
1900 * CMD_802_11_GET_STAT. 1990 * CMD_GET_STAT.
1901 */ 1991 */
1902struct mwl8k_cmd_802_11_get_stat { 1992struct mwl8k_cmd_get_stat {
1903 struct mwl8k_cmd_pkt header; 1993 struct mwl8k_cmd_pkt header;
1904 __le32 stats[64]; 1994 __le32 stats[64];
1905} __attribute__((packed)); 1995} __attribute__((packed));
@@ -1909,10 +1999,10 @@ struct mwl8k_cmd_802_11_get_stat {
1909#define MWL8K_STAT_FCS_ERROR 24 1999#define MWL8K_STAT_FCS_ERROR 24
1910#define MWL8K_STAT_RTS_SUCCESS 11 2000#define MWL8K_STAT_RTS_SUCCESS 11
1911 2001
1912static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw, 2002static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
1913 struct ieee80211_low_level_stats *stats) 2003 struct ieee80211_low_level_stats *stats)
1914{ 2004{
1915 struct mwl8k_cmd_802_11_get_stat *cmd; 2005 struct mwl8k_cmd_get_stat *cmd;
1916 int rc; 2006 int rc;
1917 2007
1918 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2008 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1939,9 +2029,9 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
1939} 2029}
1940 2030
1941/* 2031/*
1942 * CMD_802_11_RADIO_CONTROL. 2032 * CMD_RADIO_CONTROL.
1943 */ 2033 */
1944struct mwl8k_cmd_802_11_radio_control { 2034struct mwl8k_cmd_radio_control {
1945 struct mwl8k_cmd_pkt header; 2035 struct mwl8k_cmd_pkt header;
1946 __le16 action; 2036 __le16 action;
1947 __le16 control; 2037 __le16 control;
@@ -1949,10 +2039,10 @@ struct mwl8k_cmd_802_11_radio_control {
1949} __attribute__((packed)); 2039} __attribute__((packed));
1950 2040
1951static int 2041static int
1952mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force) 2042mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
1953{ 2043{
1954 struct mwl8k_priv *priv = hw->priv; 2044 struct mwl8k_priv *priv = hw->priv;
1955 struct mwl8k_cmd_802_11_radio_control *cmd; 2045 struct mwl8k_cmd_radio_control *cmd;
1956 int rc; 2046 int rc;
1957 2047
1958 if (enable == priv->radio_on && !force) 2048 if (enable == priv->radio_on && !force)
@@ -1977,36 +2067,32 @@ mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
1977 return rc; 2067 return rc;
1978} 2068}
1979 2069
1980static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw) 2070static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
1981{ 2071{
1982 return mwl8k_cmd_802_11_radio_control(hw, 0, 0); 2072 return mwl8k_cmd_radio_control(hw, 0, 0);
1983} 2073}
1984 2074
1985static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw) 2075static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
1986{ 2076{
1987 return mwl8k_cmd_802_11_radio_control(hw, 1, 0); 2077 return mwl8k_cmd_radio_control(hw, 1, 0);
1988} 2078}
1989 2079
1990static int 2080static int
1991mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble) 2081mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
1992{ 2082{
1993 struct mwl8k_priv *priv; 2083 struct mwl8k_priv *priv = hw->priv;
1994
1995 if (hw == NULL || hw->priv == NULL)
1996 return -EINVAL;
1997 priv = hw->priv;
1998 2084
1999 priv->radio_short_preamble = short_preamble; 2085 priv->radio_short_preamble = short_preamble;
2000 2086
2001 return mwl8k_cmd_802_11_radio_control(hw, 1, 1); 2087 return mwl8k_cmd_radio_control(hw, 1, 1);
2002} 2088}
2003 2089
2004/* 2090/*
2005 * CMD_802_11_RF_TX_POWER. 2091 * CMD_RF_TX_POWER.
2006 */ 2092 */
2007#define MWL8K_TX_POWER_LEVEL_TOTAL 8 2093#define MWL8K_TX_POWER_LEVEL_TOTAL 8
2008 2094
2009struct mwl8k_cmd_802_11_rf_tx_power { 2095struct mwl8k_cmd_rf_tx_power {
2010 struct mwl8k_cmd_pkt header; 2096 struct mwl8k_cmd_pkt header;
2011 __le16 action; 2097 __le16 action;
2012 __le16 support_level; 2098 __le16 support_level;
@@ -2015,9 +2101,9 @@ struct mwl8k_cmd_802_11_rf_tx_power {
2015 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; 2101 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
2016} __attribute__((packed)); 2102} __attribute__((packed));
2017 2103
2018static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm) 2104static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
2019{ 2105{
2020 struct mwl8k_cmd_802_11_rf_tx_power *cmd; 2106 struct mwl8k_cmd_rf_tx_power *cmd;
2021 int rc; 2107 int rc;
2022 2108
2023 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2109 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2069,6 +2155,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
2069} 2155}
2070 2156
2071/* 2157/*
2158 * CMD_SET_BEACON.
2159 */
2160struct mwl8k_cmd_set_beacon {
2161 struct mwl8k_cmd_pkt header;
2162 __le16 beacon_len;
2163 __u8 beacon[0];
2164};
2165
2166static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
2167 struct ieee80211_vif *vif, u8 *beacon, int len)
2168{
2169 struct mwl8k_cmd_set_beacon *cmd;
2170 int rc;
2171
2172 cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
2173 if (cmd == NULL)
2174 return -ENOMEM;
2175
2176 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
2177 cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
2178 cmd->beacon_len = cpu_to_le16(len);
2179 memcpy(cmd->beacon, beacon, len);
2180
2181 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2182 kfree(cmd);
2183
2184 return rc;
2185}
2186
2187/*
2072 * CMD_SET_PRE_SCAN. 2188 * CMD_SET_PRE_SCAN.
2073 */ 2189 */
2074struct mwl8k_cmd_set_pre_scan { 2190struct mwl8k_cmd_set_pre_scan {
@@ -2103,7 +2219,7 @@ struct mwl8k_cmd_set_post_scan {
2103} __attribute__((packed)); 2219} __attribute__((packed));
2104 2220
2105static int 2221static int
2106mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac) 2222mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
2107{ 2223{
2108 struct mwl8k_cmd_set_post_scan *cmd; 2224 struct mwl8k_cmd_set_post_scan *cmd;
2109 int rc; 2225 int rc;
@@ -2134,8 +2250,9 @@ struct mwl8k_cmd_set_rf_channel {
2134} __attribute__((packed)); 2250} __attribute__((packed));
2135 2251
2136static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, 2252static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2137 struct ieee80211_channel *channel) 2253 struct ieee80211_conf *conf)
2138{ 2254{
2255 struct ieee80211_channel *channel = conf->channel;
2139 struct mwl8k_cmd_set_rf_channel *cmd; 2256 struct mwl8k_cmd_set_rf_channel *cmd;
2140 int rc; 2257 int rc;
2141 2258
@@ -2147,10 +2264,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2147 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2264 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2148 cmd->action = cpu_to_le16(MWL8K_CMD_SET); 2265 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2149 cmd->current_channel = channel->hw_value; 2266 cmd->current_channel = channel->hw_value;
2267
2150 if (channel->band == IEEE80211_BAND_2GHZ) 2268 if (channel->band == IEEE80211_BAND_2GHZ)
2151 cmd->channel_flags = cpu_to_le32(0x00000081); 2269 cmd->channel_flags |= cpu_to_le32(0x00000001);
2152 else 2270 else if (channel->band == IEEE80211_BAND_5GHZ)
2153 cmd->channel_flags = cpu_to_le32(0x00000000); 2271 cmd->channel_flags |= cpu_to_le32(0x00000004);
2272
2273 if (conf->channel_type == NL80211_CHAN_NO_HT ||
2274 conf->channel_type == NL80211_CHAN_HT20)
2275 cmd->channel_flags |= cpu_to_le32(0x00000080);
2276 else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
2277 cmd->channel_flags |= cpu_to_le32(0x000001900);
2278 else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
2279 cmd->channel_flags |= cpu_to_le32(0x000000900);
2154 2280
2155 rc = mwl8k_post_cmd(hw, &cmd->header); 2281 rc = mwl8k_post_cmd(hw, &cmd->header);
2156 kfree(cmd); 2282 kfree(cmd);
@@ -2159,85 +2285,75 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2159} 2285}
2160 2286
2161/* 2287/*
2162 * CMD_SET_SLOT. 2288 * CMD_SET_AID.
2163 */ 2289 */
2164struct mwl8k_cmd_set_slot { 2290#define MWL8K_FRAME_PROT_DISABLED 0x00
2165 struct mwl8k_cmd_pkt header; 2291#define MWL8K_FRAME_PROT_11G 0x07
2166 __le16 action; 2292#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
2167 __u8 short_slot; 2293#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
2168} __attribute__((packed));
2169
2170static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
2171{
2172 struct mwl8k_cmd_set_slot *cmd;
2173 int rc;
2174
2175 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2176 if (cmd == NULL)
2177 return -ENOMEM;
2178
2179 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
2180 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2181 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2182 cmd->short_slot = short_slot_time;
2183
2184 rc = mwl8k_post_cmd(hw, &cmd->header);
2185 kfree(cmd);
2186 2294
2187 return rc; 2295struct mwl8k_cmd_update_set_aid {
2188} 2296 struct mwl8k_cmd_pkt header;
2297 __le16 aid;
2189 2298
2190/* 2299 /* AP's MAC address (BSSID) */
2191 * CMD_MIMO_CONFIG. 2300 __u8 bssid[ETH_ALEN];
2192 */ 2301 __le16 protection_mode;
2193struct mwl8k_cmd_mimo_config { 2302 __u8 supp_rates[14];
2194 struct mwl8k_cmd_pkt header;
2195 __le32 action;
2196 __u8 rx_antenna_map;
2197 __u8 tx_antenna_map;
2198} __attribute__((packed)); 2303} __attribute__((packed));
2199 2304
2200static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) 2305static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
2201{ 2306{
2202 struct mwl8k_cmd_mimo_config *cmd; 2307 int i;
2203 int rc; 2308 int j;
2204
2205 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2206 if (cmd == NULL)
2207 return -ENOMEM;
2208
2209 cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
2210 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2211 cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
2212 cmd->rx_antenna_map = rx;
2213 cmd->tx_antenna_map = tx;
2214 2309
2215 rc = mwl8k_post_cmd(hw, &cmd->header); 2310 /*
2216 kfree(cmd); 2311 * Clear nonstandard rates 4 and 13.
2312 */
2313 mask &= 0x1fef;
2217 2314
2218 return rc; 2315 for (i = 0, j = 0; i < 14; i++) {
2316 if (mask & (1 << i))
2317 rates[j++] = mwl8k_rates_24[i].hw_value;
2318 }
2219} 2319}
2220 2320
2221/* 2321static int
2222 * CMD_ENABLE_SNIFFER. 2322mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
2223 */ 2323 struct ieee80211_vif *vif, u32 legacy_rate_mask)
2224struct mwl8k_cmd_enable_sniffer {
2225 struct mwl8k_cmd_pkt header;
2226 __le32 action;
2227} __attribute__((packed));
2228
2229static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2230{ 2324{
2231 struct mwl8k_cmd_enable_sniffer *cmd; 2325 struct mwl8k_cmd_update_set_aid *cmd;
2326 u16 prot_mode;
2232 int rc; 2327 int rc;
2233 2328
2234 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2329 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2235 if (cmd == NULL) 2330 if (cmd == NULL)
2236 return -ENOMEM; 2331 return -ENOMEM;
2237 2332
2238 cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER); 2333 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
2239 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2334 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2240 cmd->action = cpu_to_le32(!!enable); 2335 cmd->aid = cpu_to_le16(vif->bss_conf.aid);
2336 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
2337
2338 if (vif->bss_conf.use_cts_prot) {
2339 prot_mode = MWL8K_FRAME_PROT_11G;
2340 } else {
2341 switch (vif->bss_conf.ht_operation_mode &
2342 IEEE80211_HT_OP_MODE_PROTECTION) {
2343 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
2344 prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
2345 break;
2346 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
2347 prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
2348 break;
2349 default:
2350 prot_mode = MWL8K_FRAME_PROT_DISABLED;
2351 break;
2352 }
2353 }
2354 cmd->protection_mode = cpu_to_le16(prot_mode);
2355
2356 legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
2241 2357
2242 rc = mwl8k_post_cmd(hw, &cmd->header); 2358 rc = mwl8k_post_cmd(hw, &cmd->header);
2243 kfree(cmd); 2359 kfree(cmd);
@@ -2246,37 +2362,32 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2246} 2362}
2247 2363
2248/* 2364/*
2249 * CMD_SET_MAC_ADDR. 2365 * CMD_SET_RATE.
2250 */ 2366 */
2251struct mwl8k_cmd_set_mac_addr { 2367struct mwl8k_cmd_set_rate {
2252 struct mwl8k_cmd_pkt header; 2368 struct mwl8k_cmd_pkt header;
2253 union { 2369 __u8 legacy_rates[14];
2254 struct { 2370
2255 __le16 mac_type; 2371 /* Bitmap for supported MCS codes. */
2256 __u8 mac_addr[ETH_ALEN]; 2372 __u8 mcs_set[16];
2257 } mbss; 2373 __u8 reserved[16];
2258 __u8 mac_addr[ETH_ALEN];
2259 };
2260} __attribute__((packed)); 2374} __attribute__((packed));
2261 2375
2262static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac) 2376static int
2377mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2378 u32 legacy_rate_mask, u8 *mcs_rates)
2263{ 2379{
2264 struct mwl8k_priv *priv = hw->priv; 2380 struct mwl8k_cmd_set_rate *cmd;
2265 struct mwl8k_cmd_set_mac_addr *cmd;
2266 int rc; 2381 int rc;
2267 2382
2268 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2383 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2269 if (cmd == NULL) 2384 if (cmd == NULL)
2270 return -ENOMEM; 2385 return -ENOMEM;
2271 2386
2272 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR); 2387 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
2273 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2388 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2274 if (priv->ap_fw) { 2389 legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
2275 cmd->mbss.mac_type = 0; 2390 memcpy(cmd->mcs_set, mcs_rates, 16);
2276 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2277 } else {
2278 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2279 }
2280 2391
2281 rc = mwl8k_post_cmd(hw, &cmd->header); 2392 rc = mwl8k_post_cmd(hw, &cmd->header);
2282 kfree(cmd); 2393 kfree(cmd);
@@ -2284,29 +2395,40 @@ static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
2284 return rc; 2395 return rc;
2285} 2396}
2286 2397
2287
2288/* 2398/*
2289 * CMD_SET_RATEADAPT_MODE. 2399 * CMD_FINALIZE_JOIN.
2290 */ 2400 */
2291struct mwl8k_cmd_set_rate_adapt_mode { 2401#define MWL8K_FJ_BEACON_MAXLEN 128
2402
2403struct mwl8k_cmd_finalize_join {
2292 struct mwl8k_cmd_pkt header; 2404 struct mwl8k_cmd_pkt header;
2293 __le16 action; 2405 __le32 sleep_interval; /* Number of beacon periods to sleep */
2294 __le16 mode; 2406 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
2295} __attribute__((packed)); 2407} __attribute__((packed));
2296 2408
2297static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode) 2409static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
2410 int framelen, int dtim)
2298{ 2411{
2299 struct mwl8k_cmd_set_rate_adapt_mode *cmd; 2412 struct mwl8k_cmd_finalize_join *cmd;
2413 struct ieee80211_mgmt *payload = frame;
2414 int payload_len;
2300 int rc; 2415 int rc;
2301 2416
2302 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2417 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2303 if (cmd == NULL) 2418 if (cmd == NULL)
2304 return -ENOMEM; 2419 return -ENOMEM;
2305 2420
2306 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE); 2421 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
2307 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2422 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2308 cmd->action = cpu_to_le16(MWL8K_CMD_SET); 2423 cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
2309 cmd->mode = cpu_to_le16(mode); 2424
2425 payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
2426 if (payload_len < 0)
2427 payload_len = 0;
2428 else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2429 payload_len = MWL8K_FJ_BEACON_MAXLEN;
2430
2431 memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
2310 2432
2311 rc = mwl8k_post_cmd(hw, &cmd->header); 2433 rc = mwl8k_post_cmd(hw, &cmd->header);
2312 kfree(cmd); 2434 kfree(cmd);
@@ -2315,59 +2437,57 @@ static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
2315} 2437}
2316 2438
2317/* 2439/*
2318 * CMD_SET_WMM_MODE. 2440 * CMD_SET_RTS_THRESHOLD.
2319 */ 2441 */
2320struct mwl8k_cmd_set_wmm { 2442struct mwl8k_cmd_set_rts_threshold {
2321 struct mwl8k_cmd_pkt header; 2443 struct mwl8k_cmd_pkt header;
2322 __le16 action; 2444 __le16 action;
2445 __le16 threshold;
2323} __attribute__((packed)); 2446} __attribute__((packed));
2324 2447
2325static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable) 2448static int
2449mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
2326{ 2450{
2327 struct mwl8k_priv *priv = hw->priv; 2451 struct mwl8k_cmd_set_rts_threshold *cmd;
2328 struct mwl8k_cmd_set_wmm *cmd;
2329 int rc; 2452 int rc;
2330 2453
2331 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2454 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2332 if (cmd == NULL) 2455 if (cmd == NULL)
2333 return -ENOMEM; 2456 return -ENOMEM;
2334 2457
2335 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE); 2458 cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
2336 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2459 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2337 cmd->action = cpu_to_le16(!!enable); 2460 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2461 cmd->threshold = cpu_to_le16(rts_thresh);
2338 2462
2339 rc = mwl8k_post_cmd(hw, &cmd->header); 2463 rc = mwl8k_post_cmd(hw, &cmd->header);
2340 kfree(cmd); 2464 kfree(cmd);
2341 2465
2342 if (!rc)
2343 priv->wmm_enabled = enable;
2344
2345 return rc; 2466 return rc;
2346} 2467}
2347 2468
2348/* 2469/*
2349 * CMD_SET_RTS_THRESHOLD. 2470 * CMD_SET_SLOT.
2350 */ 2471 */
2351struct mwl8k_cmd_rts_threshold { 2472struct mwl8k_cmd_set_slot {
2352 struct mwl8k_cmd_pkt header; 2473 struct mwl8k_cmd_pkt header;
2353 __le16 action; 2474 __le16 action;
2354 __le16 threshold; 2475 __u8 short_slot;
2355} __attribute__((packed)); 2476} __attribute__((packed));
2356 2477
2357static int mwl8k_rts_threshold(struct ieee80211_hw *hw, 2478static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
2358 u16 action, u16 threshold)
2359{ 2479{
2360 struct mwl8k_cmd_rts_threshold *cmd; 2480 struct mwl8k_cmd_set_slot *cmd;
2361 int rc; 2481 int rc;
2362 2482
2363 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2483 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2364 if (cmd == NULL) 2484 if (cmd == NULL)
2365 return -ENOMEM; 2485 return -ENOMEM;
2366 2486
2367 cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD); 2487 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
2368 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2488 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2369 cmd->action = cpu_to_le16(action); 2489 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2370 cmd->threshold = cpu_to_le16(threshold); 2490 cmd->short_slot = short_slot_time;
2371 2491
2372 rc = mwl8k_post_cmd(hw, &cmd->header); 2492 rc = mwl8k_post_cmd(hw, &cmd->header);
2373 kfree(cmd); 2493 kfree(cmd);
@@ -2426,9 +2546,9 @@ struct mwl8k_cmd_set_edca_params {
2426 MWL8K_SET_EDCA_AIFS) 2546 MWL8K_SET_EDCA_AIFS)
2427 2547
2428static int 2548static int
2429mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum, 2549mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2430 __u16 cw_min, __u16 cw_max, 2550 __u16 cw_min, __u16 cw_max,
2431 __u8 aifs, __u16 txop) 2551 __u8 aifs, __u16 txop)
2432{ 2552{
2433 struct mwl8k_priv *priv = hw->priv; 2553 struct mwl8k_priv *priv = hw->priv;
2434 struct mwl8k_cmd_set_edca_params *cmd; 2554 struct mwl8k_cmd_set_edca_params *cmd;
@@ -2438,12 +2558,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2438 if (cmd == NULL) 2558 if (cmd == NULL)
2439 return -ENOMEM; 2559 return -ENOMEM;
2440 2560
2441 /*
2442 * Queues 0 (BE) and 1 (BK) are swapped in hardware for
2443 * this call.
2444 */
2445 qnum ^= !(qnum >> 1);
2446
2447 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS); 2561 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
2448 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2562 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2449 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL); 2563 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
@@ -2467,170 +2581,259 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2467} 2581}
2468 2582
2469/* 2583/*
2470 * CMD_FINALIZE_JOIN. 2584 * CMD_SET_WMM_MODE.
2471 */ 2585 */
2472#define MWL8K_FJ_BEACON_MAXLEN 128 2586struct mwl8k_cmd_set_wmm_mode {
2473
2474struct mwl8k_cmd_finalize_join {
2475 struct mwl8k_cmd_pkt header; 2587 struct mwl8k_cmd_pkt header;
2476 __le32 sleep_interval; /* Number of beacon periods to sleep */ 2588 __le16 action;
2477 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
2478} __attribute__((packed)); 2589} __attribute__((packed));
2479 2590
2480static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame, 2591static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
2481 int framelen, int dtim)
2482{ 2592{
2483 struct mwl8k_cmd_finalize_join *cmd; 2593 struct mwl8k_priv *priv = hw->priv;
2484 struct ieee80211_mgmt *payload = frame; 2594 struct mwl8k_cmd_set_wmm_mode *cmd;
2485 int payload_len;
2486 int rc; 2595 int rc;
2487 2596
2488 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2597 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2489 if (cmd == NULL) 2598 if (cmd == NULL)
2490 return -ENOMEM; 2599 return -ENOMEM;
2491 2600
2492 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN); 2601 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
2493 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2602 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2494 cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1); 2603 cmd->action = cpu_to_le16(!!enable);
2495
2496 payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
2497 if (payload_len < 0)
2498 payload_len = 0;
2499 else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2500 payload_len = MWL8K_FJ_BEACON_MAXLEN;
2501
2502 memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
2503 2604
2504 rc = mwl8k_post_cmd(hw, &cmd->header); 2605 rc = mwl8k_post_cmd(hw, &cmd->header);
2505 kfree(cmd); 2606 kfree(cmd);
2506 2607
2608 if (!rc)
2609 priv->wmm_enabled = enable;
2610
2507 return rc; 2611 return rc;
2508} 2612}
2509 2613
2510/* 2614/*
2511 * CMD_UPDATE_STADB. 2615 * CMD_MIMO_CONFIG.
2512 */ 2616 */
2513struct mwl8k_cmd_update_sta_db { 2617struct mwl8k_cmd_mimo_config {
2514 struct mwl8k_cmd_pkt header; 2618 struct mwl8k_cmd_pkt header;
2619 __le32 action;
2620 __u8 rx_antenna_map;
2621 __u8 tx_antenna_map;
2622} __attribute__((packed));
2515 2623
2516 /* See STADB_ACTION_TYPE */ 2624static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
2517 __le32 action; 2625{
2626 struct mwl8k_cmd_mimo_config *cmd;
2627 int rc;
2518 2628
2519 /* Peer MAC address */ 2629 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2520 __u8 peer_addr[ETH_ALEN]; 2630 if (cmd == NULL)
2631 return -ENOMEM;
2521 2632
2522 __le32 reserved; 2633 cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
2634 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2635 cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
2636 cmd->rx_antenna_map = rx;
2637 cmd->tx_antenna_map = tx;
2523 2638
2524 /* Peer info - valid during add/update. */ 2639 rc = mwl8k_post_cmd(hw, &cmd->header);
2525 struct peer_capability_info peer_info; 2640 kfree(cmd);
2641
2642 return rc;
2643}
2644
2645/*
2646 * CMD_USE_FIXED_RATE (STA version).
2647 */
2648struct mwl8k_cmd_use_fixed_rate_sta {
2649 struct mwl8k_cmd_pkt header;
2650 __le32 action;
2651 __le32 allow_rate_drop;
2652 __le32 num_rates;
2653 struct {
2654 __le32 is_ht_rate;
2655 __le32 enable_retry;
2656 __le32 rate;
2657 __le32 retry_count;
2658 } rate_entry[8];
2659 __le32 rate_type;
2660 __le32 reserved1;
2661 __le32 reserved2;
2526} __attribute__((packed)); 2662} __attribute__((packed));
2527 2663
2528static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw, 2664#define MWL8K_USE_AUTO_RATE 0x0002
2529 struct ieee80211_vif *vif, __u32 action) 2665#define MWL8K_UCAST_RATE 0
2666
2667static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
2530{ 2668{
2531 struct mwl8k_vif *mv_vif = MWL8K_VIF(vif); 2669 struct mwl8k_cmd_use_fixed_rate_sta *cmd;
2532 struct ieee80211_bss_conf *info = &mv_vif->bss_info;
2533 struct mwl8k_cmd_update_sta_db *cmd;
2534 struct peer_capability_info *peer_info;
2535 int rc; 2670 int rc;
2536 2671
2537 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2672 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2538 if (cmd == NULL) 2673 if (cmd == NULL)
2539 return -ENOMEM; 2674 return -ENOMEM;
2540 2675
2541 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB); 2676 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
2542 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2677 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2678 cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
2679 cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
2543 2680
2544 cmd->action = cpu_to_le32(action); 2681 rc = mwl8k_post_cmd(hw, &cmd->header);
2545 peer_info = &cmd->peer_info; 2682 kfree(cmd);
2546 memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
2547 2683
2548 switch (action) { 2684 return rc;
2549 case MWL8K_STA_DB_ADD_ENTRY: 2685}
2550 case MWL8K_STA_DB_MODIFY_ENTRY:
2551 /* Build peer_info block */
2552 peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
2553 peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
2554 memcpy(peer_info->legacy_rates, mwl8k_rateids,
2555 sizeof(mwl8k_rateids));
2556 peer_info->interop = 1;
2557 peer_info->amsdu_enabled = 0;
2558
2559 rc = mwl8k_post_cmd(hw, &cmd->header);
2560 if (rc == 0)
2561 mv_vif->peer_id = peer_info->station_id;
2562 2686
2563 break; 2687/*
2688 * CMD_USE_FIXED_RATE (AP version).
2689 */
2690struct mwl8k_cmd_use_fixed_rate_ap {
2691 struct mwl8k_cmd_pkt header;
2692 __le32 action;
2693 __le32 allow_rate_drop;
2694 __le32 num_rates;
2695 struct mwl8k_rate_entry_ap {
2696 __le32 is_ht_rate;
2697 __le32 enable_retry;
2698 __le32 rate;
2699 __le32 retry_count;
2700 } rate_entry[4];
2701 u8 multicast_rate;
2702 u8 multicast_rate_type;
2703 u8 management_rate;
2704} __attribute__((packed));
2564 2705
2565 case MWL8K_STA_DB_DEL_ENTRY: 2706static int
2566 case MWL8K_STA_DB_FLUSH: 2707mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
2567 default: 2708{
2568 rc = mwl8k_post_cmd(hw, &cmd->header); 2709 struct mwl8k_cmd_use_fixed_rate_ap *cmd;
2569 if (rc == 0) 2710 int rc;
2570 mv_vif->peer_id = 0; 2711
2571 break; 2712 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2572 } 2713 if (cmd == NULL)
2714 return -ENOMEM;
2715
2716 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
2717 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2718 cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
2719 cmd->multicast_rate = mcast;
2720 cmd->management_rate = mgmt;
2721
2722 rc = mwl8k_post_cmd(hw, &cmd->header);
2573 kfree(cmd); 2723 kfree(cmd);
2574 2724
2575 return rc; 2725 return rc;
2576} 2726}
2577 2727
2578/* 2728/*
2579 * CMD_SET_AID. 2729 * CMD_ENABLE_SNIFFER.
2580 */ 2730 */
2581#define MWL8K_FRAME_PROT_DISABLED 0x00 2731struct mwl8k_cmd_enable_sniffer {
2582#define MWL8K_FRAME_PROT_11G 0x07 2732 struct mwl8k_cmd_pkt header;
2583#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02 2733 __le32 action;
2584#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
2585
2586struct mwl8k_cmd_update_set_aid {
2587 struct mwl8k_cmd_pkt header;
2588 __le16 aid;
2589
2590 /* AP's MAC address (BSSID) */
2591 __u8 bssid[ETH_ALEN];
2592 __le16 protection_mode;
2593 __u8 supp_rates[14];
2594} __attribute__((packed)); 2734} __attribute__((packed));
2595 2735
2596static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw, 2736static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2597 struct ieee80211_vif *vif)
2598{ 2737{
2599 struct mwl8k_vif *mv_vif = MWL8K_VIF(vif); 2738 struct mwl8k_cmd_enable_sniffer *cmd;
2600 struct ieee80211_bss_conf *info = &mv_vif->bss_info;
2601 struct mwl8k_cmd_update_set_aid *cmd;
2602 u16 prot_mode;
2603 int rc; 2739 int rc;
2604 2740
2605 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2741 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2606 if (cmd == NULL) 2742 if (cmd == NULL)
2607 return -ENOMEM; 2743 return -ENOMEM;
2608 2744
2609 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID); 2745 cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
2610 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2746 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2611 cmd->aid = cpu_to_le16(info->aid); 2747 cmd->action = cpu_to_le32(!!enable);
2612 2748
2613 memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN); 2749 rc = mwl8k_post_cmd(hw, &cmd->header);
2750 kfree(cmd);
2614 2751
2615 if (info->use_cts_prot) { 2752 return rc;
2616 prot_mode = MWL8K_FRAME_PROT_11G; 2753}
2754
2755/*
2756 * CMD_SET_MAC_ADDR.
2757 */
2758struct mwl8k_cmd_set_mac_addr {
2759 struct mwl8k_cmd_pkt header;
2760 union {
2761 struct {
2762 __le16 mac_type;
2763 __u8 mac_addr[ETH_ALEN];
2764 } mbss;
2765 __u8 mac_addr[ETH_ALEN];
2766 };
2767} __attribute__((packed));
2768
2769#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
2770#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
2771#define MWL8K_MAC_TYPE_PRIMARY_AP 2
2772#define MWL8K_MAC_TYPE_SECONDARY_AP 3
2773
2774static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
2775 struct ieee80211_vif *vif, u8 *mac)
2776{
2777 struct mwl8k_priv *priv = hw->priv;
2778 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
2779 struct mwl8k_cmd_set_mac_addr *cmd;
2780 int mac_type;
2781 int rc;
2782
2783 mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
2784 if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
2785 if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
2786 mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
2787 else
2788 mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
2789 } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
2790 if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
2791 mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
2792 else
2793 mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
2794 }
2795
2796 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2797 if (cmd == NULL)
2798 return -ENOMEM;
2799
2800 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
2801 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2802 if (priv->ap_fw) {
2803 cmd->mbss.mac_type = cpu_to_le16(mac_type);
2804 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2617 } else { 2805 } else {
2618 switch (info->ht_operation_mode & 2806 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2619 IEEE80211_HT_OP_MODE_PROTECTION) {
2620 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
2621 prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
2622 break;
2623 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
2624 prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
2625 break;
2626 default:
2627 prot_mode = MWL8K_FRAME_PROT_DISABLED;
2628 break;
2629 }
2630 } 2807 }
2631 cmd->protection_mode = cpu_to_le16(prot_mode);
2632 2808
2633 memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids)); 2809 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2810 kfree(cmd);
2811
2812 return rc;
2813}
2814
2815/*
2816 * CMD_SET_RATEADAPT_MODE.
2817 */
2818struct mwl8k_cmd_set_rate_adapt_mode {
2819 struct mwl8k_cmd_pkt header;
2820 __le16 action;
2821 __le16 mode;
2822} __attribute__((packed));
2823
2824static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
2825{
2826 struct mwl8k_cmd_set_rate_adapt_mode *cmd;
2827 int rc;
2828
2829 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2830 if (cmd == NULL)
2831 return -ENOMEM;
2832
2833 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
2834 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2835 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2836 cmd->mode = cpu_to_le16(mode);
2634 2837
2635 rc = mwl8k_post_cmd(hw, &cmd->header); 2838 rc = mwl8k_post_cmd(hw, &cmd->header);
2636 kfree(cmd); 2839 kfree(cmd);
@@ -2639,115 +2842,255 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
2639} 2842}
2640 2843
2641/* 2844/*
2642 * CMD_SET_RATE. 2845 * CMD_BSS_START.
2643 */ 2846 */
2644struct mwl8k_cmd_update_rateset { 2847struct mwl8k_cmd_bss_start {
2645 struct mwl8k_cmd_pkt header; 2848 struct mwl8k_cmd_pkt header;
2646 __u8 legacy_rates[14]; 2849 __le32 enable;
2647
2648 /* Bitmap for supported MCS codes. */
2649 __u8 mcs_set[16];
2650 __u8 reserved[16];
2651} __attribute__((packed)); 2850} __attribute__((packed));
2652 2851
2653static int mwl8k_update_rateset(struct ieee80211_hw *hw, 2852static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
2654 struct ieee80211_vif *vif) 2853 struct ieee80211_vif *vif, int enable)
2655{ 2854{
2656 struct mwl8k_cmd_update_rateset *cmd; 2855 struct mwl8k_cmd_bss_start *cmd;
2657 int rc; 2856 int rc;
2658 2857
2659 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2858 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2660 if (cmd == NULL) 2859 if (cmd == NULL)
2661 return -ENOMEM; 2860 return -ENOMEM;
2662 2861
2663 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE); 2862 cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
2664 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2863 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2665 memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids)); 2864 cmd->enable = cpu_to_le32(enable);
2666 2865
2667 rc = mwl8k_post_cmd(hw, &cmd->header); 2866 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2668 kfree(cmd); 2867 kfree(cmd);
2669 2868
2670 return rc; 2869 return rc;
2671} 2870}
2672 2871
2673/* 2872/*
2674 * CMD_USE_FIXED_RATE. 2873 * CMD_SET_NEW_STN.
2675 */ 2874 */
2676#define MWL8K_RATE_TABLE_SIZE 8 2875struct mwl8k_cmd_set_new_stn {
2677#define MWL8K_UCAST_RATE 0 2876 struct mwl8k_cmd_pkt header;
2678#define MWL8K_USE_AUTO_RATE 0x0002 2877 __le16 aid;
2878 __u8 mac_addr[6];
2879 __le16 stn_id;
2880 __le16 action;
2881 __le16 rsvd;
2882 __le32 legacy_rates;
2883 __u8 ht_rates[4];
2884 __le16 cap_info;
2885 __le16 ht_capabilities_info;
2886 __u8 mac_ht_param_info;
2887 __u8 rev;
2888 __u8 control_channel;
2889 __u8 add_channel;
2890 __le16 op_mode;
2891 __le16 stbc;
2892 __u8 add_qos_info;
2893 __u8 is_qos_sta;
2894 __le32 fw_sta_ptr;
2895} __attribute__((packed));
2679 2896
2680struct mwl8k_rate_entry { 2897#define MWL8K_STA_ACTION_ADD 0
2681 /* Set to 1 if HT rate, 0 if legacy. */ 2898#define MWL8K_STA_ACTION_REMOVE 2
2682 __le32 is_ht_rate;
2683 2899
2684 /* Set to 1 to use retry_count field. */ 2900static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
2685 __le32 enable_retry; 2901 struct ieee80211_vif *vif,
2902 struct ieee80211_sta *sta)
2903{
2904 struct mwl8k_cmd_set_new_stn *cmd;
2905 u32 rates;
2906 int rc;
2686 2907
2687 /* Specified legacy rate or MCS. */ 2908 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2688 __le32 rate; 2909 if (cmd == NULL)
2910 return -ENOMEM;
2689 2911
2690 /* Number of allowed retries. */ 2912 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2691 __le32 retry_count; 2913 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2914 cmd->aid = cpu_to_le16(sta->aid);
2915 memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
2916 cmd->stn_id = cpu_to_le16(sta->aid);
2917 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
2918 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
2919 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
2920 else
2921 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
2922 cmd->legacy_rates = cpu_to_le32(rates);
2923 if (sta->ht_cap.ht_supported) {
2924 cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
2925 cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
2926 cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
2927 cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
2928 cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
2929 cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
2930 ((sta->ht_cap.ampdu_density & 7) << 2);
2931 cmd->is_qos_sta = 1;
2932 }
2933
2934 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2935 kfree(cmd);
2936
2937 return rc;
2938}
2939
2940static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
2941 struct ieee80211_vif *vif)
2942{
2943 struct mwl8k_cmd_set_new_stn *cmd;
2944 int rc;
2945
2946 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2947 if (cmd == NULL)
2948 return -ENOMEM;
2949
2950 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2951 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2952 memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
2953
2954 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2955 kfree(cmd);
2956
2957 return rc;
2958}
2959
2960static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
2961 struct ieee80211_vif *vif, u8 *addr)
2962{
2963 struct mwl8k_cmd_set_new_stn *cmd;
2964 int rc;
2965
2966 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2967 if (cmd == NULL)
2968 return -ENOMEM;
2969
2970 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2971 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2972 memcpy(cmd->mac_addr, addr, ETH_ALEN);
2973 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
2974
2975 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2976 kfree(cmd);
2977
2978 return rc;
2979}
2980
2981/*
2982 * CMD_UPDATE_STADB.
2983 */
2984struct ewc_ht_info {
2985 __le16 control1;
2986 __le16 control2;
2987 __le16 control3;
2692} __attribute__((packed)); 2988} __attribute__((packed));
2693 2989
2694struct mwl8k_rate_table { 2990struct peer_capability_info {
2695 /* 1 to allow specified rate and below */ 2991 /* Peer type - AP vs. STA. */
2696 __le32 allow_rate_drop; 2992 __u8 peer_type;
2697 __le32 num_rates; 2993
2698 struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE]; 2994 /* Basic 802.11 capabilities from assoc resp. */
2995 __le16 basic_caps;
2996
2997 /* Set if peer supports 802.11n high throughput (HT). */
2998 __u8 ht_support;
2999
3000 /* Valid if HT is supported. */
3001 __le16 ht_caps;
3002 __u8 extended_ht_caps;
3003 struct ewc_ht_info ewc_info;
3004
3005 /* Legacy rate table. Intersection of our rates and peer rates. */
3006 __u8 legacy_rates[12];
3007
3008 /* HT rate table. Intersection of our rates and peer rates. */
3009 __u8 ht_rates[16];
3010 __u8 pad[16];
3011
3012 /* If set, interoperability mode, no proprietary extensions. */
3013 __u8 interop;
3014 __u8 pad2;
3015 __u8 station_id;
3016 __le16 amsdu_enabled;
2699} __attribute__((packed)); 3017} __attribute__((packed));
2700 3018
2701struct mwl8k_cmd_use_fixed_rate { 3019struct mwl8k_cmd_update_stadb {
2702 struct mwl8k_cmd_pkt header; 3020 struct mwl8k_cmd_pkt header;
3021
3022 /* See STADB_ACTION_TYPE */
2703 __le32 action; 3023 __le32 action;
2704 struct mwl8k_rate_table rate_table;
2705 3024
2706 /* Unicast, Broadcast or Multicast */ 3025 /* Peer MAC address */
2707 __le32 rate_type; 3026 __u8 peer_addr[ETH_ALEN];
2708 __le32 reserved1; 3027
2709 __le32 reserved2; 3028 __le32 reserved;
3029
3030 /* Peer info - valid during add/update. */
3031 struct peer_capability_info peer_info;
2710} __attribute__((packed)); 3032} __attribute__((packed));
2711 3033
2712static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw, 3034#define MWL8K_STA_DB_MODIFY_ENTRY 1
2713 u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table) 3035#define MWL8K_STA_DB_DEL_ENTRY 2
3036
3037/* Peer Entry flags - used to define the type of the peer node */
3038#define MWL8K_PEER_TYPE_ACCESSPOINT 2
3039
3040static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
3041 struct ieee80211_vif *vif,
3042 struct ieee80211_sta *sta)
2714{ 3043{
2715 struct mwl8k_cmd_use_fixed_rate *cmd; 3044 struct mwl8k_cmd_update_stadb *cmd;
2716 int count; 3045 struct peer_capability_info *p;
3046 u32 rates;
2717 int rc; 3047 int rc;
2718 3048
2719 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 3049 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2720 if (cmd == NULL) 3050 if (cmd == NULL)
2721 return -ENOMEM; 3051 return -ENOMEM;
2722 3052
2723 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE); 3053 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
2724 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 3054 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3055 cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
3056 memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
3057
3058 p = &cmd->peer_info;
3059 p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
3060 p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
3061 p->ht_support = sta->ht_cap.ht_supported;
3062 p->ht_caps = sta->ht_cap.cap;
3063 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
3064 ((sta->ht_cap.ampdu_density & 7) << 2);
3065 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
3066 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
3067 else
3068 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
3069 legacy_rate_mask_to_array(p->legacy_rates, rates);
3070 memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
3071 p->interop = 1;
3072 p->amsdu_enabled = 0;
2725 3073
2726 cmd->action = cpu_to_le32(action); 3074 rc = mwl8k_post_cmd(hw, &cmd->header);
2727 cmd->rate_type = cpu_to_le32(rate_type); 3075 kfree(cmd);
2728 3076
2729 if (rate_table != NULL) { 3077 return rc ? rc : p->station_id;
2730 /* 3078}
2731 * Copy over each field manually so that endian 3079
2732 * conversion can be done. 3080static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
2733 */ 3081 struct ieee80211_vif *vif, u8 *addr)
2734 cmd->rate_table.allow_rate_drop = 3082{
2735 cpu_to_le32(rate_table->allow_rate_drop); 3083 struct mwl8k_cmd_update_stadb *cmd;
2736 cmd->rate_table.num_rates = 3084 int rc;
2737 cpu_to_le32(rate_table->num_rates); 3085
2738 3086 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2739 for (count = 0; count < rate_table->num_rates; count++) { 3087 if (cmd == NULL)
2740 struct mwl8k_rate_entry *dst = 3088 return -ENOMEM;
2741 &cmd->rate_table.rate_entry[count]; 3089
2742 struct mwl8k_rate_entry *src = 3090 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
2743 &rate_table->rate_entry[count]; 3091 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2744 3092 cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
2745 dst->is_ht_rate = cpu_to_le32(src->is_ht_rate); 3093 memcpy(cmd->peer_addr, addr, ETH_ALEN);
2746 dst->enable_retry = cpu_to_le32(src->enable_retry);
2747 dst->rate = cpu_to_le32(src->rate);
2748 dst->retry_count = cpu_to_le32(src->retry_count);
2749 }
2750 }
2751 3094
2752 rc = mwl8k_post_cmd(hw, &cmd->header); 3095 rc = mwl8k_post_cmd(hw, &cmd->header);
2753 kfree(cmd); 3096 kfree(cmd);
@@ -2766,19 +3109,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2766 u32 status; 3109 u32 status;
2767 3110
2768 status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 3111 status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
2769 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
2770
2771 if (!status) 3112 if (!status)
2772 return IRQ_NONE; 3113 return IRQ_NONE;
2773 3114
2774 if (status & MWL8K_A2H_INT_TX_DONE) 3115 if (status & MWL8K_A2H_INT_TX_DONE) {
2775 tasklet_schedule(&priv->tx_reclaim_task); 3116 status &= ~MWL8K_A2H_INT_TX_DONE;
3117 tasklet_schedule(&priv->poll_tx_task);
3118 }
2776 3119
2777 if (status & MWL8K_A2H_INT_RX_READY) { 3120 if (status & MWL8K_A2H_INT_RX_READY) {
2778 while (rxq_process(hw, 0, 1)) 3121 status &= ~MWL8K_A2H_INT_RX_READY;
2779 rxq_refill(hw, 0, 1); 3122 tasklet_schedule(&priv->poll_rx_task);
2780 } 3123 }
2781 3124
3125 if (status)
3126 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3127
2782 if (status & MWL8K_A2H_INT_OPC_DONE) { 3128 if (status & MWL8K_A2H_INT_OPC_DONE) {
2783 if (priv->hostcmd_wait != NULL) 3129 if (priv->hostcmd_wait != NULL)
2784 complete(priv->hostcmd_wait); 3130 complete(priv->hostcmd_wait);
@@ -2793,6 +3139,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2793 return IRQ_HANDLED; 3139 return IRQ_HANDLED;
2794} 3140}
2795 3141
3142static void mwl8k_tx_poll(unsigned long data)
3143{
3144 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
3145 struct mwl8k_priv *priv = hw->priv;
3146 int limit;
3147 int i;
3148
3149 limit = 32;
3150
3151 spin_lock_bh(&priv->tx_lock);
3152
3153 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3154 limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
3155
3156 if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
3157 complete(priv->tx_wait);
3158 priv->tx_wait = NULL;
3159 }
3160
3161 spin_unlock_bh(&priv->tx_lock);
3162
3163 if (limit) {
3164 writel(~MWL8K_A2H_INT_TX_DONE,
3165 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3166 } else {
3167 tasklet_schedule(&priv->poll_tx_task);
3168 }
3169}
3170
3171static void mwl8k_rx_poll(unsigned long data)
3172{
3173 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
3174 struct mwl8k_priv *priv = hw->priv;
3175 int limit;
3176
3177 limit = 32;
3178 limit -= rxq_process(hw, 0, limit);
3179 limit -= rxq_refill(hw, 0, limit);
3180
3181 if (limit) {
3182 writel(~MWL8K_A2H_INT_RX_READY,
3183 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3184 } else {
3185 tasklet_schedule(&priv->poll_rx_task);
3186 }
3187}
3188
2796 3189
2797/* 3190/*
2798 * Core driver operations. 3191 * Core driver operations.
@@ -2803,7 +3196,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2803 int index = skb_get_queue_mapping(skb); 3196 int index = skb_get_queue_mapping(skb);
2804 int rc; 3197 int rc;
2805 3198
2806 if (priv->current_channel == NULL) { 3199 if (!priv->radio_on) {
2807 printk(KERN_DEBUG "%s: dropped TX frame since radio " 3200 printk(KERN_DEBUG "%s: dropped TX frame since radio "
2808 "disabled\n", wiphy_name(hw->wiphy)); 3201 "disabled\n", wiphy_name(hw->wiphy));
2809 dev_kfree_skb(skb); 3202 dev_kfree_skb(skb);
@@ -2828,19 +3221,20 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2828 return -EIO; 3221 return -EIO;
2829 } 3222 }
2830 3223
2831 /* Enable tx reclaim tasklet */ 3224 /* Enable TX reclaim and RX tasklets. */
2832 tasklet_enable(&priv->tx_reclaim_task); 3225 tasklet_enable(&priv->poll_tx_task);
3226 tasklet_enable(&priv->poll_rx_task);
2833 3227
2834 /* Enable interrupts */ 3228 /* Enable interrupts */
2835 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3229 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2836 3230
2837 rc = mwl8k_fw_lock(hw); 3231 rc = mwl8k_fw_lock(hw);
2838 if (!rc) { 3232 if (!rc) {
2839 rc = mwl8k_cmd_802_11_radio_enable(hw); 3233 rc = mwl8k_cmd_radio_enable(hw);
2840 3234
2841 if (!priv->ap_fw) { 3235 if (!priv->ap_fw) {
2842 if (!rc) 3236 if (!rc)
2843 rc = mwl8k_enable_sniffer(hw, 0); 3237 rc = mwl8k_cmd_enable_sniffer(hw, 0);
2844 3238
2845 if (!rc) 3239 if (!rc)
2846 rc = mwl8k_cmd_set_pre_scan(hw); 3240 rc = mwl8k_cmd_set_pre_scan(hw);
@@ -2851,10 +3245,10 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2851 } 3245 }
2852 3246
2853 if (!rc) 3247 if (!rc)
2854 rc = mwl8k_cmd_setrateadaptmode(hw, 0); 3248 rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
2855 3249
2856 if (!rc) 3250 if (!rc)
2857 rc = mwl8k_set_wmm(hw, 0); 3251 rc = mwl8k_cmd_set_wmm_mode(hw, 0);
2858 3252
2859 mwl8k_fw_unlock(hw); 3253 mwl8k_fw_unlock(hw);
2860 } 3254 }
@@ -2862,7 +3256,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2862 if (rc) { 3256 if (rc) {
2863 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3257 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2864 free_irq(priv->pdev->irq, hw); 3258 free_irq(priv->pdev->irq, hw);
2865 tasklet_disable(&priv->tx_reclaim_task); 3259 tasklet_disable(&priv->poll_tx_task);
3260 tasklet_disable(&priv->poll_rx_task);
2866 } 3261 }
2867 3262
2868 return rc; 3263 return rc;
@@ -2873,7 +3268,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2873 struct mwl8k_priv *priv = hw->priv; 3268 struct mwl8k_priv *priv = hw->priv;
2874 int i; 3269 int i;
2875 3270
2876 mwl8k_cmd_802_11_radio_disable(hw); 3271 mwl8k_cmd_radio_disable(hw);
2877 3272
2878 ieee80211_stop_queues(hw); 3273 ieee80211_stop_queues(hw);
2879 3274
@@ -2886,36 +3281,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2886 if (priv->beacon_skb != NULL) 3281 if (priv->beacon_skb != NULL)
2887 dev_kfree_skb(priv->beacon_skb); 3282 dev_kfree_skb(priv->beacon_skb);
2888 3283
2889 /* Stop tx reclaim tasklet */ 3284 /* Stop TX reclaim and RX tasklets. */
2890 tasklet_disable(&priv->tx_reclaim_task); 3285 tasklet_disable(&priv->poll_tx_task);
3286 tasklet_disable(&priv->poll_rx_task);
2891 3287
2892 /* Return all skbs to mac80211 */ 3288 /* Return all skbs to mac80211 */
2893 for (i = 0; i < MWL8K_TX_QUEUES; i++) 3289 for (i = 0; i < MWL8K_TX_QUEUES; i++)
2894 mwl8k_txq_reclaim(hw, i, 1); 3290 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
2895} 3291}
2896 3292
2897static int mwl8k_add_interface(struct ieee80211_hw *hw, 3293static int mwl8k_add_interface(struct ieee80211_hw *hw,
2898 struct ieee80211_if_init_conf *conf) 3294 struct ieee80211_vif *vif)
2899{ 3295{
2900 struct mwl8k_priv *priv = hw->priv; 3296 struct mwl8k_priv *priv = hw->priv;
2901 struct mwl8k_vif *mwl8k_vif; 3297 struct mwl8k_vif *mwl8k_vif;
2902 3298 u32 macids_supported;
2903 /* 3299 int macid;
2904 * We only support one active interface at a time.
2905 */
2906 if (priv->vif != NULL)
2907 return -EBUSY;
2908
2909 /*
2910 * We only support managed interfaces for now.
2911 */
2912 if (conf->type != NL80211_IFTYPE_STATION)
2913 return -EINVAL;
2914 3300
2915 /* 3301 /*
2916 * Reject interface creation if sniffer mode is active, as 3302 * Reject interface creation if sniffer mode is active, as
2917 * STA operation is mutually exclusive with hardware sniffer 3303 * STA operation is mutually exclusive with hardware sniffer
2918 * mode. 3304 * mode. (Sniffer mode is only used on STA firmware.)
2919 */ 3305 */
2920 if (priv->sniffer_enabled) { 3306 if (priv->sniffer_enabled) {
2921 printk(KERN_INFO "%s: unable to create STA " 3307 printk(KERN_INFO "%s: unable to create STA "
@@ -2924,37 +3310,54 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
2924 return -EINVAL; 3310 return -EINVAL;
2925 } 3311 }
2926 3312
2927 /* Clean out driver private area */
2928 mwl8k_vif = MWL8K_VIF(conf->vif);
2929 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
2930 3313
2931 /* Set and save the mac address */ 3314 switch (vif->type) {
2932 mwl8k_set_mac_addr(hw, conf->mac_addr); 3315 case NL80211_IFTYPE_AP:
2933 memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN); 3316 macids_supported = priv->ap_macids_supported;
3317 break;
3318 case NL80211_IFTYPE_STATION:
3319 macids_supported = priv->sta_macids_supported;
3320 break;
3321 default:
3322 return -EINVAL;
3323 }
2934 3324
2935 /* Back pointer to parent config block */ 3325 macid = ffs(macids_supported & ~priv->macids_used);
2936 mwl8k_vif->priv = priv; 3326 if (!macid--)
3327 return -EBUSY;
2937 3328
2938 /* Set Initial sequence number to zero */ 3329 /* Setup driver private area. */
3330 mwl8k_vif = MWL8K_VIF(vif);
3331 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
3332 mwl8k_vif->vif = vif;
3333 mwl8k_vif->macid = macid;
2939 mwl8k_vif->seqno = 0; 3334 mwl8k_vif->seqno = 0;
2940 3335
2941 priv->vif = conf->vif; 3336 /* Set the mac address. */
2942 priv->current_channel = NULL; 3337 mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
3338
3339 if (priv->ap_fw)
3340 mwl8k_cmd_set_new_stn_add_self(hw, vif);
3341
3342 priv->macids_used |= 1 << mwl8k_vif->macid;
3343 list_add_tail(&mwl8k_vif->list, &priv->vif_list);
2943 3344
2944 return 0; 3345 return 0;
2945} 3346}
2946 3347
2947static void mwl8k_remove_interface(struct ieee80211_hw *hw, 3348static void mwl8k_remove_interface(struct ieee80211_hw *hw,
2948 struct ieee80211_if_init_conf *conf) 3349 struct ieee80211_vif *vif)
2949{ 3350{
2950 struct mwl8k_priv *priv = hw->priv; 3351 struct mwl8k_priv *priv = hw->priv;
3352 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
2951 3353
2952 if (priv->vif == NULL) 3354 if (priv->ap_fw)
2953 return; 3355 mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
2954 3356
2955 mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00"); 3357 mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
2956 3358
2957 priv->vif = NULL; 3359 priv->macids_used &= ~(1 << mwl8k_vif->macid);
3360 list_del(&mwl8k_vif->list);
2958} 3361}
2959 3362
2960static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) 3363static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2964,8 +3367,7 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2964 int rc; 3367 int rc;
2965 3368
2966 if (conf->flags & IEEE80211_CONF_IDLE) { 3369 if (conf->flags & IEEE80211_CONF_IDLE) {
2967 mwl8k_cmd_802_11_radio_disable(hw); 3370 mwl8k_cmd_radio_disable(hw);
2968 priv->current_channel = NULL;
2969 return 0; 3371 return 0;
2970 } 3372 }
2971 3373
@@ -2973,19 +3375,17 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2973 if (rc) 3375 if (rc)
2974 return rc; 3376 return rc;
2975 3377
2976 rc = mwl8k_cmd_802_11_radio_enable(hw); 3378 rc = mwl8k_cmd_radio_enable(hw);
2977 if (rc) 3379 if (rc)
2978 goto out; 3380 goto out;
2979 3381
2980 rc = mwl8k_cmd_set_rf_channel(hw, conf->channel); 3382 rc = mwl8k_cmd_set_rf_channel(hw, conf);
2981 if (rc) 3383 if (rc)
2982 goto out; 3384 goto out;
2983 3385
2984 priv->current_channel = conf->channel;
2985
2986 if (conf->power_level > 18) 3386 if (conf->power_level > 18)
2987 conf->power_level = 18; 3387 conf->power_level = 18;
2988 rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level); 3388 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
2989 if (rc) 3389 if (rc)
2990 goto out; 3390 goto out;
2991 3391
@@ -3003,79 +3403,160 @@ out:
3003 return rc; 3403 return rc;
3004} 3404}
3005 3405
3006static void mwl8k_bss_info_changed(struct ieee80211_hw *hw, 3406static void
3007 struct ieee80211_vif *vif, 3407mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3008 struct ieee80211_bss_conf *info, 3408 struct ieee80211_bss_conf *info, u32 changed)
3009 u32 changed)
3010{ 3409{
3011 struct mwl8k_priv *priv = hw->priv; 3410 struct mwl8k_priv *priv = hw->priv;
3012 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); 3411 u32 ap_legacy_rates;
3412 u8 ap_mcs_rates[16];
3013 int rc; 3413 int rc;
3014 3414
3015 if ((changed & BSS_CHANGED_ASSOC) == 0) 3415 if (mwl8k_fw_lock(hw))
3016 return; 3416 return;
3017 3417
3018 priv->capture_beacon = false; 3418 /*
3019 3419 * No need to capture a beacon if we're no longer associated.
3020 rc = mwl8k_fw_lock(hw); 3420 */
3021 if (rc) 3421 if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
3022 return; 3422 priv->capture_beacon = false;
3023 3423
3024 if (info->assoc) { 3424 /*
3025 memcpy(&mwl8k_vif->bss_info, info, 3425 * Get the AP's legacy and MCS rates.
3026 sizeof(struct ieee80211_bss_conf)); 3426 */
3427 if (vif->bss_conf.assoc) {
3428 struct ieee80211_sta *ap;
3027 3429
3028 memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN); 3430 rcu_read_lock();
3029 3431
3030 /* Install rates */ 3432 ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
3031 rc = mwl8k_update_rateset(hw, vif); 3433 if (ap == NULL) {
3032 if (rc) 3434 rcu_read_unlock();
3033 goto out; 3435 goto out;
3436 }
3437
3438 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
3439 ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
3440 } else {
3441 ap_legacy_rates =
3442 ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
3443 }
3444 memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
3445
3446 rcu_read_unlock();
3447 }
3034 3448
3035 /* Turn on rate adaptation */ 3449 if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
3036 rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE, 3450 rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
3037 MWL8K_UCAST_RATE, NULL);
3038 if (rc) 3451 if (rc)
3039 goto out; 3452 goto out;
3040 3453
3041 /* Set radio preamble */ 3454 rc = mwl8k_cmd_use_fixed_rate_sta(hw);
3042 rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
3043 if (rc) 3455 if (rc)
3044 goto out; 3456 goto out;
3457 }
3045 3458
3046 /* Set slot time */ 3459 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3047 rc = mwl8k_cmd_set_slot(hw, info->use_short_slot); 3460 rc = mwl8k_set_radio_preamble(hw,
3461 vif->bss_conf.use_short_preamble);
3048 if (rc) 3462 if (rc)
3049 goto out; 3463 goto out;
3464 }
3050 3465
3051 /* Update peer rate info */ 3466 if (changed & BSS_CHANGED_ERP_SLOT) {
3052 rc = mwl8k_cmd_update_sta_db(hw, vif, 3467 rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
3053 MWL8K_STA_DB_MODIFY_ENTRY);
3054 if (rc) 3468 if (rc)
3055 goto out; 3469 goto out;
3470 }
3056 3471
3057 /* Set AID */ 3472 if (vif->bss_conf.assoc &&
3058 rc = mwl8k_cmd_set_aid(hw, vif); 3473 (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
3474 BSS_CHANGED_HT))) {
3475 rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
3059 if (rc) 3476 if (rc)
3060 goto out; 3477 goto out;
3478 }
3061 3479
3480 if (vif->bss_conf.assoc &&
3481 (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
3062 /* 3482 /*
3063 * Finalize the join. Tell rx handler to process 3483 * Finalize the join. Tell rx handler to process
3064 * next beacon from our BSSID. 3484 * next beacon from our BSSID.
3065 */ 3485 */
3066 memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN); 3486 memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
3067 priv->capture_beacon = true; 3487 priv->capture_beacon = true;
3068 } else {
3069 rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
3070 memset(&mwl8k_vif->bss_info, 0,
3071 sizeof(struct ieee80211_bss_conf));
3072 memset(mwl8k_vif->bssid, 0, ETH_ALEN);
3073 } 3488 }
3074 3489
3075out: 3490out:
3076 mwl8k_fw_unlock(hw); 3491 mwl8k_fw_unlock(hw);
3077} 3492}
3078 3493
3494static void
3495mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3496 struct ieee80211_bss_conf *info, u32 changed)
3497{
3498 int rc;
3499
3500 if (mwl8k_fw_lock(hw))
3501 return;
3502
3503 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3504 rc = mwl8k_set_radio_preamble(hw,
3505 vif->bss_conf.use_short_preamble);
3506 if (rc)
3507 goto out;
3508 }
3509
3510 if (changed & BSS_CHANGED_BASIC_RATES) {
3511 int idx;
3512 int rate;
3513
3514 /*
3515 * Use lowest supported basic rate for multicasts
3516 * and management frames (such as probe responses --
3517 * beacons will always go out at 1 Mb/s).
3518 */
3519 idx = ffs(vif->bss_conf.basic_rates);
3520 if (idx)
3521 idx--;
3522
3523 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
3524 rate = mwl8k_rates_24[idx].hw_value;
3525 else
3526 rate = mwl8k_rates_50[idx].hw_value;
3527
3528 mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
3529 }
3530
3531 if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
3532 struct sk_buff *skb;
3533
3534 skb = ieee80211_beacon_get(hw, vif);
3535 if (skb != NULL) {
3536 mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
3537 kfree_skb(skb);
3538 }
3539 }
3540
3541 if (changed & BSS_CHANGED_BEACON_ENABLED)
3542 mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
3543
3544out:
3545 mwl8k_fw_unlock(hw);
3546}
3547
3548static void
3549mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3550 struct ieee80211_bss_conf *info, u32 changed)
3551{
3552 struct mwl8k_priv *priv = hw->priv;
3553
3554 if (!priv->ap_fw)
3555 mwl8k_bss_info_changed_sta(hw, vif, info, changed);
3556 else
3557 mwl8k_bss_info_changed_ap(hw, vif, info, changed);
3558}
3559
3079static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, 3560static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
3080 int mc_count, struct dev_addr_list *mclist) 3561 int mc_count, struct dev_addr_list *mclist)
3081{ 3562{
@@ -3105,7 +3586,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3105 * operation, so refuse to enable sniffer mode if a STA 3586 * operation, so refuse to enable sniffer mode if a STA
3106 * interface is active. 3587 * interface is active.
3107 */ 3588 */
3108 if (priv->vif != NULL) { 3589 if (!list_empty(&priv->vif_list)) {
3109 if (net_ratelimit()) 3590 if (net_ratelimit())
3110 printk(KERN_INFO "%s: not enabling sniffer " 3591 printk(KERN_INFO "%s: not enabling sniffer "
3111 "mode because STA interface is active\n", 3592 "mode because STA interface is active\n",
@@ -3114,7 +3595,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3114 } 3595 }
3115 3596
3116 if (!priv->sniffer_enabled) { 3597 if (!priv->sniffer_enabled) {
3117 if (mwl8k_enable_sniffer(hw, 1)) 3598 if (mwl8k_cmd_enable_sniffer(hw, 1))
3118 return 0; 3599 return 0;
3119 priv->sniffer_enabled = true; 3600 priv->sniffer_enabled = true;
3120 } 3601 }
@@ -3126,6 +3607,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3126 return 1; 3607 return 1;
3127} 3608}
3128 3609
3610static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
3611{
3612 if (!list_empty(&priv->vif_list))
3613 return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
3614
3615 return NULL;
3616}
3617
3129static void mwl8k_configure_filter(struct ieee80211_hw *hw, 3618static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3130 unsigned int changed_flags, 3619 unsigned int changed_flags,
3131 unsigned int *total_flags, 3620 unsigned int *total_flags,
@@ -3163,7 +3652,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3163 } 3652 }
3164 3653
3165 if (priv->sniffer_enabled) { 3654 if (priv->sniffer_enabled) {
3166 mwl8k_enable_sniffer(hw, 0); 3655 mwl8k_cmd_enable_sniffer(hw, 0);
3167 priv->sniffer_enabled = false; 3656 priv->sniffer_enabled = false;
3168 } 3657 }
3169 3658
@@ -3174,7 +3663,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3174 */ 3663 */
3175 mwl8k_cmd_set_pre_scan(hw); 3664 mwl8k_cmd_set_pre_scan(hw);
3176 } else { 3665 } else {
3177 u8 *bssid; 3666 struct mwl8k_vif *mwl8k_vif;
3667 const u8 *bssid;
3178 3668
3179 /* 3669 /*
3180 * Enable the BSS filter. 3670 * Enable the BSS filter.
@@ -3184,9 +3674,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3184 * (where the OUI part needs to be nonzero for 3674 * (where the OUI part needs to be nonzero for
3185 * the BSSID to be accepted by POST_SCAN). 3675 * the BSSID to be accepted by POST_SCAN).
3186 */ 3676 */
3187 bssid = "\x01\x00\x00\x00\x00\x00"; 3677 mwl8k_vif = mwl8k_first_vif(priv);
3188 if (priv->vif != NULL) 3678 if (mwl8k_vif != NULL)
3189 bssid = MWL8K_VIF(priv->vif)->bssid; 3679 bssid = mwl8k_vif->vif->bss_conf.bssid;
3680 else
3681 bssid = "\x01\x00\x00\x00\x00\x00";
3190 3682
3191 mwl8k_cmd_set_post_scan(hw, bssid); 3683 mwl8k_cmd_set_post_scan(hw, bssid);
3192 } 3684 }
@@ -3213,7 +3705,93 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3213 3705
3214static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3706static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3215{ 3707{
3216 return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value); 3708 return mwl8k_cmd_set_rts_threshold(hw, value);
3709}
3710
3711struct mwl8k_sta_notify_item
3712{
3713 struct list_head list;
3714 struct ieee80211_vif *vif;
3715 enum sta_notify_cmd cmd;
3716 struct ieee80211_sta sta;
3717};
3718
3719static void
3720mwl8k_do_sta_notify(struct ieee80211_hw *hw, struct mwl8k_sta_notify_item *s)
3721{
3722 struct mwl8k_priv *priv = hw->priv;
3723
3724 /*
3725 * STA firmware uses UPDATE_STADB, AP firmware uses SET_NEW_STN.
3726 */
3727 if (!priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
3728 int rc;
3729
3730 rc = mwl8k_cmd_update_stadb_add(hw, s->vif, &s->sta);
3731 if (rc >= 0) {
3732 struct ieee80211_sta *sta;
3733
3734 rcu_read_lock();
3735 sta = ieee80211_find_sta(s->vif, s->sta.addr);
3736 if (sta != NULL)
3737 MWL8K_STA(sta)->peer_id = rc;
3738 rcu_read_unlock();
3739 }
3740 } else if (!priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
3741 mwl8k_cmd_update_stadb_del(hw, s->vif, s->sta.addr);
3742 } else if (priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
3743 mwl8k_cmd_set_new_stn_add(hw, s->vif, &s->sta);
3744 } else if (priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
3745 mwl8k_cmd_set_new_stn_del(hw, s->vif, s->sta.addr);
3746 }
3747}
3748
3749static void mwl8k_sta_notify_worker(struct work_struct *work)
3750{
3751 struct mwl8k_priv *priv =
3752 container_of(work, struct mwl8k_priv, sta_notify_worker);
3753 struct ieee80211_hw *hw = priv->hw;
3754
3755 spin_lock_bh(&priv->sta_notify_list_lock);
3756 while (!list_empty(&priv->sta_notify_list)) {
3757 struct mwl8k_sta_notify_item *s;
3758
3759 s = list_entry(priv->sta_notify_list.next,
3760 struct mwl8k_sta_notify_item, list);
3761 list_del(&s->list);
3762
3763 spin_unlock_bh(&priv->sta_notify_list_lock);
3764
3765 mwl8k_do_sta_notify(hw, s);
3766 kfree(s);
3767
3768 spin_lock_bh(&priv->sta_notify_list_lock);
3769 }
3770 spin_unlock_bh(&priv->sta_notify_list_lock);
3771}
3772
3773static void
3774mwl8k_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3775 enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
3776{
3777 struct mwl8k_priv *priv = hw->priv;
3778 struct mwl8k_sta_notify_item *s;
3779
3780 if (cmd != STA_NOTIFY_ADD && cmd != STA_NOTIFY_REMOVE)
3781 return;
3782
3783 s = kmalloc(sizeof(*s), GFP_ATOMIC);
3784 if (s != NULL) {
3785 s->vif = vif;
3786 s->cmd = cmd;
3787 s->sta = *sta;
3788
3789 spin_lock(&priv->sta_notify_list_lock);
3790 list_add_tail(&s->list, &priv->sta_notify_list);
3791 spin_unlock(&priv->sta_notify_list_lock);
3792
3793 ieee80211_queue_work(hw, &priv->sta_notify_worker);
3794 }
3217} 3795}
3218 3796
3219static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, 3797static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3225,14 +3803,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3225 rc = mwl8k_fw_lock(hw); 3803 rc = mwl8k_fw_lock(hw);
3226 if (!rc) { 3804 if (!rc) {
3227 if (!priv->wmm_enabled) 3805 if (!priv->wmm_enabled)
3228 rc = mwl8k_set_wmm(hw, 1); 3806 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
3229 3807
3230 if (!rc) 3808 if (!rc)
3231 rc = mwl8k_set_edca_params(hw, queue, 3809 rc = mwl8k_cmd_set_edca_params(hw, queue,
3232 params->cw_min, 3810 params->cw_min,
3233 params->cw_max, 3811 params->cw_max,
3234 params->aifs, 3812 params->aifs,
3235 params->txop); 3813 params->txop);
3236 3814
3237 mwl8k_fw_unlock(hw); 3815 mwl8k_fw_unlock(hw);
3238 } 3816 }
@@ -3261,7 +3839,23 @@ static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
3261static int mwl8k_get_stats(struct ieee80211_hw *hw, 3839static int mwl8k_get_stats(struct ieee80211_hw *hw,
3262 struct ieee80211_low_level_stats *stats) 3840 struct ieee80211_low_level_stats *stats)
3263{ 3841{
3264 return mwl8k_cmd_802_11_get_stat(hw, stats); 3842 return mwl8k_cmd_get_stat(hw, stats);
3843}
3844
3845static int
3846mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3847 enum ieee80211_ampdu_mlme_action action,
3848 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3849{
3850 switch (action) {
3851 case IEEE80211_AMPDU_RX_START:
3852 case IEEE80211_AMPDU_RX_STOP:
3853 if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
3854 return -ENOTSUPP;
3855 return 0;
3856 default:
3857 return -ENOTSUPP;
3858 }
3265} 3859}
3266 3860
3267static const struct ieee80211_ops mwl8k_ops = { 3861static const struct ieee80211_ops mwl8k_ops = {
@@ -3275,67 +3869,68 @@ static const struct ieee80211_ops mwl8k_ops = {
3275 .prepare_multicast = mwl8k_prepare_multicast, 3869 .prepare_multicast = mwl8k_prepare_multicast,
3276 .configure_filter = mwl8k_configure_filter, 3870 .configure_filter = mwl8k_configure_filter,
3277 .set_rts_threshold = mwl8k_set_rts_threshold, 3871 .set_rts_threshold = mwl8k_set_rts_threshold,
3872 .sta_notify = mwl8k_sta_notify,
3278 .conf_tx = mwl8k_conf_tx, 3873 .conf_tx = mwl8k_conf_tx,
3279 .get_tx_stats = mwl8k_get_tx_stats, 3874 .get_tx_stats = mwl8k_get_tx_stats,
3280 .get_stats = mwl8k_get_stats, 3875 .get_stats = mwl8k_get_stats,
3876 .ampdu_action = mwl8k_ampdu_action,
3281}; 3877};
3282 3878
3283static void mwl8k_tx_reclaim_handler(unsigned long data)
3284{
3285 int i;
3286 struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
3287 struct mwl8k_priv *priv = hw->priv;
3288
3289 spin_lock_bh(&priv->tx_lock);
3290 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3291 mwl8k_txq_reclaim(hw, i, 0);
3292
3293 if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
3294 complete(priv->tx_wait);
3295 priv->tx_wait = NULL;
3296 }
3297 spin_unlock_bh(&priv->tx_lock);
3298}
3299
3300static void mwl8k_finalize_join_worker(struct work_struct *work) 3879static void mwl8k_finalize_join_worker(struct work_struct *work)
3301{ 3880{
3302 struct mwl8k_priv *priv = 3881 struct mwl8k_priv *priv =
3303 container_of(work, struct mwl8k_priv, finalize_join_worker); 3882 container_of(work, struct mwl8k_priv, finalize_join_worker);
3304 struct sk_buff *skb = priv->beacon_skb; 3883 struct sk_buff *skb = priv->beacon_skb;
3305 u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period; 3884 struct mwl8k_vif *mwl8k_vif;
3306 3885
3307 mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim); 3886 mwl8k_vif = mwl8k_first_vif(priv);
3308 dev_kfree_skb(skb); 3887 if (mwl8k_vif != NULL)
3888 mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len,
3889 mwl8k_vif->vif->bss_conf.dtim_period);
3309 3890
3891 dev_kfree_skb(skb);
3310 priv->beacon_skb = NULL; 3892 priv->beacon_skb = NULL;
3311} 3893}
3312 3894
3313enum { 3895enum {
3314 MWL8687 = 0, 3896 MWL8363 = 0,
3897 MWL8687,
3315 MWL8366, 3898 MWL8366,
3316}; 3899};
3317 3900
3318static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { 3901static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
3319 { 3902 [MWL8363] = {
3903 .part_name = "88w8363",
3904 .helper_image = "mwl8k/helper_8363.fw",
3905 .fw_image = "mwl8k/fmimage_8363.fw",
3906 },
3907 [MWL8687] = {
3320 .part_name = "88w8687", 3908 .part_name = "88w8687",
3321 .helper_image = "mwl8k/helper_8687.fw", 3909 .helper_image = "mwl8k/helper_8687.fw",
3322 .fw_image = "mwl8k/fmimage_8687.fw", 3910 .fw_image = "mwl8k/fmimage_8687.fw",
3323 .rxd_ops = &rxd_8687_ops,
3324 .modes = BIT(NL80211_IFTYPE_STATION),
3325 }, 3911 },
3326 { 3912 [MWL8366] = {
3327 .part_name = "88w8366", 3913 .part_name = "88w8366",
3328 .helper_image = "mwl8k/helper_8366.fw", 3914 .helper_image = "mwl8k/helper_8366.fw",
3329 .fw_image = "mwl8k/fmimage_8366.fw", 3915 .fw_image = "mwl8k/fmimage_8366.fw",
3330 .rxd_ops = &rxd_8366_ops, 3916 .ap_rxd_ops = &rxd_8366_ap_ops,
3331 .modes = 0,
3332 }, 3917 },
3333}; 3918};
3334 3919
3920MODULE_FIRMWARE("mwl8k/helper_8363.fw");
3921MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
3922MODULE_FIRMWARE("mwl8k/helper_8687.fw");
3923MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
3924MODULE_FIRMWARE("mwl8k/helper_8366.fw");
3925MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
3926
3335static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { 3927static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3928 { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
3929 { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
3336 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, 3930 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
3337 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, }, 3931 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
3338 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, }, 3932 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
3933 { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
3339 { }, 3934 { },
3340}; 3935};
3341MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); 3936MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3354,6 +3949,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3354 printed_version = 1; 3949 printed_version = 1;
3355 } 3950 }
3356 3951
3952
3357 rc = pci_enable_device(pdev); 3953 rc = pci_enable_device(pdev);
3358 if (rc) { 3954 if (rc) {
3359 printk(KERN_ERR "%s: Cannot enable new PCI device\n", 3955 printk(KERN_ERR "%s: Cannot enable new PCI device\n",
@@ -3370,6 +3966,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3370 3966
3371 pci_set_master(pdev); 3967 pci_set_master(pdev);
3372 3968
3969
3373 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops); 3970 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
3374 if (hw == NULL) { 3971 if (hw == NULL) {
3375 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME); 3972 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
@@ -3377,17 +3974,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3377 goto err_free_reg; 3974 goto err_free_reg;
3378 } 3975 }
3379 3976
3977 SET_IEEE80211_DEV(hw, &pdev->dev);
3978 pci_set_drvdata(pdev, hw);
3979
3380 priv = hw->priv; 3980 priv = hw->priv;
3381 priv->hw = hw; 3981 priv->hw = hw;
3382 priv->pdev = pdev; 3982 priv->pdev = pdev;
3383 priv->device_info = &mwl8k_info_tbl[id->driver_data]; 3983 priv->device_info = &mwl8k_info_tbl[id->driver_data];
3384 priv->rxd_ops = priv->device_info->rxd_ops;
3385 priv->sniffer_enabled = false;
3386 priv->wmm_enabled = false;
3387 priv->pending_tx_pkts = 0;
3388 3984
3389 SET_IEEE80211_DEV(hw, &pdev->dev);
3390 pci_set_drvdata(pdev, hw);
3391 3985
3392 priv->sram = pci_iomap(pdev, 0, 0x10000); 3986 priv->sram = pci_iomap(pdev, 0, 0x10000);
3393 if (priv->sram == NULL) { 3987 if (priv->sram == NULL) {
@@ -3410,16 +4004,46 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3410 } 4004 }
3411 } 4005 }
3412 4006
3413 memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
3414 priv->band.band = IEEE80211_BAND_2GHZ;
3415 priv->band.channels = priv->channels;
3416 priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
3417 priv->band.bitrates = priv->rates;
3418 priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
3419 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
3420 4007
3421 BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates)); 4008 /* Reset firmware and hardware */
3422 memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates)); 4009 mwl8k_hw_reset(priv);
4010
4011 /* Ask userland hotplug daemon for the device firmware */
4012 rc = mwl8k_request_firmware(priv);
4013 if (rc) {
4014 printk(KERN_ERR "%s: Firmware files not found\n",
4015 wiphy_name(hw->wiphy));
4016 goto err_stop_firmware;
4017 }
4018
4019 /* Load firmware into hardware */
4020 rc = mwl8k_load_firmware(hw);
4021 if (rc) {
4022 printk(KERN_ERR "%s: Cannot start firmware\n",
4023 wiphy_name(hw->wiphy));
4024 goto err_stop_firmware;
4025 }
4026
4027 /* Reclaim memory once firmware is successfully loaded */
4028 mwl8k_release_firmware(priv);
4029
4030
4031 if (priv->ap_fw) {
4032 priv->rxd_ops = priv->device_info->ap_rxd_ops;
4033 if (priv->rxd_ops == NULL) {
4034 printk(KERN_ERR "%s: Driver does not have AP "
4035 "firmware image support for this hardware\n",
4036 wiphy_name(hw->wiphy));
4037 goto err_stop_firmware;
4038 }
4039 } else {
4040 priv->rxd_ops = &rxd_sta_ops;
4041 }
4042
4043 priv->sniffer_enabled = false;
4044 priv->wmm_enabled = false;
4045 priv->pending_tx_pkts = 0;
4046
3423 4047
3424 /* 4048 /*
3425 * Extra headroom is the size of the required DMA header 4049 * Extra headroom is the size of the required DMA header
@@ -3432,33 +4056,40 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3432 4056
3433 hw->queues = MWL8K_TX_QUEUES; 4057 hw->queues = MWL8K_TX_QUEUES;
3434 4058
3435 hw->wiphy->interface_modes = priv->device_info->modes;
3436
3437 /* Set rssi and noise values to dBm */ 4059 /* Set rssi and noise values to dBm */
3438 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; 4060 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
3439 hw->vif_data_size = sizeof(struct mwl8k_vif); 4061 hw->vif_data_size = sizeof(struct mwl8k_vif);
3440 priv->vif = NULL; 4062 hw->sta_data_size = sizeof(struct mwl8k_sta);
4063
4064 priv->macids_used = 0;
4065 INIT_LIST_HEAD(&priv->vif_list);
3441 4066
3442 /* Set default radio state and preamble */ 4067 /* Set default radio state and preamble */
3443 priv->radio_on = 0; 4068 priv->radio_on = 0;
3444 priv->radio_short_preamble = 0; 4069 priv->radio_short_preamble = 0;
3445 4070
4071 /* Station database handling */
4072 INIT_WORK(&priv->sta_notify_worker, mwl8k_sta_notify_worker);
4073 spin_lock_init(&priv->sta_notify_list_lock);
4074 INIT_LIST_HEAD(&priv->sta_notify_list);
4075
3446 /* Finalize join worker */ 4076 /* Finalize join worker */
3447 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); 4077 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
3448 4078
3449 /* TX reclaim tasklet */ 4079 /* TX reclaim and RX tasklets. */
3450 tasklet_init(&priv->tx_reclaim_task, 4080 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
3451 mwl8k_tx_reclaim_handler, (unsigned long)hw); 4081 tasklet_disable(&priv->poll_tx_task);
3452 tasklet_disable(&priv->tx_reclaim_task); 4082 tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
4083 tasklet_disable(&priv->poll_rx_task);
3453 4084
3454 /* Power management cookie */ 4085 /* Power management cookie */
3455 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); 4086 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
3456 if (priv->cookie == NULL) 4087 if (priv->cookie == NULL)
3457 goto err_iounmap; 4088 goto err_stop_firmware;
3458 4089
3459 rc = mwl8k_rxq_init(hw, 0); 4090 rc = mwl8k_rxq_init(hw, 0);
3460 if (rc) 4091 if (rc)
3461 goto err_iounmap; 4092 goto err_free_cookie;
3462 rxq_refill(hw, 0, INT_MAX); 4093 rxq_refill(hw, 0, INT_MAX);
3463 4094
3464 mutex_init(&priv->fw_mutex); 4095 mutex_init(&priv->fw_mutex);
@@ -3478,7 +4109,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3478 4109
3479 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 4110 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3480 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4111 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3481 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL); 4112 iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
4113 priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
3482 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); 4114 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
3483 4115
3484 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 4116 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3489,31 +4121,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3489 goto err_free_queues; 4121 goto err_free_queues;
3490 } 4122 }
3491 4123
3492 /* Reset firmware and hardware */
3493 mwl8k_hw_reset(priv);
3494
3495 /* Ask userland hotplug daemon for the device firmware */
3496 rc = mwl8k_request_firmware(priv);
3497 if (rc) {
3498 printk(KERN_ERR "%s: Firmware files not found\n",
3499 wiphy_name(hw->wiphy));
3500 goto err_free_irq;
3501 }
3502
3503 /* Load firmware into hardware */
3504 rc = mwl8k_load_firmware(hw);
3505 if (rc) {
3506 printk(KERN_ERR "%s: Cannot start firmware\n",
3507 wiphy_name(hw->wiphy));
3508 goto err_stop_firmware;
3509 }
3510
3511 /* Reclaim memory once firmware is successfully loaded */
3512 mwl8k_release_firmware(priv);
3513
3514 /* 4124 /*
3515 * Temporarily enable interrupts. Initial firmware host 4125 * Temporarily enable interrupts. Initial firmware host
3516 * commands use interrupts and avoids polling. Disable 4126 * commands use interrupts and avoid polling. Disable
3517 * interrupts when done. 4127 * interrupts when done.
3518 */ 4128 */
3519 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4129 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3529,22 +4139,29 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3529 if (rc) { 4139 if (rc) {
3530 printk(KERN_ERR "%s: Cannot initialise firmware\n", 4140 printk(KERN_ERR "%s: Cannot initialise firmware\n",
3531 wiphy_name(hw->wiphy)); 4141 wiphy_name(hw->wiphy));
3532 goto err_stop_firmware; 4142 goto err_free_irq;
3533 } 4143 }
3534 4144
4145 hw->wiphy->interface_modes = 0;
4146 if (priv->ap_macids_supported)
4147 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
4148 if (priv->sta_macids_supported)
4149 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
4150
4151
3535 /* Turn radio off */ 4152 /* Turn radio off */
3536 rc = mwl8k_cmd_802_11_radio_disable(hw); 4153 rc = mwl8k_cmd_radio_disable(hw);
3537 if (rc) { 4154 if (rc) {
3538 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy)); 4155 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
3539 goto err_stop_firmware; 4156 goto err_free_irq;
3540 } 4157 }
3541 4158
3542 /* Clear MAC address */ 4159 /* Clear MAC address */
3543 rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00"); 4160 rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
3544 if (rc) { 4161 if (rc) {
3545 printk(KERN_ERR "%s: Cannot clear MAC address\n", 4162 printk(KERN_ERR "%s: Cannot clear MAC address\n",
3546 wiphy_name(hw->wiphy)); 4163 wiphy_name(hw->wiphy));
3547 goto err_stop_firmware; 4164 goto err_free_irq;
3548 } 4165 }
3549 4166
3550 /* Disable interrupts */ 4167 /* Disable interrupts */
@@ -3555,7 +4172,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3555 if (rc) { 4172 if (rc) {
3556 printk(KERN_ERR "%s: Cannot register device\n", 4173 printk(KERN_ERR "%s: Cannot register device\n",
3557 wiphy_name(hw->wiphy)); 4174 wiphy_name(hw->wiphy));
3558 goto err_stop_firmware; 4175 goto err_free_queues;
3559 } 4176 }
3560 4177
3561 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n", 4178 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3567,10 +4184,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3567 4184
3568 return 0; 4185 return 0;
3569 4186
3570err_stop_firmware:
3571 mwl8k_hw_reset(priv);
3572 mwl8k_release_firmware(priv);
3573
3574err_free_irq: 4187err_free_irq:
3575 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4188 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3576 free_irq(priv->pdev->irq, hw); 4189 free_irq(priv->pdev->irq, hw);
@@ -3580,11 +4193,16 @@ err_free_queues:
3580 mwl8k_txq_deinit(hw, i); 4193 mwl8k_txq_deinit(hw, i);
3581 mwl8k_rxq_deinit(hw, 0); 4194 mwl8k_rxq_deinit(hw, 0);
3582 4195
3583err_iounmap: 4196err_free_cookie:
3584 if (priv->cookie != NULL) 4197 if (priv->cookie != NULL)
3585 pci_free_consistent(priv->pdev, 4, 4198 pci_free_consistent(priv->pdev, 4,
3586 priv->cookie, priv->cookie_dma); 4199 priv->cookie, priv->cookie_dma);
3587 4200
4201err_stop_firmware:
4202 mwl8k_hw_reset(priv);
4203 mwl8k_release_firmware(priv);
4204
4205err_iounmap:
3588 if (priv->regs != NULL) 4206 if (priv->regs != NULL)
3589 pci_iounmap(pdev, priv->regs); 4207 pci_iounmap(pdev, priv->regs);
3590 4208
@@ -3622,15 +4240,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3622 4240
3623 ieee80211_unregister_hw(hw); 4241 ieee80211_unregister_hw(hw);
3624 4242
3625 /* Remove tx reclaim tasklet */ 4243 /* Remove TX reclaim and RX tasklets. */
3626 tasklet_kill(&priv->tx_reclaim_task); 4244 tasklet_kill(&priv->poll_tx_task);
4245 tasklet_kill(&priv->poll_rx_task);
3627 4246
3628 /* Stop hardware */ 4247 /* Stop hardware */
3629 mwl8k_hw_reset(priv); 4248 mwl8k_hw_reset(priv);
3630 4249
3631 /* Return all skbs to mac80211 */ 4250 /* Return all skbs to mac80211 */
3632 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4251 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3633 mwl8k_txq_reclaim(hw, i, 1); 4252 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
3634 4253
3635 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4254 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3636 mwl8k_txq_deinit(hw, i); 4255 mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c38341..075f446b313 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
274 pci_disable_device(pdev); 274 pci_disable_device(pdev);
275} 275}
276 276
277static struct pci_device_id orinoco_nortel_id_table[] = { 277static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
278 /* Nortel emobility PCI */ 278 /* Nortel emobility PCI */
279 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, 279 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
280 /* Symbol LA-4123 PCI */ 280 /* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e..bda5317cc59 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
212 pci_disable_device(pdev); 212 pci_disable_device(pdev);
213} 213}
214 214
215static struct pci_device_id orinoco_pci_id_table[] = { 215static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
216 /* Intersil Prism 3 */ 216 /* Intersil Prism 3 */
217 {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,}, 217 {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
218 /* Intersil Prism 2.5 */ 218 /* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f..e0d5874ab42 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
310 pci_disable_device(pdev); 310 pci_disable_device(pdev);
311} 311}
312 312
313static struct pci_device_id orinoco_plx_id_table[] = { 313static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
314 {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */ 314 {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
315 {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */ 315 {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
316 {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */ 316 {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc7..88cbc7902aa 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
203 pci_disable_device(pdev); 203 pci_disable_device(pdev);
204} 204}
205 205
206static struct pci_device_id orinoco_tmd_id_table[] = { 206static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
207 {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */ 207 {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
208 {0,}, 208 {0,},
209}; 209};
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 18012dbfb45..26428e4c9c6 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -216,7 +216,7 @@ static void p54_stop(struct ieee80211_hw *dev)
216} 216}
217 217
218static int p54_add_interface(struct ieee80211_hw *dev, 218static int p54_add_interface(struct ieee80211_hw *dev,
219 struct ieee80211_if_init_conf *conf) 219 struct ieee80211_vif *vif)
220{ 220{
221 struct p54_common *priv = dev->priv; 221 struct p54_common *priv = dev->priv;
222 222
@@ -226,28 +226,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
226 return -EOPNOTSUPP; 226 return -EOPNOTSUPP;
227 } 227 }
228 228
229 priv->vif = conf->vif; 229 priv->vif = vif;
230 230
231 switch (conf->type) { 231 switch (vif->type) {
232 case NL80211_IFTYPE_STATION: 232 case NL80211_IFTYPE_STATION:
233 case NL80211_IFTYPE_ADHOC: 233 case NL80211_IFTYPE_ADHOC:
234 case NL80211_IFTYPE_AP: 234 case NL80211_IFTYPE_AP:
235 case NL80211_IFTYPE_MESH_POINT: 235 case NL80211_IFTYPE_MESH_POINT:
236 priv->mode = conf->type; 236 priv->mode = vif->type;
237 break; 237 break;
238 default: 238 default:
239 mutex_unlock(&priv->conf_mutex); 239 mutex_unlock(&priv->conf_mutex);
240 return -EOPNOTSUPP; 240 return -EOPNOTSUPP;
241 } 241 }
242 242
243 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 243 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
244 p54_setup_mac(priv); 244 p54_setup_mac(priv);
245 mutex_unlock(&priv->conf_mutex); 245 mutex_unlock(&priv->conf_mutex);
246 return 0; 246 return 0;
247} 247}
248 248
249static void p54_remove_interface(struct ieee80211_hw *dev, 249static void p54_remove_interface(struct ieee80211_hw *dev,
250 struct ieee80211_if_init_conf *conf) 250 struct ieee80211_vif *vif)
251{ 251{
252 struct p54_common *priv = dev->priv; 252 struct p54_common *priv = dev->priv;
253 253
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a72f7c2577d..57c64659806 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
31MODULE_ALIAS("prism54pci"); 31MODULE_ALIAS("prism54pci");
32MODULE_FIRMWARE("isl3886pci"); 32MODULE_FIRMWARE("isl3886pci");
33 33
34static struct pci_device_id p54p_table[] __devinitdata = { 34static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
35 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 35 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
36 { PCI_DEVICE(0x1260, 0x3890) }, 36 { PCI_DEVICE(0x1260, 0x3890) },
37 /* 3COM 3CRWE154G72 Wireless LAN adapter */ 37 /* 3COM 3CRWE154G72 Wireless LAN adapter */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f..dc14420a9ad 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
39 * driver_data 39 * driver_data
40 * If you have an update for this please contact prism54-devel@prism54.org 40 * If you have an update for this please contact prism54-devel@prism54.org
41 * The latest list can be found at http://prism54.org/supported_cards.php */ 41 * The latest list can be found at http://prism54.org/supported_cards.php */
42static const struct pci_device_id prism54_id_tbl[] = { 42static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
44 { 44 {
45 0x1260, 0x3890, 45 0x1260, 0x3890,
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2ecbedb26e1..305c106fdc1 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2594,23 +2594,9 @@ end:
2594/* 2594/*
2595 * driver/device initialization 2595 * driver/device initialization
2596 */ 2596 */
2597static int bcm4320a_early_init(struct usbnet *usbdev) 2597static void rndis_copy_module_params(struct usbnet *usbdev)
2598{
2599 /* bcm4320a doesn't handle configuration parameters well. Try
2600 * set any and you get partially zeroed mac and broken device.
2601 */
2602
2603 return 0;
2604}
2605
2606static int bcm4320b_early_init(struct usbnet *usbdev)
2607{ 2598{
2608 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2599 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2609 char buf[8];
2610
2611 /* Early initialization settings, setting these won't have effect
2612 * if called after generic_rndis_bind().
2613 */
2614 2600
2615 priv->param_country[0] = modparam_country[0]; 2601 priv->param_country[0] = modparam_country[0];
2616 priv->param_country[1] = modparam_country[1]; 2602 priv->param_country[1] = modparam_country[1];
@@ -2652,6 +2638,32 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
2652 priv->param_workaround_interval = 500; 2638 priv->param_workaround_interval = 500;
2653 else 2639 else
2654 priv->param_workaround_interval = modparam_workaround_interval; 2640 priv->param_workaround_interval = modparam_workaround_interval;
2641}
2642
2643static int bcm4320a_early_init(struct usbnet *usbdev)
2644{
2645 /* copy module parameters for bcm4320a so that iwconfig reports txpower
2646 * and workaround parameter is copied to private structure correctly.
2647 */
2648 rndis_copy_module_params(usbdev);
2649
2650 /* bcm4320a doesn't handle configuration parameters well. Try
2651 * set any and you get partially zeroed mac and broken device.
2652 */
2653
2654 return 0;
2655}
2656
2657static int bcm4320b_early_init(struct usbnet *usbdev)
2658{
2659 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2660 char buf[8];
2661
2662 rndis_copy_module_params(usbdev);
2663
2664 /* Early initialization settings, setting these won't have effect
2665 * if called after generic_rndis_bind().
2666 */
2655 2667
2656 rndis_set_config_parameter_str(usbdev, "Country", priv->param_country); 2668 rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
2657 rndis_set_config_parameter_str(usbdev, "FrameBursting", 2669 rndis_set_config_parameter_str(usbdev, "FrameBursting",
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bf60689aaab..3ca824a91ad 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -54,12 +54,12 @@ config RT61PCI
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI 56config RT2800PCI_PCI
57 tristate 57 boolean
58 depends on PCI 58 depends on PCI
59 default y 59 default y
60 60
61config RT2800PCI_SOC 61config RT2800PCI_SOC
62 tristate 62 boolean
63 depends on RALINK_RT288X || RALINK_RT305X 63 depends on RALINK_RT288X || RALINK_RT305X
64 default y 64 default y
65 65
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e7f46405a41..aa579eb8723 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -451,7 +451,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
451 /* 451 /*
452 * RF2420 chipset don't need any additional actions. 452 * RF2420 chipset don't need any additional actions.
453 */ 453 */
454 if (rt2x00_rf(&rt2x00dev->chip, RF2420)) 454 if (rt2x00_rf(rt2x00dev, RF2420))
455 return; 455 return;
456 456
457 /* 457 /*
@@ -1343,8 +1343,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1343 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1343 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1344 rt2x00_print_chip(rt2x00dev); 1344 rt2x00_print_chip(rt2x00dev);
1345 1345
1346 if (!rt2x00_rf(&rt2x00dev->chip, RF2420) && 1346 if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
1347 !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
1348 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1347 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1349 return -ENODEV; 1348 return -ENODEV;
1350 } 1349 }
@@ -1643,7 +1642,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
1643/* 1642/*
1644 * RT2400pci module information. 1643 * RT2400pci module information.
1645 */ 1644 */
1646static struct pci_device_id rt2400pci_device_table[] = { 1645static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
1647 { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) }, 1646 { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
1648 { 0, } 1647 { 0, }
1649}; 1648};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 408fcfc120f..77ee1df7933 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -440,8 +440,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
440 /* 440 /*
441 * RT2525E and RT5222 need to flip TX I/Q 441 * RT2525E and RT5222 need to flip TX I/Q
442 */ 442 */
443 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || 443 if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
444 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
445 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); 444 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
446 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1); 445 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
447 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1); 446 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
@@ -449,7 +448,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
449 /* 448 /*
450 * RT2525E does not need RX I/Q Flip. 449 * RT2525E does not need RX I/Q Flip.
451 */ 450 */
452 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) 451 if (rt2x00_rf(rt2x00dev, RF2525E))
453 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); 452 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
454 } else { 453 } else {
455 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0); 454 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
@@ -475,14 +474,14 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
475 * Switch on tuning bits. 474 * Switch on tuning bits.
476 * For RT2523 devices we do not need to update the R1 register. 475 * For RT2523 devices we do not need to update the R1 register.
477 */ 476 */
478 if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) 477 if (!rt2x00_rf(rt2x00dev, RF2523))
479 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); 478 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1);
480 rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); 479 rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1);
481 480
482 /* 481 /*
483 * For RT2525 we should first set the channel to half band higher. 482 * For RT2525 we should first set the channel to half band higher.
484 */ 483 */
485 if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 484 if (rt2x00_rf(rt2x00dev, RF2525)) {
486 static const u32 vals[] = { 485 static const u32 vals[] = {
487 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a, 486 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a,
488 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a, 487 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a,
@@ -516,7 +515,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
516 * Switch off tuning bits. 515 * Switch off tuning bits.
517 * For RT2523 devices we do not need to update the R1 register. 516 * For RT2523 devices we do not need to update the R1 register.
518 */ 517 */
519 if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) { 518 if (!rt2x00_rf(rt2x00dev, RF2523)) {
520 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); 519 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0);
521 rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); 520 rt2500pci_rf_write(rt2x00dev, 1, rf->rf1);
522 } 521 }
@@ -640,7 +639,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
640 * up to version C the link tuning should halt after 20 639 * up to version C the link tuning should halt after 20
641 * seconds while being associated. 640 * seconds while being associated.
642 */ 641 */
643 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D && 642 if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D &&
644 rt2x00dev->intf_associated && count > 20) 643 rt2x00dev->intf_associated && count > 20)
645 return; 644 return;
646 645
@@ -650,7 +649,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
650 * should go straight to dynamic CCA tuning when they 649 * should go straight to dynamic CCA tuning when they
651 * are not associated. 650 * are not associated.
652 */ 651 */
653 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D || 652 if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D ||
654 !rt2x00dev->intf_associated) 653 !rt2x00dev->intf_associated)
655 goto dynamic_cca_tune; 654 goto dynamic_cca_tune;
656 655
@@ -1507,12 +1506,12 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1507 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1506 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1508 rt2x00_print_chip(rt2x00dev); 1507 rt2x00_print_chip(rt2x00dev);
1509 1508
1510 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && 1509 if (!rt2x00_rf(rt2x00dev, RF2522) &&
1511 !rt2x00_rf(&rt2x00dev->chip, RF2523) && 1510 !rt2x00_rf(rt2x00dev, RF2523) &&
1512 !rt2x00_rf(&rt2x00dev->chip, RF2524) && 1511 !rt2x00_rf(rt2x00dev, RF2524) &&
1513 !rt2x00_rf(&rt2x00dev->chip, RF2525) && 1512 !rt2x00_rf(rt2x00dev, RF2525) &&
1514 !rt2x00_rf(&rt2x00dev->chip, RF2525E) && 1513 !rt2x00_rf(rt2x00dev, RF2525E) &&
1515 !rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1514 !rt2x00_rf(rt2x00dev, RF5222)) {
1516 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1515 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1517 return -ENODEV; 1516 return -ENODEV;
1518 } 1517 }
@@ -1744,22 +1743,22 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1744 spec->supported_bands = SUPPORT_BAND_2GHZ; 1743 spec->supported_bands = SUPPORT_BAND_2GHZ;
1745 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1744 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1746 1745
1747 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1746 if (rt2x00_rf(rt2x00dev, RF2522)) {
1748 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1747 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
1749 spec->channels = rf_vals_bg_2522; 1748 spec->channels = rf_vals_bg_2522;
1750 } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { 1749 } else if (rt2x00_rf(rt2x00dev, RF2523)) {
1751 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); 1750 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
1752 spec->channels = rf_vals_bg_2523; 1751 spec->channels = rf_vals_bg_2523;
1753 } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { 1752 } else if (rt2x00_rf(rt2x00dev, RF2524)) {
1754 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); 1753 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
1755 spec->channels = rf_vals_bg_2524; 1754 spec->channels = rf_vals_bg_2524;
1756 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 1755 } else if (rt2x00_rf(rt2x00dev, RF2525)) {
1757 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); 1756 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
1758 spec->channels = rf_vals_bg_2525; 1757 spec->channels = rf_vals_bg_2525;
1759 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 1758 } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
1760 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1759 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1761 spec->channels = rf_vals_bg_2525e; 1760 spec->channels = rf_vals_bg_2525e;
1762 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1761 } else if (rt2x00_rf(rt2x00dev, RF5222)) {
1763 spec->supported_bands |= SUPPORT_BAND_5GHZ; 1762 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1764 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1763 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1765 spec->channels = rf_vals_5222; 1764 spec->channels = rf_vals_5222;
@@ -1941,7 +1940,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
1941/* 1940/*
1942 * RT2500pci module information. 1941 * RT2500pci module information.
1943 */ 1942 */
1944static struct pci_device_id rt2500pci_device_table[] = { 1943static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
1945 { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) }, 1944 { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
1946 { 0, } 1945 { 0, }
1947}; 1946};
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 83f2592c59d..9e6f865c57f 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -565,8 +565,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
565 /* 565 /*
566 * RT2525E and RT5222 need to flip TX I/Q 566 * RT2525E and RT5222 need to flip TX I/Q
567 */ 567 */
568 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || 568 if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
569 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
570 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); 569 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
571 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); 570 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
572 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); 571 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
@@ -574,7 +573,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
574 /* 573 /*
575 * RT2525E does not need RX I/Q Flip. 574 * RT2525E does not need RX I/Q Flip.
576 */ 575 */
577 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) 576 if (rt2x00_rf(rt2x00dev, RF2525E))
578 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); 577 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
579 } else { 578 } else {
580 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); 579 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@ -598,7 +597,7 @@ static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
598 /* 597 /*
599 * For RT2525E we should first set the channel to half band higher. 598 * For RT2525E we should first set the channel to half band higher.
600 */ 599 */
601 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 600 if (rt2x00_rf(rt2x00dev, RF2525E)) {
602 static const u32 vals[] = { 601 static const u32 vals[] = {
603 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, 602 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
604 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, 603 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@ -793,7 +792,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
793 rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1); 792 rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
794 rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); 793 rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
795 794
796 if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) { 795 if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
797 rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg); 796 rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
798 rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0); 797 rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
799 } else { 798 } else {
@@ -1411,19 +1410,18 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1411 rt2x00_set_chip(rt2x00dev, RT2570, value, reg); 1410 rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
1412 rt2x00_print_chip(rt2x00dev); 1411 rt2x00_print_chip(rt2x00dev);
1413 1412
1414 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) || 1413 if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0) ||
1415 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { 1414 rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
1416
1417 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1415 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1418 return -ENODEV; 1416 return -ENODEV;
1419 } 1417 }
1420 1418
1421 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && 1419 if (!rt2x00_rf(rt2x00dev, RF2522) &&
1422 !rt2x00_rf(&rt2x00dev->chip, RF2523) && 1420 !rt2x00_rf(rt2x00dev, RF2523) &&
1423 !rt2x00_rf(&rt2x00dev->chip, RF2524) && 1421 !rt2x00_rf(rt2x00dev, RF2524) &&
1424 !rt2x00_rf(&rt2x00dev->chip, RF2525) && 1422 !rt2x00_rf(rt2x00dev, RF2525) &&
1425 !rt2x00_rf(&rt2x00dev->chip, RF2525E) && 1423 !rt2x00_rf(rt2x00dev, RF2525E) &&
1426 !rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1424 !rt2x00_rf(rt2x00dev, RF5222)) {
1427 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1425 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1428 return -ENODEV; 1426 return -ENODEV;
1429 } 1427 }
@@ -1667,22 +1665,22 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1667 spec->supported_bands = SUPPORT_BAND_2GHZ; 1665 spec->supported_bands = SUPPORT_BAND_2GHZ;
1668 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1666 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1669 1667
1670 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1668 if (rt2x00_rf(rt2x00dev, RF2522)) {
1671 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1669 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
1672 spec->channels = rf_vals_bg_2522; 1670 spec->channels = rf_vals_bg_2522;
1673 } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { 1671 } else if (rt2x00_rf(rt2x00dev, RF2523)) {
1674 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); 1672 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
1675 spec->channels = rf_vals_bg_2523; 1673 spec->channels = rf_vals_bg_2523;
1676 } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { 1674 } else if (rt2x00_rf(rt2x00dev, RF2524)) {
1677 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); 1675 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
1678 spec->channels = rf_vals_bg_2524; 1676 spec->channels = rf_vals_bg_2524;
1679 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 1677 } else if (rt2x00_rf(rt2x00dev, RF2525)) {
1680 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); 1678 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
1681 spec->channels = rf_vals_bg_2525; 1679 spec->channels = rf_vals_bg_2525;
1682 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 1680 } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
1683 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1681 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1684 spec->channels = rf_vals_bg_2525e; 1682 spec->channels = rf_vals_bg_2525e;
1685 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1683 } else if (rt2x00_rf(rt2x00dev, RF5222)) {
1686 spec->supported_bands |= SUPPORT_BAND_5GHZ; 1684 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1687 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1685 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1688 spec->channels = rf_vals_5222; 1686 spec->channels = rf_vals_5222;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9deae41cb78..529a37364eb 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
37#include <linux/module.h> 37#include <linux/module.h>
38 38
39#include "rt2x00.h" 39#include "rt2x00.h"
40#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) 40#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
41#include "rt2x00usb.h" 41#include "rt2x00usb.h"
42#endif 42#endif
43#include "rt2800lib.h" 43#include "rt2800lib.h"
@@ -220,8 +220,7 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
220 /* 220 /*
221 * RT2880 and RT3052 don't support MCU requests. 221 * RT2880 and RT3052 don't support MCU requests.
222 */ 222 */
223 if (rt2x00_rt(&rt2x00dev->chip, RT2880) || 223 if (rt2x00_rt(rt2x00dev, RT2880) || rt2x00_rt(rt2x00dev, RT3052))
224 rt2x00_rt(&rt2x00dev->chip, RT3052))
225 return; 224 return;
226 225
227 mutex_lock(&rt2x00dev->csr_mutex); 226 mutex_lock(&rt2x00dev->csr_mutex);
@@ -246,6 +245,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
246} 245}
247EXPORT_SYMBOL_GPL(rt2800_mcu_request); 246EXPORT_SYMBOL_GPL(rt2800_mcu_request);
248 247
248int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
249{
250 unsigned int i;
251 u32 reg;
252
253 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
254 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
255 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
256 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
257 return 0;
258
259 msleep(1);
260 }
261
262 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
263 return -EACCES;
264}
265EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
266
249#ifdef CONFIG_RT2X00_LIB_DEBUGFS 267#ifdef CONFIG_RT2X00_LIB_DEBUGFS
250const struct rt2x00debug rt2800_rt2x00debug = { 268const struct rt2x00debug rt2800_rt2x00debug = {
251 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
@@ -348,7 +366,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
348 return 0; 366 return 0;
349} 367}
350 368
351void rt2800_init_led(struct rt2x00_dev *rt2x00dev, 369static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
352 struct rt2x00_led *led, enum led_type type) 370 struct rt2x00_led *led, enum led_type type)
353{ 371{
354 led->rt2x00dev = rt2x00dev; 372 led->rt2x00dev = rt2x00dev;
@@ -357,7 +375,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
357 led->led_dev.blink_set = rt2800_blink_set; 375 led->led_dev.blink_set = rt2800_blink_set;
358 led->flags = LED_INITIALIZED; 376 led->flags = LED_INITIALIZED;
359} 377}
360EXPORT_SYMBOL_GPL(rt2800_init_led);
361#endif /* CONFIG_RT2X00_LIB_LEDS */ 378#endif /* CONFIG_RT2X00_LIB_LEDS */
362 379
363/* 380/*
@@ -806,12 +823,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
806 unsigned int tx_pin; 823 unsigned int tx_pin;
807 u8 bbp; 824 u8 bbp;
808 825
809 if ((rt2x00_rt(&rt2x00dev->chip, RT3070) || 826 if ((rt2x00_rt(rt2x00dev, RT3070) ||
810 rt2x00_rt(&rt2x00dev->chip, RT3090)) && 827 rt2x00_rt(rt2x00dev, RT3090)) &&
811 (rt2x00_rf(&rt2x00dev->chip, RF2020) || 828 (rt2x00_rf(rt2x00dev, RF2020) ||
812 rt2x00_rf(&rt2x00dev->chip, RF3020) || 829 rt2x00_rf(rt2x00dev, RF3020) ||
813 rt2x00_rf(&rt2x00dev->chip, RF3021) || 830 rt2x00_rf(rt2x00dev, RF3021) ||
814 rt2x00_rf(&rt2x00dev->chip, RF3022))) 831 rt2x00_rf(rt2x00dev, RF3022)))
815 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info); 832 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
816 else 833 else
817 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info); 834 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
@@ -878,7 +895,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
878 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf)); 895 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
879 rt2800_bbp_write(rt2x00dev, 3, bbp); 896 rt2800_bbp_write(rt2x00dev, 3, bbp);
880 897
881 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) { 898 if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
882 if (conf_is_ht40(conf)) { 899 if (conf_is_ht40(conf)) {
883 rt2800_bbp_write(rt2x00dev, 69, 0x1a); 900 rt2800_bbp_write(rt2x00dev, 69, 0x1a);
884 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 901 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -1041,7 +1058,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1041{ 1058{
1042 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 1059 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1043 if (rt2x00_intf_is_usb(rt2x00dev) && 1060 if (rt2x00_intf_is_usb(rt2x00dev) &&
1044 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) 1061 rt2x00_rev(rt2x00dev) == RT3070_VERSION)
1045 return 0x1c + (2 * rt2x00dev->lna_gain); 1062 return 0x1c + (2 * rt2x00dev->lna_gain);
1046 else 1063 else
1047 return 0x2e + rt2x00dev->lna_gain; 1064 return 0x2e + rt2x00dev->lna_gain;
@@ -1072,7 +1089,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
1072void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 1089void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
1073 const u32 count) 1090 const u32 count)
1074{ 1091{
1075 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) 1092 if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)
1076 return; 1093 return;
1077 1094
1078 /* 1095 /*
@@ -1121,7 +1138,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1121 1138
1122 if (rt2x00_intf_is_usb(rt2x00dev)) { 1139 if (rt2x00_intf_is_usb(rt2x00dev)) {
1123 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); 1140 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1124#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) 1141#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
1125 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 1142 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1126 USB_MODE_RESET, REGISTER_TIMEOUT); 1143 USB_MODE_RESET, REGISTER_TIMEOUT);
1127#endif 1144#endif
@@ -1158,7 +1175,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1158 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1175 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1159 1176
1160 if (rt2x00_intf_is_usb(rt2x00dev) && 1177 if (rt2x00_intf_is_usb(rt2x00dev) &&
1161 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) { 1178 rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
1162 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 1179 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1163 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 1180 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1164 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 1181 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -1185,8 +1202,8 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1185 1202
1186 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg); 1203 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
1187 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); 1204 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
1188 if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION && 1205 if (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION &&
1189 rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION) 1206 rt2x00_rev(rt2x00dev) < RT3070_VERSION)
1190 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2); 1207 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
1191 else 1208 else
1192 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1); 1209 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1465,22 +1482,22 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1465 rt2800_bbp_write(rt2x00dev, 103, 0x00); 1482 rt2800_bbp_write(rt2x00dev, 103, 0x00);
1466 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1483 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1467 1484
1468 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) { 1485 if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
1469 rt2800_bbp_write(rt2x00dev, 69, 0x16); 1486 rt2800_bbp_write(rt2x00dev, 69, 0x16);
1470 rt2800_bbp_write(rt2x00dev, 73, 0x12); 1487 rt2800_bbp_write(rt2x00dev, 73, 0x12);
1471 } 1488 }
1472 1489
1473 if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION) 1490 if (rt2x00_rev(rt2x00dev) > RT2860D_VERSION)
1474 rt2800_bbp_write(rt2x00dev, 84, 0x19); 1491 rt2800_bbp_write(rt2x00dev, 84, 0x19);
1475 1492
1476 if (rt2x00_intf_is_usb(rt2x00dev) && 1493 if (rt2x00_intf_is_usb(rt2x00dev) &&
1477 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) { 1494 rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
1478 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 1495 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
1479 rt2800_bbp_write(rt2x00dev, 84, 0x99); 1496 rt2800_bbp_write(rt2x00dev, 84, 0x99);
1480 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1497 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1481 } 1498 }
1482 1499
1483 if (rt2x00_rt(&rt2x00dev->chip, RT3052)) { 1500 if (rt2x00_rt(rt2x00dev, RT3052)) {
1484 rt2800_bbp_write(rt2x00dev, 31, 0x08); 1501 rt2800_bbp_write(rt2x00dev, 31, 0x08);
1485 rt2800_bbp_write(rt2x00dev, 78, 0x0e); 1502 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
1486 rt2800_bbp_write(rt2x00dev, 80, 0x08); 1503 rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1566,13 +1583,13 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1566 u8 bbp; 1583 u8 bbp;
1567 1584
1568 if (rt2x00_intf_is_usb(rt2x00dev) && 1585 if (rt2x00_intf_is_usb(rt2x00dev) &&
1569 rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION) 1586 rt2x00_rev(rt2x00dev) != RT3070_VERSION)
1570 return 0; 1587 return 0;
1571 1588
1572 if (rt2x00_intf_is_pci(rt2x00dev)) { 1589 if (rt2x00_intf_is_pci(rt2x00dev)) {
1573 if (!rt2x00_rf(&rt2x00dev->chip, RF3020) && 1590 if (!rt2x00_rf(rt2x00dev, RF3020) &&
1574 !rt2x00_rf(&rt2x00dev->chip, RF3021) && 1591 !rt2x00_rf(rt2x00dev, RF3021) &&
1575 !rt2x00_rf(&rt2x00dev->chip, RF3022)) 1592 !rt2x00_rf(rt2x00dev, RF3022))
1576 return 0; 1593 return 0;
1577 } 1594 }
1578 1595
@@ -1737,7 +1754,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1737 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820); 1754 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
1738 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 1755 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
1739 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 1756 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
1740 } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) { 1757 } else if (rt2x00_rev(rt2x00dev) < RT2883_VERSION) {
1741 /* 1758 /*
1742 * There is a max of 2 RX streams for RT28x0 series 1759 * There is a max of 2 RX streams for RT28x0 series
1743 */ 1760 */
@@ -1839,17 +1856,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1839 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1856 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1840 1857
1841 if (rt2x00_intf_is_usb(rt2x00dev)) { 1858 if (rt2x00_intf_is_usb(rt2x00dev)) {
1842 struct rt2x00_chip *chip = &rt2x00dev->chip;
1843
1844 /* 1859 /*
1845 * The check for rt2860 is not a typo, some rt2870 hardware 1860 * The check for rt2860 is not a typo, some rt2870 hardware
1846 * identifies itself as rt2860 in the CSR register. 1861 * identifies itself as rt2860 in the CSR register.
1847 */ 1862 */
1848 if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) || 1863 if (rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28600000) ||
1849 rt2x00_check_rev(chip, 0xfff00000, 0x28700000) || 1864 rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28700000) ||
1850 rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) { 1865 rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28800000)) {
1851 rt2x00_set_chip_rt(rt2x00dev, RT2870); 1866 rt2x00_set_chip_rt(rt2x00dev, RT2870);
1852 } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) { 1867 } else if (rt2x00_check_rev(rt2x00dev, 0xffff0000, 0x30700000)) {
1853 rt2x00_set_chip_rt(rt2x00dev, RT3070); 1868 rt2x00_set_chip_rt(rt2x00dev, RT3070);
1854 } else { 1869 } else {
1855 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1870 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
@@ -1858,14 +1873,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1858 } 1873 }
1859 rt2x00_print_chip(rt2x00dev); 1874 rt2x00_print_chip(rt2x00dev);
1860 1875
1861 if (!rt2x00_rf(&rt2x00dev->chip, RF2820) && 1876 if (!rt2x00_rf(rt2x00dev, RF2820) &&
1862 !rt2x00_rf(&rt2x00dev->chip, RF2850) && 1877 !rt2x00_rf(rt2x00dev, RF2850) &&
1863 !rt2x00_rf(&rt2x00dev->chip, RF2720) && 1878 !rt2x00_rf(rt2x00dev, RF2720) &&
1864 !rt2x00_rf(&rt2x00dev->chip, RF2750) && 1879 !rt2x00_rf(rt2x00dev, RF2750) &&
1865 !rt2x00_rf(&rt2x00dev->chip, RF3020) && 1880 !rt2x00_rf(rt2x00dev, RF3020) &&
1866 !rt2x00_rf(&rt2x00dev->chip, RF2020) && 1881 !rt2x00_rf(rt2x00dev, RF2020) &&
1867 !rt2x00_rf(&rt2x00dev->chip, RF3021) && 1882 !rt2x00_rf(rt2x00dev, RF3021) &&
1868 !rt2x00_rf(&rt2x00dev->chip, RF3022)) { 1883 !rt2x00_rf(rt2x00dev, RF3022) &&
1884 !rt2x00_rf(rt2x00dev, RF3052)) {
1869 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1885 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1870 return -ENODEV; 1886 return -ENODEV;
1871 } 1887 }
@@ -2013,7 +2029,6 @@ static const struct rf_channel rf_vals_302x[] = {
2013 2029
2014int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2030int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2015{ 2031{
2016 struct rt2x00_chip *chip = &rt2x00dev->chip;
2017 struct hw_mode_spec *spec = &rt2x00dev->spec; 2032 struct hw_mode_spec *spec = &rt2x00dev->spec;
2018 struct channel_info *info; 2033 struct channel_info *info;
2019 char *tx_power1; 2034 char *tx_power1;
@@ -2049,19 +2064,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2049 spec->supported_bands = SUPPORT_BAND_2GHZ; 2064 spec->supported_bands = SUPPORT_BAND_2GHZ;
2050 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2065 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2051 2066
2052 if (rt2x00_rf(chip, RF2820) || 2067 if (rt2x00_rf(rt2x00dev, RF2820) ||
2053 rt2x00_rf(chip, RF2720) || 2068 rt2x00_rf(rt2x00dev, RF2720) ||
2054 (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) { 2069 rt2x00_rf(rt2x00dev, RF3052)) {
2055 spec->num_channels = 14; 2070 spec->num_channels = 14;
2056 spec->channels = rf_vals; 2071 spec->channels = rf_vals;
2057 } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) { 2072 } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
2058 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2073 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2059 spec->num_channels = ARRAY_SIZE(rf_vals); 2074 spec->num_channels = ARRAY_SIZE(rf_vals);
2060 spec->channels = rf_vals; 2075 spec->channels = rf_vals;
2061 } else if (rt2x00_rf(chip, RF3020) || 2076 } else if (rt2x00_rf(rt2x00dev, RF3020) ||
2062 rt2x00_rf(chip, RF2020) || 2077 rt2x00_rf(rt2x00dev, RF2020) ||
2063 rt2x00_rf(chip, RF3021) || 2078 rt2x00_rf(rt2x00dev, RF3021) ||
2064 rt2x00_rf(chip, RF3022)) { 2079 rt2x00_rf(rt2x00dev, RF3022)) {
2065 spec->num_channels = ARRAY_SIZE(rf_vals_302x); 2080 spec->num_channels = ARRAY_SIZE(rf_vals_302x);
2066 spec->channels = rf_vals_302x; 2081 spec->channels = rf_vals_302x;
2067 } 2082 }
@@ -2069,7 +2084,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2069 /* 2084 /*
2070 * Initialize HT information. 2085 * Initialize HT information.
2071 */ 2086 */
2072 if (!rt2x00_rf(chip, RF2020)) 2087 if (!rt2x00_rf(rt2x00dev, RF2020))
2073 spec->ht.ht_supported = true; 2088 spec->ht.ht_supported = true;
2074 else 2089 else
2075 spec->ht.ht_supported = false; 2090 spec->ht.ht_supported = false;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac..ebabeae62d1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
114extern const struct rt2x00debug rt2800_rt2x00debug; 114extern const struct rt2x00debug rt2800_rt2x00debug;
115 115
116int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev); 116int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
117void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
118 struct rt2x00_led *led, enum led_type type);
119int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, 117int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
120 struct rt2x00lib_crypto *crypto, 118 struct rt2x00lib_crypto *crypto,
121 struct ieee80211_key_conf *key); 119 struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
139int rt2800_init_registers(struct rt2x00_dev *rt2x00dev); 137int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
140int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev); 138int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
141int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev); 139int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
140int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
142 141
143int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); 142int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
144void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); 143void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dfc886fcb44..d64181cbc9c 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -48,14 +48,6 @@
48#include "rt2800.h" 48#include "rt2800.h"
49#include "rt2800pci.h" 49#include "rt2800pci.h"
50 50
51#ifdef CONFIG_RT2800PCI_PCI_MODULE
52#define CONFIG_RT2800PCI_PCI
53#endif
54
55#ifdef CONFIG_RT2800PCI_WISOC_MODULE
56#define CONFIG_RT2800PCI_WISOC
57#endif
58
59/* 51/*
60 * Allow hardware encryption to be disabled. 52 * Allow hardware encryption to be disabled.
61 */ 53 */
@@ -87,7 +79,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
87 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 79 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
88} 80}
89 81
90#ifdef CONFIG_RT2800PCI_WISOC 82#ifdef CONFIG_RT2800PCI_SOC
91static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 83static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
92{ 84{
93 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */ 85 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
@@ -98,7 +90,7 @@ static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
98static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 90static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
99{ 91{
100} 92}
101#endif /* CONFIG_RT2800PCI_WISOC */ 93#endif /* CONFIG_RT2800PCI_SOC */
102 94
103#ifdef CONFIG_RT2800PCI_PCI 95#ifdef CONFIG_RT2800PCI_PCI
104static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 96static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -461,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
461 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 453 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
462} 454}
463 455
464static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
465{
466 unsigned int i;
467 u32 reg;
468
469 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
470 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
471 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
472 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
473 return 0;
474
475 msleep(1);
476 }
477
478 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
479 return -EACCES;
480}
481
482static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) 456static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
483{ 457{
484 u32 reg; 458 u32 reg;
@@ -487,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
487 /* 461 /*
488 * Initialize all registers. 462 * Initialize all registers.
489 */ 463 */
490 if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) || 464 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
491 rt2800pci_init_queues(rt2x00dev) || 465 rt2800pci_init_queues(rt2x00dev) ||
492 rt2800_init_registers(rt2x00dev) || 466 rt2800_init_registers(rt2x00dev) ||
493 rt2800pci_wait_wpdma_ready(rt2x00dev) || 467 rt2800_wait_wpdma_ready(rt2x00dev) ||
494 rt2800_init_bbp(rt2x00dev) || 468 rt2800_init_bbp(rt2x00dev) ||
495 rt2800_init_rfcsr(rt2x00dev))) 469 rt2800_init_rfcsr(rt2x00dev)))
496 return -EIO; 470 return -EIO;
@@ -570,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
570 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 544 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
571 545
572 /* Wait for DMA, ignore error */ 546 /* Wait for DMA, ignore error */
573 rt2800pci_wait_wpdma_ready(rt2x00dev); 547 rt2800_wait_wpdma_ready(rt2x00dev);
574} 548}
575 549
576static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 550static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -835,7 +809,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
835 struct rxdone_entry_desc *rxdesc) 809 struct rxdone_entry_desc *rxdesc)
836{ 810{
837 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 811 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
838 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
839 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 812 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
840 __le32 *rxd = entry_priv->desc; 813 __le32 *rxd = entry_priv->desc;
841 __le32 *rxwi = (__le32 *)entry->skb->data; 814 __le32 *rxwi = (__le32 *)entry->skb->data;
@@ -883,10 +856,8 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
883 if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS)) 856 if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
884 rxdesc->dev_flags |= RXDONE_MY_BSS; 857 rxdesc->dev_flags |= RXDONE_MY_BSS;
885 858
886 if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) { 859 if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
887 rxdesc->dev_flags |= RXDONE_L2PAD; 860 rxdesc->dev_flags |= RXDONE_L2PAD;
888 skbdesc->flags |= SKBDESC_L2_PADDED;
889 }
890 861
891 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) 862 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
892 rxdesc->flags |= RX_FLAG_SHORT_GI; 863 rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -927,7 +898,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
927 * Remove TXWI descriptor from start of buffer. 898 * Remove TXWI descriptor from start of buffer.
928 */ 899 */
929 skb_pull(entry->skb, RXWI_DESC_SIZE); 900 skb_pull(entry->skb, RXWI_DESC_SIZE);
930 skb_trim(entry->skb, rxdesc->size);
931} 901}
932 902
933/* 903/*
@@ -1133,8 +1103,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1133 /* 1103 /*
1134 * This device requires firmware. 1104 * This device requires firmware.
1135 */ 1105 */
1136 if (!rt2x00_rt(&rt2x00dev->chip, RT2880) && 1106 if (!rt2x00_rt(rt2x00dev, RT2880) && !rt2x00_rt(rt2x00dev, RT3052))
1137 !rt2x00_rt(&rt2x00dev->chip, RT3052))
1138 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 1107 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
1139 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1108 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
1140 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 1109 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
@@ -1221,7 +1190,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
1221/* 1190/*
1222 * RT2800pci module information. 1191 * RT2800pci module information.
1223 */ 1192 */
1224static struct pci_device_id rt2800pci_device_table[] = { 1193static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1225 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1194 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
1226 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1195 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
1227 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1196 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1255,7 +1224,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1255#endif /* CONFIG_RT2800PCI_PCI */ 1224#endif /* CONFIG_RT2800PCI_PCI */
1256MODULE_LICENSE("GPL"); 1225MODULE_LICENSE("GPL");
1257 1226
1258#ifdef CONFIG_RT2800PCI_WISOC 1227#ifdef CONFIG_RT2800PCI_SOC
1259#if defined(CONFIG_RALINK_RT288X) 1228#if defined(CONFIG_RALINK_RT288X)
1260__rt2x00soc_probe(RT2880, &rt2800pci_ops); 1229__rt2x00soc_probe(RT2880, &rt2800pci_ops);
1261#elif defined(CONFIG_RALINK_RT305X) 1230#elif defined(CONFIG_RALINK_RT305X)
@@ -1273,7 +1242,7 @@ static struct platform_driver rt2800soc_driver = {
1273 .suspend = rt2x00soc_suspend, 1242 .suspend = rt2x00soc_suspend,
1274 .resume = rt2x00soc_resume, 1243 .resume = rt2x00soc_resume,
1275}; 1244};
1276#endif /* CONFIG_RT2800PCI_WISOC */ 1245#endif /* CONFIG_RT2800PCI_SOC */
1277 1246
1278#ifdef CONFIG_RT2800PCI_PCI 1247#ifdef CONFIG_RT2800PCI_PCI
1279static struct pci_driver rt2800pci_driver = { 1248static struct pci_driver rt2800pci_driver = {
@@ -1290,7 +1259,7 @@ static int __init rt2800pci_init(void)
1290{ 1259{
1291 int ret = 0; 1260 int ret = 0;
1292 1261
1293#ifdef CONFIG_RT2800PCI_WISOC 1262#ifdef CONFIG_RT2800PCI_SOC
1294 ret = platform_driver_register(&rt2800soc_driver); 1263 ret = platform_driver_register(&rt2800soc_driver);
1295 if (ret) 1264 if (ret)
1296 return ret; 1265 return ret;
@@ -1298,7 +1267,7 @@ static int __init rt2800pci_init(void)
1298#ifdef CONFIG_RT2800PCI_PCI 1267#ifdef CONFIG_RT2800PCI_PCI
1299 ret = pci_register_driver(&rt2800pci_driver); 1268 ret = pci_register_driver(&rt2800pci_driver);
1300 if (ret) { 1269 if (ret) {
1301#ifdef CONFIG_RT2800PCI_WISOC 1270#ifdef CONFIG_RT2800PCI_SOC
1302 platform_driver_unregister(&rt2800soc_driver); 1271 platform_driver_unregister(&rt2800soc_driver);
1303#endif 1272#endif
1304 return ret; 1273 return ret;
@@ -1313,7 +1282,7 @@ static void __exit rt2800pci_exit(void)
1313#ifdef CONFIG_RT2800PCI_PCI 1282#ifdef CONFIG_RT2800PCI_PCI
1314 pci_unregister_driver(&rt2800pci_driver); 1283 pci_unregister_driver(&rt2800pci_driver);
1315#endif 1284#endif
1316#ifdef CONFIG_RT2800PCI_WISOC 1285#ifdef CONFIG_RT2800PCI_SOC
1317 platform_driver_unregister(&rt2800soc_driver); 1286 platform_driver_unregister(&rt2800soc_driver);
1318#endif 1287#endif
1319} 1288}
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ab95346cf6a..82755cf8b73 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -92,7 +92,7 @@ static bool rt2800usb_check_crc(const u8 *data, const size_t len)
92static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev, 92static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
93 const u8 *data, const size_t len) 93 const u8 *data, const size_t len)
94{ 94{
95 u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff; 95 u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
96 size_t offset = 0; 96 size_t offset = 0;
97 97
98 /* 98 /*
@@ -138,7 +138,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
138 u32 reg; 138 u32 reg;
139 u32 offset; 139 u32 offset;
140 u32 length; 140 u32 length;
141 u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff; 141 u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
142 142
143 /* 143 /*
144 * Check which section of the firmware we need. 144 * Check which section of the firmware we need.
@@ -248,24 +248,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
248 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 248 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
249} 249}
250 250
251static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
252{
253 unsigned int i;
254 u32 reg;
255
256 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
257 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
258 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
259 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
260 return 0;
261
262 msleep(1);
263 }
264
265 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
266 return -EACCES;
267}
268
269static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) 251static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
270{ 252{
271 u32 reg; 253 u32 reg;
@@ -274,7 +256,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
274 /* 256 /*
275 * Initialize all registers. 257 * Initialize all registers.
276 */ 258 */
277 if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) || 259 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
278 rt2800_init_registers(rt2x00dev) || 260 rt2800_init_registers(rt2x00dev) ||
279 rt2800_init_bbp(rt2x00dev) || 261 rt2800_init_bbp(rt2x00dev) ||
280 rt2800_init_rfcsr(rt2x00dev))) 262 rt2800_init_rfcsr(rt2x00dev)))
@@ -295,9 +277,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
295 277
296 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg); 278 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
297 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 279 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
298 /* Don't use bulk in aggregation when working with USB 1.1 */ 280 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
299 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
300 (rt2x00dev->rx->usb_maxpacket == 512));
301 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128); 281 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
302 /* 282 /*
303 * Total room for RX frames in kilobytes, PBF might still exceed 283 * Total room for RX frames in kilobytes, PBF might still exceed
@@ -346,7 +326,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
346 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); 326 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
347 327
348 /* Wait for DMA, ignore error */ 328 /* Wait for DMA, ignore error */
349 rt2800usb_wait_wpdma_ready(rt2x00dev); 329 rt2800_wait_wpdma_ready(rt2x00dev);
350 330
351 rt2x00usb_disable_radio(rt2x00dev); 331 rt2x00usb_disable_radio(rt2x00dev);
352} 332}
@@ -573,41 +553,57 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
573{ 553{
574 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 554 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
575 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 555 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
576 __le32 *rxd = (__le32 *)entry->skb->data; 556 __le32 *rxi = (__le32 *)entry->skb->data;
577 __le32 *rxwi; 557 __le32 *rxwi;
578 u32 rxd0; 558 __le32 *rxd;
559 u32 rxi0;
579 u32 rxwi0; 560 u32 rxwi0;
580 u32 rxwi1; 561 u32 rxwi1;
581 u32 rxwi2; 562 u32 rxwi2;
582 u32 rxwi3; 563 u32 rxwi3;
564 u32 rxd0;
565 int rx_pkt_len;
566
567 /*
568 * RX frame format is :
569 * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
570 * |<------------ rx_pkt_len -------------->|
571 */
572 rt2x00_desc_read(rxi, 0, &rxi0);
573 rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
574
575 rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
576
577 /*
578 * FIXME : we need to check for rx_pkt_len validity
579 */
580 rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
583 581
584 /* 582 /*
585 * Copy descriptor to the skbdesc->desc buffer, making it safe from 583 * Copy descriptor to the skbdesc->desc buffer, making it safe from
586 * moving of frame data in rt2x00usb. 584 * moving of frame data in rt2x00usb.
587 */ 585 */
588 memcpy(skbdesc->desc, rxd, skbdesc->desc_len); 586 memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
589 rxd = (__le32 *)skbdesc->desc;
590 rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
591 587
592 /* 588 /*
593 * It is now safe to read the descriptor on all architectures. 589 * It is now safe to read the descriptor on all architectures.
594 */ 590 */
595 rt2x00_desc_read(rxd, 0, &rxd0);
596 rt2x00_desc_read(rxwi, 0, &rxwi0); 591 rt2x00_desc_read(rxwi, 0, &rxwi0);
597 rt2x00_desc_read(rxwi, 1, &rxwi1); 592 rt2x00_desc_read(rxwi, 1, &rxwi1);
598 rt2x00_desc_read(rxwi, 2, &rxwi2); 593 rt2x00_desc_read(rxwi, 2, &rxwi2);
599 rt2x00_desc_read(rxwi, 3, &rxwi3); 594 rt2x00_desc_read(rxwi, 3, &rxwi3);
595 rt2x00_desc_read(rxd, 0, &rxd0);
600 596
601 if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR)) 597 if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
602 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 598 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
603 599
604 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 600 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
605 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF); 601 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
606 rxdesc->cipher_status = 602 rxdesc->cipher_status =
607 rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR); 603 rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
608 } 604 }
609 605
610 if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) { 606 if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
611 /* 607 /*
612 * Hardware has stripped IV/EIV data from 802.11 frame during 608 * Hardware has stripped IV/EIV data from 802.11 frame during
613 * decryption. Unfortunately the descriptor doesn't contain 609 * decryption. Unfortunately the descriptor doesn't contain
@@ -622,13 +618,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
622 rxdesc->flags |= RX_FLAG_MMIC_ERROR; 618 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
623 } 619 }
624 620
625 if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS)) 621 if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
626 rxdesc->dev_flags |= RXDONE_MY_BSS; 622 rxdesc->dev_flags |= RXDONE_MY_BSS;
627 623
628 if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) { 624 if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
629 rxdesc->dev_flags |= RXDONE_L2PAD; 625 rxdesc->dev_flags |= RXDONE_L2PAD;
630 skbdesc->flags |= SKBDESC_L2_PADDED;
631 }
632 626
633 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) 627 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
634 rxdesc->flags |= RX_FLAG_SHORT_GI; 628 rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -663,7 +657,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
663 * Remove RXWI descriptor from start of buffer. 657 * Remove RXWI descriptor from start of buffer.
664 */ 658 */
665 skb_pull(entry->skb, skbdesc->desc_len); 659 skb_pull(entry->skb, skbdesc->desc_len);
666 skb_trim(entry->skb, rxdesc->size);
667} 660}
668 661
669/* 662/*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 1e4340a182e..d1d8ae94b4d 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,6 +79,8 @@
79 */ 79 */
80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
82#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
83#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
82 84
83/* 85/*
84 * TX Info structure 86 * TX Info structure
@@ -101,6 +103,54 @@
101#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000) 103#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
102 104
103/* 105/*
106 * RX Info structure
107 */
108
109/*
110 * Word 0
111 */
112
113#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
114
115/*
116 * RX WI structure
117 */
118
119/*
120 * Word0
121 */
122#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
123#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
124#define RXWI_W0_BSSID FIELD32(0x00001c00)
125#define RXWI_W0_UDF FIELD32(0x0000e000)
126#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
127#define RXWI_W0_TID FIELD32(0xf0000000)
128
129/*
130 * Word1
131 */
132#define RXWI_W1_FRAG FIELD32(0x0000000f)
133#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
134#define RXWI_W1_MCS FIELD32(0x007f0000)
135#define RXWI_W1_BW FIELD32(0x00800000)
136#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
137#define RXWI_W1_STBC FIELD32(0x06000000)
138#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
139
140/*
141 * Word2
142 */
143#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
144#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
145#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
146
147/*
148 * Word3
149 */
150#define RXWI_W3_SNR0 FIELD32(0x000000ff)
151#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
152
153/*
104 * RX descriptor format for RX Ring. 154 * RX descriptor format for RX Ring.
105 */ 155 */
106 156
@@ -115,25 +165,25 @@
115 * AMSDU: rx with 802.3 header, not 802.11 header. 165 * AMSDU: rx with 802.3 header, not 802.11 header.
116 */ 166 */
117 167
118#define RXINFO_W0_BA FIELD32(0x00000001) 168#define RXD_W0_BA FIELD32(0x00000001)
119#define RXINFO_W0_DATA FIELD32(0x00000002) 169#define RXD_W0_DATA FIELD32(0x00000002)
120#define RXINFO_W0_NULLDATA FIELD32(0x00000004) 170#define RXD_W0_NULLDATA FIELD32(0x00000004)
121#define RXINFO_W0_FRAG FIELD32(0x00000008) 171#define RXD_W0_FRAG FIELD32(0x00000008)
122#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010) 172#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
123#define RXINFO_W0_MULTICAST FIELD32(0x00000020) 173#define RXD_W0_MULTICAST FIELD32(0x00000020)
124#define RXINFO_W0_BROADCAST FIELD32(0x00000040) 174#define RXD_W0_BROADCAST FIELD32(0x00000040)
125#define RXINFO_W0_MY_BSS FIELD32(0x00000080) 175#define RXD_W0_MY_BSS FIELD32(0x00000080)
126#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100) 176#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
127#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600) 177#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
128#define RXINFO_W0_AMSDU FIELD32(0x00000800) 178#define RXD_W0_AMSDU FIELD32(0x00000800)
129#define RXINFO_W0_HTC FIELD32(0x00001000) 179#define RXD_W0_HTC FIELD32(0x00001000)
130#define RXINFO_W0_RSSI FIELD32(0x00002000) 180#define RXD_W0_RSSI FIELD32(0x00002000)
131#define RXINFO_W0_L2PAD FIELD32(0x00004000) 181#define RXD_W0_L2PAD FIELD32(0x00004000)
132#define RXINFO_W0_AMPDU FIELD32(0x00008000) 182#define RXD_W0_AMPDU FIELD32(0x00008000)
133#define RXINFO_W0_DECRYPTED FIELD32(0x00010000) 183#define RXD_W0_DECRYPTED FIELD32(0x00010000)
134#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000) 184#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
135#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000) 185#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
136#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000) 186#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
137#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000) 187#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
138 188
139#endif /* RT2800USB_H */ 189#endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index dcfc8c25d1a..096da85a66f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -104,6 +104,12 @@
104#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate)) 104#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
105 105
106/* 106/*
107 * Determine the number of L2 padding bytes required between the header and
108 * the payload.
109 */
110#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3)
111
112/*
107 * Determine the alignment requirement, 113 * Determine the alignment requirement,
108 * to make sure the 802.11 payload is padded to a 4-byte boundrary 114 * to make sure the 802.11 payload is padded to a 4-byte boundrary
109 * we must determine the address of the payload and calculate the 115 * we must determine the address of the payload and calculate the
@@ -937,25 +943,25 @@ static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
937 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev); 943 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
938} 944}
939 945
940static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip) 946static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
941{ 947{
942 return (chipset->rt == chip); 948 return (rt2x00dev->chip.rt == rt);
943} 949}
944 950
945static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip) 951static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
946{ 952{
947 return (chipset->rf == chip); 953 return (rt2x00dev->chip.rf == rf);
948} 954}
949 955
950static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset) 956static inline u32 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
951{ 957{
952 return chipset->rev; 958 return rt2x00dev->chip.rev;
953} 959}
954 960
955static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset, 961static inline bool rt2x00_check_rev(struct rt2x00_dev *rt2x00dev,
956 const u32 mask, const u32 rev) 962 const u32 mask, const u32 rev)
957{ 963{
958 return ((chipset->rev & mask) == rev); 964 return ((rt2x00dev->chip.rev & mask) == rev);
959} 965}
960 966
961static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev, 967static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
@@ -964,20 +970,20 @@ static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
964 rt2x00dev->chip.intf = intf; 970 rt2x00dev->chip.intf = intf;
965} 971}
966 972
967static inline bool rt2x00_intf(const struct rt2x00_chip *chipset, 973static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
968 enum rt2x00_chip_intf intf) 974 enum rt2x00_chip_intf intf)
969{ 975{
970 return (chipset->intf == intf); 976 return (rt2x00dev->chip.intf == intf);
971} 977}
972 978
973static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev) 979static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
974{ 980{
975 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI); 981 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
976} 982}
977 983
978static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev) 984static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
979{ 985{
980 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB); 986 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
981} 987}
982 988
983/** 989/**
@@ -1019,9 +1025,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
1019int rt2x00mac_start(struct ieee80211_hw *hw); 1025int rt2x00mac_start(struct ieee80211_hw *hw);
1020void rt2x00mac_stop(struct ieee80211_hw *hw); 1026void rt2x00mac_stop(struct ieee80211_hw *hw);
1021int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1027int rt2x00mac_add_interface(struct ieee80211_hw *hw,
1022 struct ieee80211_if_init_conf *conf); 1028 struct ieee80211_vif *vif);
1023void rt2x00mac_remove_interface(struct ieee80211_hw *hw, 1029void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
1024 struct ieee80211_if_init_conf *conf); 1030 struct ieee80211_vif *vif);
1025int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed); 1031int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
1026void rt2x00mac_configure_filter(struct ieee80211_hw *hw, 1032void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
1027 unsigned int changed_flags, 1033 unsigned int changed_flags,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 265e66dba55..b93731b7990 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -385,9 +385,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
385 memset(&rxdesc, 0, sizeof(rxdesc)); 385 memset(&rxdesc, 0, sizeof(rxdesc));
386 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); 386 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
387 387
388 /* Trim buffer to correct size */
389 skb_trim(entry->skb, rxdesc.size);
390
391 /* 388 /*
392 * The data behind the ieee80211 header must be 389 * The data behind the ieee80211 header must be
393 * aligned on a 4 byte boundary. 390 * aligned on a 4 byte boundary.
@@ -404,11 +401,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
404 (rxdesc.flags & RX_FLAG_IV_STRIPPED)) 401 (rxdesc.flags & RX_FLAG_IV_STRIPPED))
405 rt2x00crypto_rx_insert_iv(entry->skb, header_length, 402 rt2x00crypto_rx_insert_iv(entry->skb, header_length,
406 &rxdesc); 403 &rxdesc);
407 else if (rxdesc.dev_flags & RXDONE_L2PAD) 404 else if (header_length &&
405 (rxdesc.size > header_length) &&
406 (rxdesc.dev_flags & RXDONE_L2PAD))
408 rt2x00queue_remove_l2pad(entry->skb, header_length); 407 rt2x00queue_remove_l2pad(entry->skb, header_length);
409 else 408 else
410 rt2x00queue_align_payload(entry->skb, header_length); 409 rt2x00queue_align_payload(entry->skb, header_length);
411 410
411 /* Trim buffer to correct size */
412 skb_trim(entry->skb, rxdesc.size);
413
412 /* 414 /*
413 * Check if the frame was received using HT. In that case, 415 * Check if the frame was received using HT. In that case,
414 * the rate is the MCS index and should be passed to mac80211 416 * the rate is the MCS index and should be passed to mac80211
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index de549c244ed..00f1f939f1b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -187,10 +187,10 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
187EXPORT_SYMBOL_GPL(rt2x00mac_stop); 187EXPORT_SYMBOL_GPL(rt2x00mac_stop);
188 188
189int rt2x00mac_add_interface(struct ieee80211_hw *hw, 189int rt2x00mac_add_interface(struct ieee80211_hw *hw,
190 struct ieee80211_if_init_conf *conf) 190 struct ieee80211_vif *vif)
191{ 191{
192 struct rt2x00_dev *rt2x00dev = hw->priv; 192 struct rt2x00_dev *rt2x00dev = hw->priv;
193 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 193 struct rt2x00_intf *intf = vif_to_intf(vif);
194 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); 194 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
195 struct queue_entry *entry = NULL; 195 struct queue_entry *entry = NULL;
196 unsigned int i; 196 unsigned int i;
@@ -203,7 +203,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
203 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) 203 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
204 return -ENODEV; 204 return -ENODEV;
205 205
206 switch (conf->type) { 206 switch (vif->type) {
207 case NL80211_IFTYPE_AP: 207 case NL80211_IFTYPE_AP:
208 /* 208 /*
209 * We don't support mixed combinations of 209 * We don't support mixed combinations of
@@ -263,7 +263,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
263 * increase interface count and start initialization. 263 * increase interface count and start initialization.
264 */ 264 */
265 265
266 if (conf->type == NL80211_IFTYPE_AP) 266 if (vif->type == NL80211_IFTYPE_AP)
267 rt2x00dev->intf_ap_count++; 267 rt2x00dev->intf_ap_count++;
268 else 268 else
269 rt2x00dev->intf_sta_count++; 269 rt2x00dev->intf_sta_count++;
@@ -273,16 +273,16 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
273 mutex_init(&intf->beacon_skb_mutex); 273 mutex_init(&intf->beacon_skb_mutex);
274 intf->beacon = entry; 274 intf->beacon = entry;
275 275
276 if (conf->type == NL80211_IFTYPE_AP) 276 if (vif->type == NL80211_IFTYPE_AP)
277 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); 277 memcpy(&intf->bssid, vif->addr, ETH_ALEN);
278 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); 278 memcpy(&intf->mac, vif->addr, ETH_ALEN);
279 279
280 /* 280 /*
281 * The MAC adddress must be configured after the device 281 * The MAC adddress must be configured after the device
282 * has been initialized. Otherwise the device can reset 282 * has been initialized. Otherwise the device can reset
283 * the MAC registers. 283 * the MAC registers.
284 */ 284 */
285 rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL); 285 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
286 286
287 /* 287 /*
288 * Some filters depend on the current working mode. We can force 288 * Some filters depend on the current working mode. We can force
@@ -296,10 +296,10 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
296EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); 296EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
297 297
298void rt2x00mac_remove_interface(struct ieee80211_hw *hw, 298void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
299 struct ieee80211_if_init_conf *conf) 299 struct ieee80211_vif *vif)
300{ 300{
301 struct rt2x00_dev *rt2x00dev = hw->priv; 301 struct rt2x00_dev *rt2x00dev = hw->priv;
302 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 302 struct rt2x00_intf *intf = vif_to_intf(vif);
303 303
304 /* 304 /*
305 * Don't allow interfaces to be remove while 305 * Don't allow interfaces to be remove while
@@ -307,11 +307,11 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
307 * no interface is present. 307 * no interface is present.
308 */ 308 */
309 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || 309 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
310 (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) || 310 (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
311 (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count)) 311 (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
312 return; 312 return;
313 313
314 if (conf->type == NL80211_IFTYPE_AP) 314 if (vif->type == NL80211_IFTYPE_AP)
315 rt2x00dev->intf_ap_count--; 315 rt2x00dev->intf_ap_count--;
316 else 316 else
317 rt2x00dev->intf_sta_count--; 317 rt2x00dev->intf_sta_count--;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0feb4d0e466..801be436cf1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -41,6 +41,9 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
41{ 41{
42 unsigned int i; 42 unsigned int i;
43 43
44 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
45 return 0;
46
44 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 47 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
45 rt2x00pci_register_read(rt2x00dev, offset, reg); 48 rt2x00pci_register_read(rt2x00dev, offset, reg);
46 if (!rt2x00_get_field32(*reg, field)) 49 if (!rt2x00_get_field32(*reg, field))
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9915a09141e..0b4801a1460 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -177,55 +177,45 @@ void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
177 177
178void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) 178void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
179{ 179{
180 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 180 unsigned int payload_length = skb->len - header_length;
181 unsigned int frame_length = skb->len;
182 unsigned int header_align = ALIGN_SIZE(skb, 0); 181 unsigned int header_align = ALIGN_SIZE(skb, 0);
183 unsigned int payload_align = ALIGN_SIZE(skb, header_length); 182 unsigned int payload_align = ALIGN_SIZE(skb, header_length);
184 unsigned int l2pad = 4 - (payload_align - header_align); 183 unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
185 184
186 if (header_align == payload_align) { 185 /*
187 /* 186 * Adjust the header alignment if the payload needs to be moved more
188 * Both header and payload must be moved the same 187 * than the header.
189 * amount of bytes to align them properly. This means 188 */
190 * we don't use the L2 padding but just move the entire 189 if (payload_align > header_align)
191 * frame. 190 header_align += 4;
192 */ 191
193 rt2x00queue_align_frame(skb); 192 /* There is nothing to do if no alignment is needed */
194 } else if (!payload_align) { 193 if (!header_align)
195 /* 194 return;
196 * Simple L2 padding, only the header needs to be moved, 195
197 * the payload is already properly aligned. 196 /* Reserve the amount of space needed in front of the frame */
198 */ 197 skb_push(skb, header_align);
199 skb_push(skb, header_align); 198
200 memmove(skb->data, skb->data + header_align, frame_length); 199 /*
201 skbdesc->flags |= SKBDESC_L2_PADDED; 200 * Move the header.
202 } else { 201 */
203 /* 202 memmove(skb->data, skb->data + header_align, header_length);
204 *
205 * Complicated L2 padding, both header and payload need
206 * to be moved. By default we only move to the start
207 * of the buffer, so our header alignment needs to be
208 * increased if there is not enough room for the header
209 * to be moved.
210 */
211 if (payload_align > header_align)
212 header_align += 4;
213 203
214 skb_push(skb, header_align); 204 /* Move the payload, if present and if required */
215 memmove(skb->data, skb->data + header_align, header_length); 205 if (payload_length && payload_align)
216 memmove(skb->data + header_length + l2pad, 206 memmove(skb->data + header_length + l2pad,
217 skb->data + header_length + l2pad + payload_align, 207 skb->data + header_length + l2pad + payload_align,
218 frame_length - header_length); 208 payload_length);
219 skbdesc->flags |= SKBDESC_L2_PADDED; 209
220 } 210 /* Trim the skb to the correct size */
211 skb_trim(skb, header_length + l2pad + payload_length);
221} 212}
222 213
223void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) 214void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
224{ 215{
225 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 216 unsigned int l2pad = L2PAD_SIZE(header_length);
226 unsigned int l2pad = 4 - (header_length & 3);
227 217
228 if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED)) 218 if (!l2pad)
229 return; 219 return;
230 220
231 memmove(skb->data + l2pad, skb->data, header_length); 221 memmove(skb->data + l2pad, skb->data, header_length);
@@ -346,7 +336,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
346 * Header and alignment information. 336 * Header and alignment information.
347 */ 337 */
348 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 338 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
349 txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length); 339 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
340 (entry->skb->len > txdesc->header_length))
341 txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
350 342
351 /* 343 /*
352 * Check whether this frame is to be acked. 344 * Check whether this frame is to be acked.
@@ -387,10 +379,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
387 379
388 /* 380 /*
389 * Beacons and probe responses require the tsf timestamp 381 * Beacons and probe responses require the tsf timestamp
390 * to be inserted into the frame. 382 * to be inserted into the frame, except for a frame that has been injected
383 * through a monitor interface. This latter is needed for testing a
384 * monitor interface.
391 */ 385 */
392 if (ieee80211_is_beacon(hdr->frame_control) || 386 if ((ieee80211_is_beacon(hdr->frame_control) ||
393 ieee80211_is_probe_resp(hdr->frame_control)) 387 ieee80211_is_probe_resp(hdr->frame_control)) &&
388 (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
394 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); 389 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
395 390
396 /* 391 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 70775e5ba1a..c1e482bb37b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -92,8 +92,6 @@ enum data_queue_qid {
92 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX 92 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
93 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by 93 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
94 * mac80211 but was stripped for processing by the driver. 94 * mac80211 but was stripped for processing by the driver.
95 * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
96 * the padded bytes are located between header and payload.
97 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211, 95 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
98 * don't try to pass it back. 96 * don't try to pass it back.
99 */ 97 */
@@ -101,8 +99,7 @@ enum skb_frame_desc_flags {
101 SKBDESC_DMA_MAPPED_RX = 1 << 0, 99 SKBDESC_DMA_MAPPED_RX = 1 << 0,
102 SKBDESC_DMA_MAPPED_TX = 1 << 1, 100 SKBDESC_DMA_MAPPED_TX = 1 << 1,
103 SKBDESC_IV_STRIPPED = 1 << 2, 101 SKBDESC_IV_STRIPPED = 1 << 2,
104 SKBDESC_L2_PADDED = 1 << 3, 102 SKBDESC_NOT_MAC80211 = 1 << 3,
105 SKBDESC_NOT_MAC80211 = 1 << 4,
106}; 103};
107 104
108/** 105/**
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0ca589306d7..1f97a797bc4 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -637,8 +637,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
637 rt61pci_bbp_read(rt2x00dev, 4, &r4); 637 rt61pci_bbp_read(rt2x00dev, 4, &r4);
638 rt61pci_bbp_read(rt2x00dev, 77, &r77); 638 rt61pci_bbp_read(rt2x00dev, 77, &r77);
639 639
640 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 640 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
641 rt2x00_rf(&rt2x00dev->chip, RF5325));
642 641
643 /* 642 /*
644 * Configure the RX antenna. 643 * Configure the RX antenna.
@@ -684,8 +683,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
684 rt61pci_bbp_read(rt2x00dev, 4, &r4); 683 rt61pci_bbp_read(rt2x00dev, 4, &r4);
685 rt61pci_bbp_read(rt2x00dev, 77, &r77); 684 rt61pci_bbp_read(rt2x00dev, 77, &r77);
686 685
687 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 686 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
688 rt2x00_rf(&rt2x00dev->chip, RF2529));
689 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 687 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
690 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); 688 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
691 689
@@ -833,12 +831,11 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
833 831
834 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); 832 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
835 833
836 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 834 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
837 rt2x00_rf(&rt2x00dev->chip, RF5325))
838 rt61pci_config_antenna_5x(rt2x00dev, ant); 835 rt61pci_config_antenna_5x(rt2x00dev, ant);
839 else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) 836 else if (rt2x00_rf(rt2x00dev, RF2527))
840 rt61pci_config_antenna_2x(rt2x00dev, ant); 837 rt61pci_config_antenna_2x(rt2x00dev, ant);
841 else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) { 838 else if (rt2x00_rf(rt2x00dev, RF2529)) {
842 if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) 839 if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
843 rt61pci_config_antenna_2x(rt2x00dev, ant); 840 rt61pci_config_antenna_2x(rt2x00dev, ant);
844 else 841 else
@@ -879,8 +876,7 @@ static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
879 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 876 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
880 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 877 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
881 878
882 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || 879 smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
883 rt2x00_rf(&rt2x00dev->chip, RF2527));
884 880
885 rt61pci_bbp_read(rt2x00dev, 3, &r3); 881 rt61pci_bbp_read(rt2x00dev, 3, &r3);
886 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); 882 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -2302,10 +2298,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2302 rt2x00_set_chip_rf(rt2x00dev, value, reg); 2298 rt2x00_set_chip_rf(rt2x00dev, value, reg);
2303 rt2x00_print_chip(rt2x00dev); 2299 rt2x00_print_chip(rt2x00dev);
2304 2300
2305 if (!rt2x00_rf(&rt2x00dev->chip, RF5225) && 2301 if (!rt2x00_rf(rt2x00dev, RF5225) &&
2306 !rt2x00_rf(&rt2x00dev->chip, RF5325) && 2302 !rt2x00_rf(rt2x00dev, RF5325) &&
2307 !rt2x00_rf(&rt2x00dev->chip, RF2527) && 2303 !rt2x00_rf(rt2x00dev, RF2527) &&
2308 !rt2x00_rf(&rt2x00dev->chip, RF2529)) { 2304 !rt2x00_rf(rt2x00dev, RF2529)) {
2309 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 2305 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
2310 return -ENODEV; 2306 return -ENODEV;
2311 } 2307 }
@@ -2360,7 +2356,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2360 * the antenna settings should be gathered from the NIC 2356 * the antenna settings should be gathered from the NIC
2361 * eeprom word. 2357 * eeprom word.
2362 */ 2358 */
2363 if (rt2x00_rf(&rt2x00dev->chip, RF2529) && 2359 if (rt2x00_rf(rt2x00dev, RF2529) &&
2364 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) { 2360 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
2365 rt2x00dev->default_ant.rx = 2361 rt2x00dev->default_ant.rx =
2366 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED); 2362 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@ -2571,8 +2567,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2571 spec->channels = rf_vals_seq; 2567 spec->channels = rf_vals_seq;
2572 } 2568 }
2573 2569
2574 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 2570 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
2575 rt2x00_rf(&rt2x00dev->chip, RF5325)) {
2576 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2571 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2577 spec->num_channels = ARRAY_SIZE(rf_vals_seq); 2572 spec->num_channels = ARRAY_SIZE(rf_vals_seq);
2578 } 2573 }
@@ -2812,7 +2807,7 @@ static const struct rt2x00_ops rt61pci_ops = {
2812/* 2807/*
2813 * RT61pci module information. 2808 * RT61pci module information.
2814 */ 2809 */
2815static struct pci_device_id rt61pci_device_table[] = { 2810static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
2816 /* RT2561s */ 2811 /* RT2561s */
2817 { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) }, 2812 { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
2818 /* RT2561 v2 */ 2813 /* RT2561 v2 */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ced3b6ab5e1..a0269129439 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -136,8 +136,8 @@ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
136 * all others contain 20 bits. 136 * all others contain 20 bits.
137 */ 137 */
138 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 138 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
139 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) || 139 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
140 rt2x00_rf(&rt2x00dev->chip, RF2527))); 140 rt2x00_rf(rt2x00dev, RF2527)));
141 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); 141 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
142 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); 142 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
143 143
@@ -741,11 +741,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
741 741
742 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); 742 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
743 743
744 if (rt2x00_rf(&rt2x00dev->chip, RF5226) || 744 if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
745 rt2x00_rf(&rt2x00dev->chip, RF5225))
746 rt73usb_config_antenna_5x(rt2x00dev, ant); 745 rt73usb_config_antenna_5x(rt2x00dev, ant);
747 else if (rt2x00_rf(&rt2x00dev->chip, RF2528) || 746 else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
748 rt2x00_rf(&rt2x00dev->chip, RF2527))
749 rt73usb_config_antenna_2x(rt2x00dev, ant); 747 rt73usb_config_antenna_2x(rt2x00dev, ant);
750} 748}
751 749
@@ -779,8 +777,7 @@ static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
779 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 777 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
780 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 778 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
781 779
782 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || 780 smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
783 rt2x00_rf(&rt2x00dev->chip, RF2527));
784 781
785 rt73usb_bbp_read(rt2x00dev, 3, &r3); 782 rt73usb_bbp_read(rt2x00dev, 3, &r3);
786 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); 783 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1210,8 +1207,7 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1210 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); 1207 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
1211 1208
1212 reg = 0x000023b0; 1209 reg = 0x000023b0;
1213 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 1210 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
1214 rt2x00_rf(&rt2x00dev->chip, RF2527))
1215 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1); 1211 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
1216 rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg); 1212 rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
1217 1213
@@ -1827,16 +1823,16 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1827 rt2x00_set_chip(rt2x00dev, RT2571, value, reg); 1823 rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
1828 rt2x00_print_chip(rt2x00dev); 1824 rt2x00_print_chip(rt2x00dev);
1829 1825
1830 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) || 1826 if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0x25730) ||
1831 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { 1827 rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
1832 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1828 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1833 return -ENODEV; 1829 return -ENODEV;
1834 } 1830 }
1835 1831
1836 if (!rt2x00_rf(&rt2x00dev->chip, RF5226) && 1832 if (!rt2x00_rf(rt2x00dev, RF5226) &&
1837 !rt2x00_rf(&rt2x00dev->chip, RF2528) && 1833 !rt2x00_rf(rt2x00dev, RF2528) &&
1838 !rt2x00_rf(&rt2x00dev->chip, RF5225) && 1834 !rt2x00_rf(rt2x00dev, RF5225) &&
1839 !rt2x00_rf(&rt2x00dev->chip, RF2527)) { 1835 !rt2x00_rf(rt2x00dev, RF2527)) {
1840 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1836 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1841 return -ENODEV; 1837 return -ENODEV;
1842 } 1838 }
@@ -2081,17 +2077,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2081 spec->supported_bands = SUPPORT_BAND_2GHZ; 2077 spec->supported_bands = SUPPORT_BAND_2GHZ;
2082 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2078 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2083 2079
2084 if (rt2x00_rf(&rt2x00dev->chip, RF2528)) { 2080 if (rt2x00_rf(rt2x00dev, RF2528)) {
2085 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); 2081 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
2086 spec->channels = rf_vals_bg_2528; 2082 spec->channels = rf_vals_bg_2528;
2087 } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) { 2083 } else if (rt2x00_rf(rt2x00dev, RF5226)) {
2088 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2084 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2089 spec->num_channels = ARRAY_SIZE(rf_vals_5226); 2085 spec->num_channels = ARRAY_SIZE(rf_vals_5226);
2090 spec->channels = rf_vals_5226; 2086 spec->channels = rf_vals_5226;
2091 } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) { 2087 } else if (rt2x00_rf(rt2x00dev, RF2527)) {
2092 spec->num_channels = 14; 2088 spec->num_channels = 14;
2093 spec->channels = rf_vals_5225_2527; 2089 spec->channels = rf_vals_5225_2527;
2094 } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) { 2090 } else if (rt2x00_rf(rt2x00dev, RF5225)) {
2095 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2091 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2096 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); 2092 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
2097 spec->channels = rf_vals_5225_2527; 2093 spec->channels = rf_vals_5225_2527;
@@ -2354,6 +2350,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2354 { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) }, 2350 { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
2355 /* Buffalo */ 2351 /* Buffalo */
2356 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) }, 2352 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
2353 { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
2357 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, 2354 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
2358 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) }, 2355 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
2359 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) }, 2356 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a818..de3844fe06d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -60,7 +60,6 @@ struct rtl8180_priv {
60 struct rtl818x_csr __iomem *map; 60 struct rtl818x_csr __iomem *map;
61 const struct rtl818x_rf_ops *rf; 61 const struct rtl818x_rf_ops *rf;
62 struct ieee80211_vif *vif; 62 struct ieee80211_vif *vif;
63 int mode;
64 63
65 /* rtl8180 driver specific */ 64 /* rtl8180 driver specific */
66 spinlock_t lock; 65 spinlock_t lock;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 8a40a143998..b9192bfcc55 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -33,7 +33,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
33MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver"); 33MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35 35
36static struct pci_device_id rtl8180_table[] __devinitdata = { 36static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
37 /* rtl8185 */ 37 /* rtl8185 */
38 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) }, 38 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
39 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) }, 39 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -82,8 +82,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
82}; 82};
83 83
84 84
85
86
87void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) 85void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
88{ 86{
89 struct rtl8180_priv *priv = dev->priv; 87 struct rtl8180_priv *priv = dev->priv;
@@ -615,7 +613,6 @@ static int rtl8180_start(struct ieee80211_hw *dev)
615 reg |= RTL818X_CMD_TX_ENABLE; 613 reg |= RTL818X_CMD_TX_ENABLE;
616 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 614 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
617 615
618 priv->mode = NL80211_IFTYPE_MONITOR;
619 return 0; 616 return 0;
620 617
621 err_free_rings: 618 err_free_rings:
@@ -633,8 +630,6 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
633 u8 reg; 630 u8 reg;
634 int i; 631 int i;
635 632
636 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
637
638 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 633 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
639 634
640 reg = rtl818x_ioread8(priv, &priv->map->CMD); 635 reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -657,38 +652,39 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
657} 652}
658 653
659static int rtl8180_add_interface(struct ieee80211_hw *dev, 654static int rtl8180_add_interface(struct ieee80211_hw *dev,
660 struct ieee80211_if_init_conf *conf) 655 struct ieee80211_vif *vif)
661{ 656{
662 struct rtl8180_priv *priv = dev->priv; 657 struct rtl8180_priv *priv = dev->priv;
663 658
664 if (priv->mode != NL80211_IFTYPE_MONITOR) 659 /*
665 return -EOPNOTSUPP; 660 * We only support one active interface at a time.
661 */
662 if (priv->vif)
663 return -EBUSY;
666 664
667 switch (conf->type) { 665 switch (vif->type) {
668 case NL80211_IFTYPE_STATION: 666 case NL80211_IFTYPE_STATION:
669 priv->mode = conf->type;
670 break; 667 break;
671 default: 668 default:
672 return -EOPNOTSUPP; 669 return -EOPNOTSUPP;
673 } 670 }
674 671
675 priv->vif = conf->vif; 672 priv->vif = vif;
676 673
677 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 674 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
678 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0], 675 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
679 le32_to_cpu(*(__le32 *)conf->mac_addr)); 676 le32_to_cpu(*(__le32 *)vif->addr));
680 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4], 677 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
681 le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); 678 le16_to_cpu(*(__le16 *)(vif->addr + 4)));
682 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 679 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
683 680
684 return 0; 681 return 0;
685} 682}
686 683
687static void rtl8180_remove_interface(struct ieee80211_hw *dev, 684static void rtl8180_remove_interface(struct ieee80211_hw *dev,
688 struct ieee80211_if_init_conf *conf) 685 struct ieee80211_vif *vif)
689{ 686{
690 struct rtl8180_priv *priv = dev->priv; 687 struct rtl8180_priv *priv = dev->priv;
691 priv->mode = NL80211_IFTYPE_MONITOR;
692 priv->vif = NULL; 688 priv->vif = NULL;
693} 689}
694 690
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6af0f3f71f3..6bb32112e65 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -92,7 +92,7 @@ struct rtl8187_priv {
92 struct rtl818x_csr *map; 92 struct rtl818x_csr *map;
93 const struct rtl818x_rf_ops *rf; 93 const struct rtl818x_rf_ops *rf;
94 struct ieee80211_vif *vif; 94 struct ieee80211_vif *vif;
95 int mode; 95
96 /* The mutex protects the TX loopback state. 96 /* The mutex protects the TX loopback state.
97 * Any attempt to set channels concurrently locks the device. 97 * Any attempt to set channels concurrently locks the device.
98 */ 98 */
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe..f336c63053c 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1018,31 +1018,30 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
1018} 1018}
1019 1019
1020static int rtl8187_add_interface(struct ieee80211_hw *dev, 1020static int rtl8187_add_interface(struct ieee80211_hw *dev,
1021 struct ieee80211_if_init_conf *conf) 1021 struct ieee80211_vif *vif)
1022{ 1022{
1023 struct rtl8187_priv *priv = dev->priv; 1023 struct rtl8187_priv *priv = dev->priv;
1024 int i; 1024 int i;
1025 int ret = -EOPNOTSUPP; 1025 int ret = -EOPNOTSUPP;
1026 1026
1027 mutex_lock(&priv->conf_mutex); 1027 mutex_lock(&priv->conf_mutex);
1028 if (priv->mode != NL80211_IFTYPE_MONITOR) 1028 if (priv->vif)
1029 goto exit; 1029 goto exit;
1030 1030
1031 switch (conf->type) { 1031 switch (vif->type) {
1032 case NL80211_IFTYPE_STATION: 1032 case NL80211_IFTYPE_STATION:
1033 priv->mode = conf->type;
1034 break; 1033 break;
1035 default: 1034 default:
1036 goto exit; 1035 goto exit;
1037 } 1036 }
1038 1037
1039 ret = 0; 1038 ret = 0;
1040 priv->vif = conf->vif; 1039 priv->vif = vif;
1041 1040
1042 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 1041 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
1043 for (i = 0; i < ETH_ALEN; i++) 1042 for (i = 0; i < ETH_ALEN; i++)
1044 rtl818x_iowrite8(priv, &priv->map->MAC[i], 1043 rtl818x_iowrite8(priv, &priv->map->MAC[i],
1045 ((u8 *)conf->mac_addr)[i]); 1044 ((u8 *)vif->addr)[i]);
1046 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 1045 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
1047 1046
1048exit: 1047exit:
@@ -1051,11 +1050,10 @@ exit:
1051} 1050}
1052 1051
1053static void rtl8187_remove_interface(struct ieee80211_hw *dev, 1052static void rtl8187_remove_interface(struct ieee80211_hw *dev,
1054 struct ieee80211_if_init_conf *conf) 1053 struct ieee80211_vif *vif)
1055{ 1054{
1056 struct rtl8187_priv *priv = dev->priv; 1055 struct rtl8187_priv *priv = dev->priv;
1057 mutex_lock(&priv->conf_mutex); 1056 mutex_lock(&priv->conf_mutex);
1058 priv->mode = NL80211_IFTYPE_MONITOR;
1059 priv->vif = NULL; 1057 priv->vif = NULL;
1060 mutex_unlock(&priv->conf_mutex); 1058 mutex_unlock(&priv->conf_mutex);
1061} 1059}
@@ -1365,7 +1363,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1365 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1363 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
1366 1364
1367 1365
1368 priv->mode = NL80211_IFTYPE_MONITOR;
1369 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1366 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1370 IEEE80211_HW_SIGNAL_DBM | 1367 IEEE80211_HW_SIGNAL_DBM |
1371 IEEE80211_HW_RX_INCLUDES_FCS; 1368 IEEE80211_HW_RX_INCLUDES_FCS;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index ded44c045eb..f82aa8b4bdd 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -33,7 +33,7 @@ static void led_turn_on(struct work_struct *work)
33 struct rtl8187_led *led = &priv->led_tx; 33 struct rtl8187_led *led = &priv->led_tx;
34 34
35 /* Don't change the LED, when the device is down. */ 35 /* Don't change the LED, when the device is down. */
36 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) 36 if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
37 return ; 37 return ;
38 38
39 /* Skip if the LED is not registered. */ 39 /* Skip if the LED is not registered. */
@@ -71,7 +71,7 @@ static void led_turn_off(struct work_struct *work)
71 struct rtl8187_led *led = &priv->led_tx; 71 struct rtl8187_led *led = &priv->led_tx;
72 72
73 /* Don't change the LED, when the device is down. */ 73 /* Don't change the LED, when the device is down. */
74 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) 74 if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
75 return ; 75 return ;
76 76
77 /* Skip if the LED is not registered. */ 77 /* Skip if the LED is not registered. */
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 054533f7a12..6301578d156 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -247,6 +247,7 @@ struct wl1251_debugfs {
247 struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data; 247 struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
248 248
249 struct dentry *tx_queue_len; 249 struct dentry *tx_queue_len;
250 struct dentry *tx_queue_status;
250 251
251 struct dentry *retry_count; 252 struct dentry *retry_count;
252 struct dentry *excessive_retries; 253 struct dentry *excessive_retries;
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index acfa086dbfc..beff084040b 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -976,3 +976,72 @@ out:
976 kfree(acx); 976 kfree(acx);
977 return ret; 977 return ret;
978} 978}
979
980int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
981 u8 aifs, u16 txop)
982{
983 struct wl1251_acx_ac_cfg *acx;
984 int ret = 0;
985
986 wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d "
987 "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
988
989 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
990
991 if (!acx) {
992 ret = -ENOMEM;
993 goto out;
994 }
995
996 acx->ac = ac;
997 acx->cw_min = cw_min;
998 acx->cw_max = cw_max;
999 acx->aifsn = aifs;
1000 acx->txop_limit = txop;
1001
1002 ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
1003 if (ret < 0) {
1004 wl1251_warning("acx ac cfg failed: %d", ret);
1005 goto out;
1006 }
1007
1008out:
1009 kfree(acx);
1010 return ret;
1011}
1012
1013int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
1014 enum wl1251_acx_channel_type type,
1015 u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
1016 enum wl1251_acx_ack_policy ack_policy)
1017{
1018 struct wl1251_acx_tid_cfg *acx;
1019 int ret = 0;
1020
1021 wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
1022 "ps_scheme %d ack_policy %d", queue, type, tsid,
1023 ps_scheme, ack_policy);
1024
1025 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1026
1027 if (!acx) {
1028 ret = -ENOMEM;
1029 goto out;
1030 }
1031
1032 acx->queue = queue;
1033 acx->type = type;
1034 acx->tsid = tsid;
1035 acx->ps_scheme = ps_scheme;
1036 acx->ack_policy = ack_policy;
1037
1038 ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
1039 if (ret < 0) {
1040 wl1251_warning("acx tid cfg failed: %d", ret);
1041 goto out;
1042 }
1043
1044out:
1045 kfree(acx);
1046 return ret;
1047}
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 652371432cd..26160c45784 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -1166,6 +1166,87 @@ struct wl1251_acx_wr_tbtt_and_dtim {
1166 u8 padding; 1166 u8 padding;
1167} __attribute__ ((packed)); 1167} __attribute__ ((packed));
1168 1168
1169struct wl1251_acx_ac_cfg {
1170 struct acx_header header;
1171
1172 /*
1173 * Access Category - The TX queue's access category
1174 * (refer to AccessCategory_enum)
1175 */
1176 u8 ac;
1177
1178 /*
1179 * The contention window minimum size (in slots) for
1180 * the access class.
1181 */
1182 u8 cw_min;
1183
1184 /*
1185 * The contention window maximum size (in slots) for
1186 * the access class.
1187 */
1188 u16 cw_max;
1189
1190 /* The AIF value (in slots) for the access class. */
1191 u8 aifsn;
1192
1193 u8 reserved;
1194
1195 /* The TX Op Limit (in microseconds) for the access class. */
1196 u16 txop_limit;
1197} __attribute__ ((packed));
1198
1199
1200enum wl1251_acx_channel_type {
1201 CHANNEL_TYPE_DCF = 0,
1202 CHANNEL_TYPE_EDCF = 1,
1203 CHANNEL_TYPE_HCCA = 2,
1204};
1205
1206enum wl1251_acx_ps_scheme {
1207 /* regular ps: simple sending of packets */
1208 WL1251_ACX_PS_SCHEME_LEGACY = 0,
1209
1210 /* sending a packet triggers a unscheduled apsd downstream */
1211 WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
1212
1213 /* a pspoll packet will be sent before every data packet */
1214 WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
1215
1216 /* scheduled apsd mode */
1217 WL1251_ACX_PS_SCHEME_SAPSD = 3,
1218};
1219
1220enum wl1251_acx_ack_policy {
1221 WL1251_ACX_ACK_POLICY_LEGACY = 0,
1222 WL1251_ACX_ACK_POLICY_NO_ACK = 1,
1223 WL1251_ACX_ACK_POLICY_BLOCK = 2,
1224};
1225
1226struct wl1251_acx_tid_cfg {
1227 struct acx_header header;
1228
1229 /* tx queue id number (0-7) */
1230 u8 queue;
1231
1232 /* channel access type for the queue, enum wl1251_acx_channel_type */
1233 u8 type;
1234
1235 /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */
1236 u8 tsid;
1237
1238 /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */
1239 u8 ps_scheme;
1240
1241 /* the tx queue ack policy, enum wl1251_acx_ack_policy */
1242 u8 ack_policy;
1243
1244 u8 padding[3];
1245
1246 /* not supported */
1247 u32 apsdconf[2];
1248} __attribute__ ((packed));
1249
1169/************************************************************************* 1250/*************************************************************************
1170 1251
1171 Host Interrupt Register (WiLink -> Host) 1252 Host Interrupt Register (WiLink -> Host)
@@ -1322,5 +1403,11 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
1322int wl1251_acx_rate_policies(struct wl1251 *wl); 1403int wl1251_acx_rate_policies(struct wl1251 *wl);
1323int wl1251_acx_mem_cfg(struct wl1251 *wl); 1404int wl1251_acx_mem_cfg(struct wl1251 *wl);
1324int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); 1405int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
1406int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
1407 u8 aifs, u16 txop);
1408int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
1409 enum wl1251_acx_channel_type type,
1410 u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
1411 enum wl1251_acx_ack_policy ack_policy);
1325 1412
1326#endif /* __WL1251_ACX_H__ */ 1413#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726b..0320b478bb3 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -410,3 +410,86 @@ out:
410 kfree(cmd); 410 kfree(cmd);
411 return ret; 411 return ret;
412} 412}
413
414int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
415 struct ieee80211_channel *channels[],
416 unsigned int n_channels, unsigned int n_probes)
417{
418 struct wl1251_cmd_scan *cmd;
419 int i, ret = 0;
420
421 wl1251_debug(DEBUG_CMD, "cmd scan");
422
423 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
424 if (!cmd)
425 return -ENOMEM;
426
427 cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
428 cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
429 CFG_RX_MGMT_EN |
430 CFG_RX_BCN_EN);
431 cmd->params.scan_options = 0;
432 cmd->params.num_channels = n_channels;
433 cmd->params.num_probe_requests = n_probes;
434 cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
435 cmd->params.tid_trigger = 0;
436
437 for (i = 0; i < n_channels; i++) {
438 cmd->channels[i].min_duration =
439 cpu_to_le32(WL1251_SCAN_MIN_DURATION);
440 cmd->channels[i].max_duration =
441 cpu_to_le32(WL1251_SCAN_MAX_DURATION);
442 memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
443 memset(&cmd->channels[i].bssid_msb, 0xff, 2);
444 cmd->channels[i].early_termination = 0;
445 cmd->channels[i].tx_power_att = 0;
446 cmd->channels[i].channel = channels[i]->hw_value;
447 }
448
449 cmd->params.ssid_len = ssid_len;
450 if (ssid)
451 memcpy(cmd->params.ssid, ssid, ssid_len);
452
453 ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
454 if (ret < 0) {
455 wl1251_error("cmd scan failed: %d", ret);
456 goto out;
457 }
458
459 wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
460
461 if (cmd->header.status != CMD_STATUS_SUCCESS) {
462 wl1251_error("cmd scan status wasn't success: %d",
463 cmd->header.status);
464 ret = -EIO;
465 goto out;
466 }
467
468out:
469 kfree(cmd);
470 return ret;
471}
472
473int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
474{
475 struct wl1251_cmd_trigger_scan_to *cmd;
476 int ret;
477
478 wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
479
480 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
481 if (!cmd)
482 return -ENOMEM;
483
484 cmd->timeout = timeout;
485
486 ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
487 if (ret < 0) {
488 wl1251_error("cmd trigger scan to failed: %d", ret);
489 goto out;
490 }
491
492out:
493 kfree(cmd);
494 return ret;
495}
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef..4ad67cae94d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
27 27
28#include "wl1251.h" 28#include "wl1251.h"
29 29
30#include <net/cfg80211.h>
31
30struct acx_header; 32struct acx_header;
31 33
32int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len); 34int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
43 size_t len); 45 size_t len);
44int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id, 46int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
45 void *buf, size_t buf_len); 47 void *buf, size_t buf_len);
48int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
49 struct ieee80211_channel *channels[],
50 unsigned int n_channels, unsigned int n_probes);
51int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
46 52
47/* unit ms */ 53/* unit ms */
48#define WL1251_COMMAND_TIMEOUT 2000 54#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
163#define CMDMBOX_HEADER_LEN 4 169#define CMDMBOX_HEADER_LEN 4
164#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 170#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
165 171
172#define WL1251_SCAN_MIN_DURATION 30000
173#define WL1251_SCAN_MAX_DURATION 60000
174
175#define WL1251_SCAN_NUM_PROBES 3
166 176
167struct basic_scan_parameters { 177struct wl1251_scan_parameters {
168 u32 rx_config_options; 178 u32 rx_config_options;
169 u32 rx_filter_options; 179 u32 rx_filter_options;
170 180
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
189 199
190 u8 tid_trigger; 200 u8 tid_trigger;
191 u8 ssid_len; 201 u8 ssid_len;
192 u32 ssid[8]; 202 u8 ssid[32];
193 203
194} __attribute__ ((packed)); 204} __attribute__ ((packed));
195 205
196struct basic_scan_channel_parameters { 206struct wl1251_scan_ch_parameters {
197 u32 min_duration; /* in TU */ 207 u32 min_duration; /* in TU */
198 u32 max_duration; /* in TU */ 208 u32 max_duration; /* in TU */
199 u32 bssid_lsb; 209 u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
213/* SCAN parameters */ 223/* SCAN parameters */
214#define SCAN_MAX_NUM_OF_CHANNELS 16 224#define SCAN_MAX_NUM_OF_CHANNELS 16
215 225
216struct cmd_scan { 226struct wl1251_cmd_scan {
217 struct wl1251_cmd_header header; 227 struct wl1251_cmd_header header;
218 228
219 struct basic_scan_parameters params; 229 struct wl1251_scan_parameters params;
220 struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; 230 struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
221} __attribute__ ((packed)); 231} __attribute__ ((packed));
222 232
223enum { 233enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a00723059f8..0ccba57fb9f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -237,6 +237,27 @@ static const struct file_operations tx_queue_len_ops = {
237 .open = wl1251_open_file_generic, 237 .open = wl1251_open_file_generic,
238}; 238};
239 239
240static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
241 size_t count, loff_t *ppos)
242{
243 struct wl1251 *wl = file->private_data;
244 char buf[3], status;
245 int len;
246
247 if (wl->tx_queue_stopped)
248 status = 's';
249 else
250 status = 'r';
251
252 len = scnprintf(buf, sizeof(buf), "%c\n", status);
253 return simple_read_from_buffer(userbuf, count, ppos, buf, len);
254}
255
256static const struct file_operations tx_queue_status_ops = {
257 .read = tx_queue_status_read,
258 .open = wl1251_open_file_generic,
259};
260
240static void wl1251_debugfs_delete_files(struct wl1251 *wl) 261static void wl1251_debugfs_delete_files(struct wl1251 *wl)
241{ 262{
242 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); 263 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -331,6 +352,7 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
331 DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data); 352 DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
332 353
333 DEBUGFS_DEL(tx_queue_len); 354 DEBUGFS_DEL(tx_queue_len);
355 DEBUGFS_DEL(tx_queue_status);
334 DEBUGFS_DEL(retry_count); 356 DEBUGFS_DEL(retry_count);
335 DEBUGFS_DEL(excessive_retries); 357 DEBUGFS_DEL(excessive_retries);
336} 358}
@@ -431,6 +453,7 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
431 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); 453 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
432 454
433 DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir); 455 DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
456 DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
434 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); 457 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
435 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); 458 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
436 459
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index 5cb573383ee..5aad56ea715 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -294,6 +294,11 @@ static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
294 goto out; 294 goto out;
295 } 295 }
296 296
297 wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
298 wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
299 wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
300 wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
301
297out: 302out:
298 kfree(config); 303 kfree(config);
299 return ret; 304 return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index b3b25ec885e..269cefb3e7d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -26,6 +26,53 @@
26 26
27#include "wl1251.h" 27#include "wl1251.h"
28 28
29enum {
30 /* best effort/legacy */
31 AC_BE = 0,
32
33 /* background */
34 AC_BK = 1,
35
36 /* video */
37 AC_VI = 2,
38
39 /* voice */
40 AC_VO = 3,
41
42 /* broadcast dummy access category */
43 AC_BCAST = 4,
44
45 NUM_ACCESS_CATEGORIES = 4
46};
47
48/* following are defult values for the IE fields*/
49#define CWMIN_BK 15
50#define CWMIN_BE 15
51#define CWMIN_VI 7
52#define CWMIN_VO 3
53#define CWMAX_BK 1023
54#define CWMAX_BE 63
55#define CWMAX_VI 15
56#define CWMAX_VO 7
57
58/* slot number setting to start transmission at PIFS interval */
59#define AIFS_PIFS 1
60
61/*
62 * slot number setting to start transmission at DIFS interval - normal DCF
63 * access
64 */
65#define AIFS_DIFS 2
66
67#define AIFSN_BK 7
68#define AIFSN_BE 3
69#define AIFSN_VI AIFS_PIFS
70#define AIFSN_VO AIFS_PIFS
71#define TXOP_BK 0
72#define TXOP_BE 0
73#define TXOP_VI 3008
74#define TXOP_VO 1504
75
29int wl1251_hw_init_hwenc_config(struct wl1251 *wl); 76int wl1251_hw_init_hwenc_config(struct wl1251 *wl);
30int wl1251_hw_init_templates_config(struct wl1251 *wl); 77int wl1251_hw_init_templates_config(struct wl1251 *wl);
31int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter); 78int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 2f50a256efa..595f0f94d16 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -395,6 +395,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
395 * the queue here, otherwise the queue will get too long. 395 * the queue here, otherwise the queue will get too long.
396 */ 396 */
397 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) { 397 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
398 wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
398 ieee80211_stop_queues(wl->hw); 399 ieee80211_stop_queues(wl->hw);
399 400
400 /* 401 /*
@@ -510,13 +511,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
510} 511}
511 512
512static int wl1251_op_add_interface(struct ieee80211_hw *hw, 513static int wl1251_op_add_interface(struct ieee80211_hw *hw,
513 struct ieee80211_if_init_conf *conf) 514 struct ieee80211_vif *vif)
514{ 515{
515 struct wl1251 *wl = hw->priv; 516 struct wl1251 *wl = hw->priv;
516 int ret = 0; 517 int ret = 0;
517 518
518 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 519 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
519 conf->type, conf->mac_addr); 520 vif->type, vif->addr);
520 521
521 mutex_lock(&wl->mutex); 522 mutex_lock(&wl->mutex);
522 if (wl->vif) { 523 if (wl->vif) {
@@ -524,9 +525,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
524 goto out; 525 goto out;
525 } 526 }
526 527
527 wl->vif = conf->vif; 528 wl->vif = vif;
528 529
529 switch (conf->type) { 530 switch (vif->type) {
530 case NL80211_IFTYPE_STATION: 531 case NL80211_IFTYPE_STATION:
531 wl->bss_type = BSS_TYPE_STA_BSS; 532 wl->bss_type = BSS_TYPE_STA_BSS;
532 break; 533 break;
@@ -538,8 +539,8 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
538 goto out; 539 goto out;
539 } 540 }
540 541
541 if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) { 542 if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
542 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 543 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
543 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); 544 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
544 ret = wl1251_acx_station_id(wl); 545 ret = wl1251_acx_station_id(wl);
545 if (ret < 0) 546 if (ret < 0)
@@ -552,7 +553,7 @@ out:
552} 553}
553 554
554static void wl1251_op_remove_interface(struct ieee80211_hw *hw, 555static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
555 struct ieee80211_if_init_conf *conf) 556 struct ieee80211_vif *vif)
556{ 557{
557 struct wl1251 *wl = hw->priv; 558 struct wl1251 *wl = hw->priv;
558 559
@@ -562,43 +563,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
562 mutex_unlock(&wl->mutex); 563 mutex_unlock(&wl->mutex);
563} 564}
564 565
565static int wl1251_build_null_data(struct wl1251 *wl) 566static int wl1251_build_qos_null_data(struct wl1251 *wl)
566{ 567{
567 struct wl12xx_null_data_template template; 568 struct ieee80211_qos_hdr template;
568 569
569 if (!is_zero_ether_addr(wl->bssid)) { 570 memset(&template, 0, sizeof(template));
570 memcpy(template.header.da, wl->bssid, ETH_ALEN);
571 memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
572 } else {
573 memset(template.header.da, 0xff, ETH_ALEN);
574 memset(template.header.bssid, 0xff, ETH_ALEN);
575 }
576
577 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
578 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
579 IEEE80211_STYPE_NULLFUNC |
580 IEEE80211_FCTL_TODS);
581
582 return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
583 sizeof(template));
584
585}
586
587static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
588{
589 struct wl12xx_ps_poll_template template;
590 571
591 memcpy(template.bssid, wl->bssid, ETH_ALEN); 572 memcpy(template.addr1, wl->bssid, ETH_ALEN);
592 memcpy(template.ta, wl->mac_addr, ETH_ALEN); 573 memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
574 memcpy(template.addr3, wl->bssid, ETH_ALEN);
593 575
594 /* aid in PS-Poll has its two MSBs each set to 1 */ 576 template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
595 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid); 577 IEEE80211_STYPE_QOS_NULLFUNC |
578 IEEE80211_FCTL_TODS);
596 579
597 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 580 /* FIXME: not sure what priority to use here */
581 template.qos_ctrl = cpu_to_le16(0);
598 582
599 return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template, 583 return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
600 sizeof(template)); 584 sizeof(template));
601
602} 585}
603 586
604static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed) 587static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -640,20 +623,25 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
640 * through the bss_info_changed() hook. 623 * through the bss_info_changed() hook.
641 */ 624 */
642 ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); 625 ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
626 if (ret < 0)
627 goto out_sleep;
643 } else if (!(conf->flags & IEEE80211_CONF_PS) && 628 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
644 wl->psm_requested) { 629 wl->psm_requested) {
645 wl1251_debug(DEBUG_PSM, "psm disabled"); 630 wl1251_debug(DEBUG_PSM, "psm disabled");
646 631
647 wl->psm_requested = false; 632 wl->psm_requested = false;
648 633
649 if (wl->psm) 634 if (wl->psm) {
650 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); 635 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
636 if (ret < 0)
637 goto out_sleep;
638 }
651 } 639 }
652 640
653 if (conf->power_level != wl->power_level) { 641 if (conf->power_level != wl->power_level) {
654 ret = wl1251_acx_tx_power(wl, conf->power_level); 642 ret = wl1251_acx_tx_power(wl, conf->power_level);
655 if (ret < 0) 643 if (ret < 0)
656 goto out; 644 goto out_sleep;
657 645
658 wl->power_level = conf->power_level; 646 wl->power_level = conf->power_level;
659 } 647 }
@@ -864,199 +852,61 @@ out:
864 return ret; 852 return ret;
865} 853}
866 854
867static int wl1251_build_basic_rates(char *rates) 855static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
868{ 856 struct cfg80211_scan_request *req)
869 u8 index = 0;
870
871 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
872 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
873 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
874 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
875
876 return index;
877}
878
879static int wl1251_build_extended_rates(char *rates)
880{ 857{
881 u8 index = 0; 858 struct wl1251 *wl = hw->priv;
882 859 struct sk_buff *skb;
883 rates[index++] = IEEE80211_OFDM_RATE_6MB; 860 size_t ssid_len = 0;
884 rates[index++] = IEEE80211_OFDM_RATE_9MB; 861 u8 *ssid = NULL;
885 rates[index++] = IEEE80211_OFDM_RATE_12MB; 862 int ret;
886 rates[index++] = IEEE80211_OFDM_RATE_18MB;
887 rates[index++] = IEEE80211_OFDM_RATE_24MB;
888 rates[index++] = IEEE80211_OFDM_RATE_36MB;
889 rates[index++] = IEEE80211_OFDM_RATE_48MB;
890 rates[index++] = IEEE80211_OFDM_RATE_54MB;
891
892 return index;
893}
894
895 863
896static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len) 864 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
897{
898 struct wl12xx_probe_req_template template;
899 struct wl12xx_ie_rates *rates;
900 char *ptr;
901 u16 size;
902
903 ptr = (char *)&template;
904 size = sizeof(struct ieee80211_header);
905
906 memset(template.header.da, 0xff, ETH_ALEN);
907 memset(template.header.bssid, 0xff, ETH_ALEN);
908 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
909 template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
910
911 /* IEs */
912 /* SSID */
913 template.ssid.header.id = WLAN_EID_SSID;
914 template.ssid.header.len = ssid_len;
915 if (ssid_len && ssid)
916 memcpy(template.ssid.ssid, ssid, ssid_len);
917 size += sizeof(struct wl12xx_ie_header) + ssid_len;
918 ptr += size;
919
920 /* Basic Rates */
921 rates = (struct wl12xx_ie_rates *)ptr;
922 rates->header.id = WLAN_EID_SUPP_RATES;
923 rates->header.len = wl1251_build_basic_rates(rates->rates);
924 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
925 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
926
927 /* Extended rates */
928 rates = (struct wl12xx_ie_rates *)ptr;
929 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
930 rates->header.len = wl1251_build_extended_rates(rates->rates);
931 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
932
933 wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
934
935 return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
936 size);
937}
938 865
939static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len, 866 if (req->n_ssids) {
940 u8 active_scan, u8 high_prio, u8 num_channels, 867 ssid = req->ssids[0].ssid;
941 u8 probe_requests) 868 ssid_len = req->ssids[0].ssid_len;
942{
943 struct wl1251_cmd_trigger_scan_to *trigger = NULL;
944 struct cmd_scan *params = NULL;
945 int i, ret;
946 u16 scan_options = 0;
947
948 if (wl->scanning)
949 return -EINVAL;
950
951 params = kzalloc(sizeof(*params), GFP_KERNEL);
952 if (!params)
953 return -ENOMEM;
954
955 params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
956 params->params.rx_filter_options =
957 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
958
959 /* High priority scan */
960 if (!active_scan)
961 scan_options |= SCAN_PASSIVE;
962 if (high_prio)
963 scan_options |= SCAN_PRIORITY_HIGH;
964 params->params.scan_options = scan_options;
965
966 params->params.num_channels = num_channels;
967 params->params.num_probe_requests = probe_requests;
968 params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
969 params->params.tid_trigger = 0;
970
971 for (i = 0; i < num_channels; i++) {
972 params->channels[i].min_duration = cpu_to_le32(30000);
973 params->channels[i].max_duration = cpu_to_le32(60000);
974 memset(&params->channels[i].bssid_lsb, 0xff, 4);
975 memset(&params->channels[i].bssid_msb, 0xff, 2);
976 params->channels[i].early_termination = 0;
977 params->channels[i].tx_power_att = 0;
978 params->channels[i].channel = i + 1;
979 memset(params->channels[i].pad, 0, 3);
980 } 869 }
981 870
982 for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++) 871 mutex_lock(&wl->mutex);
983 memset(&params->channels[i], 0,
984 sizeof(struct basic_scan_channel_parameters));
985
986 if (len && ssid) {
987 params->params.ssid_len = len;
988 memcpy(params->params.ssid, ssid, len);
989 } else {
990 params->params.ssid_len = 0;
991 memset(params->params.ssid, 0, 32);
992 }
993 872
994 ret = wl1251_build_probe_req(wl, ssid, len); 873 if (wl->scanning) {
995 if (ret < 0) { 874 wl1251_debug(DEBUG_SCAN, "scan already in progress");
996 wl1251_error("PROBE request template failed"); 875 ret = -EINVAL;
997 goto out; 876 goto out;
998 } 877 }
999 878
1000 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); 879 ret = wl1251_ps_elp_wakeup(wl);
1001 if (!trigger) 880 if (ret < 0)
1002 goto out; 881 goto out;
1003 882
1004 trigger->timeout = 0; 883 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
1005 884 req->ie, req->ie_len);
1006 ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, 885 if (!skb) {
1007 sizeof(*trigger)); 886 ret = -ENOMEM;
1008 if (ret < 0) {
1009 wl1251_error("trigger scan to failed for hw scan");
1010 goto out; 887 goto out;
1011 } 888 }
1012 889
1013 wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 890 ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
1014 891 skb->len);
1015 wl->scanning = true; 892 dev_kfree_skb(skb);
893 if (ret < 0)
894 goto out_sleep;
1016 895
1017 ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); 896 ret = wl1251_cmd_trigger_scan_to(wl, 0);
1018 if (ret < 0) 897 if (ret < 0)
1019 wl1251_error("SCAN failed"); 898 goto out_sleep;
1020 899
1021 wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params)); 900 wl->scanning = true;
1022 901
1023 if (params->header.status != CMD_STATUS_SUCCESS) { 902 ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
1024 wl1251_error("TEST command answer error: %d", 903 req->n_channels, WL1251_SCAN_NUM_PROBES);
1025 params->header.status); 904 if (ret < 0) {
1026 wl->scanning = false; 905 wl->scanning = false;
1027 ret = -EIO; 906 goto out_sleep;
1028 goto out;
1029 }
1030
1031out:
1032 kfree(params);
1033 return ret;
1034
1035}
1036
1037static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
1038 struct cfg80211_scan_request *req)
1039{
1040 struct wl1251 *wl = hw->priv;
1041 int ret;
1042 u8 *ssid = NULL;
1043 size_t ssid_len = 0;
1044
1045 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
1046
1047 if (req->n_ssids) {
1048 ssid = req->ssids[0].ssid;
1049 ssid_len = req->ssids[0].ssid_len;
1050 } 907 }
1051 908
1052 mutex_lock(&wl->mutex); 909out_sleep:
1053
1054 ret = wl1251_ps_elp_wakeup(wl);
1055 if (ret < 0)
1056 goto out;
1057
1058 ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
1059
1060 wl1251_ps_elp_sleep(wl); 910 wl1251_ps_elp_sleep(wl);
1061 911
1062out: 912out:
@@ -1095,7 +945,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1095{ 945{
1096 enum wl1251_cmd_ps_mode mode; 946 enum wl1251_cmd_ps_mode mode;
1097 struct wl1251 *wl = hw->priv; 947 struct wl1251 *wl = hw->priv;
1098 struct sk_buff *beacon; 948 struct sk_buff *beacon, *skb;
1099 int ret; 949 int ret;
1100 950
1101 wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 951 wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1109,7 +959,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1109 if (changed & BSS_CHANGED_BSSID) { 959 if (changed & BSS_CHANGED_BSSID) {
1110 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 960 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1111 961
1112 ret = wl1251_build_null_data(wl); 962 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
963 if (!skb)
964 goto out_sleep;
965
966 ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
967 skb->data, skb->len);
968 dev_kfree_skb(skb);
969 if (ret < 0)
970 goto out_sleep;
971
972 ret = wl1251_build_qos_null_data(wl);
1113 if (ret < 0) 973 if (ret < 0)
1114 goto out; 974 goto out;
1115 975
@@ -1130,7 +990,14 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1130 wl->dtim_period); 990 wl->dtim_period);
1131 wl->aid = bss_conf->aid; 991 wl->aid = bss_conf->aid;
1132 992
1133 ret = wl1251_build_ps_poll(wl, wl->aid); 993 skb = ieee80211_pspoll_get(wl->hw, wl->vif);
994 if (!skb)
995 goto out_sleep;
996
997 ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
998 skb->data,
999 skb->len);
1000 dev_kfree_skb(skb);
1134 if (ret < 0) 1001 if (ret < 0)
1135 goto out_sleep; 1002 goto out_sleep;
1136 1003
@@ -1176,7 +1043,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1176 ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE); 1043 ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
1177 if (ret < 0) { 1044 if (ret < 0) {
1178 wl1251_warning("Set ctsprotect failed %d", ret); 1045 wl1251_warning("Set ctsprotect failed %d", ret);
1179 goto out; 1046 goto out_sleep;
1180 } 1047 }
1181 } 1048 }
1182 1049
@@ -1187,7 +1054,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1187 1054
1188 if (ret < 0) { 1055 if (ret < 0) {
1189 dev_kfree_skb(beacon); 1056 dev_kfree_skb(beacon);
1190 goto out; 1057 goto out_sleep;
1191 } 1058 }
1192 1059
1193 ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data, 1060 ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1196,13 +1063,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1196 dev_kfree_skb(beacon); 1063 dev_kfree_skb(beacon);
1197 1064
1198 if (ret < 0) 1065 if (ret < 0)
1199 goto out; 1066 goto out_sleep;
1200 1067
1201 ret = wl1251_join(wl, wl->bss_type, wl->beacon_int, 1068 ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
1202 wl->channel, wl->dtim_period); 1069 wl->channel, wl->dtim_period);
1203 1070
1204 if (ret < 0) 1071 if (ret < 0)
1205 goto out; 1072 goto out_sleep;
1206 } 1073 }
1207 1074
1208out_sleep: 1075out_sleep:
@@ -1273,6 +1140,48 @@ static struct ieee80211_channel wl1251_channels[] = {
1273 { .hw_value = 13, .center_freq = 2472}, 1140 { .hw_value = 13, .center_freq = 2472},
1274}; 1141};
1275 1142
1143static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1144 const struct ieee80211_tx_queue_params *params)
1145{
1146 enum wl1251_acx_ps_scheme ps_scheme;
1147 struct wl1251 *wl = hw->priv;
1148 int ret;
1149
1150 mutex_lock(&wl->mutex);
1151
1152 wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
1153
1154 ret = wl1251_ps_elp_wakeup(wl);
1155 if (ret < 0)
1156 goto out;
1157
1158 ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
1159 params->cw_min, params->cw_max,
1160 params->aifs, params->txop);
1161 if (ret < 0)
1162 goto out_sleep;
1163
1164 if (params->uapsd)
1165 ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
1166 else
1167 ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
1168
1169 ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
1170 CHANNEL_TYPE_EDCF,
1171 wl1251_tx_get_queue(queue), ps_scheme,
1172 WL1251_ACX_ACK_POLICY_LEGACY);
1173 if (ret < 0)
1174 goto out_sleep;
1175
1176out_sleep:
1177 wl1251_ps_elp_sleep(wl);
1178
1179out:
1180 mutex_unlock(&wl->mutex);
1181
1182 return ret;
1183}
1184
1276/* can't be const, mac80211 writes to this */ 1185/* can't be const, mac80211 writes to this */
1277static struct ieee80211_supported_band wl1251_band_2ghz = { 1186static struct ieee80211_supported_band wl1251_band_2ghz = {
1278 .channels = wl1251_channels, 1187 .channels = wl1251_channels,
@@ -1293,6 +1202,7 @@ static const struct ieee80211_ops wl1251_ops = {
1293 .hw_scan = wl1251_op_hw_scan, 1202 .hw_scan = wl1251_op_hw_scan,
1294 .bss_info_changed = wl1251_op_bss_info_changed, 1203 .bss_info_changed = wl1251_op_bss_info_changed,
1295 .set_rts_threshold = wl1251_op_set_rts_threshold, 1204 .set_rts_threshold = wl1251_op_set_rts_threshold,
1205 .conf_tx = wl1251_op_conf_tx,
1296}; 1206};
1297 1207
1298static int wl1251_register_hw(struct wl1251 *wl) 1208static int wl1251_register_hw(struct wl1251 *wl)
@@ -1332,12 +1242,15 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1332 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1242 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1333 IEEE80211_HW_NOISE_DBM | 1243 IEEE80211_HW_NOISE_DBM |
1334 IEEE80211_HW_SUPPORTS_PS | 1244 IEEE80211_HW_SUPPORTS_PS |
1335 IEEE80211_HW_BEACON_FILTER; 1245 IEEE80211_HW_BEACON_FILTER |
1246 IEEE80211_HW_SUPPORTS_UAPSD;
1336 1247
1337 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1248 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1338 wl->hw->wiphy->max_scan_ssids = 1; 1249 wl->hw->wiphy->max_scan_ssids = 1;
1339 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; 1250 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
1340 1251
1252 wl->hw->queues = 4;
1253
1341 ret = wl1251_register_hw(wl); 1254 ret = wl1251_register_hw(wl);
1342 if (ret) 1255 if (ret)
1343 goto out; 1256 goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 9931b197ff7..851dfb65e47 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -26,7 +26,8 @@
26#include "wl1251_cmd.h" 26#include "wl1251_cmd.h"
27#include "wl1251_io.h" 27#include "wl1251_io.h"
28 28
29#define WL1251_WAKEUP_TIMEOUT 2000 29/* in ms */
30#define WL1251_WAKEUP_TIMEOUT 100
30 31
31void wl1251_elp_work(struct work_struct *work) 32void wl1251_elp_work(struct work_struct *work)
32{ 33{
@@ -67,7 +68,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
67 68
68int wl1251_ps_elp_wakeup(struct wl1251 *wl) 69int wl1251_ps_elp_wakeup(struct wl1251 *wl)
69{ 70{
70 unsigned long timeout; 71 unsigned long timeout, start;
71 u32 elp_reg; 72 u32 elp_reg;
72 73
73 if (!wl->elp) 74 if (!wl->elp)
@@ -75,6 +76,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
75 76
76 wl1251_debug(DEBUG_PSM, "waking up chip from elp"); 77 wl1251_debug(DEBUG_PSM, "waking up chip from elp");
77 78
79 start = jiffies;
78 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT); 80 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
79 81
80 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 82 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
@@ -95,8 +97,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
95 } 97 }
96 98
97 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", 99 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
98 jiffies_to_msecs(jiffies) - 100 jiffies_to_msecs(jiffies - start));
99 (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT));
100 101
101 wl->elp = false; 102 wl->elp = false;
102 103
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index f84cc89cbff..b56732226cc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -126,7 +126,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
126 if (wl->rx_current_buffer) 126 if (wl->rx_current_buffer)
127 rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size; 127 rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
128 128
129 skb = dev_alloc_skb(length); 129 skb = __dev_alloc_skb(length, GFP_KERNEL);
130 if (!skb) { 130 if (!skb) {
131 wl1251_error("Couldn't allocate RX frame"); 131 wl1251_error("Couldn't allocate RX frame");
132 return; 132 return;
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index f8597061584..c8223185efd 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -167,8 +167,7 @@ static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
167 tx_hdr->expiry_time = cpu_to_le32(1 << 16); 167 tx_hdr->expiry_time = cpu_to_le32(1 << 16);
168 tx_hdr->id = id; 168 tx_hdr->id = id;
169 169
170 /* FIXME: how to get the correct queue id? */ 170 tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
171 tx_hdr->xmit_queue = 0;
172 171
173 wl1251_tx_control(tx_hdr, control, fc); 172 wl1251_tx_control(tx_hdr, control, fc);
174 wl1251_tx_frag_block_num(tx_hdr); 173 wl1251_tx_frag_block_num(tx_hdr);
@@ -220,6 +219,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
220 /* align the buffer on a 4-byte boundary */ 219 /* align the buffer on a 4-byte boundary */
221 skb_reserve(skb, offset); 220 skb_reserve(skb, offset);
222 memmove(skb->data, src, skb->len); 221 memmove(skb->data, src, skb->len);
222 tx_hdr = (struct tx_double_buffer_desc *) skb->data;
223 } else { 223 } else {
224 wl1251_info("No handler, fixme!"); 224 wl1251_info("No handler, fixme!");
225 return -EINVAL; 225 return -EINVAL;
@@ -237,8 +237,9 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
237 237
238 wl1251_mem_write(wl, addr, skb->data, len); 238 wl1251_mem_write(wl, addr, skb->data, len);
239 239
240 wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x", 240 wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
241 tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate); 241 "queue %d", tx_hdr->id, skb, tx_hdr->length,
242 tx_hdr->rate, tx_hdr->xmit_queue);
242 243
243 return 0; 244 return 0;
244} 245}
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 7c1c1665c81..55856c6bb97 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -26,6 +26,7 @@
26#define __WL1251_TX_H__ 26#define __WL1251_TX_H__
27 27
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include "wl1251_acx.h"
29 30
30/* 31/*
31 * 32 *
@@ -209,6 +210,22 @@ struct tx_result {
209 u8 done_2; 210 u8 done_2;
210} __attribute__ ((packed)); 211} __attribute__ ((packed));
211 212
213static inline int wl1251_tx_get_queue(int queue)
214{
215 switch (queue) {
216 case 0:
217 return QOS_AC_VO;
218 case 1:
219 return QOS_AC_VI;
220 case 2:
221 return QOS_AC_BE;
222 case 3:
223 return QOS_AC_BK;
224 default:
225 return QOS_AC_BE;
226 }
227}
228
212void wl1251_tx_work(struct work_struct *work); 229void wl1251_tx_work(struct work_struct *work);
213void wl1251_tx_complete(struct wl1251 *wl); 230void wl1251_tx_complete(struct wl1251 *wl);
214void wl1251_tx_flush(struct wl1251 *wl); 231void wl1251_tx_flush(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 94359b1a861..d0938db043b 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -107,10 +107,9 @@ enum {
107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
109 109
110#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
111
112#define WL1271_FW_NAME "wl1271-fw.bin" 110#define WL1271_FW_NAME "wl1271-fw.bin"
113#define WL1271_NVS_NAME "wl1271-nvs.bin" 111#define WL1271_NVS_NAME "wl1271-nvs.bin"
112#define WL1271_NVS_LEN 468
114 113
115/* 114/*
116 * Enable/disable 802.11a support for WL1273 115 * Enable/disable 802.11a support for WL1273
@@ -276,6 +275,7 @@ struct wl1271_debugfs {
276 275
277 struct dentry *retry_count; 276 struct dentry *retry_count;
278 struct dentry *excessive_retries; 277 struct dentry *excessive_retries;
278 struct dentry *gpio_power;
279}; 279};
280 280
281#define NUM_TX_QUEUES 4 281#define NUM_TX_QUEUES 4
@@ -322,6 +322,17 @@ struct wl1271 {
322 enum wl1271_state state; 322 enum wl1271_state state;
323 struct mutex mutex; 323 struct mutex mutex;
324 324
325#define WL1271_FLAG_STA_RATES_CHANGED (0)
326#define WL1271_FLAG_STA_ASSOCIATED (1)
327#define WL1271_FLAG_JOINED (2)
328#define WL1271_FLAG_GPIO_POWER (3)
329#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
330#define WL1271_FLAG_SCANNING (5)
331#define WL1271_FLAG_IN_ELP (6)
332#define WL1271_FLAG_PSM (7)
333#define WL1271_FLAG_PSM_REQUESTED (8)
334 unsigned long flags;
335
325 struct wl1271_partition_set part; 336 struct wl1271_partition_set part;
326 337
327 struct wl1271_chip chip; 338 struct wl1271_chip chip;
@@ -359,7 +370,6 @@ struct wl1271 {
359 370
360 /* Frames scheduled for transmission, not handled yet */ 371 /* Frames scheduled for transmission, not handled yet */
361 struct sk_buff_head tx_queue; 372 struct sk_buff_head tx_queue;
362 bool tx_queue_stopped;
363 373
364 struct work_struct tx_work; 374 struct work_struct tx_work;
365 375
@@ -387,14 +397,15 @@ struct wl1271 {
387 u32 mbox_ptr[2]; 397 u32 mbox_ptr[2];
388 398
389 /* Are we currently scanning */ 399 /* Are we currently scanning */
390 bool scanning;
391 struct wl1271_scan scan; 400 struct wl1271_scan scan;
392 401
393 /* Our association ID */ 402 /* Our association ID */
394 u16 aid; 403 u16 aid;
395 404
396 /* currently configured rate set */ 405 /* currently configured rate set */
406 u32 sta_rate_set;
397 u32 basic_rate_set; 407 u32 basic_rate_set;
408 u32 rate_set;
398 409
399 /* The current band */ 410 /* The current band */
400 enum ieee80211_band band; 411 enum ieee80211_band band;
@@ -405,18 +416,9 @@ struct wl1271 {
405 unsigned int rx_config; 416 unsigned int rx_config;
406 unsigned int rx_filter; 417 unsigned int rx_filter;
407 418
408 /* is firmware in elp mode */
409 bool elp;
410
411 struct completion *elp_compl; 419 struct completion *elp_compl;
412 struct delayed_work elp_work; 420 struct delayed_work elp_work;
413 421
414 /* we can be in psm, but not in elp, we have to differentiate */
415 bool psm;
416
417 /* PSM mode requested */
418 bool psm_requested;
419
420 /* retry counter for PSM entries */ 422 /* retry counter for PSM entries */
421 u8 psm_entry_retry; 423 u8 psm_entry_retry;
422 424
@@ -435,9 +437,6 @@ struct wl1271 {
435 437
436 struct ieee80211_vif *vif; 438 struct ieee80211_vif *vif;
437 439
438 /* Used for a workaround to send disconnect before rejoining */
439 bool joined;
440
441 /* Current chipset configuration */ 440 /* Current chipset configuration */
442 struct conf_drv_settings conf; 441 struct conf_drv_settings conf;
443 442
@@ -455,7 +454,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
455 454
456#define WL1271_TX_QUEUE_MAX_LENGTH 20 455#define WL1271_TX_QUEUE_MAX_LENGTH 20
457 456
458/* WL1271 needs a 200ms sleep after power on */ 457/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
458 on in case is has been shut down shortly before */
459#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
459#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ 460#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
460 461
461static inline bool wl1271_11a_enabled(void) 462static inline bool wl1271_11a_enabled(void)
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 5cc89bbdac7..0b343484347 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -390,6 +390,35 @@ out:
390 return ret; 390 return ret;
391} 391}
392 392
393int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
394{
395 struct acx_dco_itrim_params *dco;
396 struct conf_itrim_settings *c = &wl->conf.itrim;
397 int ret;
398
399 wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
400
401 dco = kzalloc(sizeof(*dco), GFP_KERNEL);
402 if (!dco) {
403 ret = -ENOMEM;
404 goto out;
405 }
406
407 dco->enable = c->enable;
408 dco->timeout = cpu_to_le32(c->timeout);
409
410 ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
411 dco, sizeof(*dco));
412 if (ret < 0) {
413 wl1271_warning("failed to set dco itrim parameters: %d", ret);
414 goto out;
415 }
416
417out:
418 kfree(dco);
419 return ret;
420}
421
393int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) 422int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
394{ 423{
395 struct acx_beacon_filter_option *beacon_filter = NULL; 424 struct acx_beacon_filter_option *beacon_filter = NULL;
@@ -758,10 +787,11 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
758 return 0; 787 return 0;
759} 788}
760 789
761int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates) 790int wl1271_acx_rate_policies(struct wl1271 *wl)
762{ 791{
763 struct acx_rate_policy *acx; 792 struct acx_rate_policy *acx;
764 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf; 793 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
794 int idx = 0;
765 int ret = 0; 795 int ret = 0;
766 796
767 wl1271_debug(DEBUG_ACX, "acx rate policies"); 797 wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -773,12 +803,21 @@ int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
773 goto out; 803 goto out;
774 } 804 }
775 805
776 /* configure one default (one-size-fits-all) rate class */ 806 /* configure one basic rate class */
777 acx->rate_class_cnt = cpu_to_le32(1); 807 idx = ACX_TX_BASIC_RATE;
778 acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates); 808 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
779 acx->rate_class[0].short_retry_limit = c->short_retry_limit; 809 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
780 acx->rate_class[0].long_retry_limit = c->long_retry_limit; 810 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
781 acx->rate_class[0].aflags = c->aflags; 811 acx->rate_class[idx].aflags = c->aflags;
812
813 /* configure one AP supported rate class */
814 idx = ACX_TX_AP_FULL_RATE;
815 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
816 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
817 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
818 acx->rate_class[idx].aflags = c->aflags;
819
820 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
782 821
783 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 822 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
784 if (ret < 0) { 823 if (ret < 0) {
@@ -1012,59 +1051,6 @@ out:
1012 return ret; 1051 return ret;
1013} 1052}
1014 1053
1015int wl1271_acx_smart_reflex(struct wl1271 *wl)
1016{
1017 struct acx_smart_reflex_state *sr_state = NULL;
1018 struct acx_smart_reflex_config_params *sr_param = NULL;
1019 int i, ret;
1020
1021 wl1271_debug(DEBUG_ACX, "acx smart reflex");
1022
1023 sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
1024 if (!sr_param) {
1025 ret = -ENOMEM;
1026 goto out;
1027 }
1028
1029 for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
1030 struct conf_mart_reflex_err_table *e =
1031 &(wl->conf.init.sr_err_tbl[i]);
1032
1033 sr_param->error_table[i].len = e->len;
1034 sr_param->error_table[i].upper_limit = e->upper_limit;
1035 memcpy(sr_param->error_table[i].values, e->values, e->len);
1036 }
1037
1038 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
1039 sr_param, sizeof(*sr_param));
1040 if (ret < 0) {
1041 wl1271_warning("failed to set smart reflex params: %d", ret);
1042 goto out;
1043 }
1044
1045 sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
1046 if (!sr_state) {
1047 ret = -ENOMEM;
1048 goto out;
1049 }
1050
1051 /* enable smart reflex */
1052 sr_state->enable = wl->conf.init.sr_enable;
1053
1054 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
1055 sr_state, sizeof(*sr_state));
1056 if (ret < 0) {
1057 wl1271_warning("failed to set smart reflex params: %d", ret);
1058 goto out;
1059 }
1060
1061out:
1062 kfree(sr_state);
1063 kfree(sr_param);
1064 return ret;
1065
1066}
1067
1068int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) 1054int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1069{ 1055{
1070 struct wl1271_acx_bet_enable *acx = NULL; 1056 struct wl1271_acx_bet_enable *acx = NULL;
@@ -1132,3 +1118,31 @@ out:
1132 kfree(acx); 1118 kfree(acx);
1133 return ret; 1119 return ret;
1134} 1120}
1121
1122int wl1271_acx_pm_config(struct wl1271 *wl)
1123{
1124 struct wl1271_acx_pm_config *acx = NULL;
1125 struct conf_pm_config_settings *c = &wl->conf.pm_config;
1126 int ret = 0;
1127
1128 wl1271_debug(DEBUG_ACX, "acx pm config");
1129
1130 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1131 if (!acx) {
1132 ret = -ENOMEM;
1133 goto out;
1134 }
1135
1136 acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
1137 acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
1138
1139 ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
1140 if (ret < 0) {
1141 wl1271_warning("acx pm config failed: %d", ret);
1142 goto out;
1143 }
1144
1145out:
1146 kfree(acx);
1147 return ret;
1148}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 2ce0a812854..1bb63af64f0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -415,23 +415,12 @@ struct acx_bt_wlan_coex {
415 u8 pad[3]; 415 u8 pad[3];
416} __attribute__ ((packed)); 416} __attribute__ ((packed));
417 417
418struct acx_smart_reflex_state { 418struct acx_dco_itrim_params {
419 struct acx_header header; 419 struct acx_header header;
420 420
421 u8 enable; 421 u8 enable;
422 u8 padding[3]; 422 u8 padding[3];
423} __attribute__ ((packed)); 423 __le32 timeout;
424
425struct smart_reflex_err_table {
426 u8 len;
427 s8 upper_limit;
428 s8 values[14];
429} __attribute__ ((packed));
430
431struct acx_smart_reflex_config_params {
432 struct acx_header header;
433
434 struct smart_reflex_err_table error_table[3];
435} __attribute__ ((packed)); 424} __attribute__ ((packed));
436 425
437#define PTA_ANTENNA_TYPE_DEF (0) 426#define PTA_ANTENNA_TYPE_DEF (0)
@@ -837,6 +826,9 @@ struct acx_rate_class {
837 u8 reserved; 826 u8 reserved;
838}; 827};
839 828
829#define ACX_TX_BASIC_RATE 0
830#define ACX_TX_AP_FULL_RATE 1
831#define ACX_TX_RATE_POLICY_CNT 2
840struct acx_rate_policy { 832struct acx_rate_policy {
841 struct acx_header header; 833 struct acx_header header;
842 834
@@ -877,8 +869,8 @@ struct acx_tx_config_options {
877 __le16 tx_compl_threshold; /* number of packets */ 869 __le16 tx_compl_threshold; /* number of packets */
878} __attribute__ ((packed)); 870} __attribute__ ((packed));
879 871
880#define ACX_RX_MEM_BLOCKS 64 872#define ACX_RX_MEM_BLOCKS 70
881#define ACX_TX_MIN_MEM_BLOCKS 64 873#define ACX_TX_MIN_MEM_BLOCKS 40
882#define ACX_TX_DESCRIPTORS 32 874#define ACX_TX_DESCRIPTORS 32
883#define ACX_NUM_SSID_PROFILES 1 875#define ACX_NUM_SSID_PROFILES 1
884 876
@@ -969,6 +961,13 @@ struct wl1271_acx_arp_filter {
969 used. */ 961 used. */
970} __attribute__((packed)); 962} __attribute__((packed));
971 963
964struct wl1271_acx_pm_config {
965 struct acx_header header;
966
967 __le32 host_clk_settling_time;
968 u8 host_fast_wakeup_support;
969 u8 padding[3];
970} __attribute__ ((packed));
972 971
973enum { 972enum {
974 ACX_WAKE_UP_CONDITIONS = 0x0002, 973 ACX_WAKE_UP_CONDITIONS = 0x0002,
@@ -1027,13 +1026,13 @@ enum {
1027 ACX_HT_BSS_OPERATION = 0x0058, 1026 ACX_HT_BSS_OPERATION = 0x0058,
1028 ACX_COEX_ACTIVITY = 0x0059, 1027 ACX_COEX_ACTIVITY = 0x0059,
1029 ACX_SET_SMART_REFLEX_DEBUG = 0x005A, 1028 ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
1030 ACX_SET_SMART_REFLEX_STATE = 0x005B, 1029 ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
1031 ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
1032 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1030 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1033 DOT11_CUR_TX_PWR = 0x100D, 1031 DOT11_CUR_TX_PWR = 0x100D,
1034 DOT11_RX_DOT11_MODE = 0x1012, 1032 DOT11_RX_DOT11_MODE = 0x1012,
1035 DOT11_RTS_THRESHOLD = 0x1013, 1033 DOT11_RTS_THRESHOLD = 0x1013,
1036 DOT11_GROUP_ADDRESS_TBL = 0x1014, 1034 DOT11_GROUP_ADDRESS_TBL = 0x1014,
1035 ACX_PM_CONFIG = 0x1016,
1037 1036
1038 MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, 1037 MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
1039 1038
@@ -1056,6 +1055,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
1056 void *mc_list, u32 mc_list_len); 1055 void *mc_list, u32 mc_list_len);
1057int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1056int wl1271_acx_service_period_timeout(struct wl1271 *wl);
1058int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); 1057int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1058int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); 1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl); 1061int wl1271_acx_conn_monit_params(struct wl1271 *wl);
@@ -1069,7 +1069,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1069int wl1271_acx_cts_protect(struct wl1271 *wl, 1069int wl1271_acx_cts_protect(struct wl1271 *wl,
1070 enum acx_ctsprotect_type ctsprotect); 1070 enum acx_ctsprotect_type ctsprotect);
1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1072int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates); 1072int wl1271_acx_rate_policies(struct wl1271 *wl);
1073int wl1271_acx_ac_cfg(struct wl1271 *wl); 1073int wl1271_acx_ac_cfg(struct wl1271 *wl);
1074int wl1271_acx_tid_cfg(struct wl1271 *wl); 1074int wl1271_acx_tid_cfg(struct wl1271 *wl);
1075int wl1271_acx_frag_threshold(struct wl1271 *wl); 1075int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1081,5 +1081,6 @@ int wl1271_acx_smart_reflex(struct wl1271 *wl);
1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); 1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, 1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1083 u8 version); 1083 u8 version);
1084int wl1271_acx_pm_config(struct wl1271 *wl);
1084 1085
1085#endif /* __WL1271_ACX_H__ */ 1086#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index b7c96454cca..e803b876f3f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -225,9 +225,15 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
225 if (nvs == NULL) 225 if (nvs == NULL)
226 return -ENODEV; 226 return -ENODEV;
227 227
228 if (wl->nvs_len < WL1271_NVS_LEN)
229 return -EINVAL;
230
228 nvs_ptr = nvs; 231 nvs_ptr = nvs;
229 232
230 nvs_len = wl->nvs_len; 233 /* only the first part of the NVS needs to be uploaded */
234 nvs_len = WL1271_NVS_LEN;
235
236 /* FIXME: read init settings from the remaining part of the NVS */
231 237
232 /* Update the device MAC address into the nvs */ 238 /* Update the device MAC address into the nvs */
233 nvs[11] = wl->mac_addr[0]; 239 nvs[11] = wl->mac_addr[0];
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index c3385b3d246..a74259bb596 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -209,6 +209,26 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
209 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer; 209 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
210 gen_parms->settings = g->settings; 210 gen_parms->settings = g->settings;
211 211
212 gen_parms->sr_state = g->sr_state;
213
214 memcpy(gen_parms->srf1,
215 g->srf1,
216 CONF_MAX_SMART_REFLEX_PARAMS);
217 memcpy(gen_parms->srf2,
218 g->srf2,
219 CONF_MAX_SMART_REFLEX_PARAMS);
220 memcpy(gen_parms->srf3,
221 g->srf3,
222 CONF_MAX_SMART_REFLEX_PARAMS);
223 memcpy(gen_parms->sr_debug_table,
224 g->sr_debug_table,
225 CONF_MAX_SMART_REFLEX_PARAMS);
226
227 gen_parms->sr_sen_n_p = g->sr_sen_n_p;
228 gen_parms->sr_sen_n_p_gain = g->sr_sen_n_p_gain;
229 gen_parms->sr_sen_nrn = g->sr_sen_nrn;
230 gen_parms->sr_sen_prn = g->sr_sen_prn;
231
212 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 232 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
213 if (ret < 0) 233 if (ret < 0)
214 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); 234 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -253,6 +273,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
253 CONF_NUMBER_OF_RATE_GROUPS); 273 CONF_NUMBER_OF_RATE_GROUPS);
254 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded, 274 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
255 CONF_NUMBER_OF_RATE_GROUPS); 275 CONF_NUMBER_OF_RATE_GROUPS);
276 memcpy(radio_parms->tx_rate_limits_extreme, r->tx_rate_limits_extreme,
277 CONF_NUMBER_OF_RATE_GROUPS);
256 278
257 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b, 279 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
258 CONF_NUMBER_OF_CHANNELS_2_4); 280 CONF_NUMBER_OF_CHANNELS_2_4);
@@ -263,6 +285,11 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
263 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS); 285 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
264 286
265 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss; 287 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
288 radio_parms->degraded_low_to_normal_threshold =
289 r->degraded_low_to_normal_threshold;
290 radio_parms->degraded_normal_to_high_threshold =
291 r->degraded_normal_to_high_threshold;
292
266 293
267 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++) 294 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
268 radio_parms->tx_ref_pd_voltage_5[i] = 295 radio_parms->tx_ref_pd_voltage_5[i] =
@@ -275,6 +302,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
275 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS); 302 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
276 memcpy(radio_parms->tx_rate_limits_degraded_5, 303 memcpy(radio_parms->tx_rate_limits_degraded_5,
277 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS); 304 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
305 memcpy(radio_parms->tx_rate_limits_extreme_5,
306 r->tx_rate_limits_extreme_5, CONF_NUMBER_OF_RATE_GROUPS);
278 memcpy(radio_parms->tx_channel_limits_ofdm_5, 307 memcpy(radio_parms->tx_channel_limits_ofdm_5,
279 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5); 308 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
280 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5, 309 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
@@ -283,6 +312,10 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
283 CONF_NUMBER_OF_RATE_GROUPS); 312 CONF_NUMBER_OF_RATE_GROUPS);
284 memcpy(radio_parms->rx_fem_insertion_loss_5, 313 memcpy(radio_parms->rx_fem_insertion_loss_5,
285 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5); 314 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
315 radio_parms->degraded_low_to_normal_threshold_5 =
316 r->degraded_low_to_normal_threshold_5;
317 radio_parms->degraded_normal_to_high_threshold_5 =
318 r->degraded_normal_to_high_threshold_5;
286 319
287 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", 320 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
288 radio_parms, sizeof(*radio_parms)); 321 radio_parms, sizeof(*radio_parms));
@@ -311,19 +344,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
311 do_cal = false; 344 do_cal = false;
312 } 345 }
313 346
314 /* FIXME: This is a workaround, because with the current stack, we
315 * cannot know when we have disassociated. So, if we have already
316 * joined, we disconnect before joining again. */
317 if (wl->joined) {
318 ret = wl1271_cmd_disconnect(wl);
319 if (ret < 0) {
320 wl1271_error("failed to disconnect before rejoining");
321 goto out;
322 }
323
324 wl->joined = false;
325 }
326
327 join = kzalloc(sizeof(*join), GFP_KERNEL); 347 join = kzalloc(sizeof(*join), GFP_KERNEL);
328 if (!join) { 348 if (!join) {
329 ret = -ENOMEM; 349 ret = -ENOMEM;
@@ -388,8 +408,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
388 goto out_free; 408 goto out_free;
389 } 409 }
390 410
391 wl->joined = true;
392
393 /* 411 /*
394 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 412 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
395 * simplify locking we just sleep instead, for now 413 * simplify locking we just sleep instead, for now
@@ -487,7 +505,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
487 return 0; 505 return 0;
488} 506}
489 507
490int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable) 508int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
491{ 509{
492 struct cmd_enabledisable_path *cmd; 510 struct cmd_enabledisable_path *cmd;
493 int ret; 511 int ret;
@@ -501,7 +519,8 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
501 goto out; 519 goto out;
502 } 520 }
503 521
504 cmd->channel = channel; 522 /* the channel here is only used for calibration, so hardcoded to 1 */
523 cmd->channel = 1;
505 524
506 if (enable) { 525 if (enable) {
507 cmd_rx = CMD_ENABLE_RX; 526 cmd_rx = CMD_ENABLE_RX;
@@ -514,22 +533,22 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
514 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0); 533 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
515 if (ret < 0) { 534 if (ret < 0) {
516 wl1271_error("rx %s cmd for channel %d failed", 535 wl1271_error("rx %s cmd for channel %d failed",
517 enable ? "start" : "stop", channel); 536 enable ? "start" : "stop", cmd->channel);
518 goto out; 537 goto out;
519 } 538 }
520 539
521 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", 540 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
522 enable ? "start" : "stop", channel); 541 enable ? "start" : "stop", cmd->channel);
523 542
524 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0); 543 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
525 if (ret < 0) { 544 if (ret < 0) {
526 wl1271_error("tx %s cmd for channel %d failed", 545 wl1271_error("tx %s cmd for channel %d failed",
527 enable ? "start" : "stop", channel); 546 enable ? "start" : "stop", cmd->channel);
528 return ret; 547 return ret;
529 } 548 }
530 549
531 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", 550 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
532 enable ? "start" : "stop", channel); 551 enable ? "start" : "stop", cmd->channel);
533 552
534out: 553out:
535 kfree(cmd); 554 kfree(cmd);
@@ -636,7 +655,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
636 channels = wl->hw->wiphy->bands[ieee_band]->channels; 655 channels = wl->hw->wiphy->bands[ieee_band]->channels;
637 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels; 656 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
638 657
639 if (wl->scanning) 658 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
640 return -EINVAL; 659 return -EINVAL;
641 660
642 params = kzalloc(sizeof(*params), GFP_KERNEL); 661 params = kzalloc(sizeof(*params), GFP_KERNEL);
@@ -711,7 +730,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
711 730
712 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 731 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
713 732
714 wl->scanning = true; 733 set_bit(WL1271_FLAG_SCANNING, &wl->flags);
715 if (wl1271_11a_enabled()) { 734 if (wl1271_11a_enabled()) {
716 wl->scan.state = band; 735 wl->scan.state = band;
717 if (band == WL1271_SCAN_BAND_DUAL) { 736 if (band == WL1271_SCAN_BAND_DUAL) {
@@ -729,7 +748,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
729 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0); 748 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
730 if (ret < 0) { 749 if (ret < 0) {
731 wl1271_error("SCAN failed"); 750 wl1271_error("SCAN failed");
732 wl->scanning = false; 751 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
733 goto out; 752 goto out;
734 } 753 }
735 754
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index b4fa4acb922..09fe91297ac 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -37,7 +37,7 @@ int wl1271_cmd_join(struct wl1271 *wl);
37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
40int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable); 40int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); 41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
43 size_t len); 43 size_t len);
@@ -437,6 +437,21 @@ struct wl1271_general_parms_cmd {
437 u8 tx_bip_fem_autodetect; 437 u8 tx_bip_fem_autodetect;
438 u8 tx_bip_fem_manufacturer; 438 u8 tx_bip_fem_manufacturer;
439 u8 settings; 439 u8 settings;
440
441 u8 sr_state;
442
443 s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
444 s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
445 s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
446
447 s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
448
449 u8 sr_sen_n_p;
450 u8 sr_sen_n_p_gain;
451 u8 sr_sen_nrn;
452 u8 sr_sen_prn;
453
454 u8 padding[3];
440} __attribute__ ((packed)); 455} __attribute__ ((packed));
441 456
442struct wl1271_radio_parms_cmd { 457struct wl1271_radio_parms_cmd {
@@ -458,11 +473,12 @@ struct wl1271_radio_parms_cmd {
458 /* Dynamic radio parameters */ 473 /* Dynamic radio parameters */
459 /* 2.4GHz */ 474 /* 2.4GHz */
460 __le16 tx_ref_pd_voltage; 475 __le16 tx_ref_pd_voltage;
461 s8 tx_ref_power; 476 u8 tx_ref_power;
462 s8 tx_offset_db; 477 s8 tx_offset_db;
463 478
464 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS]; 479 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
465 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS]; 480 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
481 s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
466 482
467 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4]; 483 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
468 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4]; 484 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -471,15 +487,19 @@ struct wl1271_radio_parms_cmd {
471 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS]; 487 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
472 u8 rx_fem_insertion_loss; 488 u8 rx_fem_insertion_loss;
473 489
474 u8 padding2; 490 u8 degraded_low_to_normal_threshold;
491 u8 degraded_normal_to_high_threshold;
492
493 u8 padding1; /* our own padding, not in ref driver */
475 494
476 /* 5GHz */ 495 /* 5GHz */
477 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5]; 496 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
478 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5]; 497 u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
479 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5]; 498 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
480 499
481 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS]; 500 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
482 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS]; 501 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
502 s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
483 503
484 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5]; 504 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
485 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS]; 505 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -488,7 +508,10 @@ struct wl1271_radio_parms_cmd {
488 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS]; 508 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
489 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5]; 509 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
490 510
491 u8 padding3[2]; 511 u8 degraded_low_to_normal_threshold_5;
512 u8 degraded_normal_to_high_threshold_5;
513
514 u8 padding2[2];
492} __attribute__ ((packed)); 515} __attribute__ ((packed));
493 516
494struct wl1271_cmd_cal_channel_tune { 517struct wl1271_cmd_cal_channel_tune {
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 565373ede26..1993d63c214 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -258,7 +258,8 @@ struct conf_rx_settings {
258#define CONF_TX_MAX_RATE_CLASSES 8 258#define CONF_TX_MAX_RATE_CLASSES 8
259 259
260#define CONF_TX_RATE_MASK_UNSPECIFIED 0 260#define CONF_TX_RATE_MASK_UNSPECIFIED 0
261#define CONF_TX_RATE_MASK_ALL 0x1eff 261#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
262 CONF_HW_BIT_RATE_2MBPS)
262#define CONF_TX_RATE_RETRY_LIMIT 10 263#define CONF_TX_RATE_RETRY_LIMIT 10
263 264
264struct conf_tx_rate_class { 265struct conf_tx_rate_class {
@@ -722,31 +723,6 @@ struct conf_conn_settings {
722 u8 psm_entry_retries; 723 u8 psm_entry_retries;
723}; 724};
724 725
725#define CONF_SR_ERR_TBL_MAX_VALUES 14
726
727struct conf_mart_reflex_err_table {
728 /*
729 * Length of the error table values table.
730 *
731 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
732 */
733 u8 len;
734
735 /*
736 * Smart Reflex error table upper limit.
737 *
738 * Range: s8
739 */
740 s8 upper_limit;
741
742 /*
743 * Smart Reflex error table values.
744 *
745 * Range: s8
746 */
747 s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
748};
749
750enum { 726enum {
751 CONF_REF_CLK_19_2_E, 727 CONF_REF_CLK_19_2_E,
752 CONF_REF_CLK_26_E, 728 CONF_REF_CLK_26_E,
@@ -759,6 +735,9 @@ enum single_dual_band_enum {
759 CONF_DUAL_BAND 735 CONF_DUAL_BAND
760}; 736};
761 737
738
739#define CONF_MAX_SMART_REFLEX_PARAMS 16
740
762struct conf_general_parms { 741struct conf_general_parms {
763 /* 742 /*
764 * RF Reference Clock type / speed 743 * RF Reference Clock type / speed
@@ -815,6 +794,20 @@ struct conf_general_parms {
815 * Range: Unknown 794 * Range: Unknown
816 */ 795 */
817 u8 settings; 796 u8 settings;
797
798 /* Smart reflex settings */
799 u8 sr_state;
800
801 s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
802 s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
803 s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
804
805 s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
806
807 u8 sr_sen_n_p;
808 u8 sr_sen_n_p_gain;
809 u8 sr_sen_nrn;
810 u8 sr_sen_prn;
818}; 811};
819 812
820#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15 813#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
@@ -847,12 +840,13 @@ struct conf_radio_parms {
847 * 840 *
848 * Range: unknown 841 * Range: unknown
849 */ 842 */
850 s16 tx_ref_pd_voltage; 843 u16 tx_ref_pd_voltage;
851 s8 tx_ref_power; 844 u8 tx_ref_power;
852 s8 tx_offset_db; 845 s8 tx_offset_db;
853 846
854 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS]; 847 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
855 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS]; 848 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
849 s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
856 850
857 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4]; 851 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
858 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4]; 852 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -861,17 +855,22 @@ struct conf_radio_parms {
861 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS]; 855 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
862 u8 rx_fem_insertion_loss; 856 u8 rx_fem_insertion_loss;
863 857
858 u8 degraded_low_to_normal_threshold;
859 u8 degraded_normal_to_high_threshold;
860
861
864 /* 862 /*
865 * Dynamic radio parameters for 5GHz 863 * Dynamic radio parameters for 5GHz
866 * 864 *
867 * Range: unknown 865 * Range: unknown
868 */ 866 */
869 s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5]; 867 u16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
870 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5]; 868 u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
871 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5]; 869 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
872 870
873 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS]; 871 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
874 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS]; 872 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
873 s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
875 874
876 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5]; 875 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
877 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS]; 876 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -879,33 +878,46 @@ struct conf_radio_parms {
879 /* FIXME: this is inconsistent with the types for 2.4GHz */ 878 /* FIXME: this is inconsistent with the types for 2.4GHz */
880 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS]; 879 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
881 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5]; 880 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
882};
883 881
884#define CONF_SR_ERR_TBL_COUNT 3 882 u8 degraded_low_to_normal_threshold_5;
883 u8 degraded_normal_to_high_threshold_5;
884};
885 885
886struct conf_init_settings { 886struct conf_init_settings {
887 /* 887 /*
888 * Configure Smart Reflex error table values. 888 * Configure general parameters.
889 */ 889 */
890 struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT]; 890 struct conf_general_parms genparam;
891 891
892 /* 892 /*
893 * Smart Reflex enable flag. 893 * Configure radio parameters.
894 *
895 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
896 */ 894 */
897 u8 sr_enable; 895 struct conf_radio_parms radioparam;
898 896
897};
898
899struct conf_itrim_settings {
900 /* enable dco itrim */
901 u8 enable;
902
903 /* moderation timeout in microsecs from the last TX */
904 u32 timeout;
905};
906
907struct conf_pm_config_settings {
899 /* 908 /*
900 * Configure general parameters. 909 * Host clock settling time
910 *
911 * Range: 0 - 30000 us
901 */ 912 */
902 struct conf_general_parms genparam; 913 u32 host_clk_settling_time;
903 914
904 /* 915 /*
905 * Configure radio parameters. 916 * Host fast wakeup support
917 *
918 * Range: true, false
906 */ 919 */
907 struct conf_radio_parms radioparam; 920 bool host_fast_wakeup_support;
908
909}; 921};
910 922
911struct conf_drv_settings { 923struct conf_drv_settings {
@@ -914,6 +926,8 @@ struct conf_drv_settings {
914 struct conf_tx_settings tx; 926 struct conf_tx_settings tx;
915 struct conf_conn_settings conn; 927 struct conf_conn_settings conn;
916 struct conf_init_settings init; 928 struct conf_init_settings init;
929 struct conf_itrim_settings itrim;
930 struct conf_pm_config_settings pm_config;
917}; 931};
918 932
919#endif 933#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index c1805e5f896..8d7588ca68f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -237,6 +237,64 @@ static const struct file_operations tx_queue_len_ops = {
237 .open = wl1271_open_file_generic, 237 .open = wl1271_open_file_generic,
238}; 238};
239 239
240static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
241 size_t count, loff_t *ppos)
242{
243 struct wl1271 *wl = file->private_data;
244 bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
245
246 int res;
247 char buf[10];
248
249 res = scnprintf(buf, sizeof(buf), "%d\n", state);
250
251 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
252}
253
254static ssize_t gpio_power_write(struct file *file,
255 const char __user *user_buf,
256 size_t count, loff_t *ppos)
257{
258 struct wl1271 *wl = file->private_data;
259 char buf[10];
260 size_t len;
261 unsigned long value;
262 int ret;
263
264 mutex_lock(&wl->mutex);
265
266 len = min(count, sizeof(buf) - 1);
267 if (copy_from_user(buf, user_buf, len)) {
268 ret = -EFAULT;
269 goto out;
270 }
271 buf[len] = '\0';
272
273 ret = strict_strtoul(buf, 0, &value);
274 if (ret < 0) {
275 wl1271_warning("illegal value in gpio_power");
276 goto out;
277 }
278
279 if (value) {
280 wl->set_power(true);
281 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
282 } else {
283 wl->set_power(false);
284 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
285 }
286
287out:
288 mutex_unlock(&wl->mutex);
289 return count;
290}
291
292static const struct file_operations gpio_power_ops = {
293 .read = gpio_power_read,
294 .write = gpio_power_write,
295 .open = wl1271_open_file_generic
296};
297
240static void wl1271_debugfs_delete_files(struct wl1271 *wl) 298static void wl1271_debugfs_delete_files(struct wl1271 *wl)
241{ 299{
242 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); 300 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -333,6 +391,8 @@ static void wl1271_debugfs_delete_files(struct wl1271 *wl)
333 DEBUGFS_DEL(tx_queue_len); 391 DEBUGFS_DEL(tx_queue_len);
334 DEBUGFS_DEL(retry_count); 392 DEBUGFS_DEL(retry_count);
335 DEBUGFS_DEL(excessive_retries); 393 DEBUGFS_DEL(excessive_retries);
394
395 DEBUGFS_DEL(gpio_power);
336} 396}
337 397
338static int wl1271_debugfs_add_files(struct wl1271 *wl) 398static int wl1271_debugfs_add_files(struct wl1271 *wl)
@@ -434,6 +494,8 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
434 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); 494 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
435 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); 495 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
436 496
497 DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
498
437out: 499out:
438 if (ret < 0) 500 if (ret < 0)
439 wl1271_debugfs_delete_files(wl); 501 wl1271_debugfs_delete_files(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index d13fdd99c85..0a145afc990 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -35,7 +35,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
35 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 35 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
36 mbox->scheduled_scan_status); 36 mbox->scheduled_scan_status);
37 37
38 if (wl->scanning) { 38 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { 39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
41 NULL, size); 41 NULL, size);
@@ -43,7 +43,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
43 * to the wl1271_cmd_scan function that we are not 43 * to the wl1271_cmd_scan function that we are not
44 * scanning as it checks that. 44 * scanning as it checks that.
45 */ 45 */
46 wl->scanning = false; 46 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, 47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
48 wl->scan.active, 48 wl->scan.active,
49 wl->scan.high_prio, 49 wl->scan.high_prio,
@@ -62,7 +62,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
62 mutex_unlock(&wl->mutex); 62 mutex_unlock(&wl->mutex);
63 ieee80211_scan_completed(wl->hw, false); 63 ieee80211_scan_completed(wl->hw, false);
64 mutex_lock(&wl->mutex); 64 mutex_lock(&wl->mutex);
65 wl->scanning = false; 65 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
66 } 66 }
67 } 67 }
68 return 0; 68 return 0;
@@ -78,7 +78,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
78 78
79 switch (mbox->ps_status) { 79 switch (mbox->ps_status) {
80 case EVENT_ENTER_POWER_SAVE_FAIL: 80 case EVENT_ENTER_POWER_SAVE_FAIL:
81 if (!wl->psm) { 81 if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
82 wl->psm_entry_retry = 0; 82 wl->psm_entry_retry = 0;
83 break; 83 break;
84 } 84 }
@@ -89,7 +89,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
89 } else { 89 } else {
90 wl1271_error("PSM entry failed, giving up.\n"); 90 wl1271_error("PSM entry failed, giving up.\n");
91 wl->psm_entry_retry = 0; 91 wl->psm_entry_retry = 0;
92 *beacon_loss = true;
93 } 92 }
94 break; 93 break;
95 case EVENT_ENTER_POWER_SAVE_SUCCESS: 94 case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -136,7 +135,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
136 * filtering) is enabled. Without PSM, the stack will receive all 135 * filtering) is enabled. Without PSM, the stack will receive all
137 * beacons and can detect beacon loss by itself. 136 * beacons and can detect beacon loss by itself.
138 */ 137 */
139 if (vector & BSS_LOSE_EVENT_ID && wl->psm) { 138 if (vector & BSS_LOSE_EVENT_ID &&
139 test_bit(WL1271_FLAG_PSM, &wl->flags)) {
140 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); 140 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
141 141
142 /* indicate to the stack, that beacons have been lost */ 142 /* indicate to the stack, that beacons have been lost */
@@ -150,7 +150,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
150 return ret; 150 return ret;
151 } 151 }
152 152
153 if (beacon_loss) { 153 if (wl->vif && beacon_loss) {
154 /* Obviously, it's dangerous to release the mutex while 154 /* Obviously, it's dangerous to release the mutex while
155 we are holding many of the variables in the wl struct. 155 we are holding many of the variables in the wl struct.
156 That's why it's done last in the function, and care must 156 That's why it's done last in the function, and care must
@@ -184,7 +184,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl)
184 wl->mbox_ptr[0], wl->mbox_ptr[1]); 184 wl->mbox_ptr[0], wl->mbox_ptr[1]);
185} 185}
186 186
187int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack) 187int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
188{ 188{
189 struct event_mailbox mbox; 189 struct event_mailbox mbox;
190 int ret; 190 int ret;
@@ -204,9 +204,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
204 return ret; 204 return ret;
205 205
206 /* then we let the firmware know it can go on...*/ 206 /* then we let the firmware know it can go on...*/
207 if (do_ack) 207 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
208 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
209 INTR_TRIG_EVENT_ACK);
210 208
211 return 0; 209 return 0;
212} 210}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 4e3f55ebb1a..278f9206aa5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -112,6 +112,6 @@ struct event_mailbox {
112 112
113int wl1271_event_unmask(struct wl1271 *wl); 113int wl1271_event_unmask(struct wl1271 *wl);
114void wl1271_event_mbox_config(struct wl1271 *wl); 114void wl1271_event_mbox_config(struct wl1271 *wl);
115int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack); 115int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
116 116
117#endif 117#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 11249b436cf..c9848eecb76 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -229,6 +229,10 @@ int wl1271_hw_init(struct wl1271 *wl)
229 if (ret < 0) 229 if (ret < 0)
230 goto out_free_memmap; 230 goto out_free_memmap;
231 231
232 ret = wl1271_acx_dco_itrim_params(wl);
233 if (ret < 0)
234 goto out_free_memmap;
235
232 /* Initialize connection monitoring thresholds */ 236 /* Initialize connection monitoring thresholds */
233 ret = wl1271_acx_conn_monit_params(wl); 237 ret = wl1271_acx_conn_monit_params(wl);
234 if (ret < 0) 238 if (ret < 0)
@@ -280,12 +284,12 @@ int wl1271_hw_init(struct wl1271 *wl)
280 goto out_free_memmap; 284 goto out_free_memmap;
281 285
282 /* Configure TX rate classes */ 286 /* Configure TX rate classes */
283 ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL); 287 ret = wl1271_acx_rate_policies(wl);
284 if (ret < 0) 288 if (ret < 0)
285 goto out_free_memmap; 289 goto out_free_memmap;
286 290
287 /* Enable data path */ 291 /* Enable data path */
288 ret = wl1271_cmd_data_path(wl, wl->channel, 1); 292 ret = wl1271_cmd_data_path(wl, 1);
289 if (ret < 0) 293 if (ret < 0)
290 goto out_free_memmap; 294 goto out_free_memmap;
291 295
@@ -299,8 +303,8 @@ int wl1271_hw_init(struct wl1271 *wl)
299 if (ret < 0) 303 if (ret < 0)
300 goto out_free_memmap; 304 goto out_free_memmap;
301 305
302 /* Configure smart reflex */ 306 /* configure PM */
303 ret = wl1271_acx_smart_reflex(wl); 307 ret = wl1271_acx_pm_config(wl);
304 if (ret < 0) 308 if (ret < 0)
305 goto out_free_memmap; 309 goto out_free_memmap;
306 310
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b62c00ff42f..e4867b895c4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -47,6 +47,8 @@
47#include "wl1271_cmd.h" 47#include "wl1271_cmd.h"
48#include "wl1271_boot.h" 48#include "wl1271_boot.h"
49 49
50#define WL1271_BOOT_RETRIES 3
51
50static struct conf_drv_settings default_conf = { 52static struct conf_drv_settings default_conf = {
51 .sg = { 53 .sg = {
52 .per_threshold = 7500, 54 .per_threshold = 7500,
@@ -67,16 +69,17 @@ static struct conf_drv_settings default_conf = {
67 .ps_poll_timeout = 15, 69 .ps_poll_timeout = 15,
68 .upsd_timeout = 15, 70 .upsd_timeout = 15,
69 .rts_threshold = 2347, 71 .rts_threshold = 2347,
70 .rx_cca_threshold = 0xFFEF, 72 .rx_cca_threshold = 0,
71 .irq_blk_threshold = 0, 73 .irq_blk_threshold = 0xFFFF,
72 .irq_pkt_threshold = USHORT_MAX, 74 .irq_pkt_threshold = 0,
73 .irq_timeout = 5, 75 .irq_timeout = 600,
74 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY, 76 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
75 }, 77 },
76 .tx = { 78 .tx = {
77 .tx_energy_detection = 0, 79 .tx_energy_detection = 0,
78 .rc_conf = { 80 .rc_conf = {
79 .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED, 81 .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
82 CONF_HW_BIT_RATE_2MBPS,
80 .short_retry_limit = 10, 83 .short_retry_limit = 10,
81 .long_retry_limit = 10, 84 .long_retry_limit = 10,
82 .aflags = 0 85 .aflags = 0
@@ -172,8 +175,8 @@ static struct conf_drv_settings default_conf = {
172 } 175 }
173 }, 176 },
174 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, 177 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
175 .tx_compl_timeout = 5, 178 .tx_compl_timeout = 700,
176 .tx_compl_threshold = 5 179 .tx_compl_threshold = 4
177 }, 180 },
178 .conn = { 181 .conn = {
179 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 182 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -186,12 +189,12 @@ static struct conf_drv_settings default_conf = {
186 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, 189 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
187 } 190 }
188 }, 191 },
189 .synch_fail_thold = 5, 192 .synch_fail_thold = 10,
190 .bss_lose_timeout = 100, 193 .bss_lose_timeout = 100,
191 .beacon_rx_timeout = 10000, 194 .beacon_rx_timeout = 10000,
192 .broadcast_timeout = 20000, 195 .broadcast_timeout = 20000,
193 .rx_broadcast_in_ps = 1, 196 .rx_broadcast_in_ps = 1,
194 .ps_poll_threshold = 4, 197 .ps_poll_threshold = 20,
195 .sig_trigger_count = 2, 198 .sig_trigger_count = 2,
196 .sig_trigger = { 199 .sig_trigger = {
197 [0] = { 200 [0] = {
@@ -226,46 +229,35 @@ static struct conf_drv_settings default_conf = {
226 .psm_entry_retries = 3 229 .psm_entry_retries = 3
227 }, 230 },
228 .init = { 231 .init = {
229 .sr_err_tbl = {
230 [0] = {
231 .len = 7,
232 .upper_limit = 0x03,
233 .values = {
234 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
235 0x00 }
236 },
237 [1] = {
238 .len = 7,
239 .upper_limit = 0x03,
240 .values = {
241 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
242 0x00 }
243 },
244 [2] = {
245 .len = 7,
246 .upper_limit = 0x03,
247 .values = {
248 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
249 0x00 }
250 }
251 },
252 .sr_enable = 1,
253 .genparam = { 232 .genparam = {
254 .ref_clk = CONF_REF_CLK_38_4_E, 233 .ref_clk = CONF_REF_CLK_38_4_E,
255 .settling_time = 5, 234 .settling_time = 5,
256 .clk_valid_on_wakeup = 0, 235 .clk_valid_on_wakeup = 0,
257 .dc2dcmode = 0, 236 .dc2dcmode = 0,
258 .single_dual_band = CONF_SINGLE_BAND, 237 .single_dual_band = CONF_SINGLE_BAND,
259 .tx_bip_fem_autodetect = 0, 238 .tx_bip_fem_autodetect = 1,
260 .tx_bip_fem_manufacturer = 1, 239 .tx_bip_fem_manufacturer = 1,
261 .settings = 1, 240 .settings = 1,
241 .sr_state = 1,
242 .srf1 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
243 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
244 .srf2 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
245 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
246 .srf3 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
247 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
248 .sr_debug_table = { 0, 0, 0, 0, 0, 0, 0, 0,
249 0, 0, 0, 0, 0, 0, 0, 0 },
250 .sr_sen_n_p = 0,
251 .sr_sen_n_p_gain = 0,
252 .sr_sen_nrn = 0,
253 .sr_sen_prn = 0,
262 }, 254 },
263 .radioparam = { 255 .radioparam = {
264 .rx_trace_loss = 10, 256 .rx_trace_loss = 0x24,
265 .tx_trace_loss = 10, 257 .tx_trace_loss = 0x0,
266 .rx_rssi_and_proc_compens = { 258 .rx_rssi_and_proc_compens = {
267 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 259 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
268 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8, 260 0xfc, 0x00, 0x80, 0x10, 0xf0, 0xf8,
269 0x00, 0x0a, 0x14 }, 261 0x00, 0x0a, 0x14 },
270 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 }, 262 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
271 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 }, 263 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
@@ -273,13 +265,15 @@ static struct conf_drv_settings default_conf = {
273 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00 }, 267 0x00, 0x00, 0x00 },
276 .tx_ref_pd_voltage = 0x24e, 268 .tx_ref_pd_voltage = 0x1a9,
277 .tx_ref_power = 0x78, 269 .tx_ref_power = 0x80,
278 .tx_offset_db = 0x0, 270 .tx_offset_db = 0x0,
279 .tx_rate_limits_normal = { 271 .tx_rate_limits_normal = {
280 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 }, 272 0x1d, 0x1f, 0x24, 0x28, 0x28, 0x29 },
281 .tx_rate_limits_degraded = { 273 .tx_rate_limits_degraded = {
282 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 }, 274 0x19, 0x1f, 0x22, 0x23, 0x27, 0x28 },
275 .tx_rate_limits_extreme = {
276 0x19, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
283 .tx_channel_limits_11b = { 277 .tx_channel_limits_11b = {
284 0x22, 0x50, 0x50, 0x50, 0x50, 0x50, 278 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
285 0x50, 0x50, 0x50, 0x50, 0x22, 0x50, 279 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
@@ -289,10 +283,12 @@ static struct conf_drv_settings default_conf = {
289 0x50, 0x50, 0x50, 0x50, 0x20, 0x50, 283 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
290 0x20, 0x50 }, 284 0x20, 0x50 },
291 .tx_pdv_rate_offsets = { 285 .tx_pdv_rate_offsets = {
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 286 0x07, 0x08, 0x04, 0x02, 0x02, 0x00 },
293 .tx_ibias = { 287 .tx_ibias = {
294 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 }, 288 0x11, 0x11, 0x15, 0x11, 0x15, 0x0f },
295 .rx_fem_insertion_loss = 0x14, 289 .rx_fem_insertion_loss = 0x0e,
290 .degraded_low_to_normal_threshold = 0x1e,
291 .degraded_normal_to_high_threshold = 0x2d,
296 .tx_ref_pd_voltage_5 = { 292 .tx_ref_pd_voltage_5 = {
297 0x0190, 0x01a4, 0x01c3, 0x01d8, 293 0x0190, 0x01a4, 0x01c3, 0x01d8,
298 0x020a, 0x021c }, 294 0x020a, 0x021c },
@@ -304,6 +300,8 @@ static struct conf_drv_settings default_conf = {
304 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 }, 300 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
305 .tx_rate_limits_degraded_5 = { 301 .tx_rate_limits_degraded_5 = {
306 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 }, 302 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
303 .tx_rate_limits_extreme_5 = {
304 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
307 .tx_channel_limits_ofdm_5 = { 305 .tx_channel_limits_ofdm_5 = {
308 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 306 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
309 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 307 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
@@ -315,8 +313,18 @@ static struct conf_drv_settings default_conf = {
315 .tx_ibias_5 = { 313 .tx_ibias_5 = {
316 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }, 314 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
317 .rx_fem_insertion_loss_5 = { 315 .rx_fem_insertion_loss_5 = {
318 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 } 316 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
317 .degraded_low_to_normal_threshold_5 = 0x00,
318 .degraded_normal_to_high_threshold_5 = 0x00
319 } 319 }
320 },
321 .itrim = {
322 .enable = false,
323 .timeout = 50000,
324 },
325 .pm_config = {
326 .host_clk_settling_time = 5000,
327 .host_fast_wakeup_support = false
320 } 328 }
321}; 329};
322 330
@@ -359,7 +367,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
359 if (ret < 0) 367 if (ret < 0)
360 return ret; 368 return ret;
361 369
362 ret = wl1271_cmd_data_path(wl, wl->channel, 1); 370 ret = wl1271_cmd_data_path(wl, 1);
363 if (ret < 0) 371 if (ret < 0)
364 return ret; 372 return ret;
365 373
@@ -374,11 +382,13 @@ static void wl1271_disable_interrupts(struct wl1271 *wl)
374static void wl1271_power_off(struct wl1271 *wl) 382static void wl1271_power_off(struct wl1271 *wl)
375{ 383{
376 wl->set_power(false); 384 wl->set_power(false);
385 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
377} 386}
378 387
379static void wl1271_power_on(struct wl1271 *wl) 388static void wl1271_power_on(struct wl1271 *wl)
380{ 389{
381 wl->set_power(true); 390 wl->set_power(true);
391 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
382} 392}
383 393
384static void wl1271_fw_status(struct wl1271 *wl, 394static void wl1271_fw_status(struct wl1271 *wl,
@@ -447,14 +457,13 @@ static void wl1271_irq_work(struct work_struct *work)
447 intr &= WL1271_INTR_MASK; 457 intr &= WL1271_INTR_MASK;
448 458
449 if (intr & WL1271_ACX_INTR_EVENT_A) { 459 if (intr & WL1271_ACX_INTR_EVENT_A) {
450 bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
451 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 460 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
452 wl1271_event_handle(wl, 0, do_ack); 461 wl1271_event_handle(wl, 0);
453 } 462 }
454 463
455 if (intr & WL1271_ACX_INTR_EVENT_B) { 464 if (intr & WL1271_ACX_INTR_EVENT_B) {
456 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 465 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
457 wl1271_event_handle(wl, 1, true); 466 wl1271_event_handle(wl, 1);
458 } 467 }
459 468
460 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 469 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -614,6 +623,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
614 struct wl1271_partition_set partition; 623 struct wl1271_partition_set partition;
615 int ret = 0; 624 int ret = 0;
616 625
626 msleep(WL1271_PRE_POWER_ON_SLEEP);
617 wl1271_power_on(wl); 627 wl1271_power_on(wl);
618 msleep(WL1271_POWER_ON_SLEEP); 628 msleep(WL1271_POWER_ON_SLEEP);
619 wl1271_spi_reset(wl); 629 wl1271_spi_reset(wl);
@@ -643,7 +653,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
643 653
644 ret = wl1271_setup(wl); 654 ret = wl1271_setup(wl);
645 if (ret < 0) 655 if (ret < 0)
646 goto out_power_off; 656 goto out;
647 break; 657 break;
648 case CHIP_ID_1271_PG20: 658 case CHIP_ID_1271_PG20:
649 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 659 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -651,38 +661,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
651 661
652 ret = wl1271_setup(wl); 662 ret = wl1271_setup(wl);
653 if (ret < 0) 663 if (ret < 0)
654 goto out_power_off; 664 goto out;
655 break; 665 break;
656 default: 666 default:
657 wl1271_error("unsupported chip id: 0x%x", wl->chip.id); 667 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
658 ret = -ENODEV; 668 ret = -ENODEV;
659 goto out_power_off; 669 goto out;
660 } 670 }
661 671
662 if (wl->fw == NULL) { 672 if (wl->fw == NULL) {
663 ret = wl1271_fetch_firmware(wl); 673 ret = wl1271_fetch_firmware(wl);
664 if (ret < 0) 674 if (ret < 0)
665 goto out_power_off; 675 goto out;
666 } 676 }
667 677
668 /* No NVS from netlink, try to get it from the filesystem */ 678 /* No NVS from netlink, try to get it from the filesystem */
669 if (wl->nvs == NULL) { 679 if (wl->nvs == NULL) {
670 ret = wl1271_fetch_nvs(wl); 680 ret = wl1271_fetch_nvs(wl);
671 if (ret < 0) 681 if (ret < 0)
672 goto out_power_off; 682 goto out;
673 } 683 }
674 684
675 goto out;
676
677out_power_off:
678 wl1271_power_off(wl);
679
680out: 685out:
681 return ret; 686 return ret;
682} 687}
683 688
684int wl1271_plt_start(struct wl1271 *wl) 689int wl1271_plt_start(struct wl1271 *wl)
685{ 690{
691 int retries = WL1271_BOOT_RETRIES;
686 int ret; 692 int ret;
687 693
688 mutex_lock(&wl->mutex); 694 mutex_lock(&wl->mutex);
@@ -696,35 +702,48 @@ int wl1271_plt_start(struct wl1271 *wl)
696 goto out; 702 goto out;
697 } 703 }
698 704
699 wl->state = WL1271_STATE_PLT; 705 while (retries) {
700 706 retries--;
701 ret = wl1271_chip_wakeup(wl); 707 ret = wl1271_chip_wakeup(wl);
702 if (ret < 0) 708 if (ret < 0)
703 goto out; 709 goto power_off;
704
705 ret = wl1271_boot(wl);
706 if (ret < 0)
707 goto out_power_off;
708
709 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
710 710
711 ret = wl1271_plt_init(wl); 711 ret = wl1271_boot(wl);
712 if (ret < 0) 712 if (ret < 0)
713 goto out_irq_disable; 713 goto power_off;
714 714
715 /* Make sure power saving is disabled */ 715 ret = wl1271_plt_init(wl);
716 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 716 if (ret < 0)
717 if (ret < 0) 717 goto irq_disable;
718 goto out_irq_disable;
719 718
720 goto out; 719 /* Make sure power saving is disabled */
720 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
721 if (ret < 0)
722 goto irq_disable;
721 723
722out_irq_disable: 724 wl->state = WL1271_STATE_PLT;
723 wl1271_disable_interrupts(wl); 725 wl1271_notice("firmware booted in PLT mode (%s)",
726 wl->chip.fw_ver);
727 goto out;
724 728
725out_power_off: 729irq_disable:
726 wl1271_power_off(wl); 730 wl1271_disable_interrupts(wl);
731 mutex_unlock(&wl->mutex);
732 /* Unlocking the mutex in the middle of handling is
733 inherently unsafe. In this case we deem it safe to do,
734 because we need to let any possibly pending IRQ out of
735 the system (and while we are WL1271_STATE_OFF the IRQ
736 work function will not do anything.) Also, any other
737 possible concurrent operations will fail due to the
738 current state, hence the wl1271 struct should be safe. */
739 cancel_work_sync(&wl->irq_work);
740 mutex_lock(&wl->mutex);
741power_off:
742 wl1271_power_off(wl);
743 }
727 744
745 wl1271_error("firmware boot in PLT mode failed despite %d retries",
746 WL1271_BOOT_RETRIES);
728out: 747out:
729 mutex_unlock(&wl->mutex); 748 mutex_unlock(&wl->mutex);
730 749
@@ -762,7 +781,20 @@ out:
762static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 781static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
763{ 782{
764 struct wl1271 *wl = hw->priv; 783 struct wl1271 *wl = hw->priv;
784 struct ieee80211_conf *conf = &hw->conf;
785 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
786 struct ieee80211_sta *sta = txinfo->control.sta;
787 unsigned long flags;
765 788
789 /* peek into the rates configured in the STA entry */
790 spin_lock_irqsave(&wl->wl_lock, flags);
791 if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
792 wl->sta_rate_set = sta->supp_rates[conf->channel->band];
793 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
794 }
795 spin_unlock_irqrestore(&wl->wl_lock, flags);
796
797 /* queue the packet */
766 skb_queue_tail(&wl->tx_queue, skb); 798 skb_queue_tail(&wl->tx_queue, skb);
767 799
768 /* 800 /*
@@ -784,7 +816,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
784 * protected. Maybe fix this by removing the stupid 816 * protected. Maybe fix this by removing the stupid
785 * variable altogether and checking the real queue state? 817 * variable altogether and checking the real queue state?
786 */ 818 */
787 wl->tx_queue_stopped = true; 819 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
788 } 820 }
789 821
790 return NETDEV_TX_OK; 822 return NETDEV_TX_OK;
@@ -880,6 +912,7 @@ static struct notifier_block wl1271_dev_notifier = {
880static int wl1271_op_start(struct ieee80211_hw *hw) 912static int wl1271_op_start(struct ieee80211_hw *hw)
881{ 913{
882 struct wl1271 *wl = hw->priv; 914 struct wl1271 *wl = hw->priv;
915 int retries = WL1271_BOOT_RETRIES;
883 int ret = 0; 916 int ret = 0;
884 917
885 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 918 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -893,30 +926,42 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
893 goto out; 926 goto out;
894 } 927 }
895 928
896 ret = wl1271_chip_wakeup(wl); 929 while (retries) {
897 if (ret < 0) 930 retries--;
898 goto out; 931 ret = wl1271_chip_wakeup(wl);
899 932 if (ret < 0)
900 ret = wl1271_boot(wl); 933 goto power_off;
901 if (ret < 0)
902 goto out_power_off;
903
904 ret = wl1271_hw_init(wl);
905 if (ret < 0)
906 goto out_irq_disable;
907
908 wl->state = WL1271_STATE_ON;
909 934
910 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 935 ret = wl1271_boot(wl);
936 if (ret < 0)
937 goto power_off;
911 938
912 goto out; 939 ret = wl1271_hw_init(wl);
940 if (ret < 0)
941 goto irq_disable;
913 942
914out_irq_disable: 943 wl->state = WL1271_STATE_ON;
915 wl1271_disable_interrupts(wl); 944 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
945 goto out;
916 946
917out_power_off: 947irq_disable:
918 wl1271_power_off(wl); 948 wl1271_disable_interrupts(wl);
949 mutex_unlock(&wl->mutex);
950 /* Unlocking the mutex in the middle of handling is
951 inherently unsafe. In this case we deem it safe to do,
952 because we need to let any possibly pending IRQ out of
953 the system (and while we are WL1271_STATE_OFF the IRQ
954 work function will not do anything.) Also, any other
955 possible concurrent operations will fail due to the
956 current state, hence the wl1271 struct should be safe. */
957 cancel_work_sync(&wl->irq_work);
958 mutex_lock(&wl->mutex);
959power_off:
960 wl1271_power_off(wl);
961 }
919 962
963 wl1271_error("firmware boot failed despite %d retries",
964 WL1271_BOOT_RETRIES);
920out: 965out:
921 mutex_unlock(&wl->mutex); 966 mutex_unlock(&wl->mutex);
922 967
@@ -944,11 +989,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
944 989
945 WARN_ON(wl->state != WL1271_STATE_ON); 990 WARN_ON(wl->state != WL1271_STATE_ON);
946 991
947 if (wl->scanning) { 992 if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
948 mutex_unlock(&wl->mutex); 993 mutex_unlock(&wl->mutex);
949 ieee80211_scan_completed(wl->hw, true); 994 ieee80211_scan_completed(wl->hw, true);
950 mutex_lock(&wl->mutex); 995 mutex_lock(&wl->mutex);
951 wl->scanning = false;
952 } 996 }
953 997
954 wl->state = WL1271_STATE_OFF; 998 wl->state = WL1271_STATE_OFF;
@@ -973,10 +1017,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
973 wl->band = IEEE80211_BAND_2GHZ; 1017 wl->band = IEEE80211_BAND_2GHZ;
974 1018
975 wl->rx_counter = 0; 1019 wl->rx_counter = 0;
976 wl->elp = false;
977 wl->psm = 0;
978 wl->psm_entry_retry = 0; 1020 wl->psm_entry_retry = 0;
979 wl->tx_queue_stopped = false;
980 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1021 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
981 wl->tx_blocks_available = 0; 1022 wl->tx_blocks_available = 0;
982 wl->tx_results_count = 0; 1023 wl->tx_results_count = 0;
@@ -986,7 +1027,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
986 wl->tx_security_seq_32 = 0; 1027 wl->tx_security_seq_32 = 0;
987 wl->time_offset = 0; 1028 wl->time_offset = 0;
988 wl->session_counter = 0; 1029 wl->session_counter = 0;
989 wl->joined = false; 1030 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1031 wl->sta_rate_set = 0;
1032 wl->flags = 0;
990 1033
991 for (i = 0; i < NUM_TX_QUEUES; i++) 1034 for (i = 0; i < NUM_TX_QUEUES; i++)
992 wl->tx_blocks_freed[i] = 0; 1035 wl->tx_blocks_freed[i] = 0;
@@ -996,13 +1039,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
996} 1039}
997 1040
998static int wl1271_op_add_interface(struct ieee80211_hw *hw, 1041static int wl1271_op_add_interface(struct ieee80211_hw *hw,
999 struct ieee80211_if_init_conf *conf) 1042 struct ieee80211_vif *vif)
1000{ 1043{
1001 struct wl1271 *wl = hw->priv; 1044 struct wl1271 *wl = hw->priv;
1002 int ret = 0; 1045 int ret = 0;
1003 1046
1004 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 1047 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
1005 conf->type, conf->mac_addr); 1048 vif->type, vif->addr);
1006 1049
1007 mutex_lock(&wl->mutex); 1050 mutex_lock(&wl->mutex);
1008 if (wl->vif) { 1051 if (wl->vif) {
@@ -1010,9 +1053,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1010 goto out; 1053 goto out;
1011 } 1054 }
1012 1055
1013 wl->vif = conf->vif; 1056 wl->vif = vif;
1014 1057
1015 switch (conf->type) { 1058 switch (vif->type) {
1016 case NL80211_IFTYPE_STATION: 1059 case NL80211_IFTYPE_STATION:
1017 wl->bss_type = BSS_TYPE_STA_BSS; 1060 wl->bss_type = BSS_TYPE_STA_BSS;
1018 break; 1061 break;
@@ -1032,7 +1075,7 @@ out:
1032} 1075}
1033 1076
1034static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1077static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1035 struct ieee80211_if_init_conf *conf) 1078 struct ieee80211_vif *vif)
1036{ 1079{
1037 struct wl1271 *wl = hw->priv; 1080 struct wl1271 *wl = hw->priv;
1038 1081
@@ -1109,6 +1152,51 @@ out:
1109} 1152}
1110#endif 1153#endif
1111 1154
1155static int wl1271_join_channel(struct wl1271 *wl, int channel)
1156{
1157 int ret = 0;
1158 /* we need to use a dummy BSSID for now */
1159 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
1160 0xad, 0xbe, 0xef };
1161
1162 /* the dummy join is not required for ad-hoc */
1163 if (wl->bss_type == BSS_TYPE_IBSS)
1164 goto out;
1165
1166 /* disable mac filter, so we hear everything */
1167 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1168
1169 wl->channel = channel;
1170 memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
1171
1172 ret = wl1271_cmd_join(wl);
1173 if (ret < 0)
1174 goto out;
1175
1176 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1177
1178out:
1179 return ret;
1180}
1181
1182static int wl1271_unjoin_channel(struct wl1271 *wl)
1183{
1184 int ret;
1185
1186 /* to stop listening to a channel, we disconnect */
1187 ret = wl1271_cmd_disconnect(wl);
1188 if (ret < 0)
1189 goto out;
1190
1191 clear_bit(WL1271_FLAG_JOINED, &wl->flags);
1192 wl->channel = 0;
1193 memset(wl->bssid, 0, ETH_ALEN);
1194 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1195
1196out:
1197 return ret;
1198}
1199
1112static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 1200static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1113{ 1201{
1114 struct wl1271 *wl = hw->priv; 1202 struct wl1271 *wl = hw->priv;
@@ -1117,10 +1205,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1117 1205
1118 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 1206 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
1119 1207
1120 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d", 1208 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
1121 channel, 1209 channel,
1122 conf->flags & IEEE80211_CONF_PS ? "on" : "off", 1210 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
1123 conf->power_level); 1211 conf->power_level,
1212 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
1124 1213
1125 mutex_lock(&wl->mutex); 1214 mutex_lock(&wl->mutex);
1126 1215
@@ -1130,34 +1219,44 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1130 if (ret < 0) 1219 if (ret < 0)
1131 goto out; 1220 goto out;
1132 1221
1133 if (channel != wl->channel) { 1222 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1134 /* 1223 if (conf->flags & IEEE80211_CONF_IDLE &&
1135 * We assume that the stack will configure the right channel 1224 test_bit(WL1271_FLAG_JOINED, &wl->flags))
1136 * before associating, so we don't need to send a join 1225 wl1271_unjoin_channel(wl);
1137 * command here. We will join the right channel when the 1226 else if (!(conf->flags & IEEE80211_CONF_IDLE))
1138 * BSSID changes 1227 wl1271_join_channel(wl, channel);
1139 */ 1228
1140 wl->channel = channel; 1229 if (conf->flags & IEEE80211_CONF_IDLE) {
1230 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1231 wl->sta_rate_set = 0;
1232 wl1271_acx_rate_policies(wl);
1233 }
1141 } 1234 }
1142 1235
1143 if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { 1236 /* if the channel changes while joined, join again */
1144 wl1271_info("psm enabled"); 1237 if (channel != wl->channel && test_bit(WL1271_FLAG_JOINED, &wl->flags))
1238 wl1271_join_channel(wl, channel);
1145 1239
1146 wl->psm_requested = true; 1240 if (conf->flags & IEEE80211_CONF_PS &&
1241 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1242 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1147 1243
1148 /* 1244 /*
1149 * We enter PSM only if we're already associated. 1245 * We enter PSM only if we're already associated.
1150 * If we're not, we'll enter it when joining an SSID, 1246 * If we're not, we'll enter it when joining an SSID,
1151 * through the bss_info_changed() hook. 1247 * through the bss_info_changed() hook.
1152 */ 1248 */
1153 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE); 1249 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1250 wl1271_info("psm enabled");
1251 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
1252 }
1154 } else if (!(conf->flags & IEEE80211_CONF_PS) && 1253 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
1155 wl->psm_requested) { 1254 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1156 wl1271_info("psm disabled"); 1255 wl1271_info("psm disabled");
1157 1256
1158 wl->psm_requested = false; 1257 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1159 1258
1160 if (wl->psm) 1259 if (test_bit(WL1271_FLAG_PSM, &wl->flags))
1161 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); 1260 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
1162 } 1261 }
1163 1262
@@ -1440,22 +1539,6 @@ out:
1440 return ret; 1539 return ret;
1441} 1540}
1442 1541
1443static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
1444{
1445 struct ieee80211_supported_band *band;
1446 u32 enabled_rates = 0;
1447 int bit;
1448
1449 band = wl->hw->wiphy->bands[wl->band];
1450 for (bit = 0; bit < band->n_bitrates; bit++) {
1451 if (basic_rate_set & 0x1)
1452 enabled_rates |= band->bitrates[bit].hw_value;
1453 basic_rate_set >>= 1;
1454 }
1455
1456 return enabled_rates;
1457}
1458
1459static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 1542static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1460 struct ieee80211_vif *vif, 1543 struct ieee80211_vif *vif,
1461 struct ieee80211_bss_conf *bss_conf, 1544 struct ieee80211_bss_conf *bss_conf,
@@ -1473,9 +1556,68 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1473 if (ret < 0) 1556 if (ret < 0)
1474 goto out; 1557 goto out;
1475 1558
1559 if ((changed & BSS_CHANGED_BSSID) &&
1560 /*
1561 * Now we know the correct bssid, so we send a new join command
1562 * and enable the BSSID filter
1563 */
1564 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1565 wl->rx_config |= CFG_BSSID_FILTER_EN;
1566 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1567 ret = wl1271_cmd_build_null_data(wl);
1568 if (ret < 0) {
1569 wl1271_warning("cmd buld null data failed %d",
1570 ret);
1571 goto out_sleep;
1572 }
1573 ret = wl1271_cmd_join(wl);
1574 if (ret < 0) {
1575 wl1271_warning("cmd join failed %d", ret);
1576 goto out_sleep;
1577 }
1578 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1579 }
1580
1581 if (wl->bss_type == BSS_TYPE_IBSS) {
1582 /* FIXME: This implements rudimentary ad-hoc support -
1583 proper templates are on the wish list and notification
1584 on when they change. This patch will update the templates
1585 on every call to this function. Also, the firmware will not
1586 answer to probe-requests as it does not have the proper
1587 SSID set in the JOIN command. The probe-response template
1588 is set nevertheless, as the FW will ASSERT without it */
1589 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1590
1591 if (beacon) {
1592 struct ieee80211_hdr *hdr;
1593 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
1594 beacon->data,
1595 beacon->len);
1596
1597 if (ret < 0) {
1598 dev_kfree_skb(beacon);
1599 goto out_sleep;
1600 }
1601
1602 hdr = (struct ieee80211_hdr *) beacon->data;
1603 hdr->frame_control = cpu_to_le16(
1604 IEEE80211_FTYPE_MGMT |
1605 IEEE80211_STYPE_PROBE_RESP);
1606
1607 ret = wl1271_cmd_template_set(wl,
1608 CMD_TEMPL_PROBE_RESPONSE,
1609 beacon->data,
1610 beacon->len);
1611 dev_kfree_skb(beacon);
1612 if (ret < 0)
1613 goto out_sleep;
1614 }
1615 }
1616
1476 if (changed & BSS_CHANGED_ASSOC) { 1617 if (changed & BSS_CHANGED_ASSOC) {
1477 if (bss_conf->assoc) { 1618 if (bss_conf->assoc) {
1478 wl->aid = bss_conf->aid; 1619 wl->aid = bss_conf->aid;
1620 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1479 1621
1480 /* 1622 /*
1481 * with wl1271, we don't need to update the 1623 * with wl1271, we don't need to update the
@@ -1492,7 +1634,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1492 goto out_sleep; 1634 goto out_sleep;
1493 1635
1494 /* If we want to go in PSM but we're not there yet */ 1636 /* If we want to go in PSM but we're not there yet */
1495 if (wl->psm_requested && !wl->psm) { 1637 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
1638 !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
1496 mode = STATION_POWER_SAVE_MODE; 1639 mode = STATION_POWER_SAVE_MODE;
1497 ret = wl1271_ps_set_mode(wl, mode); 1640 ret = wl1271_ps_set_mode(wl, mode);
1498 if (ret < 0) 1641 if (ret < 0)
@@ -1500,7 +1643,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1500 } 1643 }
1501 } else { 1644 } else {
1502 /* use defaults when not associated */ 1645 /* use defaults when not associated */
1503 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET; 1646 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1504 wl->aid = 0; 1647 wl->aid = 0;
1505 } 1648 }
1506 1649
@@ -1535,17 +1678,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1535 } 1678 }
1536 } 1679 }
1537 1680
1538 if (changed & BSS_CHANGED_BASIC_RATES) {
1539 wl->basic_rate_set = wl1271_enabled_rates_get(
1540 wl, bss_conf->basic_rates);
1541
1542 ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
1543 if (ret < 0) {
1544 wl1271_warning("Set rate policies failed %d", ret);
1545 goto out_sleep;
1546 }
1547 }
1548
1549out_sleep: 1681out_sleep:
1550 wl1271_ps_elp_sleep(wl); 1682 wl1271_ps_elp_sleep(wl);
1551 1683
@@ -1599,19 +1731,19 @@ static struct ieee80211_rate wl1271_rates[] = {
1599 1731
1600/* can't be const, mac80211 writes to this */ 1732/* can't be const, mac80211 writes to this */
1601static struct ieee80211_channel wl1271_channels[] = { 1733static struct ieee80211_channel wl1271_channels[] = {
1602 { .hw_value = 1, .center_freq = 2412}, 1734 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
1603 { .hw_value = 2, .center_freq = 2417}, 1735 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
1604 { .hw_value = 3, .center_freq = 2422}, 1736 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
1605 { .hw_value = 4, .center_freq = 2427}, 1737 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
1606 { .hw_value = 5, .center_freq = 2432}, 1738 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
1607 { .hw_value = 6, .center_freq = 2437}, 1739 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
1608 { .hw_value = 7, .center_freq = 2442}, 1740 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
1609 { .hw_value = 8, .center_freq = 2447}, 1741 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
1610 { .hw_value = 9, .center_freq = 2452}, 1742 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
1611 { .hw_value = 10, .center_freq = 2457}, 1743 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
1612 { .hw_value = 11, .center_freq = 2462}, 1744 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
1613 { .hw_value = 12, .center_freq = 2467}, 1745 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
1614 { .hw_value = 13, .center_freq = 2472}, 1746 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
1615}; 1747};
1616 1748
1617/* can't be const, mac80211 writes to this */ 1749/* can't be const, mac80211 writes to this */
@@ -1757,7 +1889,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
1757 IEEE80211_HW_BEACON_FILTER | 1889 IEEE80211_HW_BEACON_FILTER |
1758 IEEE80211_HW_SUPPORTS_PS; 1890 IEEE80211_HW_SUPPORTS_PS;
1759 1891
1760 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1892 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1893 BIT(NL80211_IFTYPE_ADHOC);
1761 wl->hw->wiphy->max_scan_ssids = 1; 1894 wl->hw->wiphy->max_scan_ssids = 1;
1762 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 1895 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
1763 1896
@@ -1818,21 +1951,18 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1818 1951
1819 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 1952 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
1820 wl->channel = WL1271_DEFAULT_CHANNEL; 1953 wl->channel = WL1271_DEFAULT_CHANNEL;
1821 wl->scanning = false;
1822 wl->default_key = 0; 1954 wl->default_key = 0;
1823 wl->rx_counter = 0; 1955 wl->rx_counter = 0;
1824 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1956 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1825 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1957 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1826 wl->elp = false;
1827 wl->psm = 0;
1828 wl->psm_requested = false;
1829 wl->psm_entry_retry = 0; 1958 wl->psm_entry_retry = 0;
1830 wl->tx_queue_stopped = false;
1831 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1959 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1832 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET; 1960 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1961 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1962 wl->sta_rate_set = 0;
1833 wl->band = IEEE80211_BAND_2GHZ; 1963 wl->band = IEEE80211_BAND_2GHZ;
1834 wl->vif = NULL; 1964 wl->vif = NULL;
1835 wl->joined = false; 1965 wl->flags = 0;
1836 1966
1837 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 1967 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
1838 wl->tx_frames[i] = NULL; 1968 wl->tx_frames[i] = NULL;
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 507cd91d7ee..e407790f677 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -39,12 +39,13 @@ void wl1271_elp_work(struct work_struct *work)
39 39
40 mutex_lock(&wl->mutex); 40 mutex_lock(&wl->mutex);
41 41
42 if (wl->elp || !wl->psm) 42 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
43 !test_bit(WL1271_FLAG_PSM, &wl->flags))
43 goto out; 44 goto out;
44 45
45 wl1271_debug(DEBUG_PSM, "chip to elp"); 46 wl1271_debug(DEBUG_PSM, "chip to elp");
46 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 47 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
47 wl->elp = true; 48 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
48 49
49out: 50out:
50 mutex_unlock(&wl->mutex); 51 mutex_unlock(&wl->mutex);
@@ -55,7 +56,7 @@ out:
55/* Routines to toggle sleep mode while in ELP */ 56/* Routines to toggle sleep mode while in ELP */
56void wl1271_ps_elp_sleep(struct wl1271 *wl) 57void wl1271_ps_elp_sleep(struct wl1271 *wl)
57{ 58{
58 if (wl->psm) { 59 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
59 cancel_delayed_work(&wl->elp_work); 60 cancel_delayed_work(&wl->elp_work);
60 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 61 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
61 msecs_to_jiffies(ELP_ENTRY_DELAY)); 62 msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -70,7 +71,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
70 u32 start_time = jiffies; 71 u32 start_time = jiffies;
71 bool pending = false; 72 bool pending = false;
72 73
73 if (!wl->elp) 74 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
74 return 0; 75 return 0;
75 76
76 wl1271_debug(DEBUG_PSM, "waking up chip from elp"); 77 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
@@ -101,7 +102,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
101 } 102 }
102 } 103 }
103 104
104 wl->elp = false; 105 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
105 106
106 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", 107 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
107 jiffies_to_msecs(jiffies - start_time)); 108 jiffies_to_msecs(jiffies - start_time));
@@ -143,7 +144,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
143 if (ret < 0) 144 if (ret < 0)
144 return ret; 145 return ret;
145 146
146 wl->psm = 1; 147 set_bit(WL1271_FLAG_PSM, &wl->flags);
147 break; 148 break;
148 case STATION_ACTIVE_MODE: 149 case STATION_ACTIVE_MODE:
149 default: 150 default:
@@ -166,7 +167,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
166 if (ret < 0) 167 if (ret < 0)
167 return ret; 168 return ret;
168 169
169 wl->psm = 0; 170 clear_bit(WL1271_FLAG_PSM, &wl->flags);
170 break; 171 break;
171 } 172 }
172 173
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index 1f237389d1c..99096077152 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -62,73 +62,10 @@
62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) 62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) 63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) 64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
65/*
66 * Interrupt registers.
67 * 64 bit interrupt sources registers ws ced.
68 * sme interupts were removed and new ones were added.
69 * Order was changed.
70 */
71#define FIQ_MASK (REGISTERS_BASE + 0x0400)
72#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
73#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
74#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
75#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
76#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
77#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
78#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
79#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
80#define IRQ_MASK (REGISTERS_BASE + 0x0418)
81#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
82#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
83#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
84#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
85#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
86#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
87#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
88#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
89#define ECPU_MASK (REGISTERS_BASE + 0x0448)
90#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
91#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
92#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
93#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
94#define INT_STS_ND (REGISTERS_BASE + 0x0464)
95#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
96#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
97#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
98#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
99#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
100#define INT_ACK (REGISTERS_BASE + 0x046C)
101#define INT_ACK_L (REGISTERS_BASE + 0x046C)
102#define INT_ACK_H (REGISTERS_BASE + 0x0470)
103#define INT_TRIG (REGISTERS_BASE + 0x0474)
104#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
105#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
106#define HOST_STS_L (REGISTERS_BASE + 0x045C)
107#define HOST_STS_H (REGISTERS_BASE + 0x0460)
108#define HOST_MASK (REGISTERS_BASE + 0x0430)
109#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
110#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
111#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
112#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
113#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
114#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
115#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
116#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
117 65
118#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) 66#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
119#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) 67#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
120 68
121/* Host Interrupts*/
122#define HINT_MASK (REGISTERS_BASE + 0x0494)
123#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
124#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
125#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
126/*1150 spec calls this HINT_STS_RAW*/
127#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
128#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
129#define HINT_ACK (REGISTERS_BASE + 0x04A8)
130#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
131
132/*============================================= 69/*=============================================
133 Host Interrupt Mask Register - 32bit (RW) 70 Host Interrupt Mask Register - 32bit (RW)
134 ------------------------------------------ 71 ------------------------------------------
@@ -433,16 +370,6 @@
433 370
434 371
435/*=============================================== 372/*===============================================
436 Phy regs
437 ===============================================*/
438#define ACX_PHY_ADDR_REG SBB_ADDR
439#define ACX_PHY_DATA_REG SBB_DATA
440#define ACX_PHY_CTRL_REG SBB_CTL
441#define ACX_PHY_REG_WR_MASK 0x00000001ul
442#define ACX_PHY_REG_RD_MASK 0x00000002ul
443
444
445/*===============================================
446 EEPROM Read/Write Request 32bit RW 373 EEPROM Read/Write Request 32bit RW
447 ------------------------------------------ 374 ------------------------------------------
448 1 EE_READ - EEPROM Read Request 1 - Setting this bit 375 1 EE_READ - EEPROM Read Request 1 - Setting this bit
@@ -511,28 +438,6 @@
511#define ACX_CONT_WIND_MIN_MASK 0x0000007f 438#define ACX_CONT_WIND_MIN_MASK 0x0000007f
512#define ACX_CONT_WIND_MAX 0x03ff0000 439#define ACX_CONT_WIND_MAX 0x03ff0000
513 440
514/*
515 * Indirect slave register/memory registers
516 * ----------------------------------------
517 */
518#define HW_SLAVE_REG_ADDR_REG 0x00000004
519#define HW_SLAVE_REG_DATA_REG 0x00000008
520#define HW_SLAVE_REG_CTRL_REG 0x0000000c
521
522#define SLAVE_AUTO_INC 0x00010000
523#define SLAVE_NO_AUTO_INC 0x00000000
524#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
525
526#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
527#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
528#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
529#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
530
531#define HW_FUNC_EVENT_INT_EN 0x8000
532#define HW_FUNC_EVENT_MASK_REG 0x00000034
533
534#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
535
536/*=============================================== 441/*===============================================
537 HI_CFG Interface Configuration Register Values 442 HI_CFG Interface Configuration Register Values
538 ------------------------------------------ 443 ------------------------------------------
@@ -647,10 +552,6 @@ b12-b0 - Supported Rate indicator bits as defined below.
647******************************************************************************/ 552******************************************************************************/
648 553
649 554
650#define TNETW1251_CHIP_ID_PG1_0 0x07010101
651#define TNETW1251_CHIP_ID_PG1_1 0x07020101
652#define TNETW1251_CHIP_ID_PG1_2 0x07030101
653
654/************************************************************************* 555/*************************************************************************
655 556
656 Interrupt Trigger Register (Host -> WiLink) 557 Interrupt Trigger Register (Host -> WiLink)
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e73..ee9564aa6ec 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -397,8 +397,7 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
397 /* poll for data ready */ 397 /* poll for data ready */
398 do { 398 do {
399 val = wl1271_spi_read32(wl, OCP_DATA_READ); 399 val = wl1271_spi_read32(wl, OCP_DATA_READ);
400 timeout--; 400 } while (!(val & OCP_READY_MASK) && --timeout);
401 } while (!(val & OCP_READY_MASK) && timeout);
402 401
403 if (!timeout) { 402 if (!timeout) {
404 wl1271_warning("Top register access timed out."); 403 wl1271_warning("Top register access timed out.");
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 00af065c77c..a288cc317d7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -121,6 +121,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
121 pad = pad - skb->len; 121 pad = pad - skb->len;
122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
123 123
124 /* if the packets are destined for AP (have a STA entry) send them
125 with AP rate policies, otherwise use default basic rates */
126 if (control->control.sta)
127 tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
128
124 desc->tx_attr = cpu_to_le16(tx_attr); 129 desc->tx_attr = cpu_to_le16(tx_attr);
125 130
126 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 131 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
@@ -214,18 +219,50 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
214 return ret; 219 return ret;
215} 220}
216 221
222static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
223{
224 struct ieee80211_supported_band *band;
225 u32 enabled_rates = 0;
226 int bit;
227
228 band = wl->hw->wiphy->bands[wl->band];
229 for (bit = 0; bit < band->n_bitrates; bit++) {
230 if (rate_set & 0x1)
231 enabled_rates |= band->bitrates[bit].hw_value;
232 rate_set >>= 1;
233 }
234
235 return enabled_rates;
236}
237
217void wl1271_tx_work(struct work_struct *work) 238void wl1271_tx_work(struct work_struct *work)
218{ 239{
219 struct wl1271 *wl = container_of(work, struct wl1271, tx_work); 240 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
220 struct sk_buff *skb; 241 struct sk_buff *skb;
221 bool woken_up = false; 242 bool woken_up = false;
243 u32 sta_rates = 0;
222 int ret; 244 int ret;
223 245
246 /* check if the rates supported by the AP have changed */
247 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
248 &wl->flags))) {
249 unsigned long flags;
250 spin_lock_irqsave(&wl->wl_lock, flags);
251 sta_rates = wl->sta_rate_set;
252 spin_unlock_irqrestore(&wl->wl_lock, flags);
253 }
254
224 mutex_lock(&wl->mutex); 255 mutex_lock(&wl->mutex);
225 256
226 if (unlikely(wl->state == WL1271_STATE_OFF)) 257 if (unlikely(wl->state == WL1271_STATE_OFF))
227 goto out; 258 goto out;
228 259
260 /* if rates have changed, re-configure the rate policy */
261 if (unlikely(sta_rates)) {
262 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
263 wl1271_acx_rate_policies(wl);
264 }
265
229 while ((skb = skb_dequeue(&wl->tx_queue))) { 266 while ((skb = skb_dequeue(&wl->tx_queue))) {
230 if (!woken_up) { 267 if (!woken_up) {
231 ret = wl1271_ps_elp_wakeup(wl, false); 268 ret = wl1271_ps_elp_wakeup(wl, false);
@@ -240,18 +277,18 @@ void wl1271_tx_work(struct work_struct *work)
240 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, " 277 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
241 "stop queues"); 278 "stop queues");
242 ieee80211_stop_queues(wl->hw); 279 ieee80211_stop_queues(wl->hw);
243 wl->tx_queue_stopped = true; 280 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
244 skb_queue_head(&wl->tx_queue, skb); 281 skb_queue_head(&wl->tx_queue, skb);
245 goto out; 282 goto out;
246 } else if (ret < 0) { 283 } else if (ret < 0) {
247 dev_kfree_skb(skb); 284 dev_kfree_skb(skb);
248 goto out; 285 goto out;
249 } else if (wl->tx_queue_stopped) { 286 } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
287 &wl->flags)) {
250 /* firmware buffer has space, restart queues */ 288 /* firmware buffer has space, restart queues */
251 wl1271_debug(DEBUG_TX, 289 wl1271_debug(DEBUG_TX,
252 "complete_packet: waking queues"); 290 "complete_packet: waking queues");
253 ieee80211_wake_queues(wl->hw); 291 ieee80211_wake_queues(wl->hw);
254 wl->tx_queue_stopped = false;
255 } 292 }
256 } 293 }
257 294
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index f14deb0c851..2d555cc3050 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -869,7 +869,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
869} 869}
870 870
871static int zd_op_add_interface(struct ieee80211_hw *hw, 871static int zd_op_add_interface(struct ieee80211_hw *hw,
872 struct ieee80211_if_init_conf *conf) 872 struct ieee80211_vif *vif)
873{ 873{
874 struct zd_mac *mac = zd_hw_mac(hw); 874 struct zd_mac *mac = zd_hw_mac(hw);
875 875
@@ -877,22 +877,22 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
877 if (mac->type != NL80211_IFTYPE_UNSPECIFIED) 877 if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
878 return -EOPNOTSUPP; 878 return -EOPNOTSUPP;
879 879
880 switch (conf->type) { 880 switch (vif->type) {
881 case NL80211_IFTYPE_MONITOR: 881 case NL80211_IFTYPE_MONITOR:
882 case NL80211_IFTYPE_MESH_POINT: 882 case NL80211_IFTYPE_MESH_POINT:
883 case NL80211_IFTYPE_STATION: 883 case NL80211_IFTYPE_STATION:
884 case NL80211_IFTYPE_ADHOC: 884 case NL80211_IFTYPE_ADHOC:
885 mac->type = conf->type; 885 mac->type = vif->type;
886 break; 886 break;
887 default: 887 default:
888 return -EOPNOTSUPP; 888 return -EOPNOTSUPP;
889 } 889 }
890 890
891 return zd_write_mac_addr(&mac->chip, conf->mac_addr); 891 return zd_write_mac_addr(&mac->chip, vif->addr);
892} 892}
893 893
894static void zd_op_remove_interface(struct ieee80211_hw *hw, 894static void zd_op_remove_interface(struct ieee80211_hw *hw,
895 struct ieee80211_if_init_conf *conf) 895 struct ieee80211_vif *vif)
896{ 896{
897 struct zd_mac *mac = zd_hw_mac(hw); 897 struct zd_mac *mac = zd_hw_mac(hw);
898 mac->type = NL80211_IFTYPE_UNSPECIFIED; 898 mac->type = NL80211_IFTYPE_UNSPECIFIED;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72d3e437e19..442fc111732 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1079,11 +1079,15 @@ static int eject_installer(struct usb_interface *intf)
1079 int r; 1079 int r;
1080 1080
1081 /* Find bulk out endpoint */ 1081 /* Find bulk out endpoint */
1082 endpoint = &iface_desc->endpoint[1].desc; 1082 for (r = 1; r >= 0; r--) {
1083 if (usb_endpoint_dir_out(endpoint) && 1083 endpoint = &iface_desc->endpoint[r].desc;
1084 usb_endpoint_xfer_bulk(endpoint)) { 1084 if (usb_endpoint_dir_out(endpoint) &&
1085 bulk_out_ep = endpoint->bEndpointAddress; 1085 usb_endpoint_xfer_bulk(endpoint)) {
1086 } else { 1086 bulk_out_ep = endpoint->bEndpointAddress;
1087 break;
1088 }
1089 }
1090 if (r == -1) {
1087 dev_err(&udev->dev, 1091 dev_err(&udev->dev,
1088 "zd1211rw: Could not find bulk out endpoint\n"); 1092 "zd1211rw: Could not find bulk out endpoint\n");
1089 return -ENODEV; 1093 return -ENODEV;
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 8c777ba4e2b..f7fe1aa03b4 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -925,11 +925,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
925 /* Set the MAC address in the EmacLite device */ 925 /* Set the MAC address in the EmacLite device */
926 xemaclite_set_mac_address(lp, ndev->dev_addr); 926 xemaclite_set_mac_address(lp, ndev->dev_addr);
927 927
928 dev_info(dev, 928 dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
929 "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
930 ndev->dev_addr[0], ndev->dev_addr[1],
931 ndev->dev_addr[2], ndev->dev_addr[3],
932 ndev->dev_addr[4], ndev->dev_addr[5]);
933 929
934 ndev->netdev_ops = &xemaclite_netdev_ops; 930 ndev->netdev_ops = &xemaclite_netdev_ops;
935 ndev->flags &= ~IFF_MULTICAST; 931 ndev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff..8b231b30fd1 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -237,7 +237,7 @@ static const struct pci_id_info pci_id_tbl[] = {
237 { } 237 { }
238}; 238};
239 239
240static const struct pci_device_id yellowfin_pci_tbl[] = { 240static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
241 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 241 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
242 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 242 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
243 { } 243 { }
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b232693378c..a3ac4456e0b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -649,6 +649,7 @@ struct qeth_card_options {
649 int performance_stats; 649 int performance_stats;
650 int rx_sg_cb; 650 int rx_sg_cb;
651 enum qeth_ipa_isolation_modes isolation; 651 enum qeth_ipa_isolation_modes isolation;
652 int sniffer;
652}; 653};
653 654
654/* 655/*
@@ -737,6 +738,7 @@ struct qeth_card {
737 struct qeth_discipline discipline; 738 struct qeth_discipline discipline;
738 atomic_t force_alloc_skb; 739 atomic_t force_alloc_skb;
739 struct service_level qeth_service_level; 740 struct service_level qeth_service_level;
741 struct qdio_ssqd_desc ssqd;
740}; 742};
741 743
742struct qeth_card_list_struct { 744struct qeth_card_list_struct {
@@ -811,7 +813,8 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
811struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, 813struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
812 enum qeth_ipa_cmds, enum qeth_prot_versions); 814 enum qeth_ipa_cmds, enum qeth_prot_versions);
813int qeth_query_setadapterparms(struct qeth_card *); 815int qeth_query_setadapterparms(struct qeth_card *);
814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *); 816int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
817 unsigned int, const char *);
815void qeth_queue_input_buffer(struct qeth_card *, int); 818void qeth_queue_input_buffer(struct qeth_card *, int);
816struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 819struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
817 struct qdio_buffer *, struct qdio_buffer_element **, int *, 820 struct qdio_buffer *, struct qdio_buffer_element **, int *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d34804d5ece..fa8a519218a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -269,6 +269,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
269 card->qdio.init_pool.buf_count = bufcnt; 269 card->qdio.init_pool.buf_count = bufcnt;
270 return qeth_alloc_buffer_pool(card); 270 return qeth_alloc_buffer_pool(card);
271} 271}
272EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
272 273
273static int qeth_issue_next_read(struct qeth_card *card) 274static int qeth_issue_next_read(struct qeth_card *card)
274{ 275{
@@ -350,8 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
350 if (IS_IPA(iob->data)) { 351 if (IS_IPA(iob->data)) {
351 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); 352 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
352 if (IS_IPA_REPLY(cmd)) { 353 if (IS_IPA_REPLY(cmd)) {
353 if (cmd->hdr.command < IPA_CMD_SETCCID || 354 if (cmd->hdr.command != IPA_CMD_SETCCID &&
354 cmd->hdr.command > IPA_CMD_MODCCID) 355 cmd->hdr.command != IPA_CMD_DELCCID &&
356 cmd->hdr.command != IPA_CMD_MODCCID &&
357 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
355 qeth_issue_ipa_msg(cmd, 358 qeth_issue_ipa_msg(cmd,
356 cmd->hdr.return_code, card); 359 cmd->hdr.return_code, card);
357 return cmd; 360 return cmd;
@@ -1100,11 +1103,6 @@ static int qeth_setup_card(struct qeth_card *card)
1100 card->thread_running_mask = 0; 1103 card->thread_running_mask = 0;
1101 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1104 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1102 INIT_LIST_HEAD(&card->ip_list); 1105 INIT_LIST_HEAD(&card->ip_list);
1103 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1104 if (!card->ip_tbd_list) {
1105 QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
1106 return -ENOMEM;
1107 }
1108 INIT_LIST_HEAD(card->ip_tbd_list); 1106 INIT_LIST_HEAD(card->ip_tbd_list);
1109 INIT_LIST_HEAD(&card->cmd_waiter_list); 1107 INIT_LIST_HEAD(&card->cmd_waiter_list);
1110 init_waitqueue_head(&card->wait_q); 1108 init_waitqueue_head(&card->wait_q);
@@ -1138,21 +1136,30 @@ static struct qeth_card *qeth_alloc_card(void)
1138 QETH_DBF_TEXT(SETUP, 2, "alloccrd"); 1136 QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1139 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); 1137 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
1140 if (!card) 1138 if (!card)
1141 return NULL; 1139 goto out;
1142 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1140 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1143 if (qeth_setup_channel(&card->read)) { 1141 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1144 kfree(card); 1142 if (!card->ip_tbd_list) {
1145 return NULL; 1143 QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
1146 } 1144 goto out_card;
1147 if (qeth_setup_channel(&card->write)) {
1148 qeth_clean_channel(&card->read);
1149 kfree(card);
1150 return NULL;
1151 } 1145 }
1146 if (qeth_setup_channel(&card->read))
1147 goto out_ip;
1148 if (qeth_setup_channel(&card->write))
1149 goto out_channel;
1152 card->options.layer2 = -1; 1150 card->options.layer2 = -1;
1153 card->qeth_service_level.seq_print = qeth_core_sl_print; 1151 card->qeth_service_level.seq_print = qeth_core_sl_print;
1154 register_service_level(&card->qeth_service_level); 1152 register_service_level(&card->qeth_service_level);
1155 return card; 1153 return card;
1154
1155out_channel:
1156 qeth_clean_channel(&card->read);
1157out_ip:
1158 kfree(card->ip_tbd_list);
1159out_card:
1160 kfree(card);
1161out:
1162 return NULL;
1156} 1163}
1157 1164
1158static int qeth_determine_card_type(struct qeth_card *card) 1165static int qeth_determine_card_type(struct qeth_card *card)
@@ -1355,26 +1362,29 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1355 return ret; 1362 return ret;
1356} 1363}
1357 1364
1358static int qeth_get_unitaddr(struct qeth_card *card) 1365static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
1359{ 1366{
1360 int length; 1367 QETH_DBF_TEXT(SETUP, 2, "cfgunit");
1361 char *prcd;
1362 int rc;
1363
1364 QETH_DBF_TEXT(SETUP, 2, "getunit");
1365 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
1366 if (rc) {
1367 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
1368 dev_name(&card->gdev->dev), rc);
1369 return rc;
1370 }
1371 card->info.chpid = prcd[30]; 1368 card->info.chpid = prcd[30];
1372 card->info.unit_addr2 = prcd[31]; 1369 card->info.unit_addr2 = prcd[31];
1373 card->info.cula = prcd[63]; 1370 card->info.cula = prcd[63];
1374 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && 1371 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1375 (prcd[0x11] == _ascebc['M'])); 1372 (prcd[0x11] == _ascebc['M']));
1376 kfree(prcd); 1373}
1377 return 0; 1374
1375static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1376{
1377 QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1378
1379 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
1380 card->info.blkt.time_total = 250;
1381 card->info.blkt.inter_packet = 5;
1382 card->info.blkt.inter_packet_jumbo = 15;
1383 } else {
1384 card->info.blkt.time_total = 0;
1385 card->info.blkt.inter_packet = 0;
1386 card->info.blkt.inter_packet_jumbo = 0;
1387 }
1378} 1388}
1379 1389
1380static void qeth_init_tokens(struct qeth_card *card) 1390static void qeth_init_tokens(struct qeth_card *card)
@@ -2573,8 +2583,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2573} 2583}
2574EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2584EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2575 2585
2576int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, 2586int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2577 const char *dbftext) 2587 unsigned int qdio_error, const char *dbftext)
2578{ 2588{
2579 if (qdio_error) { 2589 if (qdio_error) {
2580 QETH_DBF_TEXT(TRACE, 2, dbftext); 2590 QETH_DBF_TEXT(TRACE, 2, dbftext);
@@ -2584,7 +2594,11 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2584 QETH_DBF_TEXT_(QERR, 2, " F14=%02X", 2594 QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
2585 buf->element[14].flags & 0xff); 2595 buf->element[14].flags & 0xff);
2586 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); 2596 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
2587 return 1; 2597 if ((buf->element[15].flags & 0xff) == 0x12) {
2598 card->stats.rx_dropped++;
2599 return 0;
2600 } else
2601 return 1;
2588 } 2602 }
2589 return 0; 2603 return 0;
2590} 2604}
@@ -2667,7 +2681,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
2667 qdio_err = 1; 2681 qdio_err = 1;
2668 } 2682 }
2669 } 2683 }
2670 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr"); 2684 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
2671 2685
2672 if (!qdio_err) 2686 if (!qdio_err)
2673 return QETH_SEND_ERROR_NONE; 2687 return QETH_SEND_ERROR_NONE;
@@ -3509,6 +3523,7 @@ void qeth_tx_timeout(struct net_device *dev)
3509{ 3523{
3510 struct qeth_card *card; 3524 struct qeth_card *card;
3511 3525
3526 QETH_DBF_TEXT(TRACE, 4, "txtimeo");
3512 card = dev->ml_priv; 3527 card = dev->ml_priv;
3513 card->stats.tx_errors++; 3528 card->stats.tx_errors++;
3514 qeth_schedule_recovery(card); 3529 qeth_schedule_recovery(card);
@@ -3847,9 +3862,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3847 3862
3848int qeth_core_hardsetup_card(struct qeth_card *card) 3863int qeth_core_hardsetup_card(struct qeth_card *card)
3849{ 3864{
3850 struct qdio_ssqd_desc *ssqd;
3851 int retries = 0; 3865 int retries = 0;
3852 int mpno = 0;
3853 int rc; 3866 int rc;
3854 3867
3855 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3868 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3882,31 +3895,6 @@ retriable:
3882 else 3895 else
3883 goto retry; 3896 goto retry;
3884 } 3897 }
3885
3886 rc = qeth_get_unitaddr(card);
3887 if (rc) {
3888 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3889 return rc;
3890 }
3891
3892 ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL);
3893 if (!ssqd) {
3894 rc = -ENOMEM;
3895 goto out;
3896 }
3897 rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd);
3898 if (rc == 0)
3899 mpno = ssqd->pcnt;
3900 kfree(ssqd);
3901
3902 if (mpno)
3903 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3904 if (card->info.portno > mpno) {
3905 QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d"
3906 "\n.", CARD_BUS_ID(card), card->info.portno);
3907 rc = -ENODEV;
3908 goto out;
3909 }
3910 qeth_init_tokens(card); 3898 qeth_init_tokens(card);
3911 qeth_init_func_level(card); 3899 qeth_init_func_level(card);
3912 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); 3900 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -3990,7 +3978,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
3990 struct qdio_buffer_element *element = *__element; 3978 struct qdio_buffer_element *element = *__element;
3991 int offset = *__offset; 3979 int offset = *__offset;
3992 struct sk_buff *skb = NULL; 3980 struct sk_buff *skb = NULL;
3993 int skb_len; 3981 int skb_len = 0;
3994 void *data_ptr; 3982 void *data_ptr;
3995 int data_len; 3983 int data_len;
3996 int headroom = 0; 3984 int headroom = 0;
@@ -4009,20 +3997,24 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
4009 *hdr = element->addr + offset; 3997 *hdr = element->addr + offset;
4010 3998
4011 offset += sizeof(struct qeth_hdr); 3999 offset += sizeof(struct qeth_hdr);
4012 if (card->options.layer2) { 4000 switch ((*hdr)->hdr.l2.id) {
4013 if (card->info.type == QETH_CARD_TYPE_OSN) { 4001 case QETH_HEADER_TYPE_LAYER2:
4014 skb_len = (*hdr)->hdr.osn.pdu_length; 4002 skb_len = (*hdr)->hdr.l2.pkt_length;
4015 headroom = sizeof(struct qeth_hdr); 4003 break;
4016 } else { 4004 case QETH_HEADER_TYPE_LAYER3:
4017 skb_len = (*hdr)->hdr.l2.pkt_length;
4018 }
4019 } else {
4020 skb_len = (*hdr)->hdr.l3.length; 4005 skb_len = (*hdr)->hdr.l3.length;
4021 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 4006 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
4022 (card->info.link_type == QETH_LINK_TYPE_HSTR)) 4007 (card->info.link_type == QETH_LINK_TYPE_HSTR))
4023 headroom = TR_HLEN; 4008 headroom = TR_HLEN;
4024 else 4009 else
4025 headroom = ETH_HLEN; 4010 headroom = ETH_HLEN;
4011 break;
4012 case QETH_HEADER_TYPE_OSN:
4013 skb_len = (*hdr)->hdr.osn.pdu_length;
4014 headroom = sizeof(struct qeth_hdr);
4015 break;
4016 default:
4017 break;
4026 } 4018 }
4027 4019
4028 if (!skb_len) 4020 if (!skb_len)
@@ -4177,6 +4169,41 @@ void qeth_core_free_discipline(struct qeth_card *card)
4177 card->discipline.ccwgdriver = NULL; 4169 card->discipline.ccwgdriver = NULL;
4178} 4170}
4179 4171
4172static void qeth_determine_capabilities(struct qeth_card *card)
4173{
4174 int rc;
4175 int length;
4176 char *prcd;
4177
4178 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4179 rc = ccw_device_set_online(CARD_DDEV(card));
4180 if (rc) {
4181 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4182 goto out;
4183 }
4184
4185
4186 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4187 if (rc) {
4188 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
4189 dev_name(&card->gdev->dev), rc);
4190 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4191 goto out_offline;
4192 }
4193 qeth_configure_unitaddr(card, prcd);
4194 qeth_configure_blkt_default(card, prcd);
4195 kfree(prcd);
4196
4197 rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
4198 if (rc)
4199 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4200
4201out_offline:
4202 ccw_device_set_offline(CARD_DDEV(card));
4203out:
4204 return;
4205}
4206
4180static int qeth_core_probe_device(struct ccwgroup_device *gdev) 4207static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4181{ 4208{
4182 struct qeth_card *card; 4209 struct qeth_card *card;
@@ -4242,6 +4269,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4242 write_lock_irqsave(&qeth_core_card_list.rwlock, flags); 4269 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4243 list_add_tail(&card->list, &qeth_core_card_list.list); 4270 list_add_tail(&card->list, &qeth_core_card_list.list);
4244 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); 4271 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4272
4273 qeth_determine_capabilities(card);
4245 return 0; 4274 return 0;
4246 4275
4247err_card: 4276err_card:
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1ba51152f66..104a3351e02 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -156,6 +156,8 @@ enum qeth_ipa_return_codes {
156 IPA_RC_IP_TABLE_FULL = 0x0002, 156 IPA_RC_IP_TABLE_FULL = 0x0002,
157 IPA_RC_UNKNOWN_ERROR = 0x0003, 157 IPA_RC_UNKNOWN_ERROR = 0x0003,
158 IPA_RC_UNSUPPORTED_COMMAND = 0x0004, 158 IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
159 IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
160 IPA_RC_INVALID_FORMAT = 0x0006,
159 IPA_RC_DUP_IPV6_REMOTE = 0x0008, 161 IPA_RC_DUP_IPV6_REMOTE = 0x0008,
160 IPA_RC_DUP_IPV6_HOME = 0x0010, 162 IPA_RC_DUP_IPV6_HOME = 0x0010,
161 IPA_RC_UNREGISTERED_ADDR = 0x0011, 163 IPA_RC_UNREGISTERED_ADDR = 0x0011,
@@ -196,6 +198,11 @@ enum qeth_ipa_return_codes {
196 IPA_RC_INVALID_IP_VERSION2 = 0xf001, 198 IPA_RC_INVALID_IP_VERSION2 = 0xf001,
197 IPA_RC_FFFF = 0xffff 199 IPA_RC_FFFF = 0xffff
198}; 200};
201/* for DELIP */
202#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED
203/* for SET_DIAGNOSTIC_ASSIST */
204#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
205#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
199 206
200/* IPA function flags; each flag marks availability of respective function */ 207/* IPA function flags; each flag marks availability of respective function */
201enum qeth_ipa_funcs { 208enum qeth_ipa_funcs {
@@ -246,6 +253,7 @@ enum qeth_ipa_setadp_cmd {
246 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L, 253 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
247 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L, 254 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
248 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L, 255 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
256 IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
249 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L, 257 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
250}; 258};
251enum qeth_ipa_mac_ops { 259enum qeth_ipa_mac_ops {
@@ -424,6 +432,40 @@ struct qeth_create_destroy_address {
424 __u8 unique_id[8]; 432 __u8 unique_id[8];
425} __attribute__ ((packed)); 433} __attribute__ ((packed));
426 434
435/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
436
437enum qeth_diags_cmds {
438 QETH_DIAGS_CMD_QUERY = 0x0001,
439 QETH_DIAGS_CMD_TRAP = 0x0002,
440 QETH_DIAGS_CMD_TRACE = 0x0004,
441 QETH_DIAGS_CMD_NOLOG = 0x0008,
442 QETH_DIAGS_CMD_DUMP = 0x0010,
443};
444
445enum qeth_diags_trace_types {
446 QETH_DIAGS_TYPE_HIPERSOCKET = 0x02,
447};
448
449enum qeth_diags_trace_cmds {
450 QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001,
451 QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002,
452 QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004,
453 QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008,
454 QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
455};
456
457struct qeth_ipacmd_diagass {
458 __u32 host_tod2;
459 __u32:32;
460 __u16 subcmd_len;
461 __u16:16;
462 __u32 subcmd;
463 __u8 type;
464 __u8 action;
465 __u16 options;
466 __u32:32;
467} __attribute__ ((packed));
468
427/* Header for each IPA command */ 469/* Header for each IPA command */
428struct qeth_ipacmd_hdr { 470struct qeth_ipacmd_hdr {
429 __u8 command; 471 __u8 command;
@@ -452,6 +494,7 @@ struct qeth_ipa_cmd {
452 struct qeth_create_destroy_address create_destroy_addr; 494 struct qeth_create_destroy_address create_destroy_addr;
453 struct qeth_ipacmd_setadpparms setadapterparms; 495 struct qeth_ipacmd_setadpparms setadapterparms;
454 struct qeth_set_routing setrtg; 496 struct qeth_set_routing setrtg;
497 struct qeth_ipacmd_diagass diagass;
455 } data; 498 } data;
456} __attribute__ ((packed)); 499} __attribute__ ((packed));
457 500
@@ -469,7 +512,6 @@ enum qeth_ipa_arp_return_codes {
469 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, 512 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
470}; 513};
471 514
472
473extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); 515extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
474extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); 516extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
475 517
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9ff2b36fdc4..88ae4357136 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -118,7 +118,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
118{ 118{
119 struct qeth_card *card = dev_get_drvdata(dev); 119 struct qeth_card *card = dev_get_drvdata(dev);
120 char *tmp; 120 char *tmp;
121 unsigned int portno; 121 unsigned int portno, limit;
122 122
123 if (!card) 123 if (!card)
124 return -EINVAL; 124 return -EINVAL;
@@ -128,9 +128,11 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
128 return -EPERM; 128 return -EPERM;
129 129
130 portno = simple_strtoul(buf, &tmp, 16); 130 portno = simple_strtoul(buf, &tmp, 16);
131 if (portno > QETH_MAX_PORTNO) { 131 if (portno > QETH_MAX_PORTNO)
132 return -EINVAL;
133 limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
134 if (portno > limit)
132 return -EINVAL; 135 return -EINVAL;
133 }
134 136
135 card->info.portno = portno; 137 card->info.portno = portno;
136 return count; 138 return count;
@@ -537,7 +539,7 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
537 struct qeth_card *card = dev_get_drvdata(dev); 539 struct qeth_card *card = dev_get_drvdata(dev);
538 540
539 return qeth_dev_blkt_store(card, buf, count, 541 return qeth_dev_blkt_store(card, buf, count,
540 &card->info.blkt.time_total, 1000); 542 &card->info.blkt.time_total, 5000);
541} 543}
542 544
543 545
@@ -559,7 +561,7 @@ static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
559 struct qeth_card *card = dev_get_drvdata(dev); 561 struct qeth_card *card = dev_get_drvdata(dev);
560 562
561 return qeth_dev_blkt_store(card, buf, count, 563 return qeth_dev_blkt_store(card, buf, count,
562 &card->info.blkt.inter_packet, 100); 564 &card->info.blkt.inter_packet, 1000);
563} 565}
564 566
565static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show, 567static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
@@ -580,7 +582,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
580 struct qeth_card *card = dev_get_drvdata(dev); 582 struct qeth_card *card = dev_get_drvdata(dev);
581 583
582 return qeth_dev_blkt_store(card, buf, count, 584 return qeth_dev_blkt_store(card, buf, count,
583 &card->info.blkt.inter_packet_jumbo, 100); 585 &card->info.blkt.inter_packet_jumbo, 1000);
584} 586}
585 587
586static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show, 588static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0b763396d5d..51fde6f2e0b 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -486,22 +486,14 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
486 case IPA_RC_L2_DUP_MAC: 486 case IPA_RC_L2_DUP_MAC:
487 case IPA_RC_L2_DUP_LAYER3_MAC: 487 case IPA_RC_L2_DUP_LAYER3_MAC:
488 dev_warn(&card->gdev->dev, 488 dev_warn(&card->gdev->dev,
489 "MAC address " 489 "MAC address %pM already exists\n",
490 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 490 card->dev->dev_addr);
491 "already exists\n",
492 card->dev->dev_addr[0], card->dev->dev_addr[1],
493 card->dev->dev_addr[2], card->dev->dev_addr[3],
494 card->dev->dev_addr[4], card->dev->dev_addr[5]);
495 break; 491 break;
496 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: 492 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
497 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: 493 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
498 dev_warn(&card->gdev->dev, 494 dev_warn(&card->gdev->dev,
499 "MAC address " 495 "MAC address %pM is not authorized\n",
500 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 496 card->dev->dev_addr);
501 "is not authorized\n",
502 card->dev->dev_addr[0], card->dev->dev_addr[1],
503 card->dev->dev_addr[2], card->dev->dev_addr[3],
504 card->dev->dev_addr[4], card->dev->dev_addr[5]);
505 break; 497 break;
506 default: 498 default:
507 break; 499 break;
@@ -512,12 +504,8 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
512 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, 504 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
513 OSA_ADDR_LEN); 505 OSA_ADDR_LEN);
514 dev_info(&card->gdev->dev, 506 dev_info(&card->gdev->dev,
515 "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 507 "MAC address %pM successfully registered on device %s\n",
516 "successfully registered on device %s\n", 508 card->dev->dev_addr, card->dev->name);
517 card->dev->dev_addr[0], card->dev->dev_addr[1],
518 card->dev->dev_addr[2], card->dev->dev_addr[3],
519 card->dev->dev_addr[4], card->dev->dev_addr[5],
520 card->dev->name);
521 } 509 }
522 return 0; 510 return 0;
523} 511}
@@ -634,7 +622,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
634 for (dm = dev->mc_list; dm; dm = dm->next) 622 for (dm = dev->mc_list; dm; dm = dm->next)
635 qeth_l2_add_mc(card, dm->da_addr, 0); 623 qeth_l2_add_mc(card, dm->da_addr, 0);
636 624
637 list_for_each_entry(ha, &dev->uc.list, list) 625 netdev_for_each_uc_addr(ha, dev)
638 qeth_l2_add_mc(card, ha->addr, 1); 626 qeth_l2_add_mc(card, ha->addr, 1);
639 627
640 spin_unlock_bh(&card->mclock); 628 spin_unlock_bh(&card->mclock);
@@ -781,7 +769,8 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
781 index = i % QDIO_MAX_BUFFERS_PER_Q; 769 index = i % QDIO_MAX_BUFFERS_PER_Q;
782 buffer = &card->qdio.in_q->bufs[index]; 770 buffer = &card->qdio.in_q->bufs[index];
783 if (!(qdio_err && 771 if (!(qdio_err &&
784 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr"))) 772 qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
773 "qinerr")))
785 qeth_l2_process_inbound_buffer(card, buffer, index); 774 qeth_l2_process_inbound_buffer(card, buffer, index);
786 /* clear buffer and give back to hardware */ 775 /* clear buffer and give back to hardware */
787 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 776 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
@@ -938,7 +927,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
938 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 927 QETH_DBF_TEXT(SETUP, 2, "setonlin");
939 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 928 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
940 929
941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
942 recover_flag = card->state; 930 recover_flag = card->state;
943 rc = qeth_core_hardsetup_card(card); 931 rc = qeth_core_hardsetup_card(card);
944 if (rc) { 932 if (rc) {
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 321988fa9f7..8447d233d0b 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,6 +13,8 @@
13 13
14#include "qeth_core.h" 14#include "qeth_core.h"
15 15
16#define QETH_SNIFF_AVAIL 0x0008
17
16struct qeth_ipaddr { 18struct qeth_ipaddr {
17 struct list_head entry; 19 struct list_head entry;
18 enum qeth_ip_types type; 20 enum qeth_ip_types type;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fd1b6ed3721..5475834ab91 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -242,6 +242,8 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
242 struct qeth_ipaddr *tmp, *t; 242 struct qeth_ipaddr *tmp, *t;
243 int found = 0; 243 int found = 0;
244 244
245 if (card->options.sniffer)
246 return 0;
245 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) { 247 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
246 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) && 248 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
247 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC)) 249 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
@@ -457,6 +459,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
457 QETH_DBF_TEXT(TRACE, 2, "sdiplist"); 459 QETH_DBF_TEXT(TRACE, 2, "sdiplist");
458 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); 460 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
459 461
462 if (card->options.sniffer)
463 return;
460 spin_lock_irqsave(&card->ip_lock, flags); 464 spin_lock_irqsave(&card->ip_lock, flags);
461 tbd_list = card->ip_tbd_list; 465 tbd_list = card->ip_tbd_list;
462 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); 466 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -495,7 +499,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
495 spin_unlock_irqrestore(&card->ip_lock, flags); 499 spin_unlock_irqrestore(&card->ip_lock, flags);
496 rc = qeth_l3_deregister_addr_entry(card, addr); 500 rc = qeth_l3_deregister_addr_entry(card, addr);
497 spin_lock_irqsave(&card->ip_lock, flags); 501 spin_lock_irqsave(&card->ip_lock, flags);
498 if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED)) 502 if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
499 kfree(addr); 503 kfree(addr);
500 else 504 else
501 list_add_tail(&addr->entry, &card->ip_list); 505 list_add_tail(&addr->entry, &card->ip_list);
@@ -513,6 +517,8 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
513 unsigned long flags; 517 unsigned long flags;
514 518
515 QETH_DBF_TEXT(TRACE, 4, "clearip"); 519 QETH_DBF_TEXT(TRACE, 4, "clearip");
520 if (recover && card->options.sniffer)
521 return;
516 spin_lock_irqsave(&card->ip_lock, flags); 522 spin_lock_irqsave(&card->ip_lock, flags);
517 /* clear todo list */ 523 /* clear todo list */
518 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) { 524 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
@@ -1674,6 +1680,76 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
1674 return rc; 1680 return rc;
1675} 1681}
1676 1682
1683static int
1684qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
1685 unsigned long data)
1686{
1687 struct qeth_ipa_cmd *cmd;
1688 __u16 rc;
1689
1690 QETH_DBF_TEXT(SETUP, 2, "diastrcb");
1691
1692 cmd = (struct qeth_ipa_cmd *)data;
1693 rc = cmd->hdr.return_code;
1694 if (rc) {
1695 QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
1696 if (cmd->data.diagass.action == QETH_DIAGS_CMD_TRACE_ENABLE) {
1697 switch (rc) {
1698 case IPA_RC_HARDWARE_AUTH_ERROR:
1699 dev_warn(&card->gdev->dev, "The device is not "
1700 "authorized to run as a HiperSockets "
1701 "network traffic analyzer\n");
1702 break;
1703 case IPA_RC_TRACE_ALREADY_ACTIVE:
1704 dev_warn(&card->gdev->dev, "A HiperSockets "
1705 "network traffic analyzer is already "
1706 "active in the HiperSockets LAN\n");
1707 break;
1708 default:
1709 break;
1710 }
1711 }
1712 return 0;
1713 }
1714
1715 switch (cmd->data.diagass.action) {
1716 case QETH_DIAGS_CMD_TRACE_QUERY:
1717 break;
1718 case QETH_DIAGS_CMD_TRACE_DISABLE:
1719 card->info.promisc_mode = SET_PROMISC_MODE_OFF;
1720 dev_info(&card->gdev->dev, "The HiperSockets network traffic "
1721 "analyzer is deactivated\n");
1722 break;
1723 case QETH_DIAGS_CMD_TRACE_ENABLE:
1724 card->info.promisc_mode = SET_PROMISC_MODE_ON;
1725 dev_info(&card->gdev->dev, "The HiperSockets network traffic "
1726 "analyzer is activated\n");
1727 break;
1728 default:
1729 QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
1730 cmd->data.diagass.action, QETH_CARD_IFNAME(card));
1731 }
1732
1733 return 0;
1734}
1735
1736static int
1737qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1738{
1739 struct qeth_cmd_buffer *iob;
1740 struct qeth_ipa_cmd *cmd;
1741
1742 QETH_DBF_TEXT(SETUP, 2, "diagtrac");
1743
1744 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
1745 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1746 cmd->data.diagass.subcmd_len = 16;
1747 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
1748 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
1749 cmd->data.diagass.action = diags_cmd;
1750 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
1751}
1752
1677static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, 1753static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1678 struct net_device *dev) 1754 struct net_device *dev)
1679{ 1755{
@@ -1951,7 +2027,10 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
1951 case QETH_CAST_ANYCAST: 2027 case QETH_CAST_ANYCAST:
1952 case QETH_CAST_NOCAST: 2028 case QETH_CAST_NOCAST:
1953 default: 2029 default:
1954 skb->pkt_type = PACKET_HOST; 2030 if (card->options.sniffer)
2031 skb->pkt_type = PACKET_OTHERHOST;
2032 else
2033 skb->pkt_type = PACKET_HOST;
1955 memcpy(tg_addr, card->dev->dev_addr, 2034 memcpy(tg_addr, card->dev->dev_addr,
1956 card->dev->addr_len); 2035 card->dev->addr_len);
1957 } 2036 }
@@ -2007,7 +2086,6 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2007 int offset; 2086 int offset;
2008 __u16 vlan_tag = 0; 2087 __u16 vlan_tag = 0;
2009 unsigned int len; 2088 unsigned int len;
2010
2011 /* get first element of current buffer */ 2089 /* get first element of current buffer */
2012 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 2090 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2013 offset = 0; 2091 offset = 0;
@@ -2026,7 +2104,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2026 case QETH_HEADER_TYPE_LAYER3: 2104 case QETH_HEADER_TYPE_LAYER3:
2027 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); 2105 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
2028 len = skb->len; 2106 len = skb->len;
2029 if (vlan_tag) 2107 if (vlan_tag && !card->options.sniffer)
2030 if (card->vlangrp) 2108 if (card->vlangrp)
2031 vlan_hwaccel_rx(skb, card->vlangrp, 2109 vlan_hwaccel_rx(skb, card->vlangrp,
2032 vlan_tag); 2110 vlan_tag);
@@ -2037,6 +2115,16 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2037 else 2115 else
2038 netif_rx(skb); 2116 netif_rx(skb);
2039 break; 2117 break;
2118 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
2119 skb->pkt_type = PACKET_HOST;
2120 skb->protocol = eth_type_trans(skb, skb->dev);
2121 if (card->options.checksum_type == NO_CHECKSUMMING)
2122 skb->ip_summed = CHECKSUM_UNNECESSARY;
2123 else
2124 skb->ip_summed = CHECKSUM_NONE;
2125 len = skb->len;
2126 netif_receive_skb(skb);
2127 break;
2040 default: 2128 default:
2041 dev_kfree_skb_any(skb); 2129 dev_kfree_skb_any(skb);
2042 QETH_DBF_TEXT(TRACE, 3, "inbunkno"); 2130 QETH_DBF_TEXT(TRACE, 3, "inbunkno");
@@ -2118,6 +2206,9 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2118 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 2206 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2119 2207
2120 qeth_set_allowed_threads(card, 0, 1); 2208 qeth_set_allowed_threads(card, 0, 1);
2209 if (card->options.sniffer &&
2210 (card->info.promisc_mode == SET_PROMISC_MODE_ON))
2211 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
2121 if (card->read.state == CH_STATE_UP && 2212 if (card->read.state == CH_STATE_UP &&
2122 card->write.state == CH_STATE_UP && 2213 card->write.state == CH_STATE_UP &&
2123 (card->state == CARD_STATE_UP)) { 2214 (card->state == CARD_STATE_UP)) {
@@ -2162,6 +2253,36 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2162 return rc; 2253 return rc;
2163} 2254}
2164 2255
2256/*
2257 * test for and Switch promiscuous mode (on or off)
2258 * either for guestlan or HiperSocket Sniffer
2259 */
2260static void
2261qeth_l3_handle_promisc_mode(struct qeth_card *card)
2262{
2263 struct net_device *dev = card->dev;
2264
2265 if (((dev->flags & IFF_PROMISC) &&
2266 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
2267 (!(dev->flags & IFF_PROMISC) &&
2268 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
2269 return;
2270
2271 if (card->info.guestlan) { /* Guestlan trace */
2272 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2273 qeth_setadp_promisc_mode(card);
2274 } else if (card->options.sniffer && /* HiperSockets trace */
2275 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
2276 if (dev->flags & IFF_PROMISC) {
2277 QETH_DBF_TEXT(TRACE, 3, "+promisc");
2278 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
2279 } else {
2280 QETH_DBF_TEXT(TRACE, 3, "-promisc");
2281 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
2282 }
2283 }
2284}
2285
2165static void qeth_l3_set_multicast_list(struct net_device *dev) 2286static void qeth_l3_set_multicast_list(struct net_device *dev)
2166{ 2287{
2167 struct qeth_card *card = dev->ml_priv; 2288 struct qeth_card *card = dev->ml_priv;
@@ -2170,15 +2291,17 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
2170 if (qeth_threads_running(card, QETH_RECOVER_THREAD) && 2291 if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
2171 (card->state != CARD_STATE_UP)) 2292 (card->state != CARD_STATE_UP))
2172 return; 2293 return;
2173 qeth_l3_delete_mc_addresses(card); 2294 if (!card->options.sniffer) {
2174 qeth_l3_add_multicast_ipv4(card); 2295 qeth_l3_delete_mc_addresses(card);
2296 qeth_l3_add_multicast_ipv4(card);
2175#ifdef CONFIG_QETH_IPV6 2297#ifdef CONFIG_QETH_IPV6
2176 qeth_l3_add_multicast_ipv6(card); 2298 qeth_l3_add_multicast_ipv6(card);
2177#endif 2299#endif
2178 qeth_l3_set_ip_addr_list(card); 2300 qeth_l3_set_ip_addr_list(card);
2179 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 2301 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2180 return; 2302 return;
2181 qeth_setadp_promisc_mode(card); 2303 }
2304 qeth_l3_handle_promisc_mode(card);
2182} 2305}
2183 2306
2184static const char *qeth_l3_arp_get_error_cause(int *rc) 2307static const char *qeth_l3_arp_get_error_cause(int *rc)
@@ -2778,8 +2901,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2778 int nr_frags; 2901 int nr_frags;
2779 2902
2780 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2903 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2781 (skb->protocol != htons(ETH_P_IPV6)) && 2904 (((skb->protocol != htons(ETH_P_IPV6)) &&
2782 (skb->protocol != htons(ETH_P_IP))) 2905 (skb->protocol != htons(ETH_P_IP))) ||
2906 card->options.sniffer))
2783 goto tx_drop; 2907 goto tx_drop;
2784 2908
2785 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 2909 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -3155,7 +3279,7 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
3155 index = i % QDIO_MAX_BUFFERS_PER_Q; 3279 index = i % QDIO_MAX_BUFFERS_PER_Q;
3156 buffer = &card->qdio.in_q->bufs[index]; 3280 buffer = &card->qdio.in_q->bufs[index];
3157 if (!(qdio_err && 3281 if (!(qdio_err &&
3158 qeth_check_qdio_errors(buffer->buffer, 3282 qeth_check_qdio_errors(card, buffer->buffer,
3159 qdio_err, "qinerr"))) 3283 qdio_err, "qinerr")))
3160 qeth_l3_process_inbound_buffer(card, buffer, index); 3284 qeth_l3_process_inbound_buffer(card, buffer, index);
3161 /* clear buffer and give back to hardware */ 3285 /* clear buffer and give back to hardware */
@@ -3214,8 +3338,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3214 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 3338 QETH_DBF_TEXT(SETUP, 2, "setonlin");
3215 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 3339 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
3216 3340
3217 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3218
3219 recover_flag = card->state; 3341 recover_flag = card->state;
3220 rc = qeth_core_hardsetup_card(card); 3342 rc = qeth_core_hardsetup_card(card);
3221 if (rc) { 3343 if (rc) {
@@ -3250,20 +3372,22 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3250 goto out_remove; 3372 goto out_remove;
3251 } else 3373 } else
3252 card->lan_online = 1; 3374 card->lan_online = 1;
3253 qeth_l3_set_large_send(card, card->options.large_send);
3254 3375
3255 rc = qeth_l3_setadapter_parms(card); 3376 rc = qeth_l3_setadapter_parms(card);
3256 if (rc) 3377 if (rc)
3257 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3378 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3258 rc = qeth_l3_start_ipassists(card); 3379 if (!card->options.sniffer) {
3259 if (rc) 3380 rc = qeth_l3_start_ipassists(card);
3260 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 3381 if (rc)
3261 rc = qeth_l3_setrouting_v4(card); 3382 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3262 if (rc) 3383 qeth_l3_set_large_send(card, card->options.large_send);
3263 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3384 rc = qeth_l3_setrouting_v4(card);
3264 rc = qeth_l3_setrouting_v6(card); 3385 if (rc)
3265 if (rc) 3386 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
3266 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 3387 rc = qeth_l3_setrouting_v6(card);
3388 if (rc)
3389 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3390 }
3267 netif_tx_disable(card->dev); 3391 netif_tx_disable(card->dev);
3268 3392
3269 rc = qeth_init_qdio_queues(card); 3393 rc = qeth_init_qdio_queues(card);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 3360b0941aa..3f08b11274a 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -319,6 +319,61 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, 319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
320 qeth_l3_dev_checksum_store); 320 qeth_l3_dev_checksum_store);
321 321
322static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct qeth_card *card = dev_get_drvdata(dev);
326
327 if (!card)
328 return -EINVAL;
329
330 return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
331}
332
333static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
334 struct device_attribute *attr, const char *buf, size_t count)
335{
336 struct qeth_card *card = dev_get_drvdata(dev);
337 int ret;
338 unsigned long i;
339
340 if (!card)
341 return -EINVAL;
342
343 if (card->info.type != QETH_CARD_TYPE_IQD)
344 return -EPERM;
345
346 if ((card->state != CARD_STATE_DOWN) &&
347 (card->state != CARD_STATE_RECOVER))
348 return -EPERM;
349
350 ret = strict_strtoul(buf, 16, &i);
351 if (ret)
352 return -EINVAL;
353 switch (i) {
354 case 0:
355 card->options.sniffer = i;
356 break;
357 case 1:
358 ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
359 if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
360 card->options.sniffer = i;
361 if (card->qdio.init_pool.buf_count !=
362 QETH_IN_BUF_COUNT_MAX)
363 qeth_realloc_buffer_pool(card,
364 QETH_IN_BUF_COUNT_MAX);
365 break;
366 } else
367 return -EPERM;
368 default: /* fall through */
369 return -EINVAL;
370 }
371 return count;
372}
373
374static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
375 qeth_l3_dev_sniffer_store);
376
322static ssize_t qeth_l3_dev_large_send_show(struct device *dev, 377static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
323 struct device_attribute *attr, char *buf) 378 struct device_attribute *attr, char *buf)
324{ 379{
@@ -373,6 +428,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
373 &dev_attr_broadcast_mode.attr, 428 &dev_attr_broadcast_mode.attr,
374 &dev_attr_canonical_macaddr.attr, 429 &dev_attr_canonical_macaddr.attr,
375 &dev_attr_checksumming.attr, 430 &dev_attr_checksumming.attr,
431 &dev_attr_sniffer.attr,
376 &dev_attr_large_send.attr, 432 &dev_attr_large_send.attr,
377 NULL, 433 NULL,
378}; 434};
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 0d490c164db..9086047c32d 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -482,15 +482,6 @@ struct ieee80211_header_data {
482 u16 seq_ctrl; 482 u16 seq_ctrl;
483}; 483};
484 484
485struct ieee80211_hdr_3addr {
486 u16 frame_ctl;
487 u16 duration_id;
488 u8 addr1[ETH_ALEN];
489 u8 addr2[ETH_ALEN];
490 u8 addr3[ETH_ALEN];
491 u16 seq_ctl;
492} __attribute__ ((packed));
493
494struct ieee80211_hdr_4addr { 485struct ieee80211_hdr_4addr {
495 u16 frame_ctl; 486 u16 frame_ctl;
496 u16 duration_id; 487 u16 duration_id;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index c7c645af0eb..a2150670ef5 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -203,7 +203,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
203 203
204 enqueue_mgmt(ieee,skb); 204 enqueue_mgmt(ieee,skb);
205 }else{ 205 }else{
206 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4); 206 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
207 207
208 if (ieee->seq_ctrl[0] == 0xFFF) 208 if (ieee->seq_ctrl[0] == 0xFFF)
209 ieee->seq_ctrl[0] = 0; 209 ieee->seq_ctrl[0] = 0;
@@ -220,7 +220,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
220 spin_unlock_irqrestore(&ieee->lock, flags); 220 spin_unlock_irqrestore(&ieee->lock, flags);
221 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags); 221 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
222 222
223 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 223 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
224 224
225 if (ieee->seq_ctrl[0] == 0xFFF) 225 if (ieee->seq_ctrl[0] == 0xFFF)
226 ieee->seq_ctrl[0] = 0; 226 ieee->seq_ctrl[0] = 0;
@@ -246,7 +246,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
246 246
247 if(single){ 247 if(single){
248 248
249 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 249 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
250 250
251 if (ieee->seq_ctrl[0] == 0xFFF) 251 if (ieee->seq_ctrl[0] == 0xFFF)
252 ieee->seq_ctrl[0] = 0; 252 ieee->seq_ctrl[0] = 0;
@@ -259,7 +259,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
259 259
260 }else{ 260 }else{
261 261
262 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 262 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
263 263
264 if (ieee->seq_ctrl[0] == 0xFFF) 264 if (ieee->seq_ctrl[0] == 0xFFF)
265 ieee->seq_ctrl[0] = 0; 265 ieee->seq_ctrl[0] = 0;
@@ -287,7 +287,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
287 return NULL; 287 return NULL;
288 288
289 disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame)); 289 disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame));
290 disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC); 290 disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
291 disass->header.duration_id = 0; 291 disass->header.duration_id = 0;
292 292
293 memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN); 293 memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
@@ -905,7 +905,7 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
905 assoc = (struct ieee80211_assoc_response_frame *) 905 assoc = (struct ieee80211_assoc_response_frame *)
906 skb_put(skb,sizeof(struct ieee80211_assoc_response_frame)); 906 skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
907 907
908 assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP); 908 assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
909 memcpy(assoc->header.addr1, dest,ETH_ALEN); 909 memcpy(assoc->header.addr1, dest,ETH_ALEN);
910 memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN); 910 memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
911 memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN); 911 memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -981,7 +981,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
981 memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN); 981 memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
982 memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN); 982 memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
983 983
984 hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 984 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
985 IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS | 985 IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
986 (pwr ? IEEE80211_FCTL_PM:0)); 986 (pwr ? IEEE80211_FCTL_PM:0));
987 987
@@ -1084,7 +1084,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
1084 skb_put(skb, sizeof(struct ieee80211_assoc_request_frame)); 1084 skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));
1085 1085
1086 1086
1087 hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ; 1087 hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
1088 hdr->header.duration_id= 37; //FIXME 1088 hdr->header.duration_id= 37; //FIXME
1089 memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN); 1089 memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
1090 memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN); 1090 memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -1786,11 +1786,11 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
1786 1786
1787 tasklet_schedule(&ieee->ps_task); 1787 tasklet_schedule(&ieee->ps_task);
1788 1788
1789 if(WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP && 1789 if (WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_PROBE_RESP &&
1790 WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON) 1790 WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_BEACON)
1791 ieee->last_rx_ps_time = jiffies; 1791 ieee->last_rx_ps_time = jiffies;
1792 1792
1793 switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { 1793 switch (WLAN_FC_GET_STYPE(header->frame_control)) {
1794 1794
1795 case IEEE80211_STYPE_ASSOC_RESP: 1795 case IEEE80211_STYPE_ASSOC_RESP:
1796 case IEEE80211_STYPE_REASSOC_RESP: 1796 case IEEE80211_STYPE_REASSOC_RESP:
@@ -2064,7 +2064,7 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
2064 2064
2065 header = (struct ieee80211_hdr_3addr *) skb->data; 2065 header = (struct ieee80211_hdr_3addr *) skb->data;
2066 2066
2067 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 2067 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
2068 2068
2069 if (ieee->seq_ctrl[0] == 0xFFF) 2069 if (ieee->seq_ctrl[0] == 0xFFF)
2070 ieee->seq_ctrl[0] = 0; 2070 ieee->seq_ctrl[0] = 0;
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index e0f13efdb15..1847f38b9f2 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1890,7 +1890,7 @@ rate)
1890 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); 1890 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1891 int mode; 1891 int mode;
1892 struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data; 1892 struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
1893 short morefrag = (h->frame_ctl) & IEEE80211_FCTL_MOREFRAGS; 1893 short morefrag = (h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
1894 unsigned long flags; 1894 unsigned long flags;
1895 int priority; 1895 int priority;
1896 1896
@@ -2158,7 +2158,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
2158 TxDescDuration = ThisFrameTime + aSifsTime + AckTime; 2158 TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
2159 } 2159 }
2160 2160
2161 if(!(frag_hdr->frame_ctl & IEEE80211_FCTL_MOREFRAGS)) { //no more fragment 2161 if (!(frag_hdr->frame_control & IEEE80211_FCTL_MOREFRAGS)) {
2162 // ThisFrame-ACK. 2162 // ThisFrame-ACK.
2163 Duration = aSifsTime + AckTime; 2163 Duration = aSifsTime + AckTime;
2164 } else { // One or more fragments remained. 2164 } else { // One or more fragments remained.
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index ac389024796..0d22e3692fe 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1194,9 +1194,7 @@ static const struct net_device_ops wl_netdev_ops =
1194 .ndo_stop = &wl_adapter_close, 1194 .ndo_stop = &wl_adapter_close,
1195 .ndo_do_ioctl = &wl_ioctl, 1195 .ndo_do_ioctl = &wl_ioctl,
1196 1196
1197#ifdef HAVE_TX_TIMEOUT
1198 .ndo_tx_timeout = &wl_tx_timeout, 1197 .ndo_tx_timeout = &wl_tx_timeout,
1199#endif
1200 1198
1201#ifdef CONFIG_NET_POLL_CONTROLLER 1199#ifdef CONFIG_NET_POLL_CONTROLLER
1202 .ndo_poll_controller = wl_poll, 1200 .ndo_poll_controller = wl_poll,
@@ -1270,9 +1268,7 @@ struct net_device * wl_device_alloc( void )
1270 dev->stop = &wl_adapter_close; 1268 dev->stop = &wl_adapter_close;
1271 dev->do_ioctl = &wl_ioctl; 1269 dev->do_ioctl = &wl_ioctl;
1272 1270
1273#ifdef HAVE_TX_TIMEOUT
1274 dev->tx_timeout = &wl_tx_timeout; 1271 dev->tx_timeout = &wl_tx_timeout;
1275#endif
1276 1272
1277#ifdef CONFIG_NET_POLL_CONTROLLER 1273#ifdef CONFIG_NET_POLL_CONTROLLER
1278 dev->poll_controller = wl_poll; 1274 dev->poll_controller = wl_poll;
@@ -1280,9 +1276,7 @@ struct net_device * wl_device_alloc( void )
1280 1276
1281#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) 1277#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
1282 1278
1283#ifdef HAVE_TX_TIMEOUT
1284 dev->watchdog_timeo = TX_TIMEOUT; 1279 dev->watchdog_timeo = TX_TIMEOUT;
1285#endif
1286 1280
1287 dev->ethtool_ops = &wl_ethtool_ops; 1281 dev->ethtool_ops = &wl_ethtool_ops;
1288 1282
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
new file mode 100644
index 00000000000..9e9355367bb
--- /dev/null
+++ b/drivers/vhost/Kconfig
@@ -0,0 +1,11 @@
1config VHOST_NET
2 tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)"
3 depends on NET && EVENTFD && (TUN || !TUN) && EXPERIMENTAL
4 ---help---
5 This kernel module can be loaded in host kernel to accelerate
6 guest networking with virtio_net. Not to be confused with virtio_net
7 module itself which needs to be loaded in guest kernel.
8
9 To compile this driver as a module, choose M here: the module will
10 be called vhost_net.
11
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
new file mode 100644
index 00000000000..72dd02050bb
--- /dev/null
+++ b/drivers/vhost/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_VHOST_NET) += vhost_net.o
2vhost_net-y := vhost.o net.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
new file mode 100644
index 00000000000..4c8928319e1
--- /dev/null
+++ b/drivers/vhost/net.c
@@ -0,0 +1,661 @@
1/* Copyright (C) 2009 Red Hat, Inc.
2 * Author: Michael S. Tsirkin <mst@redhat.com>
3 *
4 * This work is licensed under the terms of the GNU GPL, version 2.
5 *
6 * virtio-net server in host kernel.
7 */
8
9#include <linux/compat.h>
10#include <linux/eventfd.h>
11#include <linux/vhost.h>
12#include <linux/virtio_net.h>
13#include <linux/mmu_context.h>
14#include <linux/miscdevice.h>
15#include <linux/module.h>
16#include <linux/mutex.h>
17#include <linux/workqueue.h>
18#include <linux/rcupdate.h>
19#include <linux/file.h>
20
21#include <linux/net.h>
22#include <linux/if_packet.h>
23#include <linux/if_arp.h>
24#include <linux/if_tun.h>
25
26#include <net/sock.h>
27
28#include "vhost.h"
29
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Indices of the two virtqueues owned by a vhost-net device. */
enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

/* State machine for polling the backend socket for TX writability:
 * DISABLED - no backend attached; STARTED - registered on the socket's
 * wait queue; STOPPED - backend present but not polled (buffer has room). */
enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};

/* Per-device state: generic vhost device plus the RX/TX queues and pollers. */
struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
};
55
/* Pop the first len bytes off 'from', copying the popped descriptors into
 * 'to'. Advances the consumed entries of 'from' in place. Returns the
 * number of segments written to 'to'. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg;

	for (seg = 0; len && seg < iov_count; ++seg, ++from, ++to) {
		size_t chunk = from->iov_len < len ? from->iov_len : len;

		to->iov_base = from->iov_base;
		to->iov_len = chunk;
		from->iov_len -= chunk;
		from->iov_base = (char *)from->iov_base + chunk;
		len -= chunk;
	}
	return seg;
}
75
76/* Caller must have TX VQ lock */
77static void tx_poll_stop(struct vhost_net *net)
78{
79 if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
80 return;
81 vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
82 net->tx_poll_state = VHOST_NET_POLL_STOPPED;
83}
84
85/* Caller must have TX VQ lock */
86static void tx_poll_start(struct vhost_net *net, struct socket *sock)
87{
88 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
89 return;
90 vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
91 net->tx_poll_state = VHOST_NET_POLL_STARTED;
92}
93
/* Drain the TX virtqueue: pull available descriptors and sendmsg() their
 * payload to the backend socket. Expects to be always run from workqueue -
 * which acts as read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned head, out, in, s;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock = rcu_dereference(vq->private_data);
	if (!sock)
		return;

	/* Cheap early-out: socket send buffer already full, nothing to do
	 * until it drains (poller will requeue us). */
	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf)
		return;

	/* Adopt the owner's mm so copy_from_user on the ring works. */
	use_mm(net->dev.mm);
	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);

	if (wmem < sock->sk->sk_sndbuf * 2)
		tx_poll_stop(net);
	hdr_size = vq->hdr_size;

	for (;;) {
		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			/* Buffer mostly full: let the socket poller wake us
			 * instead of re-enabling guest notifications. */
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(vq))) {
				/* Race: new buffers arrived while notify was
				 * off - re-disable and rescan. */
				vhost_disable_notify(vq);
				continue;
			}
			break;
		}
		/* TX descriptors must be read-only (no writable segments). */
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, int %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			/* Put the descriptor back and wait for buffer space. */
			vhost_discard_vq_desc(vq);
			tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_err("Truncated TX packet: "
			       " len %d != %zd\n", err, len);
		vhost_add_used_and_signal(&net->dev, vq, head, 0);
		total_len += len;
		/* Yield after VHOST_NET_WEIGHT bytes so one vq can't starve
		 * the others; requeue ourselves to continue later. */
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
	unuse_mm(net->dev.mm);
}
183
/* Drain the backend socket: recvmsg() packets into guest RX descriptors and
 * prepend a (zeroed, GSO_NONE) virtio-net header. Expects to be always run
 * from workqueue - which acts as read-size critical section for our kind
 * of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned head, out, in, log, s;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};

	/* Synthetic header written in front of every received packet:
	 * no checksum offload, no GSO. */
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock = rcu_dereference(vq->private_data);
	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
		return;

	/* Adopt the owner's mm so userspace ring accesses work. */
	use_mm(net->dev.mm);
	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);
	hdr_size = vq->hdr_size;

	/* Only pass a log buffer down if dirty logging is negotiated. */
	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	for (;;) {
		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 vq_log, &log);
		/* OK, now we need to know about added descriptors. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(vq);
				continue;
			}
			/* Nothing new? Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* RX descriptors must be write-only (no readable segments). */
		if (out) {
			vq_err(vq, "Unexpected descriptor format for RX: "
			       "out %d, int %d\n",
			       out, in);
			break;
		}
		/* Skip header. TODO: support TSO/mergeable rx buffers. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
		msg.msg_iovlen = in;
		len = iov_length(vq->iov, in);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for RX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 len, MSG_DONTWAIT | MSG_TRUNC);
		/* TODO: Check specific error and bomb out unless EAGAIN? */
		if (err < 0) {
			vhost_discard_vq_desc(vq);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (err > len) {
			/* MSG_TRUNC: packet was bigger than the buffer -
			 * drop it and recycle the descriptor. */
			pr_err("Discarded truncated rx packet: "
			       " len %d > %zd\n", err, len);
			vhost_discard_vq_desc(vq);
			continue;
		}
		len = err;
		err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
		if (err) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
			       vq->iov->iov_base, err);
			break;
		}
		len += hdr_size;
		vhost_add_used_and_signal(&net->dev, vq, head, len);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, len);
		total_len += len;
		/* Yield after VHOST_NET_WEIGHT bytes; requeue to continue. */
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
	unuse_mm(net->dev.mm);
}
290
291static void handle_tx_kick(struct work_struct *work)
292{
293 struct vhost_virtqueue *vq;
294 struct vhost_net *net;
295 vq = container_of(work, struct vhost_virtqueue, poll.work);
296 net = container_of(vq->dev, struct vhost_net, dev);
297 handle_tx(net);
298}
299
300static void handle_rx_kick(struct work_struct *work)
301{
302 struct vhost_virtqueue *vq;
303 struct vhost_net *net;
304 vq = container_of(work, struct vhost_virtqueue, poll.work);
305 net = container_of(vq->dev, struct vhost_net, dev);
306 handle_rx(net);
307}
308
309static void handle_tx_net(struct work_struct *work)
310{
311 struct vhost_net *net;
312 net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
313 handle_tx(net);
314}
315
316static void handle_rx_net(struct work_struct *work)
317{
318 struct vhost_net *net;
319 net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
320 handle_rx(net);
321}
322
323static int vhost_net_open(struct inode *inode, struct file *f)
324{
325 struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
326 int r;
327 if (!n)
328 return -ENOMEM;
329 n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
330 n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
331 r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
332 if (r < 0) {
333 kfree(n);
334 return r;
335 }
336
337 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
338 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
339 n->tx_poll_state = VHOST_NET_POLL_DISABLED;
340
341 f->private_data = n;
342
343 return 0;
344}
345
346static void vhost_net_disable_vq(struct vhost_net *n,
347 struct vhost_virtqueue *vq)
348{
349 if (!vq->private_data)
350 return;
351 if (vq == n->vqs + VHOST_NET_VQ_TX) {
352 tx_poll_stop(n);
353 n->tx_poll_state = VHOST_NET_POLL_DISABLED;
354 } else
355 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
356}
357
358static void vhost_net_enable_vq(struct vhost_net *n,
359 struct vhost_virtqueue *vq)
360{
361 struct socket *sock = vq->private_data;
362 if (!sock)
363 return;
364 if (vq == n->vqs + VHOST_NET_VQ_TX) {
365 n->tx_poll_state = VHOST_NET_POLL_STOPPED;
366 tx_poll_start(n, sock);
367 } else
368 vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
369}
370
/* Detach the backend socket from vq and return it (caller drops the file
 * reference). Polling is stopped before the pointer is cleared so the work
 * handlers, which rcu_dereference private_data, see a consistent state. */
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}
383
384static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
385 struct socket **rx_sock)
386{
387 *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
388 *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
389}
390
391static void vhost_net_flush_vq(struct vhost_net *n, int index)
392{
393 vhost_poll_flush(n->poll + index);
394 vhost_poll_flush(&n->dev.vqs[index].poll);
395}
396
397static void vhost_net_flush(struct vhost_net *n)
398{
399 vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
400 vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
401}
402
/* release() for /dev/vhost-net: detach backends, quiesce workers, tear down
 * the generic device, then drop socket references. */
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_cleanup(&n->dev);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}
422
423static struct socket *get_raw_socket(int fd)
424{
425 struct {
426 struct sockaddr_ll sa;
427 char buf[MAX_ADDR_LEN];
428 } uaddr;
429 int uaddr_len = sizeof uaddr, r;
430 struct socket *sock = sockfd_lookup(fd, &r);
431 if (!sock)
432 return ERR_PTR(-ENOTSOCK);
433
434 /* Parameter checking */
435 if (sock->sk->sk_type != SOCK_RAW) {
436 r = -ESOCKTNOSUPPORT;
437 goto err;
438 }
439
440 r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
441 &uaddr_len, 0);
442 if (r)
443 goto err;
444
445 if (uaddr.sa.sll_family != AF_PACKET) {
446 r = -EPFNOSUPPORT;
447 goto err;
448 }
449 return sock;
450err:
451 fput(sock->file);
452 return ERR_PTR(r);
453}
454
455static struct socket *get_tun_socket(int fd)
456{
457 struct file *file = fget(fd);
458 struct socket *sock;
459 if (!file)
460 return ERR_PTR(-EBADF);
461 sock = tun_get_socket(file);
462 if (IS_ERR(sock))
463 fput(file);
464 return sock;
465}
466
467static struct socket *get_socket(int fd)
468{
469 struct socket *sock;
470 /* special case to disable backend */
471 if (fd == -1)
472 return NULL;
473 sock = get_raw_socket(fd);
474 if (!IS_ERR(sock))
475 return sock;
476 sock = get_tun_socket(fd);
477 if (!IS_ERR(sock))
478 return sock;
479 return ERR_PTR(-ENOTSOCK);
480}
481
482static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
483{
484 struct socket *sock, *oldsock;
485 struct vhost_virtqueue *vq;
486 int r;
487
488 mutex_lock(&n->dev.mutex);
489 r = vhost_dev_check_owner(&n->dev);
490 if (r)
491 goto err;
492
493 if (index >= VHOST_NET_VQ_MAX) {
494 r = -ENOBUFS;
495 goto err;
496 }
497 vq = n->vqs + index;
498 mutex_lock(&vq->mutex);
499
500 /* Verify that ring has been setup correctly. */
501 if (!vhost_vq_access_ok(vq)) {
502 r = -EFAULT;
503 goto err;
504 }
505 sock = get_socket(fd);
506 if (IS_ERR(sock)) {
507 r = PTR_ERR(sock);
508 goto err;
509 }
510
511 /* start polling new socket */
512 oldsock = vq->private_data;
513 if (sock == oldsock)
514 goto done;
515
516 vhost_net_disable_vq(n, vq);
517 rcu_assign_pointer(vq->private_data, sock);
518 vhost_net_enable_vq(n, vq);
519 mutex_unlock(&vq->mutex);
520done:
521 if (oldsock) {
522 vhost_net_flush_vq(n, index);
523 fput(oldsock->file);
524 }
525err:
526 mutex_unlock(&n->dev.mutex);
527 return r;
528}
529
/* VHOST_RESET_OWNER: detach both backends, quiesce workers and return the
 * device to its pristine, unowned state. Socket refs are dropped only after
 * the device mutex is released. */
static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}
550
/* VHOST_SET_FEATURES: record the acked feature bits and propagate the
 * resulting vnet header size to both virtqueues. */
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	/* Header is only consumed/produced when the guest negotiated it. */
	size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
		sizeof(struct virtio_net_hdr) : 0;
	int i;
	mutex_lock(&n->dev.mutex);
	/* Dirty logging requires the log region to be writable up front. */
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	/* Make acked_features visible before workers read hdr_size. */
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].hdr_size = hdr_size;
		mutex_unlock(&n->vqs[i].mutex);
	}
	/* Drain in-flight work that may have used the old settings. */
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
573
574static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
575 unsigned long arg)
576{
577 struct vhost_net *n = f->private_data;
578 void __user *argp = (void __user *)arg;
579 u64 __user *featurep = argp;
580 struct vhost_vring_file backend;
581 u64 features;
582 int r;
583 switch (ioctl) {
584 case VHOST_NET_SET_BACKEND:
585 r = copy_from_user(&backend, argp, sizeof backend);
586 if (r < 0)
587 return r;
588 return vhost_net_set_backend(n, backend.index, backend.fd);
589 case VHOST_GET_FEATURES:
590 features = VHOST_FEATURES;
591 return copy_to_user(featurep, &features, sizeof features);
592 case VHOST_SET_FEATURES:
593 r = copy_from_user(&features, featurep, sizeof features);
594 if (r < 0)
595 return r;
596 if (features & ~VHOST_FEATURES)
597 return -EOPNOTSUPP;
598 return vhost_net_set_features(n, features);
599 case VHOST_RESET_OWNER:
600 return vhost_net_reset_owner(n);
601 default:
602 mutex_lock(&n->dev.mutex);
603 r = vhost_dev_ioctl(&n->dev, ioctl, arg);
604 vhost_net_flush(n);
605 mutex_unlock(&n->dev.mutex);
606 return r;
607 }
608}
609
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point: translate the compat pointer argument and
 * forward to the native ioctl handler. */
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif
617
618const static struct file_operations vhost_net_fops = {
619 .owner = THIS_MODULE,
620 .release = vhost_net_release,
621 .unlocked_ioctl = vhost_net_ioctl,
622#ifdef CONFIG_COMPAT
623 .compat_ioctl = vhost_net_compat_ioctl,
624#endif
625 .open = vhost_net_open,
626};
627
628static struct miscdevice vhost_net_misc = {
629 VHOST_NET_MINOR,
630 "vhost-net",
631 &vhost_net_fops,
632};
633
634int vhost_net_init(void)
635{
636 int r = vhost_init();
637 if (r)
638 goto err_init;
639 r = misc_register(&vhost_net_misc);
640 if (r)
641 goto err_reg;
642 return 0;
643err_reg:
644 vhost_cleanup();
645err_init:
646 return r;
647
648}
649module_init(vhost_net_init);
650
/* Module exit: unregister the device first so no new opens race with the
 * core teardown. */
void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
	vhost_cleanup();
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
new file mode 100644
index 00000000000..c8c25dbc585
--- /dev/null
+++ b/drivers/vhost/vhost.c
@@ -0,0 +1,1098 @@
1/* Copyright (C) 2009 Red Hat, Inc.
2 * Copyright (C) 2006 Rusty Russell IBM Corporation
3 *
4 * Author: Michael S. Tsirkin <mst@redhat.com>
5 *
6 * Inspiration, some code, and most witty comments come from
7 * Documentation/lguest/lguest.c, by Rusty Russell
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2.
10 *
11 * Generic code for virtio server in host kernel.
12 */
13
14#include <linux/eventfd.h>
15#include <linux/vhost.h>
16#include <linux/virtio_net.h>
17#include <linux/mm.h>
18#include <linux/miscdevice.h>
19#include <linux/mutex.h>
20#include <linux/workqueue.h>
21#include <linux/rcupdate.h>
22#include <linux/poll.h>
23#include <linux/file.h>
24#include <linux/highmem.h>
25
26#include <linux/net.h>
27#include <linux/if_packet.h>
28#include <linux/if_arp.h>
29
30#include <net/sock.h>
31
32#include "vhost.h"
33
enum {
	/* Upper bound on regions accepted by VHOST_SET_MEM_TABLE. */
	VHOST_MEMORY_MAX_NREGIONS = 64,
	VHOST_MEMORY_F_LOG = 0x1,
};

/* Workqueue on which all virtqueue kick and socket-poll work runs. */
static struct workqueue_struct *vhost_workqueue;
40
41static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
42 poll_table *pt)
43{
44 struct vhost_poll *poll;
45 poll = container_of(pt, struct vhost_poll, table);
46
47 poll->wqh = wqh;
48 add_wait_queue(wqh, &poll->wait);
49}
50
51static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
52 void *key)
53{
54 struct vhost_poll *poll;
55 poll = container_of(wait, struct vhost_poll, wait);
56 if (!((unsigned long)key & poll->mask))
57 return 0;
58
59 queue_work(vhost_workqueue, &poll->work);
60 return 0;
61}
62
63/* Init poll structure */
64void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
65 unsigned long mask)
66{
67 INIT_WORK(&poll->work, func);
68 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
69 init_poll_funcptr(&poll->table, vhost_poll_func);
70 poll->mask = mask;
71}
72
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	/* ->poll registers us on the wait queue via vhost_poll_func. */
	mask = file->f_op->poll(file, &poll->table);
	/* Already ready? Fire the handler now instead of waiting. */
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
}
82
/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	remove_wait_queue(poll->wqh, &poll->wait);
}
89
/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	flush_work(&poll->work);
}
96
/* Schedule the poll's work item to run (used to requeue a handler that
 * yielded after exhausting its byte budget). */
void vhost_poll_queue(struct vhost_poll *poll)
{
	queue_work(vhost_workqueue, &poll->work);
}
101
102static void vhost_vq_reset(struct vhost_dev *dev,
103 struct vhost_virtqueue *vq)
104{
105 vq->num = 1;
106 vq->desc = NULL;
107 vq->avail = NULL;
108 vq->used = NULL;
109 vq->last_avail_idx = 0;
110 vq->avail_idx = 0;
111 vq->last_used_idx = 0;
112 vq->used_flags = 0;
113 vq->used_flags = 0;
114 vq->log_used = false;
115 vq->log_addr = -1ull;
116 vq->hdr_size = 0;
117 vq->private_data = NULL;
118 vq->log_base = NULL;
119 vq->error_ctx = NULL;
120 vq->error = NULL;
121 vq->kick = NULL;
122 vq->call_ctx = NULL;
123 vq->call = NULL;
124}
125
126long vhost_dev_init(struct vhost_dev *dev,
127 struct vhost_virtqueue *vqs, int nvqs)
128{
129 int i;
130 dev->vqs = vqs;
131 dev->nvqs = nvqs;
132 mutex_init(&dev->mutex);
133 dev->log_ctx = NULL;
134 dev->log_file = NULL;
135 dev->memory = NULL;
136 dev->mm = NULL;
137
138 for (i = 0; i < dev->nvqs; ++i) {
139 dev->vqs[i].dev = dev;
140 mutex_init(&dev->vqs[i].mutex);
141 vhost_vq_reset(dev, dev->vqs + i);
142 if (dev->vqs[i].handle_kick)
143 vhost_poll_init(&dev->vqs[i].poll,
144 dev->vqs[i].handle_kick,
145 POLLIN);
146 }
147 return 0;
148}
149
150/* Caller should have device mutex */
151long vhost_dev_check_owner(struct vhost_dev *dev)
152{
153 /* Are you the owner? If not, I don't think you mean to do that */
154 return dev->mm == current->mm ? 0 : -EPERM;
155}
156
157/* Caller should have device mutex */
158static long vhost_dev_set_owner(struct vhost_dev *dev)
159{
160 /* Is there an owner already? */
161 if (dev->mm)
162 return -EBUSY;
163 /* No owner, become one */
164 dev->mm = get_task_mm(current);
165 return 0;
166}
167
/* Caller should have device mutex */
long vhost_dev_reset_owner(struct vhost_dev *dev)
{
	struct vhost_memory *memory;

	/* Restore memory to default empty mapping. */
	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
	if (!memory)
		return -ENOMEM;

	/* Allocate before cleanup so a failure leaves the device intact. */
	vhost_dev_cleanup(dev);

	memory->nregions = 0;
	dev->memory = memory;
	return 0;
}
184
/* Release everything a device owns: pollers, eventfds and their files,
 * the memory table and the owner's mm. Caller should have device mutex. */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;
	for (i = 0; i < dev->nvqs; ++i) {
		/* Poller exists only for queues that had both a kick fd
		 * and a kick handler. */
		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
			vhost_poll_stop(&dev->vqs[i].poll);
			vhost_poll_flush(&dev->vqs[i].poll);
		}
		if (dev->vqs[i].error_ctx)
			eventfd_ctx_put(dev->vqs[i].error_ctx);
		if (dev->vqs[i].error)
			fput(dev->vqs[i].error);
		if (dev->vqs[i].kick)
			fput(dev->vqs[i].kick);
		if (dev->vqs[i].call_ctx)
			eventfd_ctx_put(dev->vqs[i].call_ctx);
		if (dev->vqs[i].call)
			fput(dev->vqs[i].call);
		vhost_vq_reset(dev, dev->vqs + i);
	}
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(dev->memory);
	dev->memory = NULL;
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
219
220static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
221{
222 u64 a = addr / VHOST_PAGE_SIZE / 8;
223 /* Make sure 64 bit math will not overflow. */
224 if (a > ULONG_MAX - (unsigned long)log_base ||
225 a + (unsigned long)log_base > ULONG_MAX)
226 return -EFAULT;
227
228 return access_ok(VERIFY_WRITE, log_base + a,
229 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
230}
231
/* Check that every region in the memory table is addressable, and - when
 * log_all is set - that the dirty log can cover it.
 * Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;
	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		/* Reject sizes that don't fit an unsigned long (32-bit). */
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}
252
253/* Can we switch to this memory table? */
254/* Caller should have device mutex but not vq mutex */
255static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
256 int log_all)
257{
258 int i;
259 for (i = 0; i < d->nvqs; ++i) {
260 int ok;
261 mutex_lock(&d->vqs[i].mutex);
262 /* If ring is inactive, will check when it's enabled. */
263 if (d->vqs[i].private_data)
264 ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
265 log_all);
266 else
267 ok = 1;
268 mutex_unlock(&d->vqs[i].mutex);
269 if (!ok)
270 return 0;
271 }
272 return 1;
273}
274
275static int vq_access_ok(unsigned int num,
276 struct vring_desc __user *desc,
277 struct vring_avail __user *avail,
278 struct vring_used __user *used)
279{
280 return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
281 access_ok(VERIFY_READ, avail,
282 sizeof *avail + num * sizeof *avail->ring) &&
283 access_ok(VERIFY_WRITE, used,
284 sizeof *used + num * sizeof *used->ring);
285}
286
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->memory, 1);
}
293
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
{
	/* The memory table must be loggable, and - if used-ring logging is
	 * on - the log must also cover the used ring itself. */
	return vq_memory_access_ok(log_base, vq->dev->memory,
				   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
						sizeof *vq->used +
						vq->num * sizeof *vq->used->ring));
}
304
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
312
313static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
314{
315 struct vhost_memory mem, *newmem, *oldmem;
316 unsigned long size = offsetof(struct vhost_memory, regions);
317 long r;
318 r = copy_from_user(&mem, m, size);
319 if (r)
320 return r;
321 if (mem.padding)
322 return -EOPNOTSUPP;
323 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
324 return -E2BIG;
325 newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
326 if (!newmem)
327 return -ENOMEM;
328
329 memcpy(newmem, &mem, size);
330 r = copy_from_user(newmem->regions, m->regions,
331 mem.nregions * sizeof *m->regions);
332 if (r) {
333 kfree(newmem);
334 return r;
335 }
336
337 if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL)))
338 return -EFAULT;
339 oldmem = d->memory;
340 rcu_assign_pointer(d->memory, newmem);
341 synchronize_rcu();
342 kfree(oldmem);
343 return 0;
344}
345
346static int init_used(struct vhost_virtqueue *vq,
347 struct vring_used __user *used)
348{
349 int r = put_user(vq->used_flags, &used->flags);
350 if (r)
351 return r;
352 return get_user(vq->last_used_idx, &used->idx);
353}
354
355static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
356{
357 struct file *eventfp, *filep = NULL,
358 *pollstart = NULL, *pollstop = NULL;
359 struct eventfd_ctx *ctx = NULL;
360 u32 __user *idxp = argp;
361 struct vhost_virtqueue *vq;
362 struct vhost_vring_state s;
363 struct vhost_vring_file f;
364 struct vhost_vring_addr a;
365 u32 idx;
366 long r;
367
368 r = get_user(idx, idxp);
369 if (r < 0)
370 return r;
371 if (idx > d->nvqs)
372 return -ENOBUFS;
373
374 vq = d->vqs + idx;
375
376 mutex_lock(&vq->mutex);
377
378 switch (ioctl) {
379 case VHOST_SET_VRING_NUM:
380 /* Resizing ring with an active backend?
381 * You don't want to do that. */
382 if (vq->private_data) {
383 r = -EBUSY;
384 break;
385 }
386 r = copy_from_user(&s, argp, sizeof s);
387 if (r < 0)
388 break;
389 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
390 r = -EINVAL;
391 break;
392 }
393 vq->num = s.num;
394 break;
395 case VHOST_SET_VRING_BASE:
396 /* Moving base with an active backend?
397 * You don't want to do that. */
398 if (vq->private_data) {
399 r = -EBUSY;
400 break;
401 }
402 r = copy_from_user(&s, argp, sizeof s);
403 if (r < 0)
404 break;
405 if (s.num > 0xffff) {
406 r = -EINVAL;
407 break;
408 }
409 vq->last_avail_idx = s.num;
410 /* Forget the cached index value. */
411 vq->avail_idx = vq->last_avail_idx;
412 break;
413 case VHOST_GET_VRING_BASE:
414 s.index = idx;
415 s.num = vq->last_avail_idx;
416 r = copy_to_user(argp, &s, sizeof s);
417 break;
418 case VHOST_SET_VRING_ADDR:
419 r = copy_from_user(&a, argp, sizeof a);
420 if (r < 0)
421 break;
422 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
423 r = -EOPNOTSUPP;
424 break;
425 }
426 /* For 32bit, verify that the top 32bits of the user
427 data are set to zero. */
428 if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
429 (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
430 (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
431 r = -EFAULT;
432 break;
433 }
434 if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
435 (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
436 (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
437 r = -EINVAL;
438 break;
439 }
440
441 /* We only verify access here if backend is configured.
442 * If it is not, we don't as size might not have been setup.
443 * We will verify when backend is configured. */
444 if (vq->private_data) {
445 if (!vq_access_ok(vq->num,
446 (void __user *)(unsigned long)a.desc_user_addr,
447 (void __user *)(unsigned long)a.avail_user_addr,
448 (void __user *)(unsigned long)a.used_user_addr)) {
449 r = -EINVAL;
450 break;
451 }
452
453 /* Also validate log access for used ring if enabled. */
454 if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
455 !log_access_ok(vq->log_base, a.log_guest_addr,
456 sizeof *vq->used +
457 vq->num * sizeof *vq->used->ring)) {
458 r = -EINVAL;
459 break;
460 }
461 }
462
463 r = init_used(vq, (struct vring_used __user *)(unsigned long)
464 a.used_user_addr);
465 if (r)
466 break;
467 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
468 vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
469 vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
470 vq->log_addr = a.log_guest_addr;
471 vq->used = (void __user *)(unsigned long)a.used_user_addr;
472 break;
473 case VHOST_SET_VRING_KICK:
474 r = copy_from_user(&f, argp, sizeof f);
475 if (r < 0)
476 break;
477 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
478 if (IS_ERR(eventfp))
479 return PTR_ERR(eventfp);
480 if (eventfp != vq->kick) {
481 pollstop = filep = vq->kick;
482 pollstart = vq->kick = eventfp;
483 } else
484 filep = eventfp;
485 break;
486 case VHOST_SET_VRING_CALL:
487 r = copy_from_user(&f, argp, sizeof f);
488 if (r < 0)
489 break;
490 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
491 if (IS_ERR(eventfp))
492 return PTR_ERR(eventfp);
493 if (eventfp != vq->call) {
494 filep = vq->call;
495 ctx = vq->call_ctx;
496 vq->call = eventfp;
497 vq->call_ctx = eventfp ?
498 eventfd_ctx_fileget(eventfp) : NULL;
499 } else
500 filep = eventfp;
501 break;
502 case VHOST_SET_VRING_ERR:
503 r = copy_from_user(&f, argp, sizeof f);
504 if (r < 0)
505 break;
506 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
507 if (IS_ERR(eventfp))
508 return PTR_ERR(eventfp);
509 if (eventfp != vq->error) {
510 filep = vq->error;
511 vq->error = eventfp;
512 ctx = vq->error_ctx;
513 vq->error_ctx = eventfp ?
514 eventfd_ctx_fileget(eventfp) : NULL;
515 } else
516 filep = eventfp;
517 break;
518 default:
519 r = -ENOIOCTLCMD;
520 }
521
522 if (pollstop && vq->handle_kick)
523 vhost_poll_stop(&vq->poll);
524
525 if (ctx)
526 eventfd_ctx_put(ctx);
527 if (filep)
528 fput(filep);
529
530 if (pollstart && vq->handle_kick)
531 vhost_poll_start(&vq->poll, vq->kick);
532
533 mutex_unlock(&vq->mutex);
534
535 if (pollstop && vq->handle_kick)
536 vhost_poll_flush(&vq->poll);
537 return r;
538}
539
540/* Caller must have device mutex */
541long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
542{
543 void __user *argp = (void __user *)arg;
544 struct file *eventfp, *filep = NULL;
545 struct eventfd_ctx *ctx = NULL;
546 u64 p;
547 long r;
548 int i, fd;
549
550 /* If you are not the owner, you can become one */
551 if (ioctl == VHOST_SET_OWNER) {
552 r = vhost_dev_set_owner(d);
553 goto done;
554 }
555
556 /* You must be the owner to do anything else */
557 r = vhost_dev_check_owner(d);
558 if (r)
559 goto done;
560
561 switch (ioctl) {
562 case VHOST_SET_MEM_TABLE:
563 r = vhost_set_memory(d, argp);
564 break;
565 case VHOST_SET_LOG_BASE:
566 r = copy_from_user(&p, argp, sizeof p);
567 if (r < 0)
568 break;
569 if ((u64)(unsigned long)p != p) {
570 r = -EFAULT;
571 break;
572 }
573 for (i = 0; i < d->nvqs; ++i) {
574 struct vhost_virtqueue *vq;
575 void __user *base = (void __user *)(unsigned long)p;
576 vq = d->vqs + i;
577 mutex_lock(&vq->mutex);
578 /* If ring is inactive, will check when it's enabled. */
579 if (vq->private_data && !vq_log_access_ok(vq, base))
580 r = -EFAULT;
581 else
582 vq->log_base = base;
583 mutex_unlock(&vq->mutex);
584 }
585 break;
586 case VHOST_SET_LOG_FD:
587 r = get_user(fd, (int __user *)argp);
588 if (r < 0)
589 break;
590 eventfp = fd == -1 ? NULL : eventfd_fget(fd);
591 if (IS_ERR(eventfp)) {
592 r = PTR_ERR(eventfp);
593 break;
594 }
595 if (eventfp != d->log_file) {
596 filep = d->log_file;
597 ctx = d->log_ctx;
598 d->log_ctx = eventfp ?
599 eventfd_ctx_fileget(eventfp) : NULL;
600 } else
601 filep = eventfp;
602 for (i = 0; i < d->nvqs; ++i) {
603 mutex_lock(&d->vqs[i].mutex);
604 d->vqs[i].log_ctx = d->log_ctx;
605 mutex_unlock(&d->vqs[i].mutex);
606 }
607 if (ctx)
608 eventfd_ctx_put(ctx);
609 if (filep)
610 fput(filep);
611 break;
612 default:
613 r = vhost_set_vring(d, ioctl, argp);
614 break;
615 }
616done:
617 return r;
618}
619
620static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
621 __u64 addr, __u32 len)
622{
623 struct vhost_memory_region *reg;
624 int i;
625 /* linear search is not brilliant, but we really have on the order of 6
626 * regions in practice */
627 for (i = 0; i < mem->nregions; ++i) {
628 reg = mem->regions + i;
629 if (reg->guest_phys_addr <= addr &&
630 reg->guest_phys_addr + reg->memory_size - 1 >= addr)
631 return reg;
632 }
633 return NULL;
634}
635
636/* TODO: This is really inefficient. We need something like get_user()
637 * (instruction directly accesses the data, with an exception table entry
638 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
639 */
640static int set_bit_to_user(int nr, void __user *addr)
641{
642 unsigned long log = (unsigned long)addr;
643 struct page *page;
644 void *base;
645 int bit = nr + (log % PAGE_SIZE) * 8;
646 int r;
647 r = get_user_pages_fast(log, 1, 1, &page);
648 if (r)
649 return r;
650 base = kmap_atomic(page, KM_USER0);
651 set_bit(bit, base);
652 kunmap_atomic(base, KM_USER0);
653 set_page_dirty_lock(page);
654 put_page(page);
655 return 0;
656}
657
658static int log_write(void __user *log_base,
659 u64 write_address, u64 write_length)
660{
661 int r;
662 if (!write_length)
663 return 0;
664 write_address /= VHOST_PAGE_SIZE;
665 for (;;) {
666 u64 base = (u64)(unsigned long)log_base;
667 u64 log = base + write_address / 8;
668 int bit = write_address % 8;
669 if ((u64)(unsigned long)log != log)
670 return -EFAULT;
671 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
672 if (r < 0)
673 return r;
674 if (write_length <= VHOST_PAGE_SIZE)
675 break;
676 write_length -= VHOST_PAGE_SIZE;
677 write_address += VHOST_PAGE_SIZE;
678 }
679 return r;
680}
681
682int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
683 unsigned int log_num, u64 len)
684{
685 int i, r;
686
687 /* Make sure data written is seen before log. */
688 wmb();
689 for (i = 0; i < log_num; ++i) {
690 u64 l = min(log[i].len, len);
691 r = log_write(vq->log_base, log[i].addr, l);
692 if (r < 0)
693 return r;
694 len -= l;
695 if (!len)
696 return 0;
697 }
698 if (vq->log_ctx)
699 eventfd_signal(vq->log_ctx, 1);
700 /* Length written exceeds what we have stored. This is a bug. */
701 BUG();
702 return 0;
703}
704
705int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
706 struct iovec iov[], int iov_size)
707{
708 const struct vhost_memory_region *reg;
709 struct vhost_memory *mem;
710 struct iovec *_iov;
711 u64 s = 0;
712 int ret = 0;
713
714 rcu_read_lock();
715
716 mem = rcu_dereference(dev->memory);
717 while ((u64)len > s) {
718 u64 size;
719 if (ret >= iov_size) {
720 ret = -ENOBUFS;
721 break;
722 }
723 reg = find_region(mem, addr, len);
724 if (!reg) {
725 ret = -EFAULT;
726 break;
727 }
728 _iov = iov + ret;
729 size = reg->memory_size - addr + reg->guest_phys_addr;
730 _iov->iov_len = min((u64)len, size);
731 _iov->iov_base = (void *)(unsigned long)
732 (reg->userspace_addr + addr - reg->guest_phys_addr);
733 s += size;
734 addr += size;
735 ++ret;
736 }
737
738 rcu_read_unlock();
739 return ret;
740}
741
742/* Each buffer in the virtqueues is actually a chain of descriptors. This
743 * function returns the next descriptor in the chain,
744 * or -1U if we're at the end. */
745static unsigned next_desc(struct vring_desc *desc)
746{
747 unsigned int next;
748
749 /* If this descriptor says it doesn't chain, we're done. */
750 if (!(desc->flags & VRING_DESC_F_NEXT))
751 return -1U;
752
753 /* Check they're not leading us off end of descriptors. */
754 next = desc->next;
755 /* Make sure compiler knows to grab that: we don't want it changing! */
756 /* We will use the result as an index in an array, so most
757 * architectures only need a compiler barrier here. */
758 read_barrier_depends();
759
760 return next;
761}
762
763static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
764 struct iovec iov[], unsigned int iov_size,
765 unsigned int *out_num, unsigned int *in_num,
766 struct vhost_log *log, unsigned int *log_num,
767 struct vring_desc *indirect)
768{
769 struct vring_desc desc;
770 unsigned int i = 0, count, found = 0;
771 int ret;
772
773 /* Sanity check */
774 if (indirect->len % sizeof desc) {
775 vq_err(vq, "Invalid length in indirect descriptor: "
776 "len 0x%llx not multiple of 0x%zx\n",
777 (unsigned long long)indirect->len,
778 sizeof desc);
779 return -EINVAL;
780 }
781
782 ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
783 ARRAY_SIZE(vq->indirect));
784 if (ret < 0) {
785 vq_err(vq, "Translation failure %d in indirect.\n", ret);
786 return ret;
787 }
788
789 /* We will use the result as an address to read from, so most
790 * architectures only need a compiler barrier here. */
791 read_barrier_depends();
792
793 count = indirect->len / sizeof desc;
794 /* Buffers are chained via a 16 bit next field, so
795 * we can have at most 2^16 of these. */
796 if (count > USHORT_MAX + 1) {
797 vq_err(vq, "Indirect buffer length too big: %d\n",
798 indirect->len);
799 return -E2BIG;
800 }
801
802 do {
803 unsigned iov_count = *in_num + *out_num;
804 if (++found > count) {
805 vq_err(vq, "Loop detected: last one at %u "
806 "indirect size %u\n",
807 i, count);
808 return -EINVAL;
809 }
810 if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
811 sizeof desc)) {
812 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
813 i, (size_t)indirect->addr + i * sizeof desc);
814 return -EINVAL;
815 }
816 if (desc.flags & VRING_DESC_F_INDIRECT) {
817 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
818 i, (size_t)indirect->addr + i * sizeof desc);
819 return -EINVAL;
820 }
821
822 ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
823 iov_size - iov_count);
824 if (ret < 0) {
825 vq_err(vq, "Translation failure %d indirect idx %d\n",
826 ret, i);
827 return ret;
828 }
829 /* If this is an input descriptor, increment that count. */
830 if (desc.flags & VRING_DESC_F_WRITE) {
831 *in_num += ret;
832 if (unlikely(log)) {
833 log[*log_num].addr = desc.addr;
834 log[*log_num].len = desc.len;
835 ++*log_num;
836 }
837 } else {
838 /* If it's an output descriptor, they're all supposed
839 * to come before any input descriptors. */
840 if (*in_num) {
841 vq_err(vq, "Indirect descriptor "
842 "has out after in: idx %d\n", i);
843 return -EINVAL;
844 }
845 *out_num += ret;
846 }
847 } while ((i = next_desc(&desc)) != -1);
848 return 0;
849}
850
/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * On success *out_num / *in_num hold the iov entry counts and, when @log is
 * non-NULL, *log_num holds the number of writable ranges recorded for
 * dirty logging.
 *
 * This function returns the descriptor number found, or vq->num (which
 * is never a valid descriptor number) if none was found. */
unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			   struct iovec iov[], unsigned int iov_size,
			   unsigned int *out_num, unsigned int *in_num,
			   struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (get_user(vq->avail_idx, &vq->avail->idx)) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return vq->num;
	}

	/* 16-bit wrap-around arithmetic: the guest may never publish more
	 * than vq->num new entries ahead of what we have consumed. */
	if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return vq->num;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return vq->num;
	}

	/* If their number is silly, that's an error. */
	if (head >= vq->num) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return vq->num;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (i >= vq->num) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return vq->num;
		}
		/* "found" caps the walk at vq->num descriptors so a
		 * malicious cyclic chain cannot loop us forever. */
		if (++found > vq->num) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return vq->num;
		}
		ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (ret) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return vq->num;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			/* Indirect tables expand in place into the same
			 * iov/log arrays via get_indirect(). */
			ret = get_indirect(dev, vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (ret < 0) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return vq->num;
			}
			continue;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (ret < 0) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return vq->num;
		}
		if (desc.flags & VRING_DESC_F_WRITE) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (*in_num) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return vq->num;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;
	return head;
}
974
975/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
976void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
977{
978 vq->last_avail_idx--;
979}
980
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd.  Writes one (id, len) element to
 * the used ring and bumps the used index; returns 0 or -EFAULT.
 * NOTE(review): ring index updates are not serialized here -- presumably
 * the caller holds the vq mutex; confirm against callers. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring. */
	used = &vq->used->ring[vq->last_used_idx % vq->num];
	if (put_user(head, &used->id)) {
		vq_err(vq, "Failed to write used id");
		return -EFAULT;
	}
	if (put_user(len, &used->len)) {
		vq_err(vq, "Failed to write used len");
		return -EFAULT;
	}
	/* Make sure buffer is written before we update index. */
	wmb();
	if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		wmb();
		/* Log the used-ring entry just written... */
		log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring *
			  (vq->last_used_idx % vq->num),
			  sizeof *vq->used->ring);
		/* ...and the region at log_addr holding the used index.
		 * NOTE(review): the length "sizeof *vq->used->ring" here
		 * looks copy-pasted from the line above; verify whether
		 * sizeof vq->used->idx was intended. */
		log_write(vq->log_base, vq->log_addr, sizeof *vq->used->ring);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	vq->last_used_idx++;
	return 0;
}
1017
1018/* This actually signals the guest, using eventfd. */
1019void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1020{
1021 __u16 flags = 0;
1022 if (get_user(flags, &vq->avail->flags)) {
1023 vq_err(vq, "Failed to get flags");
1024 return;
1025 }
1026
1027 /* If they don't want an interrupt, don't signal, unless empty. */
1028 if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
1029 (vq->avail_idx != vq->last_avail_idx ||
1030 !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
1031 return;
1032
1033 /* Signal the Guest tell them we used something up. */
1034 if (vq->call_ctx)
1035 eventfd_signal(vq->call_ctx, 1);
1036}
1037
/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	/* NOTE(review): vhost_add_used() can fail with -EFAULT but its
	 * result is ignored, so the guest may be signalled anyway --
	 * confirm this best-effort behavior is intentional. */
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
1046
1047/* OK, now we need to know about added descriptors. */
1048bool vhost_enable_notify(struct vhost_virtqueue *vq)
1049{
1050 u16 avail_idx;
1051 int r;
1052 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1053 return false;
1054 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1055 r = put_user(vq->used_flags, &vq->used->flags);
1056 if (r) {
1057 vq_err(vq, "Failed to enable notification at %p: %d\n",
1058 &vq->used->flags, r);
1059 return false;
1060 }
1061 /* They could have slipped one in as we were doing that: make
1062 * sure it's written, then check again. */
1063 mb();
1064 r = get_user(avail_idx, &vq->avail->idx);
1065 if (r) {
1066 vq_err(vq, "Failed to check avail idx at %p: %d\n",
1067 &vq->avail->idx, r);
1068 return false;
1069 }
1070
1071 return avail_idx != vq->last_avail_idx;
1072}
1073
1074/* We don't need to be notified again. */
1075void vhost_disable_notify(struct vhost_virtqueue *vq)
1076{
1077 int r;
1078 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1079 return;
1080 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1081 r = put_user(vq->used_flags, &vq->used->flags);
1082 if (r)
1083 vq_err(vq, "Failed to enable notification at %p: %d\n",
1084 &vq->used->flags, r);
1085}
1086
1087int vhost_init(void)
1088{
1089 vhost_workqueue = create_singlethread_workqueue("vhost");
1090 if (!vhost_workqueue)
1091 return -ENOMEM;
1092 return 0;
1093}
1094
/* Module teardown: flush and destroy the workqueue created by
 * vhost_init(). */
void vhost_cleanup(void)
{
	destroy_workqueue(vhost_workqueue);
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
new file mode 100644
index 00000000000..44591ba9b07
--- /dev/null
+++ b/drivers/vhost/vhost.h
@@ -0,0 +1,161 @@
1#ifndef _VHOST_H
2#define _VHOST_H
3
4#include <linux/eventfd.h>
5#include <linux/vhost.h>
6#include <linux/mm.h>
7#include <linux/mutex.h>
8#include <linux/workqueue.h>
9#include <linux/poll.h>
10#include <linux/file.h>
11#include <linux/skbuff.h>
12#include <linux/uio.h>
13#include <linux/virtio_config.h>
14#include <linux/virtio_ring.h>
15
struct vhost_device;

enum {
	/* Enough place for all fragments, head, and virtio net header. */
	VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;		/* waitqueue we registered on */
	wait_queue_t wait;
	/* struct which will handle all actual work. */
	struct work_struct work;
	unsigned long mask;		/* poll events of interest */
};

/* Bind a work function and event mask; start/stop attach the poll to a
 * file's waitqueue; flush waits for queued work; queue schedules it. */
void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
		     unsigned long mask);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
40
/* One guest range to record in the dirty log (see vhost_log_write()). */
struct vhost_log {
	u64 addr;	/* guest physical start address */
	u64 len;	/* length in bytes */
};
45
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;			/* ring size */
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;			/* guest -> host eventfd */
	struct file *call;			/* host -> guest eventfd */
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;		/* signalled by vq_err() */
	struct eventfd_ctx *log_ctx;		/* signalled on log writes */

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	work_func_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	/* Scratch arrays sized for a worst-case descriptor chain. */
	struct iovec indirect[VHOST_NET_MAX_SG];
	struct iovec iov[VHOST_NET_MAX_SG];
	struct iovec hdr[VHOST_NET_MAX_SG];
	size_t hdr_size;
	/* We use a kind of RCU to access private pointer.
	 * All readers access it from workqueue, which makes it possible to
	 * flush the workqueue instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * work item execution acts instead of rcu_read_lock() and the end of
	 * work item execution acts instead of rcu_read_unlock().
	 * Writers use virtqueue mutex. */
	void *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log log[VHOST_NET_MAX_SG];
};
100
struct vhost_dev {
	/* Readers use RCU to access memory table pointer
	 * log base pointer and features.
	 * Writers use mutex below.*/
	struct vhost_memory *memory;
	struct mm_struct *mm;	/* presumably the owner's mm -- set by
				 * vhost_dev_set_owner(); confirm there */
	struct mutex mutex;
	unsigned acked_features;	/* feature bits acked by userspace */
	struct vhost_virtqueue *vqs;	/* array of nvqs virtqueues */
	int nvqs;
	struct file *log_file;		/* dirty-log eventfd file */
	struct eventfd_ctx *log_ctx;
};
114
/* Device lifecycle and ioctl entry points (see vhost.c for the
 * mutex-holding requirements noted on each definition). */
long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

/* Fetch the next available buffer as iovecs (returns vq->num when none),
 * or push the last fetched one back for retry on error. */
unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
			   struct iovec iov[], unsigned int iov_count,
			   unsigned int *out_num, unsigned int *in_num,
			   struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *);

/* Used-ring update and guest interrupt delivery. */
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int head, int len);
void vhost_disable_notify(struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_virtqueue *);

/* Record written guest ranges in the dirty log (live migration). */
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

/* Module-wide workqueue setup/teardown. */
int vhost_init(void);
void vhost_cleanup(void);
141
/* Report a virtqueue error: emit a debug message and, when userspace
 * registered an error eventfd for this vq, signal it. */
#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)
147
/* Feature bits this host implementation is willing to ack. */
enum {
	VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1 << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1 << VHOST_F_LOG_ALL) |
			 (1 << VHOST_NET_F_VIRTIO_NET_HDR),
};
154
/* Test whether feature @bit was acked by userspace.  Non-zero if set.
 * NOTE(review): rcu_dereference() is applied to a plain integer, not a
 * pointer -- presumably only a once-only read is wanted here; confirm
 * against the RCU API before relying on it. */
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features = rcu_dereference(dev->acked_features);
	return acked_features & (1 << bit);
}
160
161#endif